/*
 * Copyright (c) 2005, 2016, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "classfile/systemDictionary.hpp"
#include "gc/parallel/gcTaskManager.hpp"
#include "gc/parallel/objectStartArray.hpp"
#include "gc/parallel/parMarkBitMap.hpp"
#include "gc/parallel/parallelScavengeHeap.hpp"
#include "gc/parallel/psCompactionManager.inline.hpp"
#include "gc/parallel/psOldGen.hpp"
#include "gc/parallel/psParallelCompact.inline.hpp"
#include "gc/shared/taskqueue.inline.hpp"
#include "logging/log.hpp"
#include "memory/iterator.inline.hpp"
#include "oops/instanceKlass.inline.hpp"
#include "oops/instanceMirrorKlass.inline.hpp"
#include "oops/objArrayKlass.inline.hpp"
#include "oops/oop.inline.hpp"
#include "oops/valueArrayKlass.inline.hpp"
#include "runtime/atomic.inline.hpp"

PSOldGen*               ParCompactionManager::_old_gen = NULL;
ParCompactionManager**  ParCompactionManager::_manager_array = NULL;

RegionTaskQueue**       ParCompactionManager::_region_list = NULL;

OopTaskQueueSet*        ParCompactionManager::_stack_array = NULL;
ParCompactionManager::ObjArrayTaskQueueSet*
                        ParCompactionManager::_objarray_queues = NULL;
ObjectStartArray*       ParCompactionManager::_start_array = NULL;
ParMarkBitMap*          ParCompactionManager::_mark_bitmap = NULL;
RegionTaskQueueSet*     ParCompactionManager::_region_array = NULL;

uint*                   ParCompactionManager::_recycled_stack_index = NULL;
int                     ParCompactionManager::_recycled_top = -1;
int                     ParCompactionManager::_recycled_bottom = -1;

ParCompactionManager::ParCompactionManager() :
    _action(CopyAndUpdate),
    _region_stack(NULL),
    _region_stack_index((uint)max_uintx) {

  ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();

  _old_gen = heap->old_gen();
  _start_array = old_gen()->start_array();

  marking_stack()->initialize();
  _objarray_stack.initialize();

  reset_bitmap_query_cache();
}

ParCompactionManager::~ParCompactionManager() {
  // _recycled_stack_index was allocated with NEW_C_HEAP_ARRAY, so release it
  // with the matching FREE_C_HEAP_ARRAY rather than operator delete.
  FREE_C_HEAP_ARRAY(uint, _recycled_stack_index);
}

void ParCompactionManager::initialize(ParMarkBitMap* mbm) {
  assert(PSParallelCompact::gc_task_manager() != NULL,
    "Needed for initialization");

  _mark_bitmap = mbm;

  uint parallel_gc_threads = PSParallelCompact::gc_task_manager()->workers();

  assert(_manager_array == NULL, "Attempt to initialize twice");
  _manager_array = NEW_C_HEAP_ARRAY(ParCompactionManager*, parallel_gc_threads+1, mtGC);
  guarantee(_manager_array != NULL, "Could not allocate manager_array");
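  // Slots 0 .. parallel_gc_threads-1 of _manager_array belong to the GC
  // worker threads; their queues are registered with the task queue sets
  // created below and so participate in work stealing. The extra slot at
  // index parallel_gc_threads is reserved for the VMThread's manager, which
  // is never registered for stealing (see the end of this function).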
  _region_list = NEW_C_HEAP_ARRAY(RegionTaskQueue*,
                                  parallel_gc_threads+1, mtGC);
  guarantee(_region_list != NULL, "Could not allocate region_list");

  _recycled_stack_index = NEW_C_HEAP_ARRAY(uint, parallel_gc_threads, mtGC);

  // parallel_gc_threads + 1 to be consistent with the number of
  // compaction managers.
  for (uint i = 0; i < parallel_gc_threads + 1; i++) {
    _region_list[i] = new RegionTaskQueue();
    region_list(i)->initialize();
  }

  _stack_array = new OopTaskQueueSet(parallel_gc_threads);
  guarantee(_stack_array != NULL, "Could not allocate stack_array");
  _objarray_queues = new ObjArrayTaskQueueSet(parallel_gc_threads);
  guarantee(_objarray_queues != NULL, "Could not allocate objarray_queues");
  _region_array = new RegionTaskQueueSet(parallel_gc_threads);
  guarantee(_region_array != NULL, "Could not allocate region_array");

  // Create and register the ParCompactionManager(s) for the worker threads.
  for (uint i = 0; i < parallel_gc_threads; i++) {
    _manager_array[i] = new ParCompactionManager();
    guarantee(_manager_array[i] != NULL, "Could not create ParCompactionManager");
    stack_array()->register_queue(i, _manager_array[i]->marking_stack());
    _objarray_queues->register_queue(i, &_manager_array[i]->_objarray_stack);
    region_array()->register_queue(i, region_list(i));
  }

  // The VMThread gets its own ParCompactionManager, which is not available
  // for work stealing.
  _manager_array[parallel_gc_threads] = new ParCompactionManager();
  guarantee(_manager_array[parallel_gc_threads] != NULL,
    "Could not create ParCompactionManager");
  assert(PSParallelCompact::gc_task_manager()->workers() != 0,
    "Not initialized?");
}

void ParCompactionManager::reset_all_bitmap_query_caches() {
  uint parallel_gc_threads = PSParallelCompact::gc_task_manager()->workers();
  for (uint i = 0; i <= parallel_gc_threads; i++) {
    _manager_array[i]->reset_bitmap_query_cache();
  }
}

int ParCompactionManager::pop_recycled_stack_index() {
  assert(_recycled_bottom <= _recycled_top, "list top and bottom are wrong");
  // Get the next available index
  if (_recycled_bottom < _recycled_top) {
    uint cur, next, last;
    do {
      cur = _recycled_bottom;
      next = cur + 1;
      // Claim the slot at 'next' by advancing _recycled_bottom from 'cur';
      // retry if another thread advanced it first.
      last = Atomic::cmpxchg(next, &_recycled_bottom, cur);
    } while (cur != last);
    return _recycled_stack_index[next];
  } else {
    return -1;
  }
}

void ParCompactionManager::push_recycled_stack_index(uint v) {
  // Get the next available index; Atomic::add() returns the new value of
  // _recycled_top, which is this push's slot in the list.
  int cur = Atomic::add(1, &_recycled_top);
  _recycled_stack_index[cur] = v;
  assert(_recycled_bottom <= _recycled_top, "list top and bottom are wrong");
}
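// Example of the recycled-index protocol above: with both counters at -1,
// push_recycled_stack_index(7) bumps _recycled_top to 0 and stores 7 in
// slot 0; a later pop_recycled_stack_index() advances _recycled_bottom from
// -1 to 0 and returns the 7 stored in slot 0. The list is empty again
// whenever _recycled_bottom has caught up with _recycled_top.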
bool ParCompactionManager::should_update() {
  assert(action() != NotValid, "Action is not set");
  return (action() == ParCompactionManager::Update) ||
         (action() == ParCompactionManager::CopyAndUpdate) ||
         (action() == ParCompactionManager::UpdateAndCopy);
}

bool ParCompactionManager::should_copy() {
  assert(action() != NotValid, "Action is not set");
  return (action() == ParCompactionManager::Copy) ||
         (action() == ParCompactionManager::CopyAndUpdate) ||
         (action() == ParCompactionManager::UpdateAndCopy);
}

void ParCompactionManager::region_list_push(uint list_index,
                                            size_t region_index) {
  region_list(list_index)->push(region_index);
}

void
ParCompactionManager::verify_region_list_empty(uint list_index) {
  assert(region_list(list_index)->is_empty(), "Not empty");
}

ParCompactionManager*
ParCompactionManager::gc_thread_compaction_manager(uint index) {
  assert(index < ParallelGCThreads, "index out of range");
  assert(_manager_array != NULL, "Sanity");
  return _manager_array[index];
}

void InstanceKlass::oop_pc_follow_contents(oop obj, ParCompactionManager* cm) {
  assert(obj != NULL, "can't follow the content of NULL object");

  cm->follow_klass(this);
  // Only mark the header and let the scan of the meta-data mark
  // everything else.

  ParCompactionManager::MarkAndPushClosure cl(cm);
  InstanceKlass::oop_oop_iterate_oop_maps<true>(obj, &cl);
}

void InstanceMirrorKlass::oop_pc_follow_contents(oop obj, ParCompactionManager* cm) {
  InstanceKlass::oop_pc_follow_contents(obj, cm);

  // Follow the klass field in the mirror.
  Klass* klass = java_lang_Class::as_Klass(obj);
  if (klass != NULL) {
    // An anonymous class doesn't have its own class loader, so the call
    // to follow_klass will mark and push its java mirror instead of the
    // class loader. When handling the java mirror for an anonymous class
    // we need to make sure its class loader data is claimed, this is done
    // by calling follow_class_loader explicitly. For non-anonymous classes
    // the call to follow_class_loader is made when the class loader itself
    // is handled.
    if (klass->is_instance_klass() && InstanceKlass::cast(klass)->is_anonymous()) {
      cm->follow_class_loader(klass->class_loader_data());
    } else {
      cm->follow_klass(klass);
    }
  } else {
    // If klass is NULL then this is a mirror for a primitive type.
    // We don't have to follow them, since they are handled as strong
    // roots in Universe::oops_do.
    assert(java_lang_Class::is_primitive(obj), "Sanity check");
  }

  ParCompactionManager::MarkAndPushClosure cl(cm);
  oop_oop_iterate_statics<true>(obj, &cl);
}

void InstanceClassLoaderKlass::oop_pc_follow_contents(oop obj, ParCompactionManager* cm) {
  InstanceKlass::oop_pc_follow_contents(obj, cm);

  ClassLoaderData * const loader_data = java_lang_ClassLoader::loader_data(obj);
  if (loader_data != NULL) {
    cm->follow_class_loader(loader_data);
  }
}

template <class T>
static void oop_pc_follow_contents_specialized(InstanceRefKlass* klass, oop obj, ParCompactionManager* cm) {
  T* referent_addr = (T*)java_lang_ref_Reference::referent_addr(obj);
  T heap_oop = oopDesc::load_heap_oop(referent_addr);
  log_develop_trace(gc, ref)("InstanceRefKlass::oop_pc_follow_contents " PTR_FORMAT, p2i(obj));
  if (!oopDesc::is_null(heap_oop)) {
    oop referent = oopDesc::decode_heap_oop_not_null(heap_oop);
    if (PSParallelCompact::mark_bitmap()->is_unmarked(referent) &&
        PSParallelCompact::ref_processor()->discover_reference(obj, klass->reference_type())) {
      // reference already enqueued, referent will be traversed later
      klass->InstanceKlass::oop_pc_follow_contents(obj, cm);
      log_develop_trace(gc, ref)("       Non NULL enqueued " PTR_FORMAT, p2i(obj));
      return;
    } else {
      // treat referent as normal oop
      log_develop_trace(gc, ref)("       Non NULL normal " PTR_FORMAT, p2i(obj));
      cm->mark_and_push(referent_addr);
    }
  }
  T* next_addr = (T*)java_lang_ref_Reference::next_addr(obj);
  // Treat discovered as normal oop, if ref is not "active",
  // i.e. if next is non-NULL.
  T next_oop = oopDesc::load_heap_oop(next_addr);
  if (!oopDesc::is_null(next_oop)) { // i.e. ref is not "active"
    T* discovered_addr = (T*)java_lang_ref_Reference::discovered_addr(obj);
    log_develop_trace(gc, ref)("   Process discovered as normal " PTR_FORMAT, p2i(discovered_addr));
    cm->mark_and_push(discovered_addr);
  }
  cm->mark_and_push(next_addr);
  klass->InstanceKlass::oop_pc_follow_contents(obj, cm);
}
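// The helper above is templatized on the oop type so the compressed
// (narrowOop) and uncompressed (oop) cases share one implementation; the
// UseCompressedOops checks below select the right instantiation at runtime.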
void InstanceRefKlass::oop_pc_follow_contents(oop obj, ParCompactionManager* cm) {
  if (UseCompressedOops) {
    oop_pc_follow_contents_specialized<narrowOop>(this, obj, cm);
  } else {
    oop_pc_follow_contents_specialized<oop>(this, obj, cm);
  }
}

void ObjArrayKlass::oop_pc_follow_contents(oop obj, ParCompactionManager* cm) {
  cm->follow_klass(this);

  if (UseCompressedOops) {
    oop_pc_follow_contents_specialized<narrowOop>(objArrayOop(obj), 0, cm);
  } else {
    oop_pc_follow_contents_specialized<oop>(objArrayOop(obj), 0, cm);
  }
}

void TypeArrayKlass::oop_pc_follow_contents(oop obj, ParCompactionManager* cm) {
  assert(obj->is_typeArray(), "must be a type array");
  // Performance tweak: We skip iterating over the klass pointer since we
  // know that Universe::TypeArrayKlass never moves.
}

void ValueArrayKlass::oop_pc_follow_contents(oop obj, ParCompactionManager* cm) {
  assert(obj->is_valueArray(), "must be a value array");
  cm->follow_klass(this);
  if (contains_oops()) { // CMH: parallel version (like objArrayTask) missing, treat as single obj for now
    ParCompactionManager::MarkAndPushClosure cl(cm);
    ValueArrayKlass::oop_oop_iterate_elements<true>(valueArrayOop(obj), &cl);
  }
}

void ParCompactionManager::follow_marking_stacks() {
  do {
    // Drain the overflow stack first, to allow stealing from the marking stack.
    oop obj;
    while (marking_stack()->pop_overflow(obj)) {
      follow_contents(obj);
    }
    while (marking_stack()->pop_local(obj)) {
      follow_contents(obj);
    }

    // Process ObjArrays one at a time to avoid marking stack bloat.
    ObjArrayTask task;
    if (_objarray_stack.pop_overflow(task) || _objarray_stack.pop_local(task)) {
      follow_contents((objArrayOop)task.obj(), task.index());
    }
  } while (!marking_stacks_empty());

  assert(marking_stacks_empty(), "Sanity");
}

void ParCompactionManager::drain_region_stacks() {
  do {
    // Drain overflow stack first so other threads can steal.
    size_t region_index;
    while (region_stack()->pop_overflow(region_index)) {
      PSParallelCompact::fill_and_update_region(this, region_index);
    }

    while (region_stack()->pop_local(region_index)) {
      PSParallelCompact::fill_and_update_region(this, region_index);
    }
  } while (!region_stack()->is_empty());
}
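// Note: unlike follow_marking_stacks(), drain_region_stacks() only loops
// until this manager's own region stack is empty; stealing between region
// stacks is driven from the compaction tasks (see StealRegionCompactionTask),
// not from here.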