/*
 * Copyright (c) 2001, 2016, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "classfile/metadataOnStackMark.hpp"
#include "classfile/symbolTable.hpp"
#include "code/codeCache.hpp"
#include "gc/g1/concurrentMarkThread.inline.hpp"
#include "gc/g1/g1CollectedHeap.inline.hpp"
#include "gc/g1/g1CollectorPolicy.hpp"
#include "gc/g1/g1CollectorState.hpp"
#include "gc/g1/g1ConcurrentMark.inline.hpp"
#include "gc/g1/g1HeapVerifier.hpp"
#include "gc/g1/g1OopClosures.inline.hpp"
#include "gc/g1/g1StringDedup.hpp"
#include "gc/g1/heapRegion.inline.hpp"
#include "gc/g1/heapRegionRemSet.hpp"
#include "gc/g1/heapRegionSet.inline.hpp"
#include "gc/g1/suspendibleThreadSet.hpp"
#include "gc/shared/gcId.hpp"
#include "gc/shared/gcTimer.hpp"
#include "gc/shared/gcTrace.hpp"
#include "gc/shared/gcTraceTime.inline.hpp"
#include "gc/shared/genOopClosures.inline.hpp"
#include "gc/shared/referencePolicy.hpp"
#include "gc/shared/strongRootsScope.hpp"
#include "gc/shared/taskqueue.inline.hpp"
#include "gc/shared/vmGCOperations.hpp"
#include "logging/log.hpp"
#include "memory/allocation.hpp"
#include "memory/resourceArea.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/atomic.inline.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/java.hpp"
#include "runtime/prefetch.inline.hpp"
#include "services/memTracker.hpp"

// Concurrent marking bit map wrapper

G1CMBitMapRO::G1CMBitMapRO(int shifter) :
  _bm(),
  _shifter(shifter) {
  _bmStartWord = 0;
  _bmWordSize = 0;
}

HeapWord* G1CMBitMapRO::getNextMarkedWordAddress(const HeapWord* addr,
                                                 const HeapWord* limit) const {
  // First we must round addr *up* to a possible object boundary.
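  // Each bit in the bitmap covers (1 << _shifter) heap words, so the
  // address is aligned to that granularity (in bytes) before being
  // converted into a bit offset.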
  addr = (HeapWord*)align_size_up((intptr_t)addr,
                                  HeapWordSize << _shifter);
  size_t addrOffset = heapWordToOffset(addr);
  assert(limit != NULL, "limit must not be NULL");
  size_t limitOffset = heapWordToOffset(limit);
  size_t nextOffset = _bm.get_next_one_offset(addrOffset, limitOffset);
  HeapWord* nextAddr = offsetToHeapWord(nextOffset);
  assert(nextAddr >= addr, "get_next_one postcondition");
  assert(nextAddr == limit || isMarked(nextAddr),
         "get_next_one postcondition");
  return nextAddr;
}

#ifndef PRODUCT
bool G1CMBitMapRO::covers(MemRegion heap_rs) const {
  // assert(_bm.map() == _virtual_space.low(), "map inconsistency");
  assert(((size_t)_bm.size() * ((size_t)1 << _shifter)) == _bmWordSize,
         "size inconsistency");
  return _bmStartWord == (HeapWord*)(heap_rs.start()) &&
         _bmWordSize  == heap_rs.word_size();
}
#endif

void G1CMBitMapRO::print_on_error(outputStream* st, const char* prefix) const {
  _bm.print_on_error(st, prefix);
}

size_t G1CMBitMap::compute_size(size_t heap_size) {
  return ReservedSpace::allocation_align_size_up(heap_size / mark_distance());
}

size_t G1CMBitMap::mark_distance() {
  return MinObjAlignmentInBytes * BitsPerByte;
}

void G1CMBitMap::initialize(MemRegion heap, G1RegionToSpaceMapper* storage) {
  _bmStartWord = heap.start();
  _bmWordSize = heap.word_size();

  _bm.set_map((BitMap::bm_word_t*) storage->reserved().start());
  _bm.set_size(_bmWordSize >> _shifter);

  storage->set_mapping_changed_listener(&_listener);
}

void G1CMBitMapMappingChangedListener::on_commit(uint start_region, size_t num_regions, bool zero_filled) {
  if (zero_filled) {
    return;
  }
  // We need to clear the bitmap on commit, removing any existing information.
  MemRegion mr(G1CollectedHeap::heap()->bottom_addr_for_region(start_region), num_regions * HeapRegion::GrainWords);
  _bm->clearRange(mr);
}

// Closure used for clearing the given mark bitmap.
class ClearBitmapHRClosure : public HeapRegionClosure {
 private:
  G1ConcurrentMark* _cm;
  G1CMBitMap* _bitmap;
  bool _may_yield;      // The closure may yield during iteration. If yielded, abort the iteration.
 public:
  ClearBitmapHRClosure(G1ConcurrentMark* cm, G1CMBitMap* bitmap, bool may_yield) : HeapRegionClosure(), _cm(cm), _bitmap(bitmap), _may_yield(may_yield) {
    assert(!may_yield || cm != NULL, "CM must be non-NULL if this closure is expected to yield.");
  }

  virtual bool doHeapRegion(HeapRegion* r) {
    size_t const chunk_size_in_words = M / HeapWordSize;

    HeapWord* cur = r->bottom();
    HeapWord* const end = r->end();

    while (cur < end) {
      MemRegion mr(cur, MIN2(cur + chunk_size_in_words, end));
      _bitmap->clearRange(mr);

      cur += chunk_size_in_words;

      // Abort iteration if after yielding the marking has been aborted.
      if (_may_yield && _cm->do_yield_check() && _cm->has_aborted()) {
        return true;
      }
      // Repeat the asserts from before the start of the closure. We will do them
      // as asserts here to minimize their overhead on the product. However, we
      // will have them as guarantees at the beginning / end of the bitmap
      // clearing to get some checking in the product.
      assert(!_may_yield || _cm->cmThread()->during_cycle(), "invariant");
      assert(!_may_yield || !G1CollectedHeap::heap()->collector_state()->mark_in_progress(), "invariant");
    }

    return false;
  }
};

class ParClearNextMarkBitmapTask : public AbstractGangTask {
  ClearBitmapHRClosure* _cl;
  HeapRegionClaimer     _hrclaimer;
  bool                  _suspendible; // If the task is suspendible, workers must join the STS.

 public:
  ParClearNextMarkBitmapTask(ClearBitmapHRClosure *cl, uint n_workers, bool suspendible) :
      AbstractGangTask("Parallel Clear Bitmap Task"), _cl(cl), _hrclaimer(n_workers), _suspendible(suspendible) {}

  void work(uint worker_id) {
    SuspendibleThreadSetJoiner sts_join(_suspendible);
    G1CollectedHeap::heap()->heap_region_par_iterate(_cl, worker_id, &_hrclaimer, true);
  }
};

void G1CMBitMap::clearAll() {
  G1CollectedHeap* g1h = G1CollectedHeap::heap();
  ClearBitmapHRClosure cl(NULL, this, false /* may_yield */);
  uint n_workers = g1h->workers()->active_workers();
  ParClearNextMarkBitmapTask task(&cl, n_workers, false);
  g1h->workers()->run_task(&task);
  guarantee(cl.complete(), "Must have completed iteration.");
  return;
}

void G1CMBitMap::clearRange(MemRegion mr) {
  mr.intersection(MemRegion(_bmStartWord, _bmWordSize));
  assert(!mr.is_empty(), "unexpected empty region");
  // convert address range into offset range
  _bm.at_put_range(heapWordToOffset(mr.start()),
                   heapWordToOffset(mr.end()), false);
}

G1CMMarkStack::G1CMMarkStack(G1ConcurrentMark* cm) :
  _base(NULL), _cm(cm)
{}

bool G1CMMarkStack::allocate(size_t capacity) {
  // allocate a stack of the requisite depth
  ReservedSpace rs(ReservedSpace::allocation_align_size_up(capacity * sizeof(oop)));
  if (!rs.is_reserved()) {
    warning("ConcurrentMark MarkStack allocation failure");
    return false;
  }
  MemTracker::record_virtual_memory_type((address)rs.base(), mtGC);
  if (!_virtual_space.initialize(rs, rs.size())) {
    warning("ConcurrentMark MarkStack backing store failure");
    // Release the virtual memory reserved for the marking stack
    rs.release();
    return false;
  }
  assert(_virtual_space.committed_size() == rs.size(),
         "Didn't reserve backing store for all of G1ConcurrentMark stack?");
  _base = (oop*) _virtual_space.low();
  setEmpty();
  _capacity = (jint) capacity;
  _saved_index = -1;
  _should_expand = false;
  return true;
}

void G1CMMarkStack::expand() {
  // Called during remark if we've overflown the marking stack during marking.
  assert(isEmpty(), "stack should have been emptied while handling overflow");
  assert(_capacity <= (jint) MarkStackSizeMax, "stack bigger than permitted");
  // Clear expansion flag
  _should_expand = false;
  if (_capacity == (jint) MarkStackSizeMax) {
    log_trace(gc)("(benign) Can't expand marking stack capacity, at max size limit");
    return;
  }
  // Double capacity if possible
  jint new_capacity = MIN2(_capacity*2, (jint) MarkStackSizeMax);
  // Do not give up existing stack until we have managed to
  // get the double capacity that we desired.
  ReservedSpace rs(ReservedSpace::allocation_align_size_up(new_capacity *
                                                           sizeof(oop)));
  if (rs.is_reserved()) {
    // Release the backing store associated with old stack
    _virtual_space.release();
    // Reinitialize virtual space for new stack
    if (!_virtual_space.initialize(rs, rs.size())) {
      fatal("Not enough swap for expanded marking stack capacity");
    }
    _base = (oop*)(_virtual_space.low());
    _index = 0;
    _capacity = new_capacity;
  } else {
    // Failed to double capacity; continue with the existing stack.
    log_trace(gc)("(benign) Failed to expand marking stack capacity from " SIZE_FORMAT "K to " SIZE_FORMAT "K",
                  _capacity / K, new_capacity / K);
  }
}

void G1CMMarkStack::set_should_expand() {
  // If we're resetting the marking state because of a
  // marking stack overflow, record that we should, if
  // possible, expand the stack.
  _should_expand = _cm->has_overflown();
}

G1CMMarkStack::~G1CMMarkStack() {
  if (_base != NULL) {
    _base = NULL;
    _virtual_space.release();
  }
}

void G1CMMarkStack::par_push_arr(oop* ptr_arr, int n) {
  MutexLockerEx x(ParGCRareEvent_lock, Mutex::_no_safepoint_check_flag);
  jint start = _index;
  jint next_index = start + n;
  if (next_index > _capacity) {
    _overflow = true;
    return;
  }
  // Otherwise.
  _index = next_index;
  for (int i = 0; i < n; i++) {
    int ind = start + i;
    assert(ind < _capacity, "By overflow test above.");
    _base[ind] = ptr_arr[i];
  }
}

bool G1CMMarkStack::par_pop_arr(oop* ptr_arr, int max, int* n) {
  MutexLockerEx x(ParGCRareEvent_lock, Mutex::_no_safepoint_check_flag);
  jint index = _index;
  if (index == 0) {
    *n = 0;
    return false;
  } else {
    int k = MIN2(max, index);
    jint new_ind = index - k;
    for (int j = 0; j < k; j++) {
      ptr_arr[j] = _base[new_ind + j];
    }
    _index = new_ind;
    *n = k;
    return true;
  }
}

void G1CMMarkStack::note_start_of_gc() {
  assert(_saved_index == -1,
         "note_start_of_gc()/end_of_gc() bracketed incorrectly");
  _saved_index = _index;
}

void G1CMMarkStack::note_end_of_gc() {
  // This is intentionally a guarantee, instead of an assert. If we
  // accidentally add something to the mark stack during GC, it
  // will be a correctness issue so it's better if we crash. We'll
  // only check this once per GC anyway, so it won't be a performance
  // issue in any way.
  guarantee(_saved_index == _index,
            "saved index: %d index: %d", _saved_index, _index);
  _saved_index = -1;
}

G1CMRootRegions::G1CMRootRegions() :
  _young_list(NULL), _cm(NULL), _scan_in_progress(false),
  _should_abort(false), _next_survivor(NULL) { }

void G1CMRootRegions::init(G1CollectedHeap* g1h, G1ConcurrentMark* cm) {
  _young_list = g1h->young_list();
  _cm = cm;
}

void G1CMRootRegions::prepare_for_scan() {
  assert(!scan_in_progress(), "pre-condition");

  // Currently, only survivors can be root regions.
  assert(_next_survivor == NULL, "pre-condition");
  _next_survivor = _young_list->first_survivor_region();
  _scan_in_progress = (_next_survivor != NULL);
  _should_abort = false;
}

HeapRegion* G1CMRootRegions::claim_next() {
  if (_should_abort) {
    // If someone has set the should_abort flag, we return NULL to
    // force the caller to bail out of their loop.
    return NULL;
  }

  // Currently, only survivors can be root regions.
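  // Read _next_survivor once without the lock as a fast check; only if it
  // is non-NULL do we take the lock and re-read it, so that exactly one
  // worker claims each survivor region.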
  HeapRegion* res = _next_survivor;
  if (res != NULL) {
    MutexLockerEx x(RootRegionScan_lock, Mutex::_no_safepoint_check_flag);
    // Read it again in case it changed while we were waiting for the lock.
    res = _next_survivor;
    if (res != NULL) {
      if (res == _young_list->last_survivor_region()) {
        // We just claimed the last survivor so store NULL to indicate
        // that we're done.
        _next_survivor = NULL;
      } else {
        _next_survivor = res->get_next_young_region();
      }
    } else {
      // Someone else claimed the last survivor while we were trying
      // to take the lock so nothing else to do.
    }
  }
  assert(res == NULL || res->is_survivor(), "post-condition");

  return res;
}

void G1CMRootRegions::notify_scan_done() {
  MutexLockerEx x(RootRegionScan_lock, Mutex::_no_safepoint_check_flag);
  _scan_in_progress = false;
  RootRegionScan_lock->notify_all();
}

void G1CMRootRegions::cancel_scan() {
  notify_scan_done();
}

void G1CMRootRegions::scan_finished() {
  assert(scan_in_progress(), "pre-condition");

  // Currently, only survivors can be root regions.
  if (!_should_abort) {
    assert(_next_survivor == NULL, "we should have claimed all survivors");
  }
  _next_survivor = NULL;

  notify_scan_done();
}

bool G1CMRootRegions::wait_until_scan_finished() {
  if (!scan_in_progress()) return false;

  {
    MutexLockerEx x(RootRegionScan_lock, Mutex::_no_safepoint_check_flag);
    while (scan_in_progress()) {
      RootRegionScan_lock->wait(Mutex::_no_safepoint_check_flag);
    }
  }
  return true;
}

uint G1ConcurrentMark::scale_parallel_threads(uint n_par_threads) {
  return MAX2((n_par_threads + 2) / 4, 1U);
}

G1ConcurrentMark::G1ConcurrentMark(G1CollectedHeap* g1h, G1RegionToSpaceMapper* prev_bitmap_storage, G1RegionToSpaceMapper* next_bitmap_storage) :
  _g1h(g1h),
  _markBitMap1(),
  _markBitMap2(),
  _parallel_marking_threads(0),
  _max_parallel_marking_threads(0),
  _sleep_factor(0.0),
  _marking_task_overhead(1.0),
  _cleanup_list("Cleanup List"),
  _region_bm((BitMap::idx_t)(g1h->max_regions()), false /* in_resource_area*/),
  _card_bm((g1h->reserved_region().byte_size() + CardTableModRefBS::card_size - 1) >>
           CardTableModRefBS::card_shift,
           false /* in_resource_area*/),

  _prevMarkBitMap(&_markBitMap1),
  _nextMarkBitMap(&_markBitMap2),

  _markStack(this),
  // _finger set in set_non_marking_state

  _max_worker_id(ParallelGCThreads),
  // _active_tasks set in set_non_marking_state
  // _tasks set inside the constructor
  _task_queues(new G1CMTaskQueueSet((int) _max_worker_id)),
  _terminator(ParallelTaskTerminator((int) _max_worker_id, _task_queues)),

  _has_overflown(false),
  _concurrent(false),
  _has_aborted(false),
  _restart_for_overflow(false),
  _concurrent_marking_in_progress(false),
  _concurrent_phase_started(false),

  // _verbose_level set below

  _init_times(),
  _remark_times(), _remark_mark_times(), _remark_weak_ref_times(),
  _cleanup_times(),
  _total_counting_time(0.0),
  _total_rs_scrub_time(0.0),

  _parallel_workers(NULL),

  _count_card_bitmaps(NULL),
  _count_marked_bytes(NULL),
  _completed_initialization(false) {

  _markBitMap1.initialize(g1h->reserved_region(), prev_bitmap_storage);
  _markBitMap2.initialize(g1h->reserved_region(), next_bitmap_storage);

  // Create & start a ConcurrentMark thread.
  _cmThread = new ConcurrentMarkThread(this);
  assert(cmThread() != NULL, "CM Thread should have been created");
  assert(cmThread()->cm() != NULL, "CM Thread should refer to this cm");
  if (_cmThread->osthread() == NULL) {
    vm_shutdown_during_initialization("Could not create ConcurrentMarkThread");
  }

  assert(CGC_lock != NULL, "Where's the CGC_lock?");
  assert(_markBitMap1.covers(g1h->reserved_region()), "_markBitMap1 inconsistency");
  assert(_markBitMap2.covers(g1h->reserved_region()), "_markBitMap2 inconsistency");

  SATBMarkQueueSet& satb_qs = JavaThread::satb_mark_queue_set();
  satb_qs.set_buffer_size(G1SATBBufferSize);

  _root_regions.init(_g1h, this);

  if (ConcGCThreads > ParallelGCThreads) {
    warning("Can't have more ConcGCThreads (%u) "
            "than ParallelGCThreads (%u).",
            ConcGCThreads, ParallelGCThreads);
    return;
  }
  if (!FLAG_IS_DEFAULT(ConcGCThreads) && ConcGCThreads > 0) {
    // Note: ConcGCThreads has precedence over G1MarkingOverheadPercent
    // if both are set
    _sleep_factor          = 0.0;
    _marking_task_overhead = 1.0;
  } else if (G1MarkingOverheadPercent > 0) {
    // We will calculate the number of parallel marking threads based
    // on a target overhead with respect to the soft real-time goal
    double marking_overhead = (double) G1MarkingOverheadPercent / 100.0;
    double overall_cm_overhead =
      (double) MaxGCPauseMillis * marking_overhead /
      (double) GCPauseIntervalMillis;
    double cpu_ratio = 1.0 / (double) os::processor_count();
    double marking_thread_num = ceil(overall_cm_overhead / cpu_ratio);
    double marking_task_overhead =
      overall_cm_overhead / marking_thread_num * (double) os::processor_count();
    double sleep_factor =
      (1.0 - marking_task_overhead) / marking_task_overhead;

    FLAG_SET_ERGO(uint, ConcGCThreads, (uint) marking_thread_num);
    _sleep_factor          = sleep_factor;
    _marking_task_overhead = marking_task_overhead;
  } else {
    // Calculate the number of parallel marking threads by scaling
    // the number of parallel GC threads.
    uint marking_thread_num = scale_parallel_threads(ParallelGCThreads);
    FLAG_SET_ERGO(uint, ConcGCThreads, marking_thread_num);
    _sleep_factor          = 0.0;
    _marking_task_overhead = 1.0;
  }

  assert(ConcGCThreads > 0, "Should have been set");
  _parallel_marking_threads = ConcGCThreads;
  _max_parallel_marking_threads = _parallel_marking_threads;

  _parallel_workers = new WorkGang("G1 Marker",
                                   _max_parallel_marking_threads, false, true);
  if (_parallel_workers == NULL) {
    vm_exit_during_initialization("Failed necessary allocation.");
  } else {
    _parallel_workers->initialize_workers();
  }

  if (FLAG_IS_DEFAULT(MarkStackSize)) {
    size_t mark_stack_size =
      MIN2(MarkStackSizeMax,
           MAX2(MarkStackSize, (size_t) (parallel_marking_threads() * TASKQUEUE_SIZE)));
    // Verify that the calculated value for MarkStackSize is in range.
    // It would be nice to use the private utility routine from Arguments.
    if (!(mark_stack_size >= 1 && mark_stack_size <= MarkStackSizeMax)) {
      warning("Invalid value calculated for MarkStackSize (" SIZE_FORMAT "): "
              "must be between 1 and " SIZE_FORMAT,
              mark_stack_size, MarkStackSizeMax);
      return;
    }
    FLAG_SET_ERGO(size_t, MarkStackSize, mark_stack_size);
  } else {
    // Verify MarkStackSize is in range.
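    // Only a MarkStackSize that was set on the command line needs checking
    // here; the ergonomically computed default was validated above.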
    if (FLAG_IS_CMDLINE(MarkStackSize)) {
      if (FLAG_IS_DEFAULT(MarkStackSizeMax)) {
        if (!(MarkStackSize >= 1 && MarkStackSize <= MarkStackSizeMax)) {
          warning("Invalid value specified for MarkStackSize (" SIZE_FORMAT "): "
                  "must be between 1 and " SIZE_FORMAT,
                  MarkStackSize, MarkStackSizeMax);
          return;
        }
      } else if (FLAG_IS_CMDLINE(MarkStackSizeMax)) {
        if (!(MarkStackSize >= 1 && MarkStackSize <= MarkStackSizeMax)) {
          warning("Invalid value specified for MarkStackSize (" SIZE_FORMAT ")"
                  " or for MarkStackSizeMax (" SIZE_FORMAT ")",
                  MarkStackSize, MarkStackSizeMax);
          return;
        }
      }
    }
  }

  if (!_markStack.allocate(MarkStackSize)) {
    warning("Failed to allocate CM marking stack");
    return;
  }

  _tasks = NEW_C_HEAP_ARRAY(G1CMTask*, _max_worker_id, mtGC);
  _accum_task_vtime = NEW_C_HEAP_ARRAY(double, _max_worker_id, mtGC);

  _count_card_bitmaps = NEW_C_HEAP_ARRAY(BitMap,  _max_worker_id, mtGC);
  _count_marked_bytes = NEW_C_HEAP_ARRAY(size_t*, _max_worker_id, mtGC);

  BitMap::idx_t card_bm_size = _card_bm.size();

  // so that the assertion in MarkingTaskQueue::task_queue doesn't fail
  _active_tasks = _max_worker_id;

  uint max_regions = _g1h->max_regions();
  for (uint i = 0; i < _max_worker_id; ++i) {
    G1CMTaskQueue* task_queue = new G1CMTaskQueue();
    task_queue->initialize();
    _task_queues->register_queue(i, task_queue);

    _count_card_bitmaps[i] = BitMap(card_bm_size, false);
    _count_marked_bytes[i] = NEW_C_HEAP_ARRAY(size_t, max_regions, mtGC);

    _tasks[i] = new G1CMTask(i, this,
                             _count_marked_bytes[i],
                             &_count_card_bitmaps[i],
                             task_queue, _task_queues);

    _accum_task_vtime[i] = 0.0;
  }

  // Calculate the card number for the bottom of the heap. Used
  // in biasing indexes into the accounting card bitmaps.
  _heap_bottom_card_num =
    intptr_t(uintptr_t(_g1h->reserved_region().start()) >>
             CardTableModRefBS::card_shift);

  // Clear all the liveness counting data
  clear_all_count_data();

  // so that the call below can read a sensible value
  _heap_start = g1h->reserved_region().start();
  set_non_marking_state();
  _completed_initialization = true;
}

void G1ConcurrentMark::reset() {
  // Starting values for these two. This should be called in a STW
  // phase.
  MemRegion reserved = _g1h->g1_reserved();
  _heap_start = reserved.start();
  _heap_end   = reserved.end();

  // Separated the asserts so that we know which one fires.
  assert(_heap_start != NULL, "heap bounds should look ok");
  assert(_heap_end != NULL, "heap bounds should look ok");
  assert(_heap_start < _heap_end, "heap bounds should look ok");

  // Reset all the marking data structures and any necessary flags
  reset_marking_state();

  // We do reset all of them, since different phases will use a
  // different number of active threads. So, it's easiest to have all
  // of them ready.
  for (uint i = 0; i < _max_worker_id; ++i) {
    _tasks[i]->reset(_nextMarkBitMap);
  }

  // we need this to make sure that the flag is on during the evac
  // pause with initial mark piggy-backed
  set_concurrent_marking_in_progress();
}


void G1ConcurrentMark::reset_marking_state(bool clear_overflow) {
  _markStack.set_should_expand();
  _markStack.setEmpty();        // Also clears the _markStack overflow flag
  if (clear_overflow) {
    clear_has_overflown();
  } else {
    assert(has_overflown(), "pre-condition");
  }
  _finger = _heap_start;

  for (uint i = 0; i < _max_worker_id; ++i) {
    G1CMTaskQueue* queue = _task_queues->queue(i);
    queue->set_empty();
  }
}

void G1ConcurrentMark::set_concurrency(uint active_tasks) {
  assert(active_tasks <= _max_worker_id, "we should not have more");

  _active_tasks = active_tasks;
  // Need to update the three data structures below according to the
  // number of active threads for this phase.
  _terminator = ParallelTaskTerminator((int) active_tasks, _task_queues);
  _first_overflow_barrier_sync.set_n_workers((int) active_tasks);
  _second_overflow_barrier_sync.set_n_workers((int) active_tasks);
}

void G1ConcurrentMark::set_concurrency_and_phase(uint active_tasks, bool concurrent) {
  set_concurrency(active_tasks);

  _concurrent = concurrent;
  // We propagate this to all tasks, not just the active ones.
  for (uint i = 0; i < _max_worker_id; ++i) {
    _tasks[i]->set_concurrent(concurrent);
  }

  if (concurrent) {
    set_concurrent_marking_in_progress();
  } else {
    // We currently assume that the concurrent flag has been set to
    // false before we start remark. At this point we should also be
    // in a STW phase.
    assert(!concurrent_marking_in_progress(), "invariant");
    assert(out_of_regions(),
           "only way to get here: _finger: " PTR_FORMAT ", _heap_end: " PTR_FORMAT,
           p2i(_finger), p2i(_heap_end));
  }
}

void G1ConcurrentMark::set_non_marking_state() {
  // We set the global marking state to some default values when we're
  // not doing marking.
  reset_marking_state();
  _active_tasks = 0;
  clear_concurrent_marking_in_progress();
}

G1ConcurrentMark::~G1ConcurrentMark() {
  // The G1ConcurrentMark instance is never freed.
  ShouldNotReachHere();
}

void G1ConcurrentMark::clearNextBitmap() {
  G1CollectedHeap* g1h = G1CollectedHeap::heap();

  // Make sure that the concurrent mark thread looks to still be in
  // the current cycle.
  guarantee(cmThread()->during_cycle(), "invariant");

  // We are finishing up the current cycle by clearing the next
  // marking bitmap and getting it ready for the next cycle. During
  // this time no other cycle can start. So, let's make sure that this
  // is the case.
  guarantee(!g1h->collector_state()->mark_in_progress(), "invariant");

  ClearBitmapHRClosure cl(this, _nextMarkBitMap, true /* may_yield */);
  ParClearNextMarkBitmapTask task(&cl, parallel_marking_threads(), true);
  _parallel_workers->run_task(&task);

  // Clear the liveness counting data. If the marking has been aborted, the abort()
  // call already did that.
  if (cl.complete()) {
    clear_all_count_data();
  }

  // Repeat the asserts from above.
  guarantee(cmThread()->during_cycle(), "invariant");
  guarantee(!g1h->collector_state()->mark_in_progress(), "invariant");
}

class CheckBitmapClearHRClosure : public HeapRegionClosure {
  G1CMBitMap* _bitmap;
  bool _error;
 public:
  CheckBitmapClearHRClosure(G1CMBitMap* bitmap) : _bitmap(bitmap) {
  }

  virtual bool doHeapRegion(HeapRegion* r) {
    // This closure can be called concurrently with the mutator, so we must make sure
    // that the result of the getNextMarkedWordAddress() call is compared to the
    // value passed to it as limit to detect any found bits.
    // end never changes in G1.
    HeapWord* end = r->end();
    return _bitmap->getNextMarkedWordAddress(r->bottom(), end) != end;
  }
};

bool G1ConcurrentMark::nextMarkBitmapIsClear() {
  CheckBitmapClearHRClosure cl(_nextMarkBitMap);
  _g1h->heap_region_iterate(&cl);
  return cl.complete();
}

class NoteStartOfMarkHRClosure: public HeapRegionClosure {
 public:
  bool doHeapRegion(HeapRegion* r) {
    r->note_start_of_marking();
    return false;
  }
};

void G1ConcurrentMark::checkpointRootsInitialPre() {
  G1CollectedHeap* g1h = G1CollectedHeap::heap();
  G1CollectorPolicy* g1p = g1h->g1_policy();

  _has_aborted = false;

  // Initialize marking structures. This has to be done in a STW phase.
  reset();

  // For each region note start of marking.
  NoteStartOfMarkHRClosure startcl;
  g1h->heap_region_iterate(&startcl);
}


void G1ConcurrentMark::checkpointRootsInitialPost() {
  G1CollectedHeap* g1h = G1CollectedHeap::heap();

  // Start Concurrent Marking weak-reference discovery.
  ReferenceProcessor* rp = g1h->ref_processor_cm();
  // enable ("weak") refs discovery
  rp->enable_discovery();
  rp->setup_policy(false); // snapshot the soft ref policy to be used in this cycle

  SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set();
  // This is the start of the marking cycle, so we expect all
  // threads to have SATB queues with active set to false.
  satb_mq_set.set_active_all_threads(true, /* new active value */
                                     false /* expected_active */);

  _root_regions.prepare_for_scan();

  // update_g1_committed() will be called at the end of an evac pause
  // when marking is on. So, it's also called at the end of the
  // initial-mark pause to update the heap end, if the heap expands
  // during it. No need to call it here.
}

/*
 * Notice that in the next two methods, we actually leave the STS
 * during the barrier sync and join it immediately afterwards. If we
 * do not do this, the following deadlock can occur: one thread could
 * be in the barrier sync code, waiting for the other thread to also
 * sync up, whereas another one could be trying to yield, while also
 * waiting for the other threads to sync up too.
 *
 * Note, however, that this code is also used during remark and in
 * this case we should not attempt to leave / enter the STS, otherwise
 * we'll either hit an assert (debug / fastdebug) or deadlock
 * (product). So we should only leave / enter the STS if we are
 * operating concurrently.
 *
 * Because the thread that does the sync barrier has left the STS, it
 * is possible that a Full GC or an evacuation pause could occur while
 * the thread waits.
 * This is actually safe, since entering the sync
 * barrier is one of the last things do_marking_step() does, and it
 * doesn't manipulate any data structures afterwards.
 */

void G1ConcurrentMark::enter_first_sync_barrier(uint worker_id) {
  bool barrier_aborted;
  {
    SuspendibleThreadSetLeaver sts_leave(concurrent());
    barrier_aborted = !_first_overflow_barrier_sync.enter();
  }

  // at this point everyone should have synced up and not be doing any
  // more work

  if (barrier_aborted) {
    // If the barrier aborted we ignore the overflow condition and
    // just abort the whole marking phase as quickly as possible.
    return;
  }

  // If we're executing the concurrent phase of marking, reset the marking
  // state; otherwise the marking state is reset after reference processing,
  // during the remark pause.
  // If we reset here as a result of an overflow during the remark we will
  // see assertion failures from any subsequent set_concurrency_and_phase()
  // calls.
  if (concurrent()) {
    // let the task associated with worker 0 do this
    if (worker_id == 0) {
      // task 0 is responsible for clearing the global data structures
      // We should be here because of an overflow. During STW we should
      // not clear the overflow flag since we rely on it being true when
      // we exit this method to abort the pause and restart concurrent
      // marking.
      reset_marking_state(true /* clear_overflow */);

      log_info(gc)("Concurrent Mark reset for overflow");
    }
  }

  // after this, each task should reset its own data structures and
  // then go into the second barrier
}

void G1ConcurrentMark::enter_second_sync_barrier(uint worker_id) {
  SuspendibleThreadSetLeaver sts_leave(concurrent());
  _second_overflow_barrier_sync.enter();

  // at this point everything should be re-initialized and ready to go
}

class G1CMConcurrentMarkingTask: public AbstractGangTask {
 private:
  G1ConcurrentMark*     _cm;
  ConcurrentMarkThread* _cmt;

 public:
  void work(uint worker_id) {
    assert(Thread::current()->is_ConcurrentGC_thread(),
           "this should only be done by a conc GC thread");
    ResourceMark rm;

    double start_vtime = os::elapsedVTime();

    {
      SuspendibleThreadSetJoiner sts_join;

      assert(worker_id < _cm->active_tasks(), "invariant");
      G1CMTask* the_task = _cm->task(worker_id);
      the_task->record_start_time();
      if (!_cm->has_aborted()) {
        do {
          double start_vtime_sec = os::elapsedVTime();
          double mark_step_duration_ms = G1ConcMarkStepDurationMillis;

          the_task->do_marking_step(mark_step_duration_ms,
                                    true  /* do_termination */,
                                    false /* is_serial*/);

          double end_vtime_sec = os::elapsedVTime();
          double elapsed_vtime_sec = end_vtime_sec - start_vtime_sec;
          _cm->clear_has_overflown();

          _cm->do_yield_check(worker_id);

          jlong sleep_time_ms;
          if (!_cm->has_aborted() && the_task->has_aborted()) {
            sleep_time_ms =
              (jlong) (elapsed_vtime_sec * _cm->sleep_factor() * 1000.0);
            {
              SuspendibleThreadSetLeaver sts_leave;
              os::sleep(Thread::current(), sleep_time_ms, false);
            }
          }
        } while (!_cm->has_aborted() && the_task->has_aborted());
      }
      the_task->record_end_time();
      guarantee(!the_task->has_aborted() || _cm->has_aborted(), "invariant");
    }

    double end_vtime = os::elapsedVTime();
    _cm->update_accum_task_vtime(worker_id, end_vtime - start_vtime);
  }

  G1CMConcurrentMarkingTask(G1ConcurrentMark* cm,
                            ConcurrentMarkThread* cmt) :
      AbstractGangTask("Concurrent Mark"), _cm(cm), _cmt(cmt) { }

  ~G1CMConcurrentMarkingTask() { }
};

// Calculates the number of active workers for a concurrent
// phase.
uint G1ConcurrentMark::calc_parallel_marking_threads() {
  uint n_conc_workers = 0;
  if (!UseDynamicNumberOfGCThreads ||
      (!FLAG_IS_DEFAULT(ConcGCThreads) &&
       !ForceDynamicNumberOfGCThreads)) {
    n_conc_workers = max_parallel_marking_threads();
  } else {
    n_conc_workers =
      AdaptiveSizePolicy::calc_default_active_workers(max_parallel_marking_threads(),
                                                      1, /* Minimum workers */
                                                      parallel_marking_threads(),
                                                      Threads::number_of_non_daemon_threads());
    // Don't scale down "n_conc_workers" by scale_parallel_threads() because
    // that scaling has already gone into "_max_parallel_marking_threads".
  }
  assert(n_conc_workers > 0, "Always need at least 1");
  return n_conc_workers;
}

void G1ConcurrentMark::scanRootRegion(HeapRegion* hr, uint worker_id) {
  // Currently, only survivors can be root regions.
  assert(hr->next_top_at_mark_start() == hr->bottom(), "invariant");
  G1RootRegionScanClosure cl(_g1h, this, worker_id);

  const uintx interval = PrefetchScanIntervalInBytes;
  HeapWord* curr = hr->bottom();
  const HeapWord* end = hr->top();
  while (curr < end) {
    Prefetch::read(curr, interval);
    oop obj = oop(curr);
    int size = obj->oop_iterate_size(&cl);
    assert(size == obj->size(), "sanity");
    curr += size;
  }
}

class G1CMRootRegionScanTask : public AbstractGangTask {
 private:
  G1ConcurrentMark* _cm;

 public:
  G1CMRootRegionScanTask(G1ConcurrentMark* cm) :
    AbstractGangTask("Root Region Scan"), _cm(cm) { }

  void work(uint worker_id) {
    assert(Thread::current()->is_ConcurrentGC_thread(),
           "this should only be done by a conc GC thread");

    G1CMRootRegions* root_regions = _cm->root_regions();
    HeapRegion* hr = root_regions->claim_next();
    while (hr != NULL) {
      _cm->scanRootRegion(hr, worker_id);
      hr = root_regions->claim_next();
    }
  }
};

void G1ConcurrentMark::scanRootRegions() {
  // scan_in_progress() will have been set to true only if there was
  // at least one root region to scan. So, if it's false, we
  // should not attempt to do any further work.
  if (root_regions()->scan_in_progress()) {
    assert(!has_aborted(), "Aborting before root region scanning is finished not supported.");
    GCTraceConcTime(Info, gc) tt("Concurrent Root Region Scan");

    _parallel_marking_threads = calc_parallel_marking_threads();
    assert(parallel_marking_threads() <= max_parallel_marking_threads(),
           "Maximum number of marking threads exceeded");
    uint active_workers = MAX2(1U, parallel_marking_threads());

    G1CMRootRegionScanTask task(this);
    _parallel_workers->set_active_workers(active_workers);
    _parallel_workers->run_task(&task);

    // It's possible that has_aborted() is true here without actually
    // aborting the survivor scan earlier. This is OK as it's
    // mainly used for sanity checking.
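    // scan_finished() asserts that every survivor was claimed (unless the
    // scan was aborted) and wakes up any threads blocked in
    // wait_until_scan_finished().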
    root_regions()->scan_finished();
  }
}

void G1ConcurrentMark::register_concurrent_phase_start(const char* title) {
  assert(!_concurrent_phase_started, "Sanity");
  _concurrent_phase_started = true;
  _g1h->gc_timer_cm()->register_gc_concurrent_start(title);
}

void G1ConcurrentMark::register_concurrent_phase_end() {
  if (_concurrent_phase_started) {
    _concurrent_phase_started = false;
    _g1h->gc_timer_cm()->register_gc_concurrent_end();
  }
}

void G1ConcurrentMark::markFromRoots() {
  // we might be tempted to assert that:
  // assert(asynch == !SafepointSynchronize::is_at_safepoint(),
  //        "inconsistent argument?");
  // However that wouldn't be right, because it's possible that
  // a safepoint is indeed in progress as a younger generation
  // stop-the-world GC happens even as we mark in this generation.

  _restart_for_overflow = false;

  // _g1h has _n_par_threads
  _parallel_marking_threads = calc_parallel_marking_threads();
  assert(parallel_marking_threads() <= max_parallel_marking_threads(),
         "Maximum number of marking threads exceeded");

  uint active_workers = MAX2(1U, parallel_marking_threads());
  assert(active_workers > 0, "Should have been set");

  // Parallel task terminator is set in "set_concurrency_and_phase()"
  set_concurrency_and_phase(active_workers, true /* concurrent */);

  G1CMConcurrentMarkingTask markingTask(this, cmThread());
  _parallel_workers->set_active_workers(active_workers);
  _parallel_workers->run_task(&markingTask);
  print_stats();
}

void G1ConcurrentMark::checkpointRootsFinal(bool clear_all_soft_refs) {
  // world is stopped at this checkpoint
  assert(SafepointSynchronize::is_at_safepoint(),
         "world should be stopped");

  G1CollectedHeap* g1h = G1CollectedHeap::heap();

  // If a full collection has happened, we shouldn't do this.
  if (has_aborted()) {
    g1h->collector_state()->set_mark_in_progress(false); // So bitmap clearing isn't confused
    return;
  }

  SvcGCMarker sgcm(SvcGCMarker::OTHER);

  if (VerifyDuringGC) {
    HandleMark hm;  // handle scope
    g1h->prepare_for_verify();
    Universe::verify(VerifyOption_G1UsePrevMarking, "During GC (before)");
  }
  g1h->verifier()->check_bitmaps("Remark Start");

  G1CollectorPolicy* g1p = g1h->g1_policy();
  g1p->record_concurrent_mark_remark_start();

  double start = os::elapsedTime();

  checkpointRootsFinalWork();

  double mark_work_end = os::elapsedTime();

  weakRefsWork(clear_all_soft_refs);

  if (has_overflown()) {
    // Oops. We overflowed. Restart concurrent marking.
    _restart_for_overflow = true;
    log_develop_trace(gc)("Remark led to restart for overflow.");

    // Verify the heap w.r.t. the previous marking bitmap.
    if (VerifyDuringGC) {
      HandleMark hm;  // handle scope
      g1h->prepare_for_verify();
      Universe::verify(VerifyOption_G1UsePrevMarking, "During GC (overflow)");
    }

    // Clear the marking state because we will be restarting
    // marking due to overflowing the global mark stack.
    reset_marking_state();
  } else {
    {
      GCTraceTime(Debug, gc) trace("GC Aggregate Data", g1h->gc_timer_cm());

      // Aggregate the per-task counting data that we have accumulated
      // while marking.
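      // (This folds each worker's counting card bitmap and marked-bytes
      // array, set up in the constructor, into the global counting data.)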
      aggregate_count_data();
    }

    SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set();
    // We're done with marking.
    // This is the end of the marking cycle, so we expect all
    // threads to have SATB queues with active set to true.
    satb_mq_set.set_active_all_threads(false, /* new active value */
                                       true /* expected_active */);

    if (VerifyDuringGC) {
      HandleMark hm;  // handle scope
      g1h->prepare_for_verify();
      Universe::verify(VerifyOption_G1UseNextMarking, "During GC (after)");
    }
    g1h->verifier()->check_bitmaps("Remark End");
    assert(!restart_for_overflow(), "sanity");
    // Completely reset the marking state since marking completed
    set_non_marking_state();
  }

  // Expand the marking stack, if we have to and if we can.
  if (_markStack.should_expand()) {
    _markStack.expand();
  }

  // Statistics
  double now = os::elapsedTime();
  _remark_mark_times.add((mark_work_end - start) * 1000.0);
  _remark_weak_ref_times.add((now - mark_work_end) * 1000.0);
  _remark_times.add((now - start) * 1000.0);

  g1p->record_concurrent_mark_remark_end();

  G1CMIsAliveClosure is_alive(g1h);
  g1h->gc_tracer_cm()->report_object_count_after_gc(&is_alive);
}

// Base class of the closures that finalize and verify the
// liveness counting data.
class G1CMCountDataClosureBase: public HeapRegionClosure {
 protected:
  G1CollectedHeap* _g1h;
  G1ConcurrentMark* _cm;
  CardTableModRefBS* _ct_bs;

  BitMap* _region_bm;
  BitMap* _card_bm;

  // Takes a region that's not empty (i.e., it has at least one
  // live object in it) and sets its corresponding bit on the region
  // bitmap to 1.
  void set_bit_for_region(HeapRegion* hr) {
    BitMap::idx_t index = (BitMap::idx_t) hr->hrm_index();
    _region_bm->par_at_put(index, true);
  }

 public:
  G1CMCountDataClosureBase(G1CollectedHeap* g1h,
                           BitMap* region_bm, BitMap* card_bm):
    _g1h(g1h), _cm(g1h->concurrent_mark()),
    _ct_bs(barrier_set_cast<CardTableModRefBS>(g1h->barrier_set())),
    _region_bm(region_bm), _card_bm(card_bm) { }
};

// Closure that calculates the # live objects per region. Used
// for verification purposes during the cleanup pause.
class CalcLiveObjectsClosure: public G1CMCountDataClosureBase {
  G1CMBitMapRO* _bm;
  size_t _region_marked_bytes;

 public:
  CalcLiveObjectsClosure(G1CMBitMapRO *bm, G1CollectedHeap* g1h,
                         BitMap* region_bm, BitMap* card_bm) :
    G1CMCountDataClosureBase(g1h, region_bm, card_bm),
    _bm(bm), _region_marked_bytes(0) { }

  bool doHeapRegion(HeapRegion* hr) {
    HeapWord* ntams = hr->next_top_at_mark_start();
    HeapWord* start = hr->bottom();

    assert(start <= hr->end() && start <= ntams && ntams <= hr->end(),
           "Preconditions not met - "
           "start: " PTR_FORMAT ", ntams: " PTR_FORMAT ", end: " PTR_FORMAT,
           p2i(start), p2i(ntams), p2i(hr->end()));

    // Find the first marked object at or after "start".
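    // Walk the marked objects in [bottom, ntams), summing their sizes and
    // setting the bits for the cards they span; anything allocated at or
    // above ntams is implicitly live and handled separately below.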
    start = _bm->getNextMarkedWordAddress(start, ntams);

    size_t marked_bytes = 0;

    while (start < ntams) {
      oop obj = oop(start);
      int obj_sz = obj->size();
      HeapWord* obj_end = start + obj_sz;

      BitMap::idx_t start_idx = _cm->card_bitmap_index_for(start);
      BitMap::idx_t end_idx = _cm->card_bitmap_index_for(obj_end);

      // Note: if we're looking at the last region in the heap, obj_end
      // could actually be just beyond the end of the heap; end_idx
      // will then correspond to a (non-existent) card that is also
      // just beyond the heap.
      if (_g1h->is_in_g1_reserved(obj_end) && !_ct_bs->is_card_aligned(obj_end)) {
        // end of object is not card aligned - increment to cover
        // all the cards spanned by the object
        end_idx += 1;
      }

      // Set the bits in the card BM for the cards spanned by this object.
      _cm->set_card_bitmap_range(_card_bm, start_idx, end_idx, true /* is_par */);

      // Add the size of this object to the number of marked bytes.
      marked_bytes += (size_t)obj_sz * HeapWordSize;

      // This will happen if we are handling a humongous object that spans
      // several heap regions.
      if (obj_end > hr->end()) {
        break;
      }
      // Find the next marked object after this one.
      start = _bm->getNextMarkedWordAddress(obj_end, ntams);
    }

    // Mark the allocated-since-marking portion...
    HeapWord* top = hr->top();
    if (ntams < top) {
      BitMap::idx_t start_idx = _cm->card_bitmap_index_for(ntams);
      BitMap::idx_t end_idx = _cm->card_bitmap_index_for(top);

      // Note: if we're looking at the last region in the heap, top
      // could actually be just beyond the end of the heap; end_idx
      // will then correspond to a (non-existent) card that is also
      // just beyond the heap.
      if (_g1h->is_in_g1_reserved(top) && !_ct_bs->is_card_aligned(top)) {
        // end of object is not card aligned - increment to cover
        // all the cards spanned by the object
        end_idx += 1;
      }
      _cm->set_card_bitmap_range(_card_bm, start_idx, end_idx, true /* is_par */);

      // This definitely means the region has live objects.
      set_bit_for_region(hr);
    }

    // Update the live region bitmap.
    if (marked_bytes > 0) {
      set_bit_for_region(hr);
    }

    // Set the marked bytes for the current region so that
    // it can be queried by a calling verification routine
    _region_marked_bytes = marked_bytes;

    return false;
  }

  size_t region_marked_bytes() const { return _region_marked_bytes; }
};

// Heap region closure used for verifying the counting data
// that was accumulated concurrently and aggregated during
// the remark pause. This closure is applied to the heap
// regions during the STW cleanup pause.

class VerifyLiveObjectDataHRClosure: public HeapRegionClosure {
  G1CollectedHeap* _g1h;
  G1ConcurrentMark* _cm;
  CalcLiveObjectsClosure _calc_cl;
  BitMap* _region_bm;   // Region BM to be verified
  BitMap* _card_bm;     // Card BM to be verified

  BitMap* _exp_region_bm; // Expected Region BM values
  BitMap* _exp_card_bm;   // Expected card BM values

  int _failures;

 public:
  VerifyLiveObjectDataHRClosure(G1CollectedHeap* g1h,
                                BitMap* region_bm,
                                BitMap* card_bm,
                                BitMap* exp_region_bm,
                                BitMap* exp_card_bm) :
    _g1h(g1h), _cm(g1h->concurrent_mark()),
    _calc_cl(_cm->nextMarkBitMap(), g1h, exp_region_bm, exp_card_bm),
    _region_bm(region_bm), _card_bm(card_bm),
    _exp_region_bm(exp_region_bm), _exp_card_bm(exp_card_bm),
    _failures(0) { }

  int failures() const { return _failures; }

  bool doHeapRegion(HeapRegion* hr) {
    int failures = 0;

    // Call the CalcLiveObjectsClosure to walk the marking bitmap for
    // this region and set the corresponding bits in the expected region
    // and card bitmaps.
    bool res = _calc_cl.doHeapRegion(hr);
    assert(res == false, "should be continuing");

    // Verify the marked bytes for this region.
    size_t exp_marked_bytes = _calc_cl.region_marked_bytes();
    size_t act_marked_bytes = hr->next_marked_bytes();

    if (exp_marked_bytes > act_marked_bytes) {
      if (hr->is_starts_humongous()) {
        // For start_humongous regions, the size of the whole object will be
        // in exp_marked_bytes.
        HeapRegion* region = hr;
        int num_regions;
        for (num_regions = 0; region != NULL; num_regions++) {
          region = _g1h->next_region_in_humongous(region);
        }
        if ((num_regions-1) * HeapRegion::GrainBytes >= exp_marked_bytes) {
          failures += 1;
        } else if (num_regions * HeapRegion::GrainBytes < exp_marked_bytes) {
          failures += 1;
        }
      } else {
        // We're not OK if expected marked bytes > actual marked bytes. It means
        // we have missed accounting for some objects during the actual marking.
        failures += 1;
      }
    }

    // Verify the bit, for this region, in the actual and expected
    // (which was just calculated) region bitmaps.
    // We're not OK if the bit in the calculated expected region
    // bitmap is set and the bit in the actual region bitmap is not.
    BitMap::idx_t index = (BitMap::idx_t) hr->hrm_index();

    bool expected = _exp_region_bm->at(index);
    bool actual = _region_bm->at(index);
    if (expected && !actual) {
      failures += 1;
    }

    // Verify that the card bitmaps for the cards spanned by the current
    // region match. We have an error if we have a set bit in the expected
    // bitmap and the corresponding bit in the actual bitmap is not set.

    BitMap::idx_t start_idx = _cm->card_bitmap_index_for(hr->bottom());
    BitMap::idx_t end_idx = _cm->card_bitmap_index_for(hr->top());

    for (BitMap::idx_t i = start_idx; i < end_idx; i+=1) {
      expected = _exp_card_bm->at(i);
      actual = _card_bm->at(i);

      if (expected && !actual) {
        failures += 1;
      }
    }

    _failures += failures;

    // We could stop iteration over the heap when we
    // find the first violating region by returning true.
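    // Instead we return false so that every region is visited and
    // _failures accumulates the total number of violations.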
    return false;
  }
};

class G1ParVerifyFinalCountTask: public AbstractGangTask {
 protected:
  G1CollectedHeap* _g1h;
  G1ConcurrentMark* _cm;
  BitMap* _actual_region_bm;
  BitMap* _actual_card_bm;

  uint    _n_workers;

  BitMap* _expected_region_bm;
  BitMap* _expected_card_bm;

  int  _failures;

  HeapRegionClaimer _hrclaimer;

 public:
  G1ParVerifyFinalCountTask(G1CollectedHeap* g1h,
                            BitMap* region_bm, BitMap* card_bm,
                            BitMap* expected_region_bm, BitMap* expected_card_bm)
    : AbstractGangTask("G1 verify final counting"),
      _g1h(g1h), _cm(_g1h->concurrent_mark()),
      _actual_region_bm(region_bm), _actual_card_bm(card_bm),
      _expected_region_bm(expected_region_bm), _expected_card_bm(expected_card_bm),
      _failures(0),
      _n_workers(_g1h->workers()->active_workers()), _hrclaimer(_n_workers) {
    assert(VerifyDuringGC, "don't call this otherwise");
    assert(_expected_card_bm->size() == _actual_card_bm->size(), "sanity");
    assert(_expected_region_bm->size() == _actual_region_bm->size(), "sanity");
  }

  void work(uint worker_id) {
    assert(worker_id < _n_workers, "invariant");

    VerifyLiveObjectDataHRClosure verify_cl(_g1h,
                                            _actual_region_bm, _actual_card_bm,
                                            _expected_region_bm,
                                            _expected_card_bm);

    _g1h->heap_region_par_iterate(&verify_cl, worker_id, &_hrclaimer);

    Atomic::add(verify_cl.failures(), &_failures);
  }

  int failures() const { return _failures; }
};

// Closure that finalizes the liveness counting data.
// Used during the cleanup pause.
// Sets the bits corresponding to the interval [NTAMS, top]
// (which contains the implicitly live objects) in the
// card liveness bitmap. Also sets the bit for each region
// containing live data in the region liveness bitmap.

class FinalCountDataUpdateClosure: public G1CMCountDataClosureBase {
 public:
  FinalCountDataUpdateClosure(G1CollectedHeap* g1h,
                              BitMap* region_bm,
                              BitMap* card_bm) :
    G1CMCountDataClosureBase(g1h, region_bm, card_bm) { }

  bool doHeapRegion(HeapRegion* hr) {
    HeapWord* ntams = hr->next_top_at_mark_start();
    HeapWord* top   = hr->top();

    assert(hr->bottom() <= ntams && ntams <= hr->end(), "Preconditions.");

    // Mark the allocated-since-marking portion...
    if (ntams < top) {
      // This definitely means the region has live objects.
      set_bit_for_region(hr);

      // Now set the bits in the card bitmap for [ntams, top)
      BitMap::idx_t start_idx = _cm->card_bitmap_index_for(ntams);
      BitMap::idx_t end_idx = _cm->card_bitmap_index_for(top);

      // Note: if we're looking at the last region in the heap, top
      // could actually be just beyond the end of the heap; end_idx
      // will then correspond to a (non-existent) card that is also
      // just beyond the heap.
      if (_g1h->is_in_g1_reserved(top) && !_ct_bs->is_card_aligned(top)) {
        // end of object is not card aligned - increment to cover
        // all the cards spanned by the object
        end_idx += 1;
      }

      assert(end_idx <= _card_bm->size(),
             "oob: end_idx= " SIZE_FORMAT ", bitmap size= " SIZE_FORMAT,
             end_idx, _card_bm->size());
      assert(start_idx < _card_bm->size(),
             "oob: start_idx= " SIZE_FORMAT ", bitmap size= " SIZE_FORMAT,
             start_idx, _card_bm->size());

      _cm->set_card_bitmap_range(_card_bm, start_idx, end_idx, true /* is_par */);
    }

    // Set the bit for the region if it contains live data
    if (hr->next_marked_bytes() > 0) {
      set_bit_for_region(hr);
    }

    return false;
  }
};

class G1ParFinalCountTask: public AbstractGangTask {
 protected:
  G1CollectedHeap* _g1h;
  G1ConcurrentMark* _cm;
  BitMap* _actual_region_bm;
  BitMap* _actual_card_bm;

  uint    _n_workers;
  HeapRegionClaimer _hrclaimer;

 public:
  G1ParFinalCountTask(G1CollectedHeap* g1h, BitMap* region_bm, BitMap* card_bm)
    : AbstractGangTask("G1 final counting"),
      _g1h(g1h), _cm(_g1h->concurrent_mark()),
      _actual_region_bm(region_bm), _actual_card_bm(card_bm),
      _n_workers(_g1h->workers()->active_workers()), _hrclaimer(_n_workers) {
  }

  void work(uint worker_id) {
    assert(worker_id < _n_workers, "invariant");

    FinalCountDataUpdateClosure final_update_cl(_g1h,
                                                _actual_region_bm,
                                                _actual_card_bm);

    _g1h->heap_region_par_iterate(&final_update_cl, worker_id, &_hrclaimer);
  }
};

class G1NoteEndOfConcMarkClosure : public HeapRegionClosure {
  G1CollectedHeap* _g1;
  size_t _freed_bytes;
  FreeRegionList* _local_cleanup_list;
  uint _old_regions_removed;
  uint _humongous_regions_removed;
  HRRSCleanupTask* _hrrs_cleanup_task;

 public:
  G1NoteEndOfConcMarkClosure(G1CollectedHeap* g1,
                             FreeRegionList* local_cleanup_list,
                             HRRSCleanupTask* hrrs_cleanup_task) :
    _g1(g1),
    _freed_bytes(0),
    _local_cleanup_list(local_cleanup_list),
    _old_regions_removed(0),
    _humongous_regions_removed(0),
    _hrrs_cleanup_task(hrrs_cleanup_task) { }

  size_t freed_bytes() { return _freed_bytes; }
  const uint old_regions_removed() { return _old_regions_removed; }
  const uint humongous_regions_removed() { return _humongous_regions_removed; }

  bool doHeapRegion(HeapRegion *hr) {
    if (hr->is_archive()) {
      return false;
    }
    // We use a claim value of zero here because all regions
    // were claimed with value 1 in the FinalCount task.
    _g1->reset_gc_time_stamps(hr);
    hr->note_end_of_marking();
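
    // Reclaim a region here only if it is completely garbage: it has
    // allocated bytes but no live bytes, and it is not young (young
    // regions are reclaimed by evacuation instead).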
    if (hr->used() > 0 && hr->max_live_bytes() == 0 && !hr->is_young()) {
      _freed_bytes += hr->used();
      hr->set_containing_set(NULL);
      if (hr->is_humongous()) {
        _humongous_regions_removed++;
        _g1->free_humongous_region(hr, _local_cleanup_list, true);
      } else {
        _old_regions_removed++;
        _g1->free_region(hr, _local_cleanup_list, true);
      }
    } else {
      hr->rem_set()->do_cleanup_work(_hrrs_cleanup_task);
    }

    return false;
  }
};

class G1ParNoteEndTask: public AbstractGangTask {
  friend class G1NoteEndOfConcMarkClosure;

 protected:
  G1CollectedHeap* _g1h;
  FreeRegionList* _cleanup_list;
  HeapRegionClaimer _hrclaimer;

 public:
  G1ParNoteEndTask(G1CollectedHeap* g1h, FreeRegionList* cleanup_list, uint n_workers) :
      AbstractGangTask("G1 note end"), _g1h(g1h), _cleanup_list(cleanup_list), _hrclaimer(n_workers) {
  }

  void work(uint worker_id) {
    FreeRegionList local_cleanup_list("Local Cleanup List");
    HRRSCleanupTask hrrs_cleanup_task;
    G1NoteEndOfConcMarkClosure g1_note_end(_g1h, &local_cleanup_list,
                                           &hrrs_cleanup_task);
    _g1h->heap_region_par_iterate(&g1_note_end, worker_id, &_hrclaimer);
    assert(g1_note_end.complete(), "Shouldn't have yielded!");

    // Now update the lists
    _g1h->remove_from_old_sets(g1_note_end.old_regions_removed(), g1_note_end.humongous_regions_removed());
    {
      MutexLockerEx x(ParGCRareEvent_lock, Mutex::_no_safepoint_check_flag);
      _g1h->decrement_summary_bytes(g1_note_end.freed_bytes());

      // If we iterate over the global cleanup list at the end of
      // cleanup to do this printing we will not guarantee to only
      // generate output for the newly-reclaimed regions (the list
      // might not be empty at the beginning of cleanup; we might
      // still be working on its previous contents). So we do the
      // printing here, before we append the new regions to the global
      // cleanup list.

      G1HRPrinter* hr_printer = _g1h->hr_printer();
      if (hr_printer->is_active()) {
        FreeRegionListIterator iter(&local_cleanup_list);
        while (iter.more_available()) {
          HeapRegion* hr = iter.get_next();
          hr_printer->cleanup(hr);
        }
      }

      _cleanup_list->add_ordered(&local_cleanup_list);
      assert(local_cleanup_list.is_empty(), "post-condition");

      HeapRegionRemSet::finish_cleanup_task(&hrrs_cleanup_task);
    }
  }
};

void G1ConcurrentMark::cleanup() {
  // world is stopped at this checkpoint
  assert(SafepointSynchronize::is_at_safepoint(),
         "world should be stopped");
  G1CollectedHeap* g1h = G1CollectedHeap::heap();

  // If a full collection has happened, we shouldn't do this.
1608 if (has_aborted()) {
1609 g1h->collector_state()->set_mark_in_progress(false); // So bitmap clearing isn't confused
1610 return;
1611 }
1612
1613 g1h->verifier()->verify_region_sets_optional();
1614
1615 if (VerifyDuringGC) {
1616 HandleMark hm; // handle scope
1617 g1h->prepare_for_verify();
1618 Universe::verify(VerifyOption_G1UsePrevMarking, "During GC (before)");
1619 }
1620 g1h->verifier()->check_bitmaps("Cleanup Start");
1621
1622 G1CollectorPolicy* g1p = g1h->g1_policy();
1623 g1p->record_concurrent_mark_cleanup_start();
1624
1625 double start = os::elapsedTime();
1626
1627 HeapRegionRemSet::reset_for_cleanup_tasks();
1628
1629 // Do counting once more with the world stopped for good measure.
1630 G1ParFinalCountTask g1_par_count_task(g1h, &_region_bm, &_card_bm);
1631
1632 g1h->workers()->run_task(&g1_par_count_task);
1633
1634 if (VerifyDuringGC) {
1635 // Verify that the counting data accumulated during marking matches
1636 // that calculated by walking the marking bitmap.
1637
1638 // Bitmaps to hold expected values
1639 BitMap expected_region_bm(_region_bm.size(), true);
1640 BitMap expected_card_bm(_card_bm.size(), true);
1641
1642 G1ParVerifyFinalCountTask g1_par_verify_task(g1h,
1643 &_region_bm,
1644 &_card_bm,
1645 &expected_region_bm,
1646 &expected_card_bm);
1647
1648 g1h->workers()->run_task(&g1_par_verify_task);
1649
1650 guarantee(g1_par_verify_task.failures() == 0, "Unexpected accounting failures");
1651 }
1652
1653 size_t start_used_bytes = g1h->used();
1654 g1h->collector_state()->set_mark_in_progress(false);
1655
1656 double count_end = os::elapsedTime();
1657 double this_final_counting_time = (count_end - start);
1658 _total_counting_time += this_final_counting_time;
1659
1660 if (log_is_enabled(Trace, gc, liveness)) {
1661 G1PrintRegionLivenessInfoClosure cl("Post-Marking");
1662 _g1h->heap_region_iterate(&cl);
1663 }
1664
1665 // Install the newly created mark bitmap as "prev".
1666 swapMarkBitMaps();
1667
1668 g1h->reset_gc_time_stamp();
1669
1670 uint n_workers = _g1h->workers()->active_workers();
1671
1672 // Note end of marking in all heap regions.
1673 G1ParNoteEndTask g1_par_note_end_task(g1h, &_cleanup_list, n_workers);
1674 g1h->workers()->run_task(&g1_par_note_end_task);
1675 g1h->check_gc_time_stamps();
1676
1677 if (!cleanup_list_is_empty()) {
1678 // The cleanup list is not empty, so we'll have to process it
1679 // concurrently. Notify anyone else that might be wanting free
1680 // regions that there will be more free regions coming soon.
1681 g1h->set_free_regions_coming();
1682 }
1683
1684 // Scrub the rem sets before the record_concurrent_mark_cleanup_end() call
1685 // below, since scrubbing affects the metric by which we sort the heap regions.
1686 if (G1ScrubRemSets) {
1687 double rs_scrub_start = os::elapsedTime();
1688 g1h->scrub_rem_set(&_region_bm, &_card_bm);
1689 _total_rs_scrub_time += (os::elapsedTime() - rs_scrub_start);
1690 }
1691
1692 // This will also free any regions totally full of garbage objects,
1693 // and sort the regions.
1694 g1h->g1_policy()->record_concurrent_mark_cleanup_end();
1695
1696 // Statistics.
1697 double end = os::elapsedTime();
1698 _cleanup_times.add((end - start) * 1000.0);
1699
1700 // Clean up will have freed any regions completely full of garbage.
1701 // Update the soft reference policy with the new heap occupancy.
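// (Universe::update_heap_info_at_gc() refreshes the cached heap usage
// figures that the soft reference clearing policy consults, so soft
// references are cleared based on post-cleanup occupancy rather than
// stale numbers.)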
1702 Universe::update_heap_info_at_gc();
1703
1704 if (VerifyDuringGC) {
1705 HandleMark hm; // handle scope
1706 g1h->prepare_for_verify();
1707 Universe::verify(VerifyOption_G1UsePrevMarking, "During GC (after)");
1708 }
1709
1710 g1h->verifier()->check_bitmaps("Cleanup End");
1711
1712 g1h->verifier()->verify_region_sets_optional();
1713
1714 // We need to make this count as a "collection" so that any collection
1715 // pause that races with it goes around and waits for completeCleanup to finish.
1716 g1h->increment_total_collections();
1717
1718 // Clean out dead classes and update Metaspace sizes.
1719 if (ClassUnloadingWithConcurrentMark) {
1720 ClassLoaderDataGraph::purge();
1721 }
1722 MetaspaceGC::compute_new_size();
1723
1724 // We reclaimed old regions so we should calculate the sizes to make
1725 // sure we update the old gen/space data.
1726 g1h->g1mm()->update_sizes();
1727 g1h->allocation_context_stats().update_after_mark();
1728
1729 g1h->trace_heap_after_concurrent_cycle();
1730 }
1731
1732 void G1ConcurrentMark::completeCleanup() {
1733 if (has_aborted()) return;
1734
1735 G1CollectedHeap* g1h = G1CollectedHeap::heap();
1736
1737 _cleanup_list.verify_optional();
1738 FreeRegionList tmp_free_list("Tmp Free List");
1739
1740 log_develop_trace(gc, freelist)("G1ConcRegionFreeing [complete cleanup] : "
1741 "cleanup list has %u entries",
1742 _cleanup_list.length());
1743
1744 // No one else should be accessing the _cleanup_list at this point,
1745 // so it is not necessary to take any locks.
1746 while (!_cleanup_list.is_empty()) {
1747 HeapRegion* hr = _cleanup_list.remove_region(true /* from_head */);
1748 assert(hr != NULL, "Got NULL from a non-empty list");
1749 hr->par_clear();
1750 tmp_free_list.add_ordered(hr);
1751
1752 // Instead of adding one region at a time to the secondary_free_list,
1753 // we accumulate them in the local list and move them a few at a
1754 // time. This also cuts down on the number of notify_all() calls
1755 // we do during this process. We'll also append the local list when
1756 // _cleanup_list is empty (which means we just removed the last
1757 // region from the _cleanup_list).
1758 if ((tmp_free_list.length() % G1SecondaryFreeListAppendLength == 0) ||
1759 _cleanup_list.is_empty()) {
1760 log_develop_trace(gc, freelist)("G1ConcRegionFreeing [complete cleanup] : "
1761 "appending %u entries to the secondary_free_list, "
1762 "cleanup list still has %u entries",
1763 tmp_free_list.length(),
1764 _cleanup_list.length());
1765
1766 {
1767 MutexLockerEx x(SecondaryFreeList_lock, Mutex::_no_safepoint_check_flag);
1768 g1h->secondary_free_list_add(&tmp_free_list);
1769 SecondaryFreeList_lock->notify_all();
1770 }
1771 #ifndef PRODUCT
1772 if (G1StressConcRegionFreeing) {
1773 for (uintx i = 0; i < G1StressConcRegionFreeingDelayMillis; ++i) {
1774 os::sleep(Thread::current(), (jlong) 1, false);
1775 }
1776 }
1777 #endif
1778 }
1779 }
1780 assert(tmp_free_list.is_empty(), "post-condition");
1781 }
1782
1783 // Supporting Object and Oop closures for reference discovery
1784 // and processing during marking
1785
1786 bool G1CMIsAliveClosure::do_object_b(oop obj) {
1787 HeapWord* addr = (HeapWord*)obj;
1788 return addr != NULL &&
1789 (!_g1->is_in_g1_reserved(addr) || !_g1->is_obj_ill(obj));
1790 }
1791
1792 // 'Keep Alive' oop closure used by both serial and parallel reference processing.
1793 // Uses the G1CMTask associated with a worker thread (for serial reference
1794 // processing the G1CMTask for worker 0 is used) to preserve (mark) and
1795 // trace referent objects.
1796 //
1797 // Using the G1CMTask and embedded local queues avoids having the worker
1798 // threads operating on the global mark stack. This reduces the risk
1799 // of overflowing the stack - which we would rather avoid at this late
1800 // stage. Also, using the tasks' local queues removes the potential
1801 // for the workers to interfere with each other, which could occur if
1802 // they operated on the global stack.
1803
1804 class G1CMKeepAliveAndDrainClosure: public OopClosure {
1805 G1ConcurrentMark* _cm;
1806 G1CMTask* _task;
1807 int _ref_counter_limit;
1808 int _ref_counter;
1809 bool _is_serial;
1810 public:
1811 G1CMKeepAliveAndDrainClosure(G1ConcurrentMark* cm, G1CMTask* task, bool is_serial) :
1812 _cm(cm), _task(task), _ref_counter_limit(G1RefProcDrainInterval),
1813 _is_serial(is_serial) {
1814 assert(_ref_counter_limit > 0, "sanity");
1815 assert(!_is_serial || _task->worker_id() == 0, "only task 0 for serial code");
1816 _ref_counter = _ref_counter_limit;
1817 }
1818
1819 virtual void do_oop(narrowOop* p) { do_oop_work(p); }
1820 virtual void do_oop( oop* p) { do_oop_work(p); }
1821
1822 template <class T> void do_oop_work(T* p) {
1823 if (!_cm->has_overflown()) {
1824 oop obj = oopDesc::load_decode_heap_oop(p);
1825 _task->deal_with_reference(obj);
1826 _ref_counter--;
1827
1828 if (_ref_counter == 0) {
1829 // We have dealt with _ref_counter_limit references, pushing them
1830 // and objects reachable from them on to the local stack (and
1831 // possibly the global stack). Call G1CMTask::do_marking_step() to
1832 // process these entries.
1833 //
1834 // We call G1CMTask::do_marking_step() in a loop, which we'll exit if
1835 // there's nothing more to do (i.e. we're done with the entries that
1836 // were pushed as a result of the G1CMTask::deal_with_reference() calls
1837 // above) or we overflow.
1838 //
1839 // Note: G1CMTask::do_marking_step() can set the G1CMTask::has_aborted()
1840 // flag while there may still be some work to do. (See the comment at
1841 // the beginning of G1CMTask::do_marking_step() for those conditions -
1842 // one of which is reaching the specified time target.) It is only
1843 // when G1CMTask::do_marking_step() returns without setting the
1844 // has_aborted() flag that the marking step has completed.
1845 do {
1846 double mark_step_duration_ms = G1ConcMarkStepDurationMillis;
1847 _task->do_marking_step(mark_step_duration_ms,
1848 false /* do_termination */,
1849 _is_serial);
1850 } while (_task->has_aborted() && !_cm->has_overflown());
1851 _ref_counter = _ref_counter_limit;
1852 }
1853 }
1854 }
1855 };
1856
1857 // 'Drain' oop closure used by both serial and parallel reference processing.
1858 // Uses the G1CMTask associated with a given worker thread (for serial
1859 // reference processing the G1CMTask for worker 0 is used). Calls the
1860 // do_marking_step routine, with an unbelievably large timeout value,
1861 // to drain the marking data structures of the remaining entries
1862 // added by the 'keep alive' oop closure above.
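// (This closure is what weakRefsWork() below passes as the "complete GC"
// closure to ReferenceProcessor::process_discovered_references(): after
// each batch of 'keep alive' work, the reference processor invokes it to
// drain whatever that batch pushed.)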
1863
1864 class G1CMDrainMarkingStackClosure: public VoidClosure {
1865 G1ConcurrentMark* _cm;
1866 G1CMTask* _task;
1867 bool _is_serial;
1868 public:
1869 G1CMDrainMarkingStackClosure(G1ConcurrentMark* cm, G1CMTask* task, bool is_serial) :
1870 _cm(cm), _task(task), _is_serial(is_serial) {
1871 assert(!_is_serial || _task->worker_id() == 0, "only task 0 for serial code");
1872 }
1873
1874 void do_void() {
1875 do {
1876 // We call G1CMTask::do_marking_step() to completely drain the local
1877 // and global marking stacks of entries pushed by the 'keep alive'
1878 // oop closure (an instance of G1CMKeepAliveAndDrainClosure above).
1879 //
1880 // G1CMTask::do_marking_step() is called in a loop, which we'll exit
1881 // if there's nothing more to do (i.e. we've completely drained the
1882 // entries that were pushed as a result of applying the 'keep alive'
1883 // closure to the entries on the discovered ref lists) or we overflow
1884 // the global marking stack.
1885 //
1886 // Note: G1CMTask::do_marking_step() can set the G1CMTask::has_aborted()
1887 // flag while there may still be some work to do. (See the comment at
1888 // the beginning of G1CMTask::do_marking_step() for those conditions -
1889 // one of which is reaching the specified time target.) It is only
1890 // when G1CMTask::do_marking_step() returns without setting the
1891 // has_aborted() flag that the marking step has completed.
1892
1893 _task->do_marking_step(1000000000.0 /* something very large */,
1894 true /* do_termination */,
1895 _is_serial);
1896 } while (_task->has_aborted() && !_cm->has_overflown());
1897 }
1898 };
1899
1900 // Implementation of AbstractRefProcTaskExecutor for parallel
1901 // reference processing at the end of G1 concurrent marking
1902
1903 class G1CMRefProcTaskExecutor: public AbstractRefProcTaskExecutor {
1904 private:
1905 G1CollectedHeap* _g1h;
1906 G1ConcurrentMark* _cm;
1907 WorkGang* _workers;
1908 uint _active_workers;
1909
1910 public:
1911 G1CMRefProcTaskExecutor(G1CollectedHeap* g1h,
1912 G1ConcurrentMark* cm,
1913 WorkGang* workers,
1914 uint n_workers) :
1915 _g1h(g1h), _cm(cm),
1916 _workers(workers), _active_workers(n_workers) { }
1917
1918 // Executes the given task using concurrent marking worker threads.
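// Both overloads below reset the marking concurrency level before
// dispatching, so that the termination and overflow protocol in
// G1CMTask::do_marking_step() waits for the right number of workers.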
1919 virtual void execute(ProcessTask& task);
1920 virtual void execute(EnqueueTask& task);
1921 };
1922
1923 class G1CMRefProcTaskProxy: public AbstractGangTask {
1924 typedef AbstractRefProcTaskExecutor::ProcessTask ProcessTask;
1925 ProcessTask& _proc_task;
1926 G1CollectedHeap* _g1h;
1927 G1ConcurrentMark* _cm;
1928
1929 public:
1930 G1CMRefProcTaskProxy(ProcessTask& proc_task,
1931 G1CollectedHeap* g1h,
1932 G1ConcurrentMark* cm) :
1933 AbstractGangTask("Process reference objects in parallel"),
1934 _proc_task(proc_task), _g1h(g1h), _cm(cm) {
1935 ReferenceProcessor* rp = _g1h->ref_processor_cm();
1936 assert(rp->processing_is_mt(), "shouldn't be here otherwise");
1937 }
1938
1939 virtual void work(uint worker_id) {
1940 ResourceMark rm;
1941 HandleMark hm;
1942 G1CMTask* task = _cm->task(worker_id);
1943 G1CMIsAliveClosure g1_is_alive(_g1h);
1944 G1CMKeepAliveAndDrainClosure g1_par_keep_alive(_cm, task, false /* is_serial */);
1945 G1CMDrainMarkingStackClosure g1_par_drain(_cm, task, false /* is_serial */);
1946
1947 _proc_task.work(worker_id, g1_is_alive, g1_par_keep_alive, g1_par_drain);
1948 }
1949 };
1950
1951 void G1CMRefProcTaskExecutor::execute(ProcessTask& proc_task) {
1952 assert(_workers != NULL, "Need parallel worker threads.");
1953 assert(_g1h->ref_processor_cm()->processing_is_mt(), "processing is not MT");
1954
1955 G1CMRefProcTaskProxy proc_task_proxy(proc_task, _g1h, _cm);
1956
1957 // We need to reset the concurrency level before each
1958 // proxy task execution, so that the termination protocol
1959 // and overflow handling in G1CMTask::do_marking_step() know
1960 // how many workers to wait for.
1961 _cm->set_concurrency(_active_workers);
1962 _workers->run_task(&proc_task_proxy);
1963 }
1964
1965 class G1CMRefEnqueueTaskProxy: public AbstractGangTask {
1966 typedef AbstractRefProcTaskExecutor::EnqueueTask EnqueueTask;
1967 EnqueueTask& _enq_task;
1968
1969 public:
1970 G1CMRefEnqueueTaskProxy(EnqueueTask& enq_task) :
1971 AbstractGangTask("Enqueue reference objects in parallel"),
1972 _enq_task(enq_task) { }
1973
1974 virtual void work(uint worker_id) {
1975 _enq_task.work(worker_id);
1976 }
1977 };
1978
1979 void G1CMRefProcTaskExecutor::execute(EnqueueTask& enq_task) {
1980 assert(_workers != NULL, "Need parallel worker threads.");
1981 assert(_g1h->ref_processor_cm()->processing_is_mt(), "processing is not MT");
1982
1983 G1CMRefEnqueueTaskProxy enq_task_proxy(enq_task);
1984
1985 // Not strictly necessary but...
1986 //
1987 // We need to reset the concurrency level before each
1988 // proxy task execution, so that the termination protocol
1989 // and overflow handling in G1CMTask::do_marking_step() know
1990 // how many workers to wait for.
1991 _cm->set_concurrency(_active_workers);
1992 _workers->run_task(&enq_task_proxy);
1993 }
1994
1995 void G1ConcurrentMark::weakRefsWorkParallelPart(BoolObjectClosure* is_alive, bool purged_classes) {
1996 G1CollectedHeap::heap()->parallel_cleaning(is_alive, true, true, purged_classes);
1997 }
1998
1999 void G1ConcurrentMark::weakRefsWork(bool clear_all_soft_refs) {
2000 if (has_overflown()) {
2001 // Skip processing the discovered references if we have
2002 // overflown the global marking stack. Reference objects
2003 // only get discovered once so it is OK to not
2004 // de-populate the discovered reference lists. We could have,
2005 // but the only benefit would be that, when marking restarts,
2006 // fewer reference objects are discovered.
2007 return;
2008 }
2009
2010 ResourceMark rm;
2011 HandleMark hm;
2012
2013 G1CollectedHeap* g1h = G1CollectedHeap::heap();
2014
2015 // Is alive closure.
2016 G1CMIsAliveClosure g1_is_alive(g1h);
2017
2018 // Inner scope to exclude the cleaning of the string and symbol
2019 // tables from the displayed time.
2020 {
2021 GCTraceTime(Debug, gc) trace("GC Ref Proc", g1h->gc_timer_cm());
2022
2023 ReferenceProcessor* rp = g1h->ref_processor_cm();
2024
2025 // See the comment in G1CollectedHeap::ref_processing_init()
2026 // about how reference processing currently works in G1.
2027
2028 // Set the soft reference policy
2029 rp->setup_policy(clear_all_soft_refs);
2030 assert(_markStack.isEmpty(), "mark stack should be empty");
2031
2032 // Instances of the 'Keep Alive' and 'Complete GC' closures used
2033 // in serial reference processing. Note that these closures are also
2034 // used for serially processing (by the current thread) the JNI
2035 // references during parallel reference processing.
2036 //
2037 // These closures do not need to synchronize with the worker
2038 // threads involved in parallel reference processing as these
2039 // instances are executed serially by the current thread (i.e.
2040 // reference processing is not multi-threaded and is thus
2041 // performed by the current thread instead of a gang worker).
2042 //
2043 // The gang tasks involved in parallel reference processing create
2044 // their own instances of these closures, which do their own
2045 // synchronization among themselves.
2046 G1CMKeepAliveAndDrainClosure g1_keep_alive(this, task(0), true /* is_serial */);
2047 G1CMDrainMarkingStackClosure g1_drain_mark_stack(this, task(0), true /* is_serial */);
2048
2049 // We need at least one active thread. If reference processing
2050 // is not multi-threaded we use the current (VMThread) thread,
2051 // otherwise we use the work gang from the G1CollectedHeap and
2052 // we utilize all the worker threads we can.
2053 bool processing_is_mt = rp->processing_is_mt();
2054 uint active_workers = (processing_is_mt ? g1h->workers()->active_workers() : 1U);
2055 active_workers = MAX2(MIN2(active_workers, _max_worker_id), 1U);
2056
2057 // Parallel processing task executor.
2058 G1CMRefProcTaskExecutor par_task_executor(g1h, this,
2059 g1h->workers(), active_workers);
2060 AbstractRefProcTaskExecutor* executor = (processing_is_mt ? &par_task_executor : NULL);
2061
2062 // Set the concurrency level. The phase was already set prior to
2063 // executing the remark task.
2064 set_concurrency(active_workers);
2065
2066 // Set the degree of MT processing here. If the discovery was done MT,
2067 // the number of threads involved during discovery could differ from
2068 // the number of active workers. This is OK as long as the discovered
2069 // Reference lists are balanced (see balance_all_queues() and balance_queues()).
2070 rp->set_active_mt_degree(active_workers);
2071
2072 // Process the weak references.
2073 const ReferenceProcessorStats& stats =
2074 rp->process_discovered_references(&g1_is_alive,
2075 &g1_keep_alive,
2076 &g1_drain_mark_stack,
2077 executor,
2078 g1h->gc_timer_cm());
2079 g1h->gc_tracer_cm()->report_gc_reference_stats(stats);
2080
2081 // The do_oop work routines of the keep_alive and drain_marking_stack
2082 // oop closures will set the has_overflown flag if we overflow the
2083 // global marking stack.
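// (If an overflow does occur, reference processing is effectively
// abandoned for this cycle: the code below re-asserts the flag and
// weakRefsWork() returns early, leaving the references to be
// re-discovered when marking restarts.)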
2084 2085 assert(_markStack.overflow() || _markStack.isEmpty(), 2086 "mark stack should be empty (unless it overflowed)"); 2087 2088 if (_markStack.overflow()) { 2089 // This should have been done already when we tried to push an 2090 // entry on to the global mark stack. But let's do it again. 2091 set_has_overflown(); 2092 } 2093 2094 assert(rp->num_q() == active_workers, "why not"); 2095 2096 rp->enqueue_discovered_references(executor); 2097 2098 rp->verify_no_references_recorded(); 2099 assert(!rp->discovery_enabled(), "Post condition"); 2100 } 2101 2102 if (has_overflown()) { 2103 // We can not trust g1_is_alive if the marking stack overflowed 2104 return; 2105 } 2106 2107 assert(_markStack.isEmpty(), "Marking should have completed"); 2108 2109 // Unload Klasses, String, Symbols, Code Cache, etc. 2110 { 2111 GCTraceTime(Debug, gc) trace("Unloading", g1h->gc_timer_cm()); 2112 2113 if (ClassUnloadingWithConcurrentMark) { 2114 bool purged_classes; 2115 2116 { 2117 GCTraceTime(Trace, gc) trace("System Dictionary Unloading", g1h->gc_timer_cm()); 2118 purged_classes = SystemDictionary::do_unloading(&g1_is_alive, false /* Defer klass cleaning */); 2119 } 2120 2121 { 2122 GCTraceTime(Trace, gc) trace("Parallel Unloading", g1h->gc_timer_cm()); 2123 weakRefsWorkParallelPart(&g1_is_alive, purged_classes); 2124 } 2125 } 2126 2127 if (G1StringDedup::is_enabled()) { 2128 GCTraceTime(Trace, gc) trace("String Deduplication Unlink", g1h->gc_timer_cm()); 2129 G1StringDedup::unlink(&g1_is_alive); 2130 } 2131 } 2132 } 2133 2134 void G1ConcurrentMark::swapMarkBitMaps() { 2135 G1CMBitMapRO* temp = _prevMarkBitMap; 2136 _prevMarkBitMap = (G1CMBitMapRO*)_nextMarkBitMap; 2137 _nextMarkBitMap = (G1CMBitMap*) temp; 2138 } 2139 2140 // Closure for marking entries in SATB buffers. 2141 class G1CMSATBBufferClosure : public SATBBufferClosure { 2142 private: 2143 G1CMTask* _task; 2144 G1CollectedHeap* _g1h; 2145 2146 // This is very similar to G1CMTask::deal_with_reference, but with 2147 // more relaxed requirements for the argument, so this must be more 2148 // circumspect about treating the argument as an object. 2149 void do_entry(void* entry) const { 2150 _task->increment_refs_reached(); 2151 HeapRegion* hr = _g1h->heap_region_containing(entry); 2152 if (entry < hr->next_top_at_mark_start()) { 2153 // Until we get here, we don't know whether entry refers to a valid 2154 // object; it could instead have been a stale reference. 
2155 oop obj = static_cast<oop>(entry);
2156 assert(obj->is_oop(true /* ignore mark word */),
2157 "Invalid oop in SATB buffer: " PTR_FORMAT, p2i(obj));
2158 _task->make_reference_grey(obj, hr);
2159 }
2160 }
2161
2162 public:
2163 G1CMSATBBufferClosure(G1CMTask* task, G1CollectedHeap* g1h)
2164 : _task(task), _g1h(g1h) { }
2165
2166 virtual void do_buffer(void** buffer, size_t size) {
2167 for (size_t i = 0; i < size; ++i) {
2168 do_entry(buffer[i]);
2169 }
2170 }
2171 };
2172
2173 class G1RemarkThreadsClosure : public ThreadClosure {
2174 G1CMSATBBufferClosure _cm_satb_cl;
2175 G1CMOopClosure _cm_cl;
2176 MarkingCodeBlobClosure _code_cl;
2177 int _thread_parity;
2178
2179 public:
2180 G1RemarkThreadsClosure(G1CollectedHeap* g1h, G1CMTask* task) :
2181 _cm_satb_cl(task, g1h),
2182 _cm_cl(g1h, g1h->concurrent_mark(), task),
2183 _code_cl(&_cm_cl, !CodeBlobToOopClosure::FixRelocations),
2184 _thread_parity(Threads::thread_claim_parity()) {}
2185
2186 void do_thread(Thread* thread) {
2187 if (thread->is_Java_thread()) {
2188 if (thread->claim_oops_do(true, _thread_parity)) {
2189 JavaThread* jt = (JavaThread*)thread;
2190
2191 // In theory it should not be necessary to explicitly walk the nmethods to find roots for concurrent marking;
2192 // however, oops reachable from nmethods have very complex lifecycles:
2193 // * Alive if on the stack of an executing method
2194 // * Weakly reachable otherwise
2195 // Some objects reachable from nmethods, such as the class loader (or klass_holder) of the receiver, should be
2196 // live by the SATB invariant, but other oops recorded in nmethods may behave differently.
2197 jt->nmethods_do(&_code_cl);
2198
2199 jt->satb_mark_queue().apply_closure_and_empty(&_cm_satb_cl);
2200 }
2201 } else if (thread->is_VM_thread()) {
2202 if (thread->claim_oops_do(true, _thread_parity)) {
2203 JavaThread::satb_mark_queue_set().shared_satb_queue()->apply_closure_and_empty(&_cm_satb_cl);
2204 }
2205 }
2206 }
2207 };
2208
2209 class G1CMRemarkTask: public AbstractGangTask {
2210 private:
2211 G1ConcurrentMark* _cm;
2212 public:
2213 void work(uint worker_id) {
2214 // Since all available tasks are actually started, we should
2215 // only proceed if we're supposed to be active.
2216 if (worker_id < _cm->active_tasks()) {
2217 G1CMTask* task = _cm->task(worker_id);
2218 task->record_start_time();
2219 {
2220 ResourceMark rm;
2221 HandleMark hm;
2222
2223 G1RemarkThreadsClosure threads_f(G1CollectedHeap::heap(), task);
2224 Threads::threads_do(&threads_f);
2225 }
2226
2227 do {
2228 task->do_marking_step(1000000000.0 /* something very large */,
2229 true /* do_termination */,
2230 false /* is_serial */);
2231 } while (task->has_aborted() && !_cm->has_overflown());
2232 // If we overflow, then we do not want to restart. We instead
2233 // want to abort remark and do concurrent marking again.
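// (When remark is abandoned this way, the marking state is reset and
// the concurrent phase is re-run; this is why the guarantee after the
// remark task below tolerates non-empty SATB buffers if has_overflown()
// is set.)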
2234 task->record_end_time();
2235 }
2236 }
2237
2238 G1CMRemarkTask(G1ConcurrentMark* cm, uint active_workers) :
2239 AbstractGangTask("Par Remark"), _cm(cm) {
2240 _cm->terminator()->reset_for_reuse(active_workers);
2241 }
2242 };
2243
2244 void G1ConcurrentMark::checkpointRootsFinalWork() {
2245 ResourceMark rm;
2246 HandleMark hm;
2247 G1CollectedHeap* g1h = G1CollectedHeap::heap();
2248
2249 GCTraceTime(Debug, gc) trace("Finalize Marking", g1h->gc_timer_cm());
2250
2251 g1h->ensure_parsability(false);
2252
2253 // This is remark, so we'll use up all active threads.
2254 uint active_workers = g1h->workers()->active_workers();
2255 set_concurrency_and_phase(active_workers, false /* concurrent */);
2256 // Leave _parallel_marking_threads at its
2257 // value originally calculated in the G1ConcurrentMark
2258 // constructor and pass values of the active workers
2259 // through the gang in the task.
2260
2261 {
2262 StrongRootsScope srs(active_workers);
2263
2264 G1CMRemarkTask remarkTask(this, active_workers);
2265 // We will start all available threads, even if we decide that the
2266 // active_workers will be fewer. The extra ones will just bail out
2267 // immediately.
2268 g1h->workers()->run_task(&remarkTask);
2269 }
2270
2271 SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set();
2272 guarantee(has_overflown() ||
2273 satb_mq_set.completed_buffers_num() == 0,
2274 "Invariant: has_overflown = %s, num buffers = " SIZE_FORMAT,
2275 BOOL_TO_STR(has_overflown()),
2276 satb_mq_set.completed_buffers_num());
2277
2278 print_stats();
2279 }
2280
2281 void G1ConcurrentMark::clearRangePrevBitmap(MemRegion mr) {
2282 // Note we are overriding the read-only view of the prev map here, via
2283 // the cast.
2284 ((G1CMBitMap*)_prevMarkBitMap)->clearRange(mr);
2285 }
2286
2287 HeapRegion*
2288 G1ConcurrentMark::claim_region(uint worker_id) {
2289 // "checkpoint" the finger
2290 HeapWord* finger = _finger;
2291
2292 // _heap_end will not change underneath our feet; it only changes at
2293 // yield points.
2294 while (finger < _heap_end) {
2295 assert(_g1h->is_in_g1_reserved(finger), "invariant");
2296
2297 HeapRegion* curr_region = _g1h->heap_region_containing(finger);
2298
2299 // Above, heap_region_containing() may return NULL as we always scan and
2300 // claim until the end of the heap. In this case, just jump to the next region.
2301 HeapWord* end = curr_region != NULL ? curr_region->end() : finger + HeapRegion::GrainWords;
2302
2303 // Is the gap between reading the finger and doing the CAS too long?
2304 HeapWord* res = (HeapWord*) Atomic::cmpxchg_ptr(end, &_finger, finger);
2305 if (res == finger && curr_region != NULL) {
2306 // we succeeded
2307 HeapWord* bottom = curr_region->bottom();
2308 HeapWord* limit = curr_region->next_top_at_mark_start();
2309
2310 // notice that _finger == end cannot be guaranteed here since
2311 // someone else might have moved the finger even further
2312 assert(_finger >= end, "the finger should have moved forward");
2313
2314 if (limit > bottom) {
2315 return curr_region;
2316 } else {
2317 assert(limit == bottom,
2318 "the region limit should be at bottom");
2319 // we return NULL and the caller should try calling
2320 // claim_region() again.
2321 return NULL; 2322 } 2323 } else { 2324 assert(_finger > finger, "the finger should have moved forward"); 2325 // read it again 2326 finger = _finger; 2327 } 2328 } 2329 2330 return NULL; 2331 } 2332 2333 #ifndef PRODUCT 2334 class VerifyNoCSetOops VALUE_OBJ_CLASS_SPEC { 2335 private: 2336 G1CollectedHeap* _g1h; 2337 const char* _phase; 2338 int _info; 2339 2340 public: 2341 VerifyNoCSetOops(const char* phase, int info = -1) : 2342 _g1h(G1CollectedHeap::heap()), 2343 _phase(phase), 2344 _info(info) 2345 { } 2346 2347 void operator()(oop obj) const { 2348 guarantee(obj->is_oop(), 2349 "Non-oop " PTR_FORMAT ", phase: %s, info: %d", 2350 p2i(obj), _phase, _info); 2351 guarantee(!_g1h->obj_in_cs(obj), 2352 "obj: " PTR_FORMAT " in CSet, phase: %s, info: %d", 2353 p2i(obj), _phase, _info); 2354 } 2355 }; 2356 2357 void G1ConcurrentMark::verify_no_cset_oops() { 2358 assert(SafepointSynchronize::is_at_safepoint(), "should be at a safepoint"); 2359 if (!G1CollectedHeap::heap()->collector_state()->mark_in_progress()) { 2360 return; 2361 } 2362 2363 // Verify entries on the global mark stack 2364 _markStack.iterate(VerifyNoCSetOops("Stack")); 2365 2366 // Verify entries on the task queues 2367 for (uint i = 0; i < _max_worker_id; ++i) { 2368 G1CMTaskQueue* queue = _task_queues->queue(i); 2369 queue->iterate(VerifyNoCSetOops("Queue", i)); 2370 } 2371 2372 // Verify the global finger 2373 HeapWord* global_finger = finger(); 2374 if (global_finger != NULL && global_finger < _heap_end) { 2375 // Since we always iterate over all regions, we might get a NULL HeapRegion 2376 // here. 2377 HeapRegion* global_hr = _g1h->heap_region_containing(global_finger); 2378 guarantee(global_hr == NULL || global_finger == global_hr->bottom(), 2379 "global finger: " PTR_FORMAT " region: " HR_FORMAT, 2380 p2i(global_finger), HR_FORMAT_PARAMS(global_hr)); 2381 } 2382 2383 // Verify the task fingers 2384 assert(parallel_marking_threads() <= _max_worker_id, "sanity"); 2385 for (uint i = 0; i < parallel_marking_threads(); ++i) { 2386 G1CMTask* task = _tasks[i]; 2387 HeapWord* task_finger = task->finger(); 2388 if (task_finger != NULL && task_finger < _heap_end) { 2389 // See above note on the global finger verification. 2390 HeapRegion* task_hr = _g1h->heap_region_containing(task_finger); 2391 guarantee(task_hr == NULL || task_finger == task_hr->bottom() || 2392 !task_hr->in_collection_set(), 2393 "task finger: " PTR_FORMAT " region: " HR_FORMAT, 2394 p2i(task_finger), HR_FORMAT_PARAMS(task_hr)); 2395 } 2396 } 2397 } 2398 #endif // PRODUCT 2399 2400 // Aggregate the counting data that was constructed concurrently 2401 // with marking. 
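// While marking runs, each worker accumulates liveness into its own
// card bitmap and per-region marked-bytes array. The closure below folds
// those per-worker structures into the global ones: card bits are OR'ed
// into the global card bitmap and the per-worker byte counts are summed
// into the region's marked bytes. For example, if worker 0 marked 64
// bytes and worker 1 marked 128 bytes in a region, the region ends up
// with 192 marked bytes, and any card bit set by either worker ends up
// set in the global card bitmap. (A "card" here is 512 heap bytes by
// default, so a card bit covers 64 heap words on a 64-bit VM.)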
2402 class AggregateCountDataHRClosure: public HeapRegionClosure {
2403 G1CollectedHeap* _g1h;
2404 G1ConcurrentMark* _cm;
2405 CardTableModRefBS* _ct_bs;
2406 BitMap* _cm_card_bm;
2407 uint _max_worker_id;
2408
2409 public:
2410 AggregateCountDataHRClosure(G1CollectedHeap* g1h,
2411 BitMap* cm_card_bm,
2412 uint max_worker_id) :
2413 _g1h(g1h), _cm(g1h->concurrent_mark()),
2414 _ct_bs(barrier_set_cast<CardTableModRefBS>(g1h->barrier_set())),
2415 _cm_card_bm(cm_card_bm), _max_worker_id(max_worker_id) { }
2416
2417 bool doHeapRegion(HeapRegion* hr) {
2418 HeapWord* start = hr->bottom();
2419 HeapWord* limit = hr->next_top_at_mark_start();
2420 HeapWord* end = hr->end();
2421
2422 assert(start <= limit && limit <= hr->top() && hr->top() <= hr->end(),
2423 "Preconditions not met - "
2424 "start: " PTR_FORMAT ", limit: " PTR_FORMAT ", "
2425 "top: " PTR_FORMAT ", end: " PTR_FORMAT,
2426 p2i(start), p2i(limit), p2i(hr->top()), p2i(hr->end()));
2427
2428 assert(hr->next_marked_bytes() == 0, "Precondition");
2429
2430 if (start == limit) {
2431 // NTAMS of this region has not been set so nothing to do.
2432 return false;
2433 }
2434
2435 // 'start' should be in the heap.
2436 assert(_g1h->is_in_g1_reserved(start) && _ct_bs->is_card_aligned(start), "sanity");
2437 // 'end' *may* be just beyond the end of the heap (if hr is the last region)
2438 assert(!_g1h->is_in_g1_reserved(end) || _ct_bs->is_card_aligned(end), "sanity");
2439
2440 BitMap::idx_t start_idx = _cm->card_bitmap_index_for(start);
2441 BitMap::idx_t limit_idx = _cm->card_bitmap_index_for(limit);
2442 BitMap::idx_t end_idx = _cm->card_bitmap_index_for(end);
2443
2444 // If ntams is not card aligned then we bump the card bitmap index
2445 // for limit so that we get all the cards spanned by
2446 // the object ending at ntams.
2447 // Note: if this is the last region in the heap then ntams
2448 // could actually be just beyond the end of the heap;
2449 // limit_idx will then correspond to a (non-existent) card
2450 // that is also outside the heap.
2451 if (_g1h->is_in_g1_reserved(limit) && !_ct_bs->is_card_aligned(limit)) {
2452 limit_idx += 1;
2453 }
2454
2455 assert(limit_idx <= end_idx, "or else use atomics");
2456
2457 // Aggregate the "stripe" in the count data associated with hr.
2458 uint hrm_index = hr->hrm_index();
2459 size_t marked_bytes = 0;
2460
2461 for (uint i = 0; i < _max_worker_id; i += 1) {
2462 size_t* marked_bytes_array = _cm->count_marked_bytes_array_for(i);
2463 BitMap* task_card_bm = _cm->count_card_bitmap_for(i);
2464
2465 // Fetch the marked_bytes in this region for task i and
2466 // add it to the running total for this region.
2467 marked_bytes += marked_bytes_array[hrm_index];
2468
2469 // Now union the bitmaps[0,max_worker_id)[start_idx..limit_idx)
2470 // into the global card bitmap.
2471 BitMap::idx_t scan_idx = task_card_bm->get_next_one_offset(start_idx, limit_idx);
2472
2473 while (scan_idx < limit_idx) {
2474 assert(task_card_bm->at(scan_idx) == true, "should be");
2475 _cm_card_bm->set_bit(scan_idx);
2476 assert(_cm_card_bm->at(scan_idx) == true, "should be");
2477
2478 // BitMap::get_next_one_offset() can handle the case when
2479 // its left_offset parameter is greater than its right_offset
2480 // parameter. It does, however, have an early exit if
2481 // left_offset == right_offset. So let's limit the value
2482 // passed in for left offset here.
2483 BitMap::idx_t next_idx = MIN2(scan_idx + 1, limit_idx); 2484 scan_idx = task_card_bm->get_next_one_offset(next_idx, limit_idx); 2485 } 2486 } 2487 2488 // Update the marked bytes for this region. 2489 hr->add_to_marked_bytes(marked_bytes); 2490 2491 // Next heap region 2492 return false; 2493 } 2494 }; 2495 2496 class G1AggregateCountDataTask: public AbstractGangTask { 2497 protected: 2498 G1CollectedHeap* _g1h; 2499 G1ConcurrentMark* _cm; 2500 BitMap* _cm_card_bm; 2501 uint _max_worker_id; 2502 uint _active_workers; 2503 HeapRegionClaimer _hrclaimer; 2504 2505 public: 2506 G1AggregateCountDataTask(G1CollectedHeap* g1h, 2507 G1ConcurrentMark* cm, 2508 BitMap* cm_card_bm, 2509 uint max_worker_id, 2510 uint n_workers) : 2511 AbstractGangTask("Count Aggregation"), 2512 _g1h(g1h), _cm(cm), _cm_card_bm(cm_card_bm), 2513 _max_worker_id(max_worker_id), 2514 _active_workers(n_workers), 2515 _hrclaimer(_active_workers) { 2516 } 2517 2518 void work(uint worker_id) { 2519 AggregateCountDataHRClosure cl(_g1h, _cm_card_bm, _max_worker_id); 2520 2521 _g1h->heap_region_par_iterate(&cl, worker_id, &_hrclaimer); 2522 } 2523 }; 2524 2525 2526 void G1ConcurrentMark::aggregate_count_data() { 2527 uint n_workers = _g1h->workers()->active_workers(); 2528 2529 G1AggregateCountDataTask g1_par_agg_task(_g1h, this, &_card_bm, 2530 _max_worker_id, n_workers); 2531 2532 _g1h->workers()->run_task(&g1_par_agg_task); 2533 } 2534 2535 // Clear the per-worker arrays used to store the per-region counting data 2536 void G1ConcurrentMark::clear_all_count_data() { 2537 // Clear the global card bitmap - it will be filled during 2538 // liveness count aggregation (during remark) and the 2539 // final counting task. 2540 _card_bm.clear(); 2541 2542 // Clear the global region bitmap - it will be filled as part 2543 // of the final counting task. 2544 _region_bm.clear(); 2545 2546 uint max_regions = _g1h->max_regions(); 2547 assert(_max_worker_id > 0, "uninitialized"); 2548 2549 for (uint i = 0; i < _max_worker_id; i += 1) { 2550 BitMap* task_card_bm = count_card_bitmap_for(i); 2551 size_t* marked_bytes_array = count_marked_bytes_array_for(i); 2552 2553 assert(task_card_bm->size() == _card_bm.size(), "size mismatch"); 2554 assert(marked_bytes_array != NULL, "uninitialized"); 2555 2556 memset(marked_bytes_array, 0, (size_t) max_regions * sizeof(size_t)); 2557 task_card_bm->clear(); 2558 } 2559 } 2560 2561 void G1ConcurrentMark::print_stats() { 2562 if (!log_is_enabled(Debug, gc, stats)) { 2563 return; 2564 } 2565 log_debug(gc, stats)("---------------------------------------------------------------------"); 2566 for (size_t i = 0; i < _active_tasks; ++i) { 2567 _tasks[i]->print_stats(); 2568 log_debug(gc, stats)("---------------------------------------------------------------------"); 2569 } 2570 } 2571 2572 // abandon current marking iteration due to a Full GC 2573 void G1ConcurrentMark::abort() { 2574 if (!cmThread()->during_cycle() || _has_aborted) { 2575 // We haven't started a concurrent cycle or we have already aborted it. No need to do anything. 2576 return; 2577 } 2578 2579 // Clear all marks in the next bitmap for the next marking cycle. This will allow us to skip the next 2580 // concurrent bitmap clearing. 2581 _nextMarkBitMap->clearAll(); 2582 2583 // Note we cannot clear the previous marking bitmap here 2584 // since VerifyDuringGC verifies the objects marked during 2585 // a full GC against the previous bitmap. 
2586 2587 // Clear the liveness counting data 2588 clear_all_count_data(); 2589 // Empty mark stack 2590 reset_marking_state(); 2591 for (uint i = 0; i < _max_worker_id; ++i) { 2592 _tasks[i]->clear_region_fields(); 2593 } 2594 _first_overflow_barrier_sync.abort(); 2595 _second_overflow_barrier_sync.abort(); 2596 _has_aborted = true; 2597 2598 SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set(); 2599 satb_mq_set.abandon_partial_marking(); 2600 // This can be called either during or outside marking, we'll read 2601 // the expected_active value from the SATB queue set. 2602 satb_mq_set.set_active_all_threads( 2603 false, /* new active value */ 2604 satb_mq_set.is_active() /* expected_active */); 2605 2606 _g1h->trace_heap_after_concurrent_cycle(); 2607 2608 // Close any open concurrent phase timing 2609 register_concurrent_phase_end(); 2610 2611 _g1h->register_concurrent_cycle_end(); 2612 } 2613 2614 static void print_ms_time_info(const char* prefix, const char* name, 2615 NumberSeq& ns) { 2616 log_trace(gc, marking)("%s%5d %12s: total time = %8.2f s (avg = %8.2f ms).", 2617 prefix, ns.num(), name, ns.sum()/1000.0, ns.avg()); 2618 if (ns.num() > 0) { 2619 log_trace(gc, marking)("%s [std. dev = %8.2f ms, max = %8.2f ms]", 2620 prefix, ns.sd(), ns.maximum()); 2621 } 2622 } 2623 2624 void G1ConcurrentMark::print_summary_info() { 2625 LogHandle(gc, marking) log; 2626 if (!log.is_trace()) { 2627 return; 2628 } 2629 2630 log.trace(" Concurrent marking:"); 2631 print_ms_time_info(" ", "init marks", _init_times); 2632 print_ms_time_info(" ", "remarks", _remark_times); 2633 { 2634 print_ms_time_info(" ", "final marks", _remark_mark_times); 2635 print_ms_time_info(" ", "weak refs", _remark_weak_ref_times); 2636 2637 } 2638 print_ms_time_info(" ", "cleanups", _cleanup_times); 2639 log.trace(" Final counting total time = %8.2f s (avg = %8.2f ms).", 2640 _total_counting_time, (_cleanup_times.num() > 0 ? _total_counting_time * 1000.0 / (double)_cleanup_times.num() : 0.0)); 2641 if (G1ScrubRemSets) { 2642 log.trace(" RS scrub total time = %8.2f s (avg = %8.2f ms).", 2643 _total_rs_scrub_time, (_cleanup_times.num() > 0 ? _total_rs_scrub_time * 1000.0 / (double)_cleanup_times.num() : 0.0)); 2644 } 2645 log.trace(" Total stop_world time = %8.2f s.", 2646 (_init_times.sum() + _remark_times.sum() + _cleanup_times.sum())/1000.0); 2647 log.trace(" Total concurrent time = %8.2f s (%8.2f s marking).", 2648 cmThread()->vtime_accum(), cmThread()->vtime_mark_accum()); 2649 } 2650 2651 void G1ConcurrentMark::print_worker_threads_on(outputStream* st) const { 2652 _parallel_workers->print_worker_threads_on(st); 2653 } 2654 2655 void G1ConcurrentMark::print_on_error(outputStream* st) const { 2656 st->print_cr("Marking Bits (Prev, Next): (CMBitMap*) " PTR_FORMAT ", (CMBitMap*) " PTR_FORMAT, 2657 p2i(_prevMarkBitMap), p2i(_nextMarkBitMap)); 2658 _prevMarkBitMap->print_on_error(st, " Prev Bits: "); 2659 _nextMarkBitMap->print_on_error(st, " Next Bits: "); 2660 } 2661 2662 // We take a break if someone is trying to stop the world. 
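// Concurrent marking threads are part of the SuspendibleThreadSet:
// when a safepoint is requested, should_yield() becomes true, and the
// yield() call blocks this thread until the safepoint operation has
// completed, after which marking resumes where it left off.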
2663 bool G1ConcurrentMark::do_yield_check(uint worker_id) { 2664 if (SuspendibleThreadSet::should_yield()) { 2665 if (worker_id == 0) { 2666 _g1h->g1_policy()->record_concurrent_pause(); 2667 } 2668 SuspendibleThreadSet::yield(); 2669 return true; 2670 } else { 2671 return false; 2672 } 2673 } 2674 2675 // Closure for iteration over bitmaps 2676 class G1CMBitMapClosure : public BitMapClosure { 2677 private: 2678 // the bitmap that is being iterated over 2679 G1CMBitMap* _nextMarkBitMap; 2680 G1ConcurrentMark* _cm; 2681 G1CMTask* _task; 2682 2683 public: 2684 G1CMBitMapClosure(G1CMTask *task, G1ConcurrentMark* cm, G1CMBitMap* nextMarkBitMap) : 2685 _task(task), _cm(cm), _nextMarkBitMap(nextMarkBitMap) { } 2686 2687 bool do_bit(size_t offset) { 2688 HeapWord* addr = _nextMarkBitMap->offsetToHeapWord(offset); 2689 assert(_nextMarkBitMap->isMarked(addr), "invariant"); 2690 assert( addr < _cm->finger(), "invariant"); 2691 assert(addr >= _task->finger(), "invariant"); 2692 2693 // We move that task's local finger along. 2694 _task->move_finger_to(addr); 2695 2696 _task->scan_object(oop(addr)); 2697 // we only partially drain the local queue and global stack 2698 _task->drain_local_queue(true); 2699 _task->drain_global_stack(true); 2700 2701 // if the has_aborted flag has been raised, we need to bail out of 2702 // the iteration 2703 return !_task->has_aborted(); 2704 } 2705 }; 2706 2707 static ReferenceProcessor* get_cm_oop_closure_ref_processor(G1CollectedHeap* g1h) { 2708 ReferenceProcessor* result = NULL; 2709 if (G1UseConcMarkReferenceProcessing) { 2710 result = g1h->ref_processor_cm(); 2711 assert(result != NULL, "should not be NULL"); 2712 } 2713 return result; 2714 } 2715 2716 G1CMOopClosure::G1CMOopClosure(G1CollectedHeap* g1h, 2717 G1ConcurrentMark* cm, 2718 G1CMTask* task) 2719 : MetadataAwareOopClosure(get_cm_oop_closure_ref_processor(g1h)), 2720 _g1h(g1h), _cm(cm), _task(task) 2721 { } 2722 2723 void G1CMTask::setup_for_region(HeapRegion* hr) { 2724 assert(hr != NULL, 2725 "claim_region() should have filtered out NULL regions"); 2726 _curr_region = hr; 2727 _finger = hr->bottom(); 2728 update_region_limit(); 2729 } 2730 2731 void G1CMTask::update_region_limit() { 2732 HeapRegion* hr = _curr_region; 2733 HeapWord* bottom = hr->bottom(); 2734 HeapWord* limit = hr->next_top_at_mark_start(); 2735 2736 if (limit == bottom) { 2737 // The region was collected underneath our feet. 2738 // We set the finger to bottom to ensure that the bitmap 2739 // iteration that will follow this will not do anything. 2740 // (this is not a condition that holds when we set the region up, 2741 // as the region is not supposed to be empty in the first place) 2742 _finger = bottom; 2743 } else if (limit >= _region_limit) { 2744 assert(limit >= _finger, "peace of mind"); 2745 } else { 2746 assert(limit < _region_limit, "only way to get here"); 2747 // This can happen under some pretty unusual circumstances. An 2748 // evacuation pause empties the region underneath our feet (NTAMS 2749 // at bottom). We then do some allocation in the region (NTAMS 2750 // stays at bottom), followed by the region being used as a GC 2751 // alloc region (NTAMS will move to top() and the objects 2752 // originally below it will be grayed). All objects now marked in 2753 // the region are explicitly grayed, if below the global finger, 2754 // and we do not need in fact to scan anything else. So, we simply 2755 // set _finger to be limit to ensure that the bitmap iteration 2756 // doesn't do anything. 
2757 _finger = limit;
2758 }
2759
2760 _region_limit = limit;
2761 }
2762
2763 void G1CMTask::giveup_current_region() {
2764 assert(_curr_region != NULL, "invariant");
2765 clear_region_fields();
2766 }
2767
2768 void G1CMTask::clear_region_fields() {
2769 // Values for these three fields that indicate that we're not
2770 // holding on to a region.
2771 _curr_region = NULL;
2772 _finger = NULL;
2773 _region_limit = NULL;
2774 }
2775
2776 void G1CMTask::set_cm_oop_closure(G1CMOopClosure* cm_oop_closure) {
2777 if (cm_oop_closure == NULL) {
2778 assert(_cm_oop_closure != NULL, "invariant");
2779 } else {
2780 assert(_cm_oop_closure == NULL, "invariant");
2781 }
2782 _cm_oop_closure = cm_oop_closure;
2783 }
2784
2785 void G1CMTask::reset(G1CMBitMap* nextMarkBitMap) {
2786 guarantee(nextMarkBitMap != NULL, "invariant");
2787 _nextMarkBitMap = nextMarkBitMap;
2788 clear_region_fields();
2789
2790 _calls = 0;
2791 _elapsed_time_ms = 0.0;
2792 _termination_time_ms = 0.0;
2793 _termination_start_time_ms = 0.0;
2794 }
2795
2796 bool G1CMTask::should_exit_termination() {
2797 regular_clock_call();
2798 // This is called when we are in the termination protocol. We should
2799 // quit if, for some reason, this task wants to abort or the global
2800 // stack is not empty (this means that we can get work from it).
2801 return !_cm->mark_stack_empty() || has_aborted();
2802 }
2803
2804 void G1CMTask::reached_limit() {
2805 assert(_words_scanned >= _words_scanned_limit ||
2806 _refs_reached >= _refs_reached_limit ,
2807 "shouldn't have been called otherwise");
2808 regular_clock_call();
2809 }
2810
2811 void G1CMTask::regular_clock_call() {
2812 if (has_aborted()) return;
2813
2814 // First, we need to recalculate the words scanned and refs reached
2815 // limits for the next clock call.
2816 recalculate_limits();
2817
2818 // During the regular clock call we do the following:
2819
2820 // (1) If an overflow has been flagged, then we abort.
2821 if (_cm->has_overflown()) {
2822 set_has_aborted();
2823 return;
2824 }
2825
2826 // If we are not concurrent (i.e. we're doing remark) we don't need
2827 // to check anything else. The other steps are only needed during
2828 // the concurrent marking phase.
2829 if (!concurrent()) return;
2830
2831 // (2) If marking has been aborted for Full GC, then we also abort.
2832 if (_cm->has_aborted()) {
2833 set_has_aborted();
2834 return;
2835 }
2836
2837 double curr_time_ms = os::elapsedVTime() * 1000.0;
2838
2839 // (3) We check whether we should yield. If we have to, then we abort.
2840 if (SuspendibleThreadSet::should_yield()) {
2841 // We should yield. To do this we abort the task. The caller is
2842 // responsible for yielding.
2843 set_has_aborted();
2844 return;
2845 }
2846
2847 // (4) We check whether we've reached our time quota. If we have,
2848 // then we abort.
2849 double elapsed_time_ms = curr_time_ms - _start_time_ms;
2850 if (elapsed_time_ms > _time_target_ms) {
2851 set_has_aborted();
2852 _has_timed_out = true;
2853 return;
2854 }
2855
2856 // (5) Finally, we check whether there are enough completed SATB
2857 // buffers available for processing. If there are, we abort.
2858 SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set();
2859 if (!_draining_satb_buffers && satb_mq_set.process_completed_buffers()) {
2860 // We need to process SATB buffers, so we abort and restart
2861 // the marking task to do that.
2862 set_has_aborted();
2863 return;
2864 }
2865 }
2866
2867 void G1CMTask::recalculate_limits() {
2868 _real_words_scanned_limit = _words_scanned + words_scanned_period;
2869 _words_scanned_limit = _real_words_scanned_limit;
2870
2871 _real_refs_reached_limit = _refs_reached + refs_reached_period;
2872 _refs_reached_limit = _real_refs_reached_limit;
2873 }
2874
2875 void G1CMTask::decrease_limits() {
2876 // This is called when we believe that we're going to do an infrequent
2877 // operation which will increase the per-byte scanning cost (i.e. move
2878 // entries to/from the global stack). It basically tries to decrease the
2879 // scanning limit so that the clock is called earlier.
2880
2881 _words_scanned_limit = _real_words_scanned_limit -
2882 3 * words_scanned_period / 4;
2883 _refs_reached_limit = _real_refs_reached_limit -
2884 3 * refs_reached_period / 4;
2885 }
2886
2887 void G1CMTask::move_entries_to_global_stack() {
2888 // local array where we'll store the entries that will be popped
2889 // from the local queue
2890 oop buffer[global_stack_transfer_size];
2891
2892 int n = 0;
2893 oop obj;
2894 while (n < global_stack_transfer_size && _task_queue->pop_local(obj)) {
2895 buffer[n] = obj;
2896 ++n;
2897 }
2898
2899 if (n > 0) {
2900 // we popped at least one entry from the local queue
2901
2902 if (!_cm->mark_stack_push(buffer, n)) {
2903 set_has_aborted();
2904 }
2905 }
2906
2907 // this operation was quite expensive, so decrease the limits
2908 decrease_limits();
2909 }
2910
2911 void G1CMTask::get_entries_from_global_stack() {
2912 // local array where we'll store the entries that will be popped
2913 // from the global stack.
2914 oop buffer[global_stack_transfer_size];
2915 int n;
2916 _cm->mark_stack_pop(buffer, global_stack_transfer_size, &n);
2917 assert(n <= global_stack_transfer_size,
2918 "we should not pop more than the given limit");
2919 if (n > 0) {
2920 // yes, we did actually pop at least one entry
2921 for (int i = 0; i < n; ++i) {
2922 bool success = _task_queue->push(buffer[i]);
2923 // We only call this when the local queue is empty or under a
2924 // given target limit. So, we do not expect this push to fail.
2925 assert(success, "invariant");
2926 }
2927 }
2928
2929 // this operation was quite expensive, so decrease the limits
2930 decrease_limits();
2931 }
2932
2933 void G1CMTask::drain_local_queue(bool partially) {
2934 if (has_aborted()) return;
2935
2936 // Decide what the target size is, depending on whether we're going to
2937 // drain it partially (so that other tasks can steal if they run out
2938 // of things to do) or totally (at the very end).
2939 size_t target_size;
2940 if (partially) {
2941 target_size = MIN2((size_t)_task_queue->max_elems()/3, GCDrainStackTargetSize);
2942 } else {
2943 target_size = 0;
2944 }
2945
2946 if (_task_queue->size() > target_size) {
2947 oop obj;
2948 bool ret = _task_queue->pop_local(obj);
2949 while (ret) {
2950 assert(_g1h->is_in_g1_reserved((HeapWord*) obj), "invariant" );
2951 assert(!_g1h->is_on_master_free_list(
2952 _g1h->heap_region_containing((HeapWord*) obj)), "invariant");
2953
2954 scan_object(obj);
2955
2956 if (_task_queue->size() <= target_size || has_aborted()) {
2957 ret = false;
2958 } else {
2959 ret = _task_queue->pop_local(obj);
2960 }
2961 }
2962 }
2963 }
2964
2965 void G1CMTask::drain_global_stack(bool partially) {
2966 if (has_aborted()) return;
2967
2968 // We have a policy to drain the local queue before we attempt to
2969 // drain the global stack.
2970 assert(partially || _task_queue->size() == 0, "invariant");
2971
2972 // Decide what the target size is, depending on whether we're going to
2973 // drain it partially (so that other tasks can steal if they run out
2974 // of things to do) or totally (at the very end). Notice that,
2975 // because we move entries from the global stack in chunks or
2976 // because another task might be doing the same, we might in fact
2977 // drop below the target. But, this is not a problem.
2978 size_t target_size;
2979 if (partially) {
2980 target_size = _cm->partial_mark_stack_size_target();
2981 } else {
2982 target_size = 0;
2983 }
2984
2985 if (_cm->mark_stack_size() > target_size) {
2986 while (!has_aborted() && _cm->mark_stack_size() > target_size) {
2987 get_entries_from_global_stack();
2988 drain_local_queue(partially);
2989 }
2990 }
2991 }
2992
2993 // The SATB queue code makes several assumptions about whether to call the
2994 // par or non-par versions of its methods. This is why some of the code is
2995 // replicated. We should really get rid of the single-threaded version
2996 // of the code to simplify things.
2997 void G1CMTask::drain_satb_buffers() {
2998 if (has_aborted()) return;
2999
3000 // We set this so that the regular clock knows that we're in the
3001 // middle of draining buffers and doesn't set the abort flag when it
3002 // notices that SATB buffers are available for draining. It'd be
3003 // very counterproductive if it did that. :-)
3004 _draining_satb_buffers = true;
3005
3006 G1CMSATBBufferClosure satb_cl(this, _g1h);
3007 SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set();
3008
3009 // This keeps claiming and applying the closure to completed buffers
3010 // until we run out of buffers or we need to abort.
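// (Each call to apply_closure_to_completed_buffer() claims at most one
// completed buffer, so multiple tasks can drain the queue in parallel,
// and the regular_clock_call() between buffers keeps the time between
// abort checks bounded.)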
3011 while (!has_aborted() && 3012 satb_mq_set.apply_closure_to_completed_buffer(&satb_cl)) { 3013 regular_clock_call(); 3014 } 3015 3016 _draining_satb_buffers = false; 3017 3018 assert(has_aborted() || 3019 concurrent() || 3020 satb_mq_set.completed_buffers_num() == 0, "invariant"); 3021 3022 // again, this was a potentially expensive operation, decrease the 3023 // limits to get the regular clock call early 3024 decrease_limits(); 3025 } 3026 3027 void G1CMTask::print_stats() { 3028 log_debug(gc, stats)("Marking Stats, task = %u, calls = %d", 3029 _worker_id, _calls); 3030 log_debug(gc, stats)(" Elapsed time = %1.2lfms, Termination time = %1.2lfms", 3031 _elapsed_time_ms, _termination_time_ms); 3032 log_debug(gc, stats)(" Step Times (cum): num = %d, avg = %1.2lfms, sd = %1.2lfms", 3033 _step_times_ms.num(), _step_times_ms.avg(), 3034 _step_times_ms.sd()); 3035 log_debug(gc, stats)(" max = %1.2lfms, total = %1.2lfms", 3036 _step_times_ms.maximum(), _step_times_ms.sum()); 3037 } 3038 3039 bool G1ConcurrentMark::try_stealing(uint worker_id, int* hash_seed, oop& obj) { 3040 return _task_queues->steal(worker_id, hash_seed, obj); 3041 } 3042 3043 /***************************************************************************** 3044 3045 The do_marking_step(time_target_ms, ...) method is the building 3046 block of the parallel marking framework. It can be called in parallel 3047 with other invocations of do_marking_step() on different tasks 3048 (but only one per task, obviously) and concurrently with the 3049 mutator threads, or during remark, hence it eliminates the need 3050 for two versions of the code. When called during remark, it will 3051 pick up from where the task left off during the concurrent marking 3052 phase. Interestingly, tasks are also claimable during evacuation 3053 pauses too, since do_marking_step() ensures that it aborts before 3054 it needs to yield. 3055 3056 The data structures that it uses to do marking work are the 3057 following: 3058 3059 (1) Marking Bitmap. If there are gray objects that appear only 3060 on the bitmap (this happens either when dealing with an overflow 3061 or when the initial marking phase has simply marked the roots 3062 and didn't push them on the stack), then tasks claim heap 3063 regions whose bitmap they then scan to find gray objects. A 3064 global finger indicates where the end of the last claimed region 3065 is. A local finger indicates how far into the region a task has 3066 scanned. The two fingers are used to determine how to gray an 3067 object (i.e. whether simply marking it is OK, as it will be 3068 visited by a task in the future, or whether it needs to be also 3069 pushed on a stack). 3070 3071 (2) Local Queue. The local queue of the task which is accessed 3072 reasonably efficiently by the task. Other tasks can steal from 3073 it when they run out of work. Throughout the marking phase, a 3074 task attempts to keep its local queue short but not totally 3075 empty, so that entries are available for stealing by other 3076 tasks. Only when there is no more work, a task will totally 3077 drain its local queue. 3078 3079 (3) Global Mark Stack. This handles local queue overflow. During 3080 marking only sets of entries are moved between it and the local 3081 queues, as access to it requires a mutex and more fine-grain 3082 interaction with it which might cause contention. If it 3083 overflows, then the marking phase should restart and iterate 3084 over the bitmap to identify gray objects. 
Throughout the marking
3085 phase, tasks attempt to keep the global mark stack at a small
3086 length but not totally empty, so that entries are available for
3087 popping by other tasks. Only when there is no more work, tasks
3088 will totally drain the global mark stack.
3089
3090 (4) SATB Buffer Queue. This is where completed SATB buffers are
3091 made available. Buffers are regularly removed from this queue
3092 and scanned for roots, so that the queue doesn't get too
3093 long. During remark, all completed buffers are processed, as
3094 well as the filled in parts of any uncompleted buffers.
3095
3096 The do_marking_step() method tries to abort when the time target
3097 has been reached. There are a few other cases when the
3098 do_marking_step() method also aborts:
3099
3100 (1) When the marking phase has been aborted (after a Full GC).
3101
3102 (2) When a global overflow (on the global stack) has been
3103 triggered. Before the task aborts, it will actually sync up with
3104 the other tasks to ensure that all the marking data structures
3105 (local queues, stacks, fingers etc.) are re-initialized so that
3106 when do_marking_step() completes, the marking phase can
3107 immediately restart.
3108
3109 (3) When enough completed SATB buffers are available. The
3110 do_marking_step() method only tries to drain SATB buffers right
3111 at the beginning. So, if enough buffers are available, the
3112 marking step aborts and the SATB buffers are processed at
3113 the beginning of the next invocation.
3114
3115 (4) To yield. When we have to yield, we abort and yield
3116 right at the end of do_marking_step(). This saves us from a lot
3117 of hassle as, by yielding, we might allow a Full GC. If this
3118 happens then objects will be compacted underneath our feet, the
3119 heap might shrink, etc. We save checking for this by just
3120 aborting and doing the yield right at the end.
3121
3122 From the above it follows that the do_marking_step() method should
3123 be called in a loop (or, otherwise, regularly) until it completes.
3124
3125 If a marking step completes without its has_aborted() flag being
3126 true, it means it has completed the current marking phase (and
3127 also all other marking tasks have done so and have all synced up).
3128
3129 A method called regular_clock_call() is invoked "regularly" (in
3130 sub-ms intervals) throughout marking. It is this clock method that
3131 checks all the abort conditions which were mentioned above and
3132 decides when the task should abort. A work-based scheme is used to
3133 trigger this clock method: when the number of object words the
3134 marking phase has scanned or the number of references the marking
3135 phase has visited reach a given limit. Additional invocations of
3136 the clock method have been planted in a few other strategic places
3137 too. The initial reason for the clock method was to avoid calling
3138 vtime too regularly, as it is quite expensive. So, once it was in
3139 place, it was natural to piggy-back all the other conditions on it
3140 too and not constantly check them throughout the code.
3141
3142 If do_termination is true then do_marking_step will enter its
3143 termination protocol.
3144
3145 The value of is_serial must be true when do_marking_step is being
3146 called serially (i.e. by the VMThread) and do_marking_step should
3147 skip any synchronization in the termination and overflow code.
3148 Examples include the serial remark code and the serial reference
3149 processing closures.
void G1CMTask::do_marking_step(double time_target_ms,
                               bool do_termination,
                               bool is_serial) {
  assert(time_target_ms >= 1.0, "minimum granularity is 1ms");
  assert(concurrent() == _cm->concurrent(), "they should be the same");

  G1CollectorPolicy* g1_policy = _g1h->g1_policy();
  assert(_task_queues != NULL, "invariant");
  assert(_task_queue != NULL, "invariant");
  assert(_task_queues->queue(_worker_id) == _task_queue, "invariant");

  assert(!_claimed,
         "only one thread should claim this task at any one time");

  // OK, this doesn't safeguard against all possible scenarios, as it is
  // possible for two threads to set the _claimed flag at the same
  // time. But it is only for debugging purposes anyway and it will
  // catch most problems.
  _claimed = true;

  _start_time_ms = os::elapsedVTime() * 1000.0;

  // If do_stealing is true then do_marking_step will attempt to
  // steal work from the other G1CMTasks. It only makes sense to
  // enable stealing when the termination protocol is enabled
  // and do_marking_step() is not being called serially.
  bool do_stealing = do_termination && !is_serial;

  double diff_prediction_ms = _g1h->g1_policy()->predictor().get_new_prediction(&_marking_step_diffs_ms);
  _time_target_ms = time_target_ms - diff_prediction_ms;

  // set up the variables that are used in the work-based scheme to
  // call the regular clock method
  _words_scanned = 0;
  _refs_reached  = 0;
  recalculate_limits();

  // clear all flags
  clear_has_aborted();
  _has_timed_out = false;
  _draining_satb_buffers = false;

  ++_calls;

  // Set up the bitmap and oop closures. Anything that uses them is
  // eventually called from this method, so it is OK to allocate these
  // statically.
  G1CMBitMapClosure bitmap_closure(this, _cm, _nextMarkBitMap);
  G1CMOopClosure cm_oop_closure(_g1h, _cm, this);
  set_cm_oop_closure(&cm_oop_closure);

  if (_cm->has_overflown()) {
    // This can happen if the mark stack overflows during a GC pause
    // and this task, after a yield point, restarts. We have to abort
    // as we need to get into the overflow protocol which happens
    // right at the end of this task.
    set_has_aborted();
  }

  // First drain any available SATB buffers. After this, we will not
  // look at SATB buffers before the next invocation of this method.
  // If enough completed SATB buffers are queued up, the regular clock
  // will abort this task so that it restarts.
  drain_satb_buffers();
  // ...then partially drain the local queue and the global stack
  drain_local_queue(true);
  drain_global_stack(true);

  do {
    if (!has_aborted() && _curr_region != NULL) {
      // This means that we're already holding on to a region.
      assert(_finger != NULL, "if region is not NULL, then the finger "
             "should not be NULL either");

      // We might have restarted this task after an evacuation pause
      // which might have evacuated the region we're holding on to
      // underneath our feet. Let's read its limit again to make sure
      // that we do not iterate over a region of the heap that
      // contains garbage (update_region_limit() will also move
      // _finger to the start of the region if it is found empty).
      update_region_limit();
      // We will start from _finger not from the start of the region,
      // as we might be restarting this task after aborting half-way
      // through scanning this region. In this case, _finger points to
      // the address where we last found a marked object. If this is a
      // fresh region, _finger points to start().
      MemRegion mr = MemRegion(_finger, _region_limit);

      assert(!_curr_region->is_humongous() || mr.start() == _curr_region->bottom(),
             "humongous regions should go around loop once only");

      // Some special cases:
      // If the memory region is empty, we can just give up the region.
      // If the current region is humongous then we only need to check
      // the bitmap for the bit associated with the start of the object,
      // scan the object if it's live, and give up the region.
      // Otherwise, let's iterate over the bitmap of the part of the region
      // that is left.
      // If the iteration is successful, give up the region.
      if (mr.is_empty()) {
        giveup_current_region();
        regular_clock_call();
      } else if (_curr_region->is_humongous() && mr.start() == _curr_region->bottom()) {
        if (_nextMarkBitMap->isMarked(mr.start())) {
          // The object is marked - apply the closure
          BitMap::idx_t offset = _nextMarkBitMap->heapWordToOffset(mr.start());
          bitmap_closure.do_bit(offset);
        }
        // Even if this task aborted while scanning the humongous object
        // we can (and should) give up the current region.
        giveup_current_region();
        regular_clock_call();
      } else if (_nextMarkBitMap->iterate(&bitmap_closure, mr)) {
        giveup_current_region();
        regular_clock_call();
      } else {
        assert(has_aborted(), "currently the only way to do so");
        // The only way to abort the bitmap iteration is to return
        // false from the do_bit() method. However, inside the
        // do_bit() method we move the _finger to point to the
        // object currently being looked at. So, if we bail out, we
        // have definitely set _finger to something non-null.
        assert(_finger != NULL, "invariant");

        // Region iteration was actually aborted. So now _finger
        // points to the address of the object we last scanned. If we
        // leave it there, when we restart this task, we will rescan
        // the object. It is easy to avoid this. We move the finger by
        // enough to point to the next possible object header (the
        // bitmap knows by how much we need to move it as it knows its
        // granularity).
        assert(_finger < _region_limit, "invariant");
        HeapWord* new_finger = _nextMarkBitMap->nextObject(_finger);
        // Check if bitmap iteration was aborted while scanning the last object
        if (new_finger >= _region_limit) {
          giveup_current_region();
        } else {
          move_finger_to(new_finger);
        }
      }
    }
    // At this point we have either completed iterating over the
    // region we were holding on to, or we have aborted.
    // We then partially drain the local queue and the global stack.
    // (Do we really need this?)
    drain_local_queue(true);
    drain_global_stack(true);

    // Read the note on the claim_region() method on why it might
    // return NULL with potentially more regions available for
    // claiming and why we have to check out_of_regions() to determine
    // whether we're done or not.
    while (!has_aborted() && _curr_region == NULL && !_cm->out_of_regions()) {
      // We are going to try to claim a new region. We should have
      // given up on the previous one.
      // Separated the asserts so that we know which one fires.
      assert(_curr_region == NULL, "invariant");
      assert(_finger == NULL, "invariant");
      assert(_region_limit == NULL, "invariant");
      HeapRegion* claimed_region = _cm->claim_region(_worker_id);
      if (claimed_region != NULL) {
        // Yes, we managed to claim one
        setup_for_region(claimed_region);
        assert(_curr_region == claimed_region, "invariant");
      }
      // It is important to call the regular clock here. It might take
      // a while to claim a region if, for example, we hit a large
      // block of empty regions. So we need to call the regular clock
      // method once round the loop to make sure it's called
      // frequently enough.
      regular_clock_call();
    }

    if (!has_aborted() && _curr_region == NULL) {
      assert(_cm->out_of_regions(),
             "at this point we should be out of regions");
    }
  } while (_curr_region != NULL && !has_aborted());

  if (!has_aborted()) {
    // We cannot check whether the global stack is empty, since other
    // tasks might be pushing objects to it concurrently.
    assert(_cm->out_of_regions(),
           "at this point we should be out of regions");
    // Try to reduce the number of available SATB buffers so that
    // remark has less work to do.
    drain_satb_buffers();
  }

  // Since we've done everything else, we can now totally drain the
  // local queue and global stack.
  drain_local_queue(false);
  drain_global_stack(false);

  // Attempt work stealing from other tasks' queues.
  if (do_stealing && !has_aborted()) {
    // We have not aborted. This means that we have finished all that
    // we could. Let's try to do some stealing...

    // We cannot check whether the global stack is empty, since other
    // tasks might be pushing objects to it concurrently.
    assert(_cm->out_of_regions() && _task_queue->size() == 0,
           "only way to reach here");
    while (!has_aborted()) {
      oop obj;
      if (_cm->try_stealing(_worker_id, &_hash_seed, obj)) {
        assert(_nextMarkBitMap->isMarked((HeapWord*) obj),
               "any stolen object should be marked");
        scan_object(obj);

        // And since we're towards the end, let's totally drain the
        // local queue and global stack.
        drain_local_queue(false);
        drain_global_stack(false);
      } else {
        break;
      }
    }
  }

  // We still haven't aborted. Now, let's try to get into the
  // termination protocol.
  if (do_termination && !has_aborted()) {
    // We cannot check whether the global stack is empty, since other
    // tasks might be concurrently pushing objects on it.
    // Separated the asserts so that we know which one fires.
    assert(_cm->out_of_regions(), "only way to reach here");
    assert(_task_queue->size() == 0, "only way to reach here");
    _termination_start_time_ms = os::elapsedVTime() * 1000.0;

    // The G1CMTask class also extends the TerminatorTerminator class,
    // hence its should_exit_termination() method will also decide
    // whether to exit the termination protocol or not.
    bool finished = (is_serial ||
                     _cm->terminator()->offer_termination(this));
    double termination_end_time_ms = os::elapsedVTime() * 1000.0;
    _termination_time_ms +=
      termination_end_time_ms - _termination_start_time_ms;

    if (finished) {
      // We're all done.

      if (_worker_id == 0) {
        // let's allow task 0 to do this
        if (concurrent()) {
          assert(_cm->concurrent_marking_in_progress(), "invariant");
          // we need to set this to false before the next
          // safepoint. This way we ensure that the marking phase
          // doesn't observe any more heap expansions.
          _cm->clear_concurrent_marking_in_progress();
        }
      }

      // We can now guarantee that the global stack is empty, since
      // all other tasks have finished. We separated the guarantees so
      // that, if a condition is false, we can immediately find out
      // which one.
      guarantee(_cm->out_of_regions(), "only way to reach here");
      guarantee(_cm->mark_stack_empty(), "only way to reach here");
      guarantee(_task_queue->size() == 0, "only way to reach here");
      guarantee(!_cm->has_overflown(), "only way to reach here");
      guarantee(!_cm->mark_stack_overflow(), "only way to reach here");
    } else {
      // Apparently there's more work to do. Let's abort this task. Our
      // caller will restart it and we can hopefully find more things to do.
      set_has_aborted();
    }
  }

  // Mainly for debugging purposes to make sure that a pointer to the
  // closure which was statically allocated in this frame doesn't
  // escape it by accident.
  set_cm_oop_closure(NULL);
  double end_time_ms = os::elapsedVTime() * 1000.0;
  double elapsed_time_ms = end_time_ms - _start_time_ms;
  // Update the step history.
  _step_times_ms.add(elapsed_time_ms);

  if (has_aborted()) {
    // The task was aborted for some reason.
    if (_has_timed_out) {
      double diff_ms = elapsed_time_ms - _time_target_ms;
      // Keep statistics of how well we did with respect to hitting
      // our target only if we actually timed out (if we aborted for
      // other reasons, then the results might get skewed).
      _marking_step_diffs_ms.add(diff_ms);
    }

    if (_cm->has_overflown()) {
      // This is the interesting one. We aborted because a global
      // overflow was raised. This means we have to restart the
      // marking phase and start iterating over regions. However, in
      // order to do this we have to make sure that all tasks stop
      // what they are doing and re-initialize in a safe manner. We
      // will achieve this with the use of two barrier sync points.

      if (!is_serial) {
        // We only need to enter the sync barrier if being called
        // from a parallel context
        _cm->enter_first_sync_barrier(_worker_id);

        // When we exit this sync barrier we know that all tasks have
        // stopped doing marking work. So, it's now safe to
        // re-initialize our data structures. At the end of this method,
        // task 0 will clear the global data structures.
      }

      // We clear the local state of this task...
      clear_region_fields();

      if (!is_serial) {
        // ...and enter the second barrier.
        _cm->enter_second_sync_barrier(_worker_id);
      }
      // At this point, if we're in the concurrent phase of
      // marking, everything has been re-initialized and we're
      // ready to restart.
    }
  }

  _claimed = false;
}
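// The overflow handshake at the end of do_marking_step(), in miniature
// (an illustrative sketch, not the actual barrier implementation;
// "reset local state" stands in for clear_region_fields() and the
// task-0 reset of the global structures):
//
//   _cm->enter_first_sync_barrier(_worker_id);   // wait until every task
//                                                // has stopped marking
//   /* reset local state: safe, nobody touches shared state here */
//   _cm->enter_second_sync_barrier(_worker_id);  // wait until every task
//                                                // has re-initialized
//   // all tasks leave together; marking can restart from the bitmap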
G1CMTask::G1CMTask(uint worker_id,
                   G1ConcurrentMark* cm,
                   size_t* marked_bytes,
                   BitMap* card_bm,
                   G1CMTaskQueue* task_queue,
                   G1CMTaskQueueSet* task_queues)
  : _g1h(G1CollectedHeap::heap()),
    _worker_id(worker_id), _cm(cm),
    _claimed(false),
    _nextMarkBitMap(NULL), _hash_seed(17),
    _task_queue(task_queue),
    _task_queues(task_queues),
    _cm_oop_closure(NULL),
    _marked_bytes_array(marked_bytes),
    _card_bm(card_bm) {
  guarantee(task_queue != NULL, "invariant");
  guarantee(task_queues != NULL, "invariant");

  _marking_step_diffs_ms.add(0.5);
}

// These are formatting macros that are used below to ensure
// consistent formatting. The *_H_* versions are used to format the
// header for a particular value and they should be kept consistent
// with the corresponding macro. Also note that most of the macros add
// the necessary white space (as a prefix) which makes them a bit
// easier to compose.

// All the output lines are prefixed with this string to be able to
// identify them easily in a large log file.
#define G1PPRL_LINE_PREFIX             "###"

#define G1PPRL_ADDR_BASE_FORMAT        " " PTR_FORMAT "-" PTR_FORMAT
#ifdef _LP64
#define G1PPRL_ADDR_BASE_H_FORMAT      " %37s"
#else // _LP64
#define G1PPRL_ADDR_BASE_H_FORMAT      " %21s"
#endif // _LP64

// For per-region info
#define G1PPRL_TYPE_FORMAT             " %-4s"
#define G1PPRL_TYPE_H_FORMAT           " %4s"
#define G1PPRL_BYTE_FORMAT             " " SIZE_FORMAT_W(9)
#define G1PPRL_BYTE_H_FORMAT           " %9s"
#define G1PPRL_DOUBLE_FORMAT           " %14.1f"
#define G1PPRL_DOUBLE_H_FORMAT         " %14s"

// For summary info
#define G1PPRL_SUM_ADDR_FORMAT(tag)    " " tag ":" G1PPRL_ADDR_BASE_FORMAT
#define G1PPRL_SUM_BYTE_FORMAT(tag)    " " tag ": " SIZE_FORMAT
#define G1PPRL_SUM_MB_FORMAT(tag)      " " tag ": %1.2f MB"
#define G1PPRL_SUM_MB_PERC_FORMAT(tag) G1PPRL_SUM_MB_FORMAT(tag) " / %1.2f %%"
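// For example (illustrative only; the exact expansion of SIZE_FORMAT_W
// is platform-dependent), string-literal concatenation turns
//
//   G1PPRL_LINE_PREFIX G1PPRL_TYPE_FORMAT G1PPRL_BYTE_FORMAT
//
// into a single format string along the lines of "### %-4s %9zu", so a
// hypothetical call such as
//
//   log_trace(gc, liveness)(G1PPRL_LINE_PREFIX
//                           G1PPRL_TYPE_FORMAT
//                           G1PPRL_BYTE_FORMAT,
//                           "OLD", used_bytes);
//
// would print a line roughly like "### OLD    1048576".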
G1PrintRegionLivenessInfoClosure::
G1PrintRegionLivenessInfoClosure(const char* phase_name)
  : _total_used_bytes(0), _total_capacity_bytes(0),
    _total_prev_live_bytes(0), _total_next_live_bytes(0),
    _hum_used_bytes(0), _hum_capacity_bytes(0),
    _hum_prev_live_bytes(0), _hum_next_live_bytes(0),
    _total_remset_bytes(0), _total_strong_code_roots_bytes(0) {
  G1CollectedHeap* g1h = G1CollectedHeap::heap();
  MemRegion g1_reserved = g1h->g1_reserved();
  double now = os::elapsedTime();

  // Print the header of the output.
  log_trace(gc, liveness)(G1PPRL_LINE_PREFIX" PHASE %s @ %1.3f", phase_name, now);
  log_trace(gc, liveness)(G1PPRL_LINE_PREFIX" HEAP"
                          G1PPRL_SUM_ADDR_FORMAT("reserved")
                          G1PPRL_SUM_BYTE_FORMAT("region-size"),
                          p2i(g1_reserved.start()), p2i(g1_reserved.end()),
                          HeapRegion::GrainBytes);
  log_trace(gc, liveness)(G1PPRL_LINE_PREFIX);
  log_trace(gc, liveness)(G1PPRL_LINE_PREFIX
                          G1PPRL_TYPE_H_FORMAT
                          G1PPRL_ADDR_BASE_H_FORMAT
                          G1PPRL_BYTE_H_FORMAT
                          G1PPRL_BYTE_H_FORMAT
                          G1PPRL_BYTE_H_FORMAT
                          G1PPRL_DOUBLE_H_FORMAT
                          G1PPRL_BYTE_H_FORMAT
                          G1PPRL_BYTE_H_FORMAT,
                          "type", "address-range",
                          "used", "prev-live", "next-live", "gc-eff",
                          "remset", "code-roots");
  log_trace(gc, liveness)(G1PPRL_LINE_PREFIX
                          G1PPRL_TYPE_H_FORMAT
                          G1PPRL_ADDR_BASE_H_FORMAT
                          G1PPRL_BYTE_H_FORMAT
                          G1PPRL_BYTE_H_FORMAT
                          G1PPRL_BYTE_H_FORMAT
                          G1PPRL_DOUBLE_H_FORMAT
                          G1PPRL_BYTE_H_FORMAT
                          G1PPRL_BYTE_H_FORMAT,
                          "", "",
                          "(bytes)", "(bytes)", "(bytes)", "(bytes/ms)",
                          "(bytes)", "(bytes)");
}

// It takes as a parameter a pointer to one of the _hum_* fields, it
// deduces the corresponding value for a region in a humongous region
// series (either the region size, or what's left if the _hum_* field
// is < the region size), and updates the _hum_* field accordingly.
size_t G1PrintRegionLivenessInfoClosure::get_hum_bytes(size_t* hum_bytes) {
  size_t bytes = 0;
  // The > 0 check is to deal with the prev and next live bytes which
  // could be 0.
  if (*hum_bytes > 0) {
    bytes = MIN2(HeapRegion::GrainBytes, *hum_bytes);
    *hum_bytes -= bytes;
  }
  return bytes;
}

// It deduces the values for a region in a humongous region series
// from the _hum_* fields and updates those accordingly. It assumes
// that the _hum_* fields have already been set up from the "starts
// humongous" region and that we visit the regions in address order.
void G1PrintRegionLivenessInfoClosure::get_hum_bytes(size_t* used_bytes,
                                                     size_t* capacity_bytes,
                                                     size_t* prev_live_bytes,
                                                     size_t* next_live_bytes) {
  assert(_hum_used_bytes > 0 && _hum_capacity_bytes > 0, "pre-condition");
  *used_bytes      = get_hum_bytes(&_hum_used_bytes);
  *capacity_bytes  = get_hum_bytes(&_hum_capacity_bytes);
  *prev_live_bytes = get_hum_bytes(&_hum_prev_live_bytes);
  *next_live_bytes = get_hum_bytes(&_hum_next_live_bytes);
}
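// A worked example of the distribution above (sizes assumed for
// illustration, with HeapRegion::GrainBytes == 1M): a humongous object
// of 2.5M seeds _hum_used_bytes = 2.5M via its "starts humongous"
// region. Visiting the three regions of the series in address order,
// the successive get_hum_bytes() calls return 1M, 1M and 0.5M,
// leaving the field at zero, ready for the next series.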
bool G1PrintRegionLivenessInfoClosure::doHeapRegion(HeapRegion* r) {
  const char* type       = r->get_type_str();
  HeapWord* bottom       = r->bottom();
  HeapWord* end          = r->end();
  size_t capacity_bytes  = r->capacity();
  size_t used_bytes      = r->used();
  size_t prev_live_bytes = r->live_bytes();
  size_t next_live_bytes = r->next_live_bytes();
  double gc_eff          = r->gc_efficiency();
  size_t remset_bytes    = r->rem_set()->mem_size();
  size_t strong_code_roots_bytes = r->rem_set()->strong_code_roots_mem_size();

  if (r->is_starts_humongous()) {
    assert(_hum_used_bytes == 0 && _hum_capacity_bytes == 0 &&
           _hum_prev_live_bytes == 0 && _hum_next_live_bytes == 0,
           "they should have been zeroed after the last time we used them");
    // Set up the _hum_* fields.
    _hum_capacity_bytes  = capacity_bytes;
    _hum_used_bytes      = used_bytes;
    _hum_prev_live_bytes = prev_live_bytes;
    _hum_next_live_bytes = next_live_bytes;
    get_hum_bytes(&used_bytes, &capacity_bytes,
                  &prev_live_bytes, &next_live_bytes);
    end = bottom + HeapRegion::GrainWords;
  } else if (r->is_continues_humongous()) {
    get_hum_bytes(&used_bytes, &capacity_bytes,
                  &prev_live_bytes, &next_live_bytes);
    assert(end == bottom + HeapRegion::GrainWords, "invariant");
  }

  _total_used_bytes      += used_bytes;
  _total_capacity_bytes  += capacity_bytes;
  _total_prev_live_bytes += prev_live_bytes;
  _total_next_live_bytes += next_live_bytes;
  _total_remset_bytes    += remset_bytes;
  _total_strong_code_roots_bytes += strong_code_roots_bytes;

  // Print a line for this particular region.
  log_trace(gc, liveness)(G1PPRL_LINE_PREFIX
                          G1PPRL_TYPE_FORMAT
                          G1PPRL_ADDR_BASE_FORMAT
                          G1PPRL_BYTE_FORMAT
                          G1PPRL_BYTE_FORMAT
                          G1PPRL_BYTE_FORMAT
                          G1PPRL_DOUBLE_FORMAT
                          G1PPRL_BYTE_FORMAT
                          G1PPRL_BYTE_FORMAT,
                          type, p2i(bottom), p2i(end),
                          used_bytes, prev_live_bytes, next_live_bytes, gc_eff,
                          remset_bytes, strong_code_roots_bytes);

  return false;
}

G1PrintRegionLivenessInfoClosure::~G1PrintRegionLivenessInfoClosure() {
  // add static memory usages to remembered set sizes
  _total_remset_bytes += HeapRegionRemSet::fl_mem_size() + HeapRegionRemSet::static_mem_size();
  // Print the footer of the output.
  log_trace(gc, liveness)(G1PPRL_LINE_PREFIX);
  log_trace(gc, liveness)(G1PPRL_LINE_PREFIX
                          " SUMMARY"
                          G1PPRL_SUM_MB_FORMAT("capacity")
                          G1PPRL_SUM_MB_PERC_FORMAT("used")
                          G1PPRL_SUM_MB_PERC_FORMAT("prev-live")
                          G1PPRL_SUM_MB_PERC_FORMAT("next-live")
                          G1PPRL_SUM_MB_FORMAT("remset")
                          G1PPRL_SUM_MB_FORMAT("code-roots"),
                          bytes_to_mb(_total_capacity_bytes),
                          bytes_to_mb(_total_used_bytes),
                          perc(_total_used_bytes, _total_capacity_bytes),
                          bytes_to_mb(_total_prev_live_bytes),
                          perc(_total_prev_live_bytes, _total_capacity_bytes),
                          bytes_to_mb(_total_next_live_bytes),
                          perc(_total_next_live_bytes, _total_capacity_bytes),
                          bytes_to_mb(_total_remset_bytes),
                          bytes_to_mb(_total_strong_code_roots_bytes));
}