/*
 * Copyright (c) 2001, 2017, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "classfile/metadataOnStackMark.hpp"
#include "classfile/symbolTable.hpp"
#include "code/codeCache.hpp"
#include "gc/g1/concurrentMarkThread.inline.hpp"
#include "gc/g1/g1CollectedHeap.inline.hpp"
#include "gc/g1/g1CollectorState.hpp"
#include "gc/g1/g1ConcurrentMark.inline.hpp"
#include "gc/g1/g1HeapVerifier.hpp"
#include "gc/g1/g1OopClosures.inline.hpp"
#include "gc/g1/g1CardLiveData.inline.hpp"
#include "gc/g1/g1Policy.hpp"
#include "gc/g1/g1StringDedup.hpp"
#include "gc/g1/heapRegion.inline.hpp"
#include "gc/g1/heapRegionRemSet.hpp"
#include "gc/g1/heapRegionSet.inline.hpp"
#include "gc/shared/gcId.hpp"
#include "gc/shared/gcTimer.hpp"
#include "gc/shared/gcTrace.hpp"
#include "gc/shared/gcTraceTime.inline.hpp"
#include "gc/shared/genOopClosures.inline.hpp"
#include "gc/shared/referencePolicy.hpp"
#include "gc/shared/strongRootsScope.hpp"
#include "gc/shared/suspendibleThreadSet.hpp"
#include "gc/shared/taskqueue.inline.hpp"
#include "gc/shared/vmGCOperations.hpp"
#include "gc/shared/weakProcessor.hpp"
#include "logging/log.hpp"
#include "memory/allocation.hpp"
#include "memory/resourceArea.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/atomic.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/java.hpp"
#include "runtime/prefetch.inline.hpp"
#include "services/memTracker.hpp"
#include "utilities/align.hpp"
#include "utilities/growableArray.hpp"

bool G1CMBitMapClosure::do_addr(HeapWord* const addr) {
  assert(addr < _cm->finger(), "invariant");
  assert(addr >= _task->finger(), "invariant");

  // We move that task's local finger along.
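  // Advancing the local finger here keeps the invariant checked by the
  // asserts above (task finger <= addr < global finger) intact for the
  // next invocation of this closure.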
  _task->move_finger_to(addr);

  _task->scan_task_entry(G1TaskQueueEntry::from_oop(oop(addr)));
  // we only partially drain the local queue and global stack
  _task->drain_local_queue(true);
  _task->drain_global_stack(true);

  // if the has_aborted flag has been raised, we need to bail out of
  // the iteration
  return !_task->has_aborted();
}

G1CMMarkStack::G1CMMarkStack() :
  _max_chunk_capacity(0),
  _base(NULL),
  _chunk_capacity(0) {
  set_empty();
}

bool G1CMMarkStack::resize(size_t new_capacity) {
  assert(is_empty(), "Only resize when stack is empty.");
  assert(new_capacity <= _max_chunk_capacity,
         "Trying to resize stack to " SIZE_FORMAT " chunks when the maximum is " SIZE_FORMAT, new_capacity, _max_chunk_capacity);

  TaskQueueEntryChunk* new_base = MmapArrayAllocator<TaskQueueEntryChunk>::allocate_or_null(new_capacity, mtGC);

  if (new_base == NULL) {
    log_warning(gc)("Failed to reserve memory for new overflow mark stack with " SIZE_FORMAT " chunks and size " SIZE_FORMAT "B.", new_capacity, new_capacity * sizeof(TaskQueueEntryChunk));
    return false;
  }
  // Release old mapping.
  if (_base != NULL) {
    MmapArrayAllocator<TaskQueueEntryChunk>::free(_base, _chunk_capacity);
  }

  _base = new_base;
  _chunk_capacity = new_capacity;
  set_empty();

  return true;
}

size_t G1CMMarkStack::capacity_alignment() {
  return (size_t)lcm(os::vm_allocation_granularity(), sizeof(TaskQueueEntryChunk)) / sizeof(G1TaskQueueEntry);
}

bool G1CMMarkStack::initialize(size_t initial_capacity, size_t max_capacity) {
  guarantee(_max_chunk_capacity == 0, "G1CMMarkStack already initialized.");

  size_t const TaskEntryChunkSizeInVoidStar = sizeof(TaskQueueEntryChunk) / sizeof(G1TaskQueueEntry);

  _max_chunk_capacity = align_up(max_capacity, capacity_alignment()) / TaskEntryChunkSizeInVoidStar;
  size_t initial_chunk_capacity = align_up(initial_capacity, capacity_alignment()) / TaskEntryChunkSizeInVoidStar;

  guarantee(initial_chunk_capacity <= _max_chunk_capacity,
            "Maximum chunk capacity " SIZE_FORMAT " smaller than initial capacity " SIZE_FORMAT,
            _max_chunk_capacity,
            initial_chunk_capacity);

  log_debug(gc)("Initialize mark stack with " SIZE_FORMAT " chunks, maximum " SIZE_FORMAT,
                initial_chunk_capacity, _max_chunk_capacity);

  return resize(initial_chunk_capacity);
}

void G1CMMarkStack::expand() {
  if (_chunk_capacity == _max_chunk_capacity) {
    log_debug(gc)("Can not expand overflow mark stack further, already at maximum capacity of " SIZE_FORMAT " chunks.", _chunk_capacity);
    return;
  }
  size_t old_capacity = _chunk_capacity;
  // Double capacity if possible
  size_t new_capacity = MIN2(old_capacity * 2, _max_chunk_capacity);

  if (resize(new_capacity)) {
    log_debug(gc)("Expanded mark stack capacity from " SIZE_FORMAT " to " SIZE_FORMAT " chunks",
                  old_capacity, new_capacity);
  } else {
    log_warning(gc)("Failed to expand mark stack capacity from " SIZE_FORMAT " to " SIZE_FORMAT " chunks",
                    old_capacity, new_capacity);
  }
}

G1CMMarkStack::~G1CMMarkStack() {
  if (_base != NULL) {
    MmapArrayAllocator<TaskQueueEntryChunk>::free(_base, _chunk_capacity);
  }
}

void G1CMMarkStack::add_chunk_to_list(TaskQueueEntryChunk* volatile* list, TaskQueueEntryChunk* elem) {
  elem->next = *list;
  *list = elem;
}

void G1CMMarkStack::add_chunk_to_chunk_list(TaskQueueEntryChunk* elem) {
  MutexLockerEx x(MarkStackChunkList_lock, Mutex::_no_safepoint_check_flag);
  add_chunk_to_list(&_chunk_list, elem);
  _chunks_in_chunk_list++;
}

void G1CMMarkStack::add_chunk_to_free_list(TaskQueueEntryChunk* elem) {
  MutexLockerEx x(MarkStackFreeList_lock, Mutex::_no_safepoint_check_flag);
  add_chunk_to_list(&_free_list, elem);
}

G1CMMarkStack::TaskQueueEntryChunk* G1CMMarkStack::remove_chunk_from_list(TaskQueueEntryChunk* volatile* list) {
  TaskQueueEntryChunk* result = *list;
  if (result != NULL) {
    *list = (*list)->next;
  }
  return result;
}

G1CMMarkStack::TaskQueueEntryChunk* G1CMMarkStack::remove_chunk_from_chunk_list() {
  MutexLockerEx x(MarkStackChunkList_lock, Mutex::_no_safepoint_check_flag);
  TaskQueueEntryChunk* result = remove_chunk_from_list(&_chunk_list);
  if (result != NULL) {
    _chunks_in_chunk_list--;
  }
  return result;
}

G1CMMarkStack::TaskQueueEntryChunk* G1CMMarkStack::remove_chunk_from_free_list() {
  MutexLockerEx x(MarkStackFreeList_lock, Mutex::_no_safepoint_check_flag);
  return remove_chunk_from_list(&_free_list);
}

G1CMMarkStack::TaskQueueEntryChunk* G1CMMarkStack::allocate_new_chunk() {
  // This dirty read of _hwm is okay because we only ever increase the _hwm in parallel code.
  // Further this limits _hwm to a value of _chunk_capacity + #threads, avoiding
  // wraparound of _hwm.
  if (_hwm >= _chunk_capacity) {
    return NULL;
  }

  size_t cur_idx = Atomic::add(1u, &_hwm) - 1;
  if (cur_idx >= _chunk_capacity) {
    return NULL;
  }

  TaskQueueEntryChunk* result = ::new (&_base[cur_idx]) TaskQueueEntryChunk;
  result->next = NULL;
  return result;
}

bool G1CMMarkStack::par_push_chunk(G1TaskQueueEntry* ptr_arr) {
  // Get a new chunk.
  TaskQueueEntryChunk* new_chunk = remove_chunk_from_free_list();

  if (new_chunk == NULL) {
    // Did not get a chunk from the free list. Allocate from backing memory.
    new_chunk = allocate_new_chunk();

    if (new_chunk == NULL) {
      return false;
    }
  }

  Copy::conjoint_memory_atomic(ptr_arr, new_chunk->data, EntriesPerChunk * sizeof(G1TaskQueueEntry));

  add_chunk_to_chunk_list(new_chunk);

  return true;
}

bool G1CMMarkStack::par_pop_chunk(G1TaskQueueEntry* ptr_arr) {
  TaskQueueEntryChunk* cur = remove_chunk_from_chunk_list();

  if (cur == NULL) {
    return false;
  }

  Copy::conjoint_memory_atomic(cur->data, ptr_arr, EntriesPerChunk * sizeof(G1TaskQueueEntry));

  add_chunk_to_free_list(cur);
  return true;
}

void G1CMMarkStack::set_empty() {
  _chunks_in_chunk_list = 0;
  _hwm = 0;
  _chunk_list = NULL;
  _free_list = NULL;
}

G1CMRootRegions::G1CMRootRegions() :
  _cm(NULL), _scan_in_progress(false),
  _should_abort(false), _claimed_survivor_index(0) { }

void G1CMRootRegions::init(const G1SurvivorRegions* survivors, G1ConcurrentMark* cm) {
  _survivors = survivors;
  _cm = cm;
}

void G1CMRootRegions::prepare_for_scan() {
  assert(!scan_in_progress(), "pre-condition");

  // Currently, only survivors can be root regions.
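  // Reset the claim index so that claim_next() starts handing out
  // survivor regions from the beginning of the survivor list again.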
  _claimed_survivor_index = 0;
  _scan_in_progress = _survivors->regions()->is_nonempty();
  _should_abort = false;
}

HeapRegion* G1CMRootRegions::claim_next() {
  if (_should_abort) {
    // If someone has set the should_abort flag, we return NULL to
    // force the caller to bail out of their loop.
    return NULL;
  }

  // Currently, only survivors can be root regions.
  const GrowableArray<HeapRegion*>* survivor_regions = _survivors->regions();

  int claimed_index = Atomic::add(1, &_claimed_survivor_index) - 1;
  if (claimed_index < survivor_regions->length()) {
    return survivor_regions->at(claimed_index);
  }
  return NULL;
}

uint G1CMRootRegions::num_root_regions() const {
  return (uint)_survivors->regions()->length();
}

void G1CMRootRegions::notify_scan_done() {
  MutexLockerEx x(RootRegionScan_lock, Mutex::_no_safepoint_check_flag);
  _scan_in_progress = false;
  RootRegionScan_lock->notify_all();
}

void G1CMRootRegions::cancel_scan() {
  notify_scan_done();
}

void G1CMRootRegions::scan_finished() {
  assert(scan_in_progress(), "pre-condition");

  // Currently, only survivors can be root regions.
  if (!_should_abort) {
    assert(_claimed_survivor_index >= 0, "otherwise comparison is invalid: %d", _claimed_survivor_index);
    assert((uint)_claimed_survivor_index >= _survivors->length(),
           "we should have claimed all survivors, claimed index = %u, length = %u",
           (uint)_claimed_survivor_index, _survivors->length());
  }

  notify_scan_done();
}

bool G1CMRootRegions::wait_until_scan_finished() {
  if (!scan_in_progress()) return false;

  {
    MutexLockerEx x(RootRegionScan_lock, Mutex::_no_safepoint_check_flag);
    while (scan_in_progress()) {
      RootRegionScan_lock->wait(Mutex::_no_safepoint_check_flag);
    }
  }
  return true;
}

// Returns the maximum number of workers to be used in a concurrent
// phase based on the number of GC workers being used in a STW
// phase.
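// The ratio below gives roughly one concurrent worker for every four
// STW workers, with a minimum of one.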
static uint scale_concurrent_worker_threads(uint num_gc_workers) {
  return MAX2((num_gc_workers + 2) / 4, 1U);
}

G1ConcurrentMark::G1ConcurrentMark(G1CollectedHeap* g1h,
                                   G1RegionToSpaceMapper* prev_bitmap_storage,
                                   G1RegionToSpaceMapper* next_bitmap_storage) :
  // _cm_thread set inside the constructor
  _g1h(g1h),
  _completed_initialization(false),

  _cleanup_list("Concurrent Mark Cleanup List"),
  _mark_bitmap_1(),
  _mark_bitmap_2(),
  _prev_mark_bitmap(&_mark_bitmap_1),
  _next_mark_bitmap(&_mark_bitmap_2),

  _heap_start(_g1h->reserved_region().start()),
  _heap_end(_g1h->reserved_region().end()),

  _root_regions(),

  _global_mark_stack(),

  // _finger set in set_non_marking_state

  _max_num_tasks(ParallelGCThreads),
  // _num_active_tasks set in set_non_marking_state()
  // _tasks set inside the constructor

  _task_queues(new G1CMTaskQueueSet((int) _max_num_tasks)),
  _terminator(ParallelTaskTerminator((int) _max_num_tasks, _task_queues)),

  _first_overflow_barrier_sync(),
  _second_overflow_barrier_sync(),

  _has_overflown(false),
  _concurrent(false),
  _has_aborted(false),
  _restart_for_overflow(false),
  _concurrent_marking_in_progress(false),
  _gc_timer_cm(new (ResourceObj::C_HEAP, mtGC) ConcurrentGCTimer()),
  _gc_tracer_cm(new (ResourceObj::C_HEAP, mtGC) G1OldTracer()),

  // _verbose_level set below

  _init_times(),
  _remark_times(),
  _remark_mark_times(),
  _remark_weak_ref_times(),
  _cleanup_times(),
  _total_counting_time(0.0),
  _total_rs_scrub_time(0.0),

  _accum_task_vtime(NULL),

  _concurrent_workers(NULL),
  _num_concurrent_workers(0),
  _max_concurrent_workers(0)
{
  _mark_bitmap_1.initialize(g1h->reserved_region(), prev_bitmap_storage);
  _mark_bitmap_2.initialize(g1h->reserved_region(), next_bitmap_storage);

  // Create & start ConcurrentMark thread.
  _cm_thread = new ConcurrentMarkThread(this);
  if (_cm_thread->osthread() == NULL) {
    vm_shutdown_during_initialization("Could not create ConcurrentMarkThread");
  }

  assert(CGC_lock != NULL, "CGC_lock must be initialized");

  SATBMarkQueueSet& satb_qs = JavaThread::satb_mark_queue_set();
  satb_qs.set_buffer_size(G1SATBBufferSize);

  _root_regions.init(_g1h->survivor(), this);

  if (FLAG_IS_DEFAULT(ConcGCThreads) || ConcGCThreads == 0) {
    // Calculate the number of concurrent worker threads by scaling
    // the number of parallel GC threads.
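    // For example, with ParallelGCThreads == 8 this yields
    // (8 + 2) / 4 == 2 concurrent marking threads.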
    uint marking_thread_num = scale_concurrent_worker_threads(ParallelGCThreads);
    FLAG_SET_ERGO(uint, ConcGCThreads, marking_thread_num);
  }

  assert(ConcGCThreads > 0, "ConcGCThreads have been set.");
  if (ConcGCThreads > ParallelGCThreads) {
    log_warning(gc)("More ConcGCThreads (%u) than ParallelGCThreads (%u).",
                    ConcGCThreads, ParallelGCThreads);
    return;
  }

  log_debug(gc)("ConcGCThreads: %u", ConcGCThreads);
  log_debug(gc)("ParallelGCThreads: %u", ParallelGCThreads);

  _num_concurrent_workers = ConcGCThreads;
  _max_concurrent_workers = _num_concurrent_workers;

  _concurrent_workers = new WorkGang("G1 Conc", _max_concurrent_workers, false, true);
  _concurrent_workers->initialize_workers();

  if (FLAG_IS_DEFAULT(MarkStackSize)) {
    size_t mark_stack_size =
      MIN2(MarkStackSizeMax,
           MAX2(MarkStackSize, (size_t) (_max_concurrent_workers * TASKQUEUE_SIZE)));
    // Verify that the calculated value for MarkStackSize is in range.
    // It would be nice to use the private utility routine from Arguments.
    if (!(mark_stack_size >= 1 && mark_stack_size <= MarkStackSizeMax)) {
      log_warning(gc)("Invalid value calculated for MarkStackSize (" SIZE_FORMAT "): "
                      "must be between 1 and " SIZE_FORMAT,
                      mark_stack_size, MarkStackSizeMax);
      return;
    }
    FLAG_SET_ERGO(size_t, MarkStackSize, mark_stack_size);
  } else {
    // Verify MarkStackSize is in range.
    if (FLAG_IS_CMDLINE(MarkStackSize)) {
      if (FLAG_IS_DEFAULT(MarkStackSizeMax)) {
        if (!(MarkStackSize >= 1 && MarkStackSize <= MarkStackSizeMax)) {
          log_warning(gc)("Invalid value specified for MarkStackSize (" SIZE_FORMAT "): "
                          "must be between 1 and " SIZE_FORMAT,
                          MarkStackSize, MarkStackSizeMax);
          return;
        }
      } else if (FLAG_IS_CMDLINE(MarkStackSizeMax)) {
        if (!(MarkStackSize >= 1 && MarkStackSize <= MarkStackSizeMax)) {
          log_warning(gc)("Invalid value specified for MarkStackSize (" SIZE_FORMAT ")"
                          " or for MarkStackSizeMax (" SIZE_FORMAT ")",
                          MarkStackSize, MarkStackSizeMax);
          return;
        }
      }
    }
  }

  if (!_global_mark_stack.initialize(MarkStackSize, MarkStackSizeMax)) {
    vm_exit_during_initialization("Failed to allocate initial concurrent mark overflow mark stack.");
  }

  _tasks = NEW_C_HEAP_ARRAY(G1CMTask*, _max_num_tasks, mtGC);
  _accum_task_vtime = NEW_C_HEAP_ARRAY(double, _max_num_tasks, mtGC);

  // so that the assertion in MarkingTaskQueue::task_queue doesn't fail
  _num_active_tasks = _max_num_tasks;

  for (uint i = 0; i < _max_num_tasks; ++i) {
    G1CMTaskQueue* task_queue = new G1CMTaskQueue();
    task_queue->initialize();
    _task_queues->register_queue(i, task_queue);

    _tasks[i] = new G1CMTask(i, this, task_queue);

    _accum_task_vtime[i] = 0.0;
  }

  set_non_marking_state();
  _completed_initialization = true;
}

void G1ConcurrentMark::reset() {
  // Starting values for these two. This should be called in a STW
  // phase.
  MemRegion reserved = _g1h->g1_reserved();
  _heap_start = reserved.start();
  _heap_end = reserved.end();

  // Separated the asserts so that we know which one fires.
  assert(_heap_start != NULL, "heap bounds should look ok");
  assert(_heap_end != NULL, "heap bounds should look ok");
  assert(_heap_start < _heap_end, "heap bounds should look ok");

  // Reset all the marking data structures and any necessary flags
  reset_marking_state();

  // We reset all of them, since different phases will use a
  // different number of active threads. So, it's easiest to have all
  // of them ready.
  for (uint i = 0; i < _max_num_tasks; ++i) {
    _tasks[i]->reset(_next_mark_bitmap);
  }

  // we need this to make sure that the flag is on during the evac
  // pause with initial mark piggy-backed
  set_concurrent_marking_in_progress();
}

void G1ConcurrentMark::reset_marking_state() {
  _global_mark_stack.set_empty();

  // Expand the marking stack, if we have to and if we can.
  if (has_overflown()) {
    _global_mark_stack.expand();
  }

  clear_has_overflown();
  _finger = _heap_start;

  for (uint i = 0; i < _max_num_tasks; ++i) {
    G1CMTaskQueue* queue = _task_queues->queue(i);
    queue->set_empty();
  }
}

void G1ConcurrentMark::set_concurrency(uint active_tasks) {
  assert(active_tasks <= _max_num_tasks, "we should not have more");

  _num_active_tasks = active_tasks;
  // Need to update the three data structures below according to the
  // number of active threads for this phase.
  _terminator = ParallelTaskTerminator((int) active_tasks, _task_queues);
  _first_overflow_barrier_sync.set_n_workers((int) active_tasks);
  _second_overflow_barrier_sync.set_n_workers((int) active_tasks);
}

void G1ConcurrentMark::set_concurrency_and_phase(uint active_tasks, bool concurrent) {
  set_concurrency(active_tasks);

  _concurrent = concurrent;
  // We propagate this to all tasks, not just the active ones.
  for (uint i = 0; i < _max_num_tasks; ++i) {
    _tasks[i]->set_concurrent(concurrent);
  }

  if (concurrent) {
    set_concurrent_marking_in_progress();
  } else {
    // We currently assume that the concurrent flag has been set to
    // false before we start remark. At this point we should also be
    // in a STW phase.
    assert(!concurrent_marking_in_progress(), "invariant");
    assert(out_of_regions(),
           "only way to get here: _finger: " PTR_FORMAT ", _heap_end: " PTR_FORMAT,
           p2i(_finger), p2i(_heap_end));
  }
}

void G1ConcurrentMark::set_non_marking_state() {
  // We set the global marking state to some default values when we're
  // not doing marking.
  reset_marking_state();
  _num_active_tasks = 0;
  clear_concurrent_marking_in_progress();
}

G1ConcurrentMark::~G1ConcurrentMark() {
  // The G1ConcurrentMark instance is never freed.
  ShouldNotReachHere();
}

class G1ClearBitMapTask : public AbstractGangTask {
public:
  static size_t chunk_size() { return M; }

private:
  // Heap region closure used for clearing the given mark bitmap.
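  // Regions are cleared in chunk_size() steps so that, when running
  // concurrently, a worker can yield to a safepoint between chunks.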
  class G1ClearBitmapHRClosure : public HeapRegionClosure {
  private:
    G1CMBitMap* _bitmap;
    G1ConcurrentMark* _cm;
  public:
    G1ClearBitmapHRClosure(G1CMBitMap* bitmap, G1ConcurrentMark* cm) : HeapRegionClosure(), _cm(cm), _bitmap(bitmap) {
    }

    virtual bool doHeapRegion(HeapRegion* r) {
      size_t const chunk_size_in_words = G1ClearBitMapTask::chunk_size() / HeapWordSize;

      HeapWord* cur = r->bottom();
      HeapWord* const end = r->end();

      while (cur < end) {
        MemRegion mr(cur, MIN2(cur + chunk_size_in_words, end));
        _bitmap->clear_range(mr);

        cur += chunk_size_in_words;

        // Abort iteration if after yielding the marking has been aborted.
        if (_cm != NULL && _cm->do_yield_check() && _cm->has_aborted()) {
          return true;
        }
        // Repeat the asserts from before the start of the closure. We will do them
        // as asserts here to minimize their overhead on the product. However, we
        // will have them as guarantees at the beginning / end of the bitmap
        // clearing to get some checking in the product.
        assert(_cm == NULL || _cm->cm_thread()->during_cycle(), "invariant");
        assert(_cm == NULL || !G1CollectedHeap::heap()->collector_state()->mark_in_progress(), "invariant");
      }
      assert(cur == end, "Must have completed iteration over the bitmap for region %u.", r->hrm_index());

      return false;
    }
  };

  G1ClearBitmapHRClosure _cl;
  HeapRegionClaimer _hr_claimer;
  bool _suspendible; // If the task is suspendible, workers must join the STS.

public:
  G1ClearBitMapTask(G1CMBitMap* bitmap, G1ConcurrentMark* cm, uint n_workers, bool suspendible) :
    AbstractGangTask("G1 Clear Bitmap"),
    _cl(bitmap, suspendible ? cm : NULL),
    _hr_claimer(n_workers),
    _suspendible(suspendible)
  { }

  void work(uint worker_id) {
    SuspendibleThreadSetJoiner sts_join(_suspendible);
    G1CollectedHeap::heap()->heap_region_par_iterate_from_worker_offset(&_cl, &_hr_claimer, worker_id);
  }

  bool is_complete() {
    return _cl.complete();
  }
};

void G1ConcurrentMark::clear_bitmap(G1CMBitMap* bitmap, WorkGang* workers, bool may_yield) {
  assert(may_yield || SafepointSynchronize::is_at_safepoint(), "Non-yielding bitmap clear only allowed at safepoint.");

  size_t const num_bytes_to_clear = (HeapRegion::GrainBytes * _g1h->num_regions()) / G1CMBitMap::heap_map_factor();
  size_t const num_chunks = align_up(num_bytes_to_clear, G1ClearBitMapTask::chunk_size()) / G1ClearBitMapTask::chunk_size();

  uint const num_workers = (uint)MIN2(num_chunks, (size_t)workers->active_workers());

  G1ClearBitMapTask cl(bitmap, this, num_workers, may_yield);

  log_debug(gc, ergo)("Running %s with %u workers for " SIZE_FORMAT " work units.", cl.name(), num_workers, num_chunks);
  workers->run_task(&cl, num_workers);
  guarantee(!may_yield || cl.is_complete(), "Must have completed iteration when not yielding.");
}

void G1ConcurrentMark::cleanup_for_next_mark() {
  // Make sure that the concurrent mark thread looks to still be in
  // the current cycle.
  guarantee(cm_thread()->during_cycle(), "invariant");

  // We are finishing up the current cycle by clearing the next
  // marking bitmap and getting it ready for the next cycle. During
  // this time no other cycle can start. So, let's make sure that this
  // is the case.
  guarantee(!_g1h->collector_state()->mark_in_progress(), "invariant");

  clear_bitmap(_next_mark_bitmap, _concurrent_workers, true);

  // Clear the live count data. If the marking has been aborted, the abort()
  // call already did that.
  if (!has_aborted()) {
    clear_live_data(_concurrent_workers);
    DEBUG_ONLY(verify_live_data_clear());
  }

  // Repeat the asserts from above.
  guarantee(cm_thread()->during_cycle(), "invariant");
  guarantee(!_g1h->collector_state()->mark_in_progress(), "invariant");
}

void G1ConcurrentMark::clear_prev_bitmap(WorkGang* workers) {
  assert(SafepointSynchronize::is_at_safepoint(), "Should only clear the entire prev bitmap at a safepoint.");
  clear_bitmap(_prev_mark_bitmap, workers, false);
}

class CheckBitmapClearHRClosure : public HeapRegionClosure {
  G1CMBitMap* _bitmap;
  bool _error;
public:
  CheckBitmapClearHRClosure(G1CMBitMap* bitmap) : _bitmap(bitmap) {
  }

  virtual bool doHeapRegion(HeapRegion* r) {
    // This closure can be called concurrently to the mutator, so we must make sure
    // that the result of the get_next_marked_addr() call is compared to the
    // value passed to it as limit to detect any found bits.
    // end never changes in G1.
    HeapWord* end = r->end();
    return _bitmap->get_next_marked_addr(r->bottom(), end) != end;
  }
};

bool G1ConcurrentMark::next_mark_bitmap_is_clear() {
  CheckBitmapClearHRClosure cl(_next_mark_bitmap);
  _g1h->heap_region_iterate(&cl);
  return cl.complete();
}

class NoteStartOfMarkHRClosure: public HeapRegionClosure {
public:
  bool doHeapRegion(HeapRegion* r) {
    r->note_start_of_marking();
    return false;
  }
};

void G1ConcurrentMark::checkpoint_roots_initial_pre() {
  G1CollectedHeap* g1h = G1CollectedHeap::heap();

  _has_aborted = false;

  // Initialize marking structures. This has to be done in a STW phase.
  reset();

  // For each region note start of marking.
  NoteStartOfMarkHRClosure startcl;
  g1h->heap_region_iterate(&startcl);
}

void G1ConcurrentMark::checkpoint_roots_initial_post() {
  G1CollectedHeap* g1h = G1CollectedHeap::heap();

  // Start Concurrent Marking weak-reference discovery.
  ReferenceProcessor* rp = g1h->ref_processor_cm();
  // enable ("weak") refs discovery
  rp->enable_discovery();
  rp->setup_policy(false); // snapshot the soft ref policy to be used in this cycle

  SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set();
  // This is the start of the marking cycle; we expect all
  // threads to have SATB queues with active set to false.
  satb_mq_set.set_active_all_threads(true, /* new active value */
                                     false /* expected_active */);

  _root_regions.prepare_for_scan();

  // update_g1_committed() will be called at the end of an evac pause
  // when marking is on. So, it's also called at the end of the
  // initial-mark pause to update the heap end, if the heap expands
  // during it. No need to call it here.
}

/*
 * Notice that in the next two methods, we actually leave the STS
 * during the barrier sync and join it immediately afterwards. If we
 * do not do this, the following deadlock can occur: one thread could
 * be in the barrier sync code, waiting for the other thread to also
 * sync up, whereas another one could be trying to yield, while also
 * waiting for the other threads to sync up too.
 *
 * Note, however, that this code is also used during remark and in
 * this case we should not attempt to leave / enter the STS, otherwise
 * we'll either hit an assert (debug / fastdebug) or deadlock
 * (product). So we should only leave / enter the STS if we are
 * operating concurrently.
 *
 * Because the thread that does the sync barrier has left the STS, it
 * is possible that, while it is suspended, a Full GC or an evacuation
 * pause could occur. This is actually safe, since entering the sync
 * barrier is one of the last things do_marking_step() does, and it
 * doesn't manipulate any data structures afterwards.
 */

void G1ConcurrentMark::enter_first_sync_barrier(uint worker_id) {
  bool barrier_aborted;
  {
    SuspendibleThreadSetLeaver sts_leave(concurrent());
    barrier_aborted = !_first_overflow_barrier_sync.enter();
  }

  // at this point everyone should have synced up and not be doing any
  // more work

  if (barrier_aborted) {
    // If the barrier aborted we ignore the overflow condition and
    // just abort the whole marking phase as quickly as possible.
    return;
  }

  // If we're executing the concurrent phase of marking, reset the marking
  // state; otherwise the marking state is reset after reference processing,
  // during the remark pause.
  // If we reset here as a result of an overflow during the remark we will
  // see assertion failures from any subsequent set_concurrency_and_phase()
  // calls.
  if (concurrent()) {
    // let the task associated with worker 0 do this
    if (worker_id == 0) {
      // task 0 is responsible for clearing the global data structures
      // We should be here because of an overflow. During STW we should
      // not clear the overflow flag since we rely on it being true when
      // we exit this method to abort the pause and restart concurrent
      // marking.
      reset_marking_state();

      log_info(gc, marking)("Concurrent Mark reset for overflow");
    }
  }

  // after this, each task should reset its own data structures and
  // then go into the second barrier
}

void G1ConcurrentMark::enter_second_sync_barrier(uint worker_id) {
  SuspendibleThreadSetLeaver sts_leave(concurrent());
  _second_overflow_barrier_sync.enter();

  // at this point everything should be re-initialized and ready to go
}

class G1CMConcurrentMarkingTask: public AbstractGangTask {
private:
  G1ConcurrentMark* _cm;
  ConcurrentMarkThread* _cmt;

public:
  void work(uint worker_id) {
    assert(Thread::current()->is_ConcurrentGC_thread(), "Not a concurrent GC thread");
    ResourceMark rm;

    double start_vtime = os::elapsedVTime();

    {
      SuspendibleThreadSetJoiner sts_join;

      assert(worker_id < _cm->active_tasks(), "invariant");

      G1CMTask* task = _cm->task(worker_id);
      task->record_start_time();
      if (!_cm->has_aborted()) {
        do {
          task->do_marking_step(G1ConcMarkStepDurationMillis,
                                true  /* do_termination */,
                                false /* is_serial */);

          _cm->do_yield_check();
        } while (!_cm->has_aborted() && task->has_aborted());
      }
      task->record_end_time();
      guarantee(!task->has_aborted() || _cm->has_aborted(), "invariant");
    }

    double end_vtime = os::elapsedVTime();
    _cm->update_accum_task_vtime(worker_id, end_vtime - start_vtime);
  }

  G1CMConcurrentMarkingTask(G1ConcurrentMark* cm,
                            ConcurrentMarkThread* cmt) :
    AbstractGangTask("Concurrent Mark"), _cm(cm), _cmt(cmt) { }

  ~G1CMConcurrentMarkingTask() { }
};

uint G1ConcurrentMark::calc_active_marking_workers() {
  uint result = 0;
  if (!UseDynamicNumberOfGCThreads ||
      (!FLAG_IS_DEFAULT(ConcGCThreads) &&
       !ForceDynamicNumberOfGCThreads)) {
    result = _max_concurrent_workers;
  } else {
    result =
      AdaptiveSizePolicy::calc_default_active_workers(_max_concurrent_workers,
                                                      1, /* Minimum workers */
                                                      _num_concurrent_workers,
                                                      Threads::number_of_non_daemon_threads());
    // Don't scale the result down by scale_concurrent_worker_threads() because
    // that scaling has already gone into "_max_concurrent_workers".
  }
  assert(result > 0 && result <= _max_concurrent_workers,
         "Calculated number of marking workers must be larger than zero and at most the maximum %u, but is %u",
         _max_concurrent_workers, result);
  return result;
}

void G1ConcurrentMark::scan_root_region(HeapRegion* hr) {
  // Currently, only survivors can be root regions.
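  // Survivor regions are entirely allocated during the initial-mark pause,
  // so their NTAMS is at bottom() (checked below) and every object in
  // [bottom, top) is implicitly live; scanning marks what they reference.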
  assert(hr->next_top_at_mark_start() == hr->bottom(), "invariant");
  G1RootRegionScanClosure cl(_g1h, this);

  const uintx interval = PrefetchScanIntervalInBytes;
  HeapWord* curr = hr->bottom();
  const HeapWord* end = hr->top();
  while (curr < end) {
    Prefetch::read(curr, interval);
    oop obj = oop(curr);
    int size = obj->oop_iterate_size(&cl);
    assert(size == obj->size(), "sanity");
    curr += size;
  }
}

class G1CMRootRegionScanTask : public AbstractGangTask {
private:
  G1ConcurrentMark* _cm;

public:
  G1CMRootRegionScanTask(G1ConcurrentMark* cm) :
    AbstractGangTask("G1 Root Region Scan"), _cm(cm) { }

  void work(uint worker_id) {
    assert(Thread::current()->is_ConcurrentGC_thread(),
           "this should only be done by a conc GC thread");

    G1CMRootRegions* root_regions = _cm->root_regions();
    HeapRegion* hr = root_regions->claim_next();
    while (hr != NULL) {
      _cm->scan_root_region(hr);
      hr = root_regions->claim_next();
    }
  }
};

void G1ConcurrentMark::scan_root_regions() {
  // scan_in_progress() will have been set to true only if there was
  // at least one root region to scan. So, if it's false, we
  // should not attempt to do any further work.
  if (root_regions()->scan_in_progress()) {
    assert(!has_aborted(), "Aborting before root region scanning is finished not supported.");

    _num_concurrent_workers = MIN2(calc_active_marking_workers(),
                                   // We distribute work on a per-region basis, so starting
                                   // more threads than that is useless.
                                   root_regions()->num_root_regions());
    assert(_num_concurrent_workers <= _max_concurrent_workers,
           "Maximum number of marking threads exceeded");

    G1CMRootRegionScanTask task(this);
    log_debug(gc, ergo)("Running %s using %u workers for %u work units.",
                        task.name(), _num_concurrent_workers, root_regions()->num_root_regions());
    _concurrent_workers->run_task(&task, _num_concurrent_workers);

    // It's possible that has_aborted() is true here without actually
    // aborting the survivor scan earlier. This is OK as it's
    // mainly used for sanity checking.
    root_regions()->scan_finished();
  }
}

void G1ConcurrentMark::concurrent_cycle_start() {
  _gc_timer_cm->register_gc_start();

  _gc_tracer_cm->report_gc_start(GCCause::_no_gc /* first parameter is not used */, _gc_timer_cm->gc_start());

  _g1h->trace_heap_before_gc(_gc_tracer_cm);
}

void G1ConcurrentMark::concurrent_cycle_end() {
  _g1h->trace_heap_after_gc(_gc_tracer_cm);

  if (has_aborted()) {
    _gc_tracer_cm->report_concurrent_mode_failure();
  }

  _gc_timer_cm->register_gc_end();

  _gc_tracer_cm->report_gc_end(_gc_timer_cm->gc_end(), _gc_timer_cm->time_partitions());
}

void G1ConcurrentMark::mark_from_roots() {
  // we might be tempted to assert that:
  // assert(asynch == !SafepointSynchronize::is_at_safepoint(),
  //        "inconsistent argument?");
  // However that wouldn't be right, because it's possible that
  // a safepoint is indeed in progress as a younger generation
  // stop-the-world GC happens even as we mark in this generation.

  _restart_for_overflow = false;

  _num_concurrent_workers = calc_active_marking_workers();

  uint active_workers = MAX2(1U, _num_concurrent_workers);

  // Setting active workers is not guaranteed since fewer
  // worker threads may currently exist and more may not be
  // available.
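  // update_active_workers() clamps the request to the gang's size and
  // returns the number of workers actually made active.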
  active_workers = _concurrent_workers->update_active_workers(active_workers);
  log_info(gc, task)("Using %u workers of %u for marking", active_workers, _concurrent_workers->total_workers());

  // Parallel task terminator is set in "set_concurrency_and_phase()"
  set_concurrency_and_phase(active_workers, true /* concurrent */);

  G1CMConcurrentMarkingTask marking_task(this, cm_thread());
  _concurrent_workers->run_task(&marking_task);
  print_stats();
}

void G1ConcurrentMark::checkpoint_roots_final(bool clear_all_soft_refs) {
  // world is stopped at this checkpoint
  assert(SafepointSynchronize::is_at_safepoint(),
         "world should be stopped");

  G1CollectedHeap* g1h = G1CollectedHeap::heap();

  // If a full collection has happened, we shouldn't do this.
  if (has_aborted()) {
    g1h->collector_state()->set_mark_in_progress(false); // So bitmap clearing isn't confused
    return;
  }

  SvcGCMarker sgcm(SvcGCMarker::OTHER);

  if (VerifyDuringGC) {
    HandleMark hm;  // handle scope
    g1h->prepare_for_verify();
    Universe::verify(VerifyOption_G1UsePrevMarking, "During GC (before)");
  }
  g1h->verifier()->check_bitmaps("Remark Start");

  G1Policy* g1p = g1h->g1_policy();
  g1p->record_concurrent_mark_remark_start();

  double start = os::elapsedTime();

  checkpoint_roots_final_work();

  double mark_work_end = os::elapsedTime();

  weak_refs_work(clear_all_soft_refs);

  if (has_overflown()) {
    // We overflowed. Restart concurrent marking.
    _restart_for_overflow = true;

    // Verify the heap w.r.t. the previous marking bitmap.
    if (VerifyDuringGC) {
      HandleMark hm;  // handle scope
      g1h->prepare_for_verify();
      Universe::verify(VerifyOption_G1UsePrevMarking, "During GC (overflow)");
    }

    // Clear the marking state because we will be restarting
    // marking due to overflowing the global mark stack.
    reset_marking_state();
  } else {
    SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set();
    // We're done with marking.
    // This is the end of the marking cycle; we expect all
    // threads to have SATB queues with active set to true.
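    // Deactivating the queues stops mutators from recording further
    // pre-write barrier entries now that marking has completed.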
    satb_mq_set.set_active_all_threads(false, /* new active value */
                                       true /* expected_active */);

    if (VerifyDuringGC) {
      HandleMark hm;  // handle scope
      g1h->prepare_for_verify();
      Universe::verify(VerifyOption_G1UseNextMarking, "During GC (after)");
    }
    g1h->verifier()->check_bitmaps("Remark End");
    assert(!restart_for_overflow(), "sanity");
    // Completely reset the marking state since marking completed
    set_non_marking_state();
  }

  // Statistics
  double now = os::elapsedTime();
  _remark_mark_times.add((mark_work_end - start) * 1000.0);
  _remark_weak_ref_times.add((now - mark_work_end) * 1000.0);
  _remark_times.add((now - start) * 1000.0);

  g1p->record_concurrent_mark_remark_end();

  G1CMIsAliveClosure is_alive(g1h);
  _gc_tracer_cm->report_object_count_after_gc(&is_alive);
}

class G1NoteEndOfConcMarkClosure : public HeapRegionClosure {
  G1CollectedHeap* _g1;
  size_t _freed_bytes;
  FreeRegionList* _local_cleanup_list;
  uint _old_regions_removed;
  uint _humongous_regions_removed;
  HRRSCleanupTask* _hrrs_cleanup_task;

public:
  G1NoteEndOfConcMarkClosure(G1CollectedHeap* g1,
                             FreeRegionList* local_cleanup_list,
                             HRRSCleanupTask* hrrs_cleanup_task) :
    _g1(g1),
    _freed_bytes(0),
    _local_cleanup_list(local_cleanup_list),
    _old_regions_removed(0),
    _humongous_regions_removed(0),
    _hrrs_cleanup_task(hrrs_cleanup_task) { }

  size_t freed_bytes() { return _freed_bytes; }
  const uint old_regions_removed() { return _old_regions_removed; }
  const uint humongous_regions_removed() { return _humongous_regions_removed; }

  bool doHeapRegion(HeapRegion *hr) {
    _g1->reset_gc_time_stamps(hr);
    hr->note_end_of_marking();

    if (hr->used() > 0 && hr->max_live_bytes() == 0 && !hr->is_young() && !hr->is_archive()) {
      _freed_bytes += hr->used();
      hr->set_containing_set(NULL);
      if (hr->is_humongous()) {
        _humongous_regions_removed++;
        _g1->free_humongous_region(hr, _local_cleanup_list, true /* skip_remset */);
      } else {
        _old_regions_removed++;
        _g1->free_region(hr, _local_cleanup_list, true /* skip_remset */);
      }
    } else {
      hr->rem_set()->do_cleanup_work(_hrrs_cleanup_task);
    }

    return false;
  }
};

class G1ParNoteEndTask: public AbstractGangTask {
  friend class G1NoteEndOfConcMarkClosure;

protected:
  G1CollectedHeap* _g1h;
  FreeRegionList* _cleanup_list;
  HeapRegionClaimer _hrclaimer;

public:
  G1ParNoteEndTask(G1CollectedHeap* g1h, FreeRegionList* cleanup_list, uint n_workers) :
    AbstractGangTask("G1 note end"), _g1h(g1h), _cleanup_list(cleanup_list), _hrclaimer(n_workers) {
  }

  void work(uint worker_id) {
    FreeRegionList local_cleanup_list("Local Cleanup List");
    HRRSCleanupTask hrrs_cleanup_task;
    G1NoteEndOfConcMarkClosure g1_note_end(_g1h, &local_cleanup_list,
                                           &hrrs_cleanup_task);
    _g1h->heap_region_par_iterate_from_worker_offset(&g1_note_end, &_hrclaimer, worker_id);
    assert(g1_note_end.complete(), "Shouldn't have yielded!");

    // Now update the lists
    _g1h->remove_from_old_sets(g1_note_end.old_regions_removed(), g1_note_end.humongous_regions_removed());
    {
      MutexLockerEx x(ParGCRareEvent_lock, Mutex::_no_safepoint_check_flag);
      _g1h->decrement_summary_bytes(g1_note_end.freed_bytes());

      // If we iterate over the global cleanup list at the end of
      // cleanup to do this printing we cannot guarantee that we only
      // generate output for the newly-reclaimed regions (the list
      // might not be empty at the beginning of cleanup; we might
      // still be working on its previous contents). So we do the
      // printing here, before we append the new regions to the global
      // cleanup list.

      G1HRPrinter* hr_printer = _g1h->hr_printer();
      if (hr_printer->is_active()) {
        FreeRegionListIterator iter(&local_cleanup_list);
        while (iter.more_available()) {
          HeapRegion* hr = iter.get_next();
          hr_printer->cleanup(hr);
        }
      }

      _cleanup_list->add_ordered(&local_cleanup_list);
      assert(local_cleanup_list.is_empty(), "post-condition");

      HeapRegionRemSet::finish_cleanup_task(&hrrs_cleanup_task);
    }
  }
};

void G1ConcurrentMark::cleanup() {
  // world is stopped at this checkpoint
  assert(SafepointSynchronize::is_at_safepoint(),
         "world should be stopped");
  G1CollectedHeap* g1h = G1CollectedHeap::heap();

  // If a full collection has happened, we shouldn't do this.
  if (has_aborted()) {
    g1h->collector_state()->set_mark_in_progress(false); // So bitmap clearing isn't confused
    return;
  }

  g1h->verifier()->verify_region_sets_optional();

  if (VerifyDuringGC) {
    HandleMark hm;  // handle scope
    g1h->prepare_for_verify();
    Universe::verify(VerifyOption_G1UsePrevMarking, "During GC (before)");
  }
  g1h->verifier()->check_bitmaps("Cleanup Start");

  G1Policy* g1p = g1h->g1_policy();
  g1p->record_concurrent_mark_cleanup_start();

  double start = os::elapsedTime();

  HeapRegionRemSet::reset_for_cleanup_tasks();

  {
    GCTraceTime(Debug, gc)("Finalize Live Data");
    finalize_live_data();
  }

  if (VerifyDuringGC) {
    GCTraceTime(Debug, gc)("Verify Live Data");
    verify_live_data();
  }

  g1h->collector_state()->set_mark_in_progress(false);

  double count_end = os::elapsedTime();
  double this_final_counting_time = (count_end - start);
  _total_counting_time += this_final_counting_time;

  if (log_is_enabled(Trace, gc, liveness)) {
    G1PrintRegionLivenessInfoClosure cl("Post-Marking");
    _g1h->heap_region_iterate(&cl);
  }

  // Install newly created mark bitmap as "prev".
  swap_mark_bitmaps();

  g1h->reset_gc_time_stamp();

  uint n_workers = _g1h->workers()->active_workers();

  // Note end of marking in all heap regions.
  G1ParNoteEndTask g1_par_note_end_task(g1h, &_cleanup_list, n_workers);
  g1h->workers()->run_task(&g1_par_note_end_task);
  g1h->check_gc_time_stamps();

  if (!cleanup_list_is_empty()) {
    // The cleanup list is not empty, so we'll have to process it
    // concurrently. Notify anyone else that might be wanting free
    // regions that there will be more free regions coming soon.
    g1h->set_free_regions_coming();
  }

  // Scrub the remembered sets before the record_concurrent_mark_cleanup_end()
  // call below, since it affects the metric by which we sort the heap
  // regions.
  if (G1ScrubRemSets) {
    double rs_scrub_start = os::elapsedTime();
    g1h->scrub_rem_set();
    _total_rs_scrub_time += (os::elapsedTime() - rs_scrub_start);
  }

  // this will also free any regions totally full of garbage objects,
  // and sort the regions.
  g1h->g1_policy()->record_concurrent_mark_cleanup_end();

  // Statistics.
  double end = os::elapsedTime();
  _cleanup_times.add((end - start) * 1000.0);

  // Clean up will have freed any regions completely full of garbage.
  // Update the soft reference policy with the new heap occupancy.
  Universe::update_heap_info_at_gc();

  if (VerifyDuringGC) {
    HandleMark hm;  // handle scope
    g1h->prepare_for_verify();
    Universe::verify(VerifyOption_G1UsePrevMarking, "During GC (after)");
  }

  g1h->verifier()->check_bitmaps("Cleanup End");

  g1h->verifier()->verify_region_sets_optional();

  // We need to make this be a "collection" so any collection pause that
  // races with it goes around and waits for completeCleanup to finish.
  g1h->increment_total_collections();

  // Clean out dead classes and update Metaspace sizes.
  if (ClassUnloadingWithConcurrentMark) {
    ClassLoaderDataGraph::purge();
  }
  MetaspaceGC::compute_new_size();

  // We reclaimed old regions so we should calculate the sizes to make
  // sure we update the old gen/space data.
  g1h->g1mm()->update_sizes();
  g1h->allocation_context_stats().update_after_mark();
}

void G1ConcurrentMark::complete_cleanup() {
  if (has_aborted()) return;

  G1CollectedHeap* g1h = G1CollectedHeap::heap();

  _cleanup_list.verify_optional();
  FreeRegionList tmp_free_list("Tmp Free List");

  log_develop_trace(gc, freelist)("G1ConcRegionFreeing [complete cleanup] : "
                                  "cleanup list has %u entries",
                                  _cleanup_list.length());

  // No one else should be accessing the _cleanup_list at this point,
  // so it is not necessary to take any locks
  while (!_cleanup_list.is_empty()) {
    HeapRegion* hr = _cleanup_list.remove_region(true /* from_head */);
    assert(hr != NULL, "Got NULL from a non-empty list");
    hr->par_clear();
    tmp_free_list.add_ordered(hr);

    // Instead of adding one region at a time to the secondary_free_list,
    // we accumulate them in the local list and move them a few at a
    // time. This also cuts down on the number of notify_all() calls
    // we do during this process. We'll also append the local list when
    // _cleanup_list is empty (which means we just removed the last
    // region from the _cleanup_list).
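    // G1SecondaryFreeListAppendLength bounds the batch size, trading
    // SecondaryFreeList_lock traffic against how quickly allocators
    // see the freed regions.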
    if ((tmp_free_list.length() % G1SecondaryFreeListAppendLength == 0) ||
        _cleanup_list.is_empty()) {
      log_develop_trace(gc, freelist)("G1ConcRegionFreeing [complete cleanup] : "
                                      "appending %u entries to the secondary_free_list, "
                                      "cleanup list still has %u entries",
                                      tmp_free_list.length(),
                                      _cleanup_list.length());

      {
        MutexLockerEx x(SecondaryFreeList_lock, Mutex::_no_safepoint_check_flag);
        g1h->secondary_free_list_add(&tmp_free_list);
        SecondaryFreeList_lock->notify_all();
      }
#ifndef PRODUCT
      if (G1StressConcRegionFreeing) {
        for (uintx i = 0; i < G1StressConcRegionFreeingDelayMillis; ++i) {
          os::sleep(Thread::current(), (jlong) 1, false);
        }
      }
#endif
    }
  }
  assert(tmp_free_list.is_empty(), "post-condition");
}

// Supporting Object and Oop closures for reference discovery
// and processing during marking

bool G1CMIsAliveClosure::do_object_b(oop obj) {
  HeapWord* addr = (HeapWord*)obj;
  return addr != NULL &&
         (!_g1->is_in_g1_reserved(addr) || !_g1->is_obj_ill(obj));
}

// 'Keep Alive' oop closure used by both serial and parallel reference processing.
// Uses the G1CMTask associated with a worker thread (for serial reference
// processing the G1CMTask for worker 0 is used) to preserve (mark) and
// trace referent objects.
//
// Using the G1CMTask and embedded local queues avoids having the worker
// threads operating on the global mark stack. This reduces the risk
// of overflowing the stack - which we would rather avoid at this late
// state. Also using the tasks' local queues removes the potential
// of the workers interfering with each other that could occur if
// operating on the global stack.

class G1CMKeepAliveAndDrainClosure: public OopClosure {
  G1ConcurrentMark* _cm;
  G1CMTask* _task;
  int _ref_counter_limit;
  int _ref_counter;
  bool _is_serial;
public:
  G1CMKeepAliveAndDrainClosure(G1ConcurrentMark* cm, G1CMTask* task, bool is_serial) :
    _cm(cm), _task(task), _is_serial(is_serial),
    _ref_counter_limit(G1RefProcDrainInterval) {
    assert(_ref_counter_limit > 0, "sanity");
    assert(!_is_serial || _task->worker_id() == 0, "only task 0 for serial code");
    _ref_counter = _ref_counter_limit;
  }

  virtual void do_oop(narrowOop* p) { do_oop_work(p); }
  virtual void do_oop(      oop* p) { do_oop_work(p); }

  template <class T> void do_oop_work(T* p) {
    if (!_cm->has_overflown()) {
      oop obj = oopDesc::load_decode_heap_oop(p);
      _task->deal_with_reference(obj);
      _ref_counter--;

      if (_ref_counter == 0) {
        // We have dealt with _ref_counter_limit references, pushing them
        // and objects reachable from them on to the local stack (and
        // possibly the global stack). Call G1CMTask::do_marking_step() to
        // process these entries.
        //
        // We call G1CMTask::do_marking_step() in a loop, which we'll exit if
        // there's nothing more to do (i.e. we're done with the entries that
        // were pushed as a result of the G1CMTask::deal_with_reference() calls
        // above) or we overflow.
        //
        // Note: G1CMTask::do_marking_step() can set the G1CMTask::has_aborted()
        // flag while there may still be some work to do. (See the comment at
        // the beginning of G1CMTask::do_marking_step() for those conditions -
        // one of which is reaching the specified time target.) It is only
        // when G1CMTask::do_marking_step() returns without setting the
        // has_aborted() flag that the marking step has completed.
        do {
          double mark_step_duration_ms = G1ConcMarkStepDurationMillis;
          _task->do_marking_step(mark_step_duration_ms,
                                 false /* do_termination */,
                                 _is_serial);
        } while (_task->has_aborted() && !_cm->has_overflown());
        _ref_counter = _ref_counter_limit;
      }
    }
  }
};

// 'Drain' oop closure used by both serial and parallel reference processing.
// Uses the G1CMTask associated with a given worker thread (for serial
// reference processing the G1CMTask for worker 0 is used). Calls the
// do_marking_step routine, with an unbelievably large timeout value,
// to drain the marking data structures of the remaining entries
// added by the 'keep alive' oop closure above.

class G1CMDrainMarkingStackClosure: public VoidClosure {
  G1ConcurrentMark* _cm;
  G1CMTask* _task;
  bool _is_serial;
public:
  G1CMDrainMarkingStackClosure(G1ConcurrentMark* cm, G1CMTask* task, bool is_serial) :
    _cm(cm), _task(task), _is_serial(is_serial) {
    assert(!_is_serial || _task->worker_id() == 0, "only task 0 for serial code");
  }

  void do_void() {
    do {
      // We call G1CMTask::do_marking_step() to completely drain the local
      // and global marking stacks of entries pushed by the 'keep alive'
      // oop closure (an instance of G1CMKeepAliveAndDrainClosure above).
      //
      // G1CMTask::do_marking_step() is called in a loop, which we'll exit
      // if there's nothing more to do (i.e. we've completely drained the
      // entries that were pushed as a result of applying the 'keep alive'
      // closure to the entries on the discovered ref lists) or we overflow
      // the global marking stack.
      //
      // Note: G1CMTask::do_marking_step() can set the G1CMTask::has_aborted()
      // flag while there may still be some work to do. (See the comment at
      // the beginning of G1CMTask::do_marking_step() for those conditions -
      // one of which is reaching the specified time target.) It is only
      // when G1CMTask::do_marking_step() returns without setting the
      // has_aborted() flag that the marking step has completed.

      _task->do_marking_step(1000000000.0 /* something very large */,
                             true         /* do_termination */,
                             _is_serial);
    } while (_task->has_aborted() && !_cm->has_overflown());
  }
};

// Implementation of AbstractRefProcTaskExecutor for parallel
// reference processing at the end of G1 concurrent marking

class G1CMRefProcTaskExecutor: public AbstractRefProcTaskExecutor {
private:
  G1CollectedHeap* _g1h;
  G1ConcurrentMark* _cm;
  WorkGang* _workers;
  uint _active_workers;

public:
  G1CMRefProcTaskExecutor(G1CollectedHeap* g1h,
                          G1ConcurrentMark* cm,
                          WorkGang* workers,
                          uint n_workers) :
    _g1h(g1h), _cm(cm),
    _workers(workers), _active_workers(n_workers) { }

  // Executes the given task using concurrent marking worker threads.
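  // ProcessTask covers discovered-reference processing; EnqueueTask
  // moves the remaining references onto the pending list.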
  virtual void execute(ProcessTask& task);
  virtual void execute(EnqueueTask& task);
};

class G1CMRefProcTaskProxy: public AbstractGangTask {
  typedef AbstractRefProcTaskExecutor::ProcessTask ProcessTask;
  ProcessTask& _proc_task;
  G1CollectedHeap* _g1h;
  G1ConcurrentMark* _cm;

public:
  G1CMRefProcTaskProxy(ProcessTask& proc_task,
                       G1CollectedHeap* g1h,
                       G1ConcurrentMark* cm) :
    AbstractGangTask("Process reference objects in parallel"),
    _proc_task(proc_task), _g1h(g1h), _cm(cm) {
    ReferenceProcessor* rp = _g1h->ref_processor_cm();
    assert(rp->processing_is_mt(), "shouldn't be here otherwise");
  }

  virtual void work(uint worker_id) {
    ResourceMark rm;
    HandleMark hm;
    G1CMTask* task = _cm->task(worker_id);
    G1CMIsAliveClosure g1_is_alive(_g1h);
    G1CMKeepAliveAndDrainClosure g1_par_keep_alive(_cm, task, false /* is_serial */);
    G1CMDrainMarkingStackClosure g1_par_drain(_cm, task, false /* is_serial */);

    _proc_task.work(worker_id, g1_is_alive, g1_par_keep_alive, g1_par_drain);
  }
};

void G1CMRefProcTaskExecutor::execute(ProcessTask& proc_task) {
  assert(_workers != NULL, "Need parallel worker threads.");
  assert(_g1h->ref_processor_cm()->processing_is_mt(), "processing is not MT");

  G1CMRefProcTaskProxy proc_task_proxy(proc_task, _g1h, _cm);

  // We need to reset the concurrency level before each
  // proxy task execution, so that the termination protocol
  // and overflow handling in G1CMTask::do_marking_step() knows
  // how many workers to wait for.
  _cm->set_concurrency(_active_workers);
  _workers->run_task(&proc_task_proxy);
}

class G1CMRefEnqueueTaskProxy: public AbstractGangTask {
  typedef AbstractRefProcTaskExecutor::EnqueueTask EnqueueTask;
  EnqueueTask& _enq_task;

public:
  G1CMRefEnqueueTaskProxy(EnqueueTask& enq_task) :
    AbstractGangTask("Enqueue reference objects in parallel"),
    _enq_task(enq_task) { }

  virtual void work(uint worker_id) {
    _enq_task.work(worker_id);
  }
};

void G1CMRefProcTaskExecutor::execute(EnqueueTask& enq_task) {
  assert(_workers != NULL, "Need parallel worker threads.");
  assert(_g1h->ref_processor_cm()->processing_is_mt(), "processing is not MT");

  G1CMRefEnqueueTaskProxy enq_task_proxy(enq_task);

  // Not strictly necessary but...
  //
  // We need to reset the concurrency level before each
  // proxy task execution, so that the termination protocol
  // and overflow handling in G1CMTask::do_marking_step() knows
  // how many workers to wait for.
  _cm->set_concurrency(_active_workers);
  _workers->run_task(&enq_task_proxy);
}

void G1ConcurrentMark::weak_refs_work(bool clear_all_soft_refs) {
  if (has_overflown()) {
    // Skip processing the discovered references if we have
    // overflown the global marking stack. Reference objects
    // only get discovered once so it is OK to not
    // de-populate the discovered reference lists. We could have,
    // but the only benefit would be that, when marking restarts,
    // fewer reference objects are discovered.
    return;
  }

  ResourceMark rm;
  HandleMark hm;

  G1CollectedHeap* g1h = G1CollectedHeap::heap();

  // Is alive closure.
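  // An object is alive if it lies outside the G1 reserved heap or is not
  // dead with respect to the current marking, as implemented by
  // G1CMIsAliveClosure::do_object_b() earlier in this file.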
1571 G1CMIsAliveClosure g1_is_alive(g1h);
1572
1573 // Inner scope to exclude the cleaning of the string and symbol
1574 // tables from the displayed time.
1575 {
1576 GCTraceTime(Debug, gc, phases) trace("Reference Processing", _gc_timer_cm);
1577
1578 ReferenceProcessor* rp = g1h->ref_processor_cm();
1579
1580 // See the comment in G1CollectedHeap::ref_processing_init()
1581 // about how reference processing currently works in G1.
1582
1583 // Set the soft reference policy
1584 rp->setup_policy(clear_all_soft_refs);
1585 assert(_global_mark_stack.is_empty(), "mark stack should be empty");
1586
1587 // Instances of the 'Keep Alive' and 'Complete GC' closures used
1588 // in serial reference processing. Note these closures are also
1589 // used for serially processing (by the current thread) the
1590 // JNI references during parallel reference processing.
1591 //
1592 // These closures do not need to synchronize with the worker
1593 // threads involved in parallel reference processing as these
1594 // instances are executed serially by the current thread (i.e.
1595 // reference processing is not multi-threaded and is thus
1596 // performed by the current thread instead of a gang worker).
1597 //
1598 // The gang tasks involved in parallel reference processing create
1599 // their own instances of these closures, which do their own
1600 // synchronization among themselves.
1601 G1CMKeepAliveAndDrainClosure g1_keep_alive(this, task(0), true /* is_serial */);
1602 G1CMDrainMarkingStackClosure g1_drain_mark_stack(this, task(0), true /* is_serial */);
1603
1604 // We need at least one active thread. If reference processing
1605 // is not multi-threaded we use the current (VMThread) thread,
1606 // otherwise we use the work gang from the G1CollectedHeap and
1607 // we utilize all the worker threads we can.
1608 bool processing_is_mt = rp->processing_is_mt();
1609 uint active_workers = (processing_is_mt ? g1h->workers()->active_workers() : 1U);
1610 active_workers = MAX2(MIN2(active_workers, _max_num_tasks), 1U);
1611
1612 // Parallel processing task executor.
1613 G1CMRefProcTaskExecutor par_task_executor(g1h, this,
1614 g1h->workers(), active_workers);
1615 AbstractRefProcTaskExecutor* executor = (processing_is_mt ? &par_task_executor : NULL);
1616
1617 // Set the concurrency level. The phase was already set prior to
1618 // executing the remark task.
1619 set_concurrency(active_workers);
1620
1621 // Set the degree of MT processing here. If the discovery was done MT,
1622 // the number of threads involved during discovery could differ from
1623 // the number of active workers. This is OK as long as the discovered
1624 // Reference lists are balanced (see balance_all_queues() and balance_queues()).
1625 rp->set_active_mt_degree(active_workers);
1626
1627 ReferenceProcessorPhaseTimes pt(_gc_timer_cm, rp->num_q());
1628
1629 // Process the weak references.
1630 const ReferenceProcessorStats& stats =
1631 rp->process_discovered_references(&g1_is_alive,
1632 &g1_keep_alive,
1633 &g1_drain_mark_stack,
1634 executor,
1635 &pt);
1636 _gc_tracer_cm->report_gc_reference_stats(stats);
1637 pt.print_all_references();
1638
1639 // The do_oop work routines of the keep_alive and drain_marking_stack
1640 // oop closures will set the has_overflown flag if we overflow the
1641 // global marking stack.
1642 1643 assert(has_overflown() || _global_mark_stack.is_empty(), 1644 "Mark stack should be empty (unless it has overflown)"); 1645 1646 assert(rp->num_q() == active_workers, "why not"); 1647 1648 rp->enqueue_discovered_references(executor, &pt); 1649 1650 rp->verify_no_references_recorded(); 1651 1652 pt.print_enqueue_phase(); 1653 1654 assert(!rp->discovery_enabled(), "Post condition"); 1655 } 1656 1657 assert(has_overflown() || _global_mark_stack.is_empty(), 1658 "Mark stack should be empty (unless it has overflown)"); 1659 1660 { 1661 GCTraceTime(Debug, gc, phases) debug("Weak Processing", _gc_timer_cm); 1662 WeakProcessor::weak_oops_do(&g1_is_alive, &do_nothing_cl); 1663 } 1664 1665 if (has_overflown()) { 1666 // We can not trust g1_is_alive if the marking stack overflowed 1667 return; 1668 } 1669 1670 assert(_global_mark_stack.is_empty(), "Marking should have completed"); 1671 1672 // Unload Klasses, String, Symbols, Code Cache, etc. 1673 if (ClassUnloadingWithConcurrentMark) { 1674 GCTraceTime(Debug, gc, phases) debug("Class Unloading", _gc_timer_cm); 1675 bool purged_classes = SystemDictionary::do_unloading(&g1_is_alive, _gc_timer_cm, false /* Defer cleaning */); 1676 g1h->complete_cleaning(&g1_is_alive, purged_classes); 1677 } else { 1678 GCTraceTime(Debug, gc, phases) debug("Cleanup", _gc_timer_cm); 1679 // No need to clean string table and symbol table as they are treated as strong roots when 1680 // class unloading is disabled. 1681 g1h->partial_cleaning(&g1_is_alive, false, false, G1StringDedup::is_enabled()); 1682 1683 } 1684 } 1685 1686 void G1ConcurrentMark::swap_mark_bitmaps() { 1687 G1CMBitMap* temp = _prev_mark_bitmap; 1688 _prev_mark_bitmap = _next_mark_bitmap; 1689 _next_mark_bitmap = temp; 1690 } 1691 1692 // Closure for marking entries in SATB buffers. 1693 class G1CMSATBBufferClosure : public SATBBufferClosure { 1694 private: 1695 G1CMTask* _task; 1696 G1CollectedHeap* _g1h; 1697 1698 // This is very similar to G1CMTask::deal_with_reference, but with 1699 // more relaxed requirements for the argument, so this must be more 1700 // circumspect about treating the argument as an object. 
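// (Descriptive note: each entry is a raw void* taken from an SATB
// buffer. By the SATB invariant it referred to a live object when it
// was logged, so do_entry() below can simply count the reference and
// grey the object via make_reference_grey().)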
1701 void do_entry(void* entry) const {
1702 _task->increment_refs_reached();
1703 oop const obj = static_cast<oop>(entry);
1704 _task->make_reference_grey(obj);
1705 }
1706
1707 public:
1708 G1CMSATBBufferClosure(G1CMTask* task, G1CollectedHeap* g1h)
1709 : _task(task), _g1h(g1h) { }
1710
1711 virtual void do_buffer(void** buffer, size_t size) {
1712 for (size_t i = 0; i < size; ++i) {
1713 do_entry(buffer[i]);
1714 }
1715 }
1716 };
1717
1718 class G1RemarkThreadsClosure : public ThreadClosure {
1719 G1CMSATBBufferClosure _cm_satb_cl;
1720 G1CMOopClosure _cm_cl;
1721 MarkingCodeBlobClosure _code_cl;
1722 int _thread_parity;
1723
1724 public:
1725 G1RemarkThreadsClosure(G1CollectedHeap* g1h, G1CMTask* task) :
1726 _cm_satb_cl(task, g1h),
1727 _cm_cl(g1h, g1h->concurrent_mark(), task),
1728 _code_cl(&_cm_cl, !CodeBlobToOopClosure::FixRelocations),
1729 _thread_parity(Threads::thread_claim_parity()) {}
1730
1731 void do_thread(Thread* thread) {
1732 if (thread->is_Java_thread()) {
1733 if (thread->claim_oops_do(true, _thread_parity)) {
1734 JavaThread* jt = (JavaThread*)thread;
1735
1736 // In theory it should not be necessary to explicitly walk the nmethods to find roots for concurrent marking;
1737 // however, oops reachable from nmethods have very complex lifecycles:
1738 // * Alive if on the stack of an executing method
1739 // * Weakly reachable otherwise
1740 // Some objects reachable from nmethods, such as the class loader (or klass_holder) of the receiver, should be
1741 // live by the SATB invariant, but other oops recorded in nmethods may behave differently.
1742 jt->nmethods_do(&_code_cl);
1743
1744 jt->satb_mark_queue().apply_closure_and_empty(&_cm_satb_cl);
1745 }
1746 } else if (thread->is_VM_thread()) {
1747 if (thread->claim_oops_do(true, _thread_parity)) {
1748 JavaThread::satb_mark_queue_set().shared_satb_queue()->apply_closure_and_empty(&_cm_satb_cl);
1749 }
1750 }
1751 }
1752 };
1753
1754 class G1CMRemarkTask: public AbstractGangTask {
1755 private:
1756 G1ConcurrentMark* _cm;
1757 public:
1758 void work(uint worker_id) {
1759 G1CMTask* task = _cm->task(worker_id);
1760 task->record_start_time();
1761 {
1762 ResourceMark rm;
1763 HandleMark hm;
1764
1765 G1RemarkThreadsClosure threads_f(G1CollectedHeap::heap(), task);
1766 Threads::threads_do(&threads_f);
1767 }
1768
1769 do {
1770 task->do_marking_step(1000000000.0 /* something very large */,
1771 true /* do_termination */,
1772 false /* is_serial */);
1773 } while (task->has_aborted() && !_cm->has_overflown());
1774 // If we overflow, then we do not want to restart. We instead
1775 // want to abort remark and do concurrent marking again.
1776 task->record_end_time();
1777 }
1778
1779 G1CMRemarkTask(G1ConcurrentMark* cm, uint active_workers) :
1780 AbstractGangTask("Par Remark"), _cm(cm) {
1781 _cm->terminator()->reset_for_reuse(active_workers);
1782 }
1783 };
1784
1785 void G1ConcurrentMark::checkpoint_roots_final_work() {
1786 ResourceMark rm;
1787 HandleMark hm;
1788 G1CollectedHeap* g1h = G1CollectedHeap::heap();
1789
1790 GCTraceTime(Debug, gc, phases) trace("Finalize Marking", _gc_timer_cm);
1791
1792 g1h->ensure_parsability(false);
1793
1794 // This is remark, so we'll use up all active threads.
1795 uint active_workers = g1h->workers()->active_workers();
1796 set_concurrency_and_phase(active_workers, false /* concurrent */);
1797 // Leave _parallel_marking_threads at its
1798 // value originally calculated in the G1ConcurrentMark
1799 // constructor and pass values of the active workers
1800 // through the gang in the task.
1801
1802 {
1803 StrongRootsScope srs(active_workers);
1804
1805 G1CMRemarkTask remarkTask(this, active_workers);
1806 // We will start all available threads, even if we decide that the
1807 // active_workers will be fewer. The extra ones will just bail out
1808 // immediately.
1809 g1h->workers()->run_task(&remarkTask);
1810 }
1811
1812 SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set();
1813 guarantee(has_overflown() ||
1814 satb_mq_set.completed_buffers_num() == 0,
1815 "Invariant: has_overflown = %s, num buffers = " SIZE_FORMAT,
1816 BOOL_TO_STR(has_overflown()),
1817 satb_mq_set.completed_buffers_num());
1818
1819 print_stats();
1820 }
1821
1822 void G1ConcurrentMark::clear_range_in_prev_bitmap(MemRegion mr) {
1823 _prev_mark_bitmap->clear_range(mr);
1824 }
1825
1826 HeapRegion*
1827 G1ConcurrentMark::claim_region(uint worker_id) {
1828 // "checkpoint" the finger
1829 HeapWord* finger = _finger;
1830
1831 // _heap_end will not change underneath our feet; it only changes at
1832 // yield points.
1833 while (finger < _heap_end) {
1834 assert(_g1h->is_in_g1_reserved(finger), "invariant");
1835
1836 HeapRegion* curr_region = _g1h->heap_region_containing(finger);
1837 // Make sure that the reads below do not float before loading curr_region.
1838 OrderAccess::loadload();
1839 // The above heap_region_containing() may return NULL as we always scan
1840 // until the end of the heap. In this case, just jump to the next region.
1841 HeapWord* end = curr_region != NULL ? curr_region->end() : finger + HeapRegion::GrainWords;
1842
1843 // Is the gap between reading the finger and doing the CAS too long?
1844 HeapWord* res = Atomic::cmpxchg(end, &_finger, finger);
1845 if (res == finger && curr_region != NULL) {
1846 // we succeeded
1847 HeapWord* bottom = curr_region->bottom();
1848 HeapWord* limit = curr_region->next_top_at_mark_start();
1849
1850 // Notice that _finger == end cannot be guaranteed here since
1851 // someone else might have moved the finger even further.
1852 assert(_finger >= end, "the finger should have moved forward");
1853
1854 if (limit > bottom) {
1855 return curr_region;
1856 } else {
1857 assert(limit == bottom,
1858 "the region limit should be at bottom");
1859 // we return NULL and the caller should try calling
1860 // claim_region() again.
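// An illustrative caller-side retry loop (cf. the claiming loop in
// G1CMTask::do_marking_step(); 'cm' and 'worker_id' are stand-in names):
//
//   HeapRegion* r = NULL;
//   while (r == NULL && !cm->out_of_regions()) {
//     r = cm->claim_region(worker_id);  // NULL may just mean "retry"
//   }
//
// i.e. a NULL result is not authoritative; only out_of_regions()
// says that claiming is really done.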
1861 return NULL; 1862 } 1863 } else { 1864 assert(_finger > finger, "the finger should have moved forward"); 1865 // read it again 1866 finger = _finger; 1867 } 1868 } 1869 1870 return NULL; 1871 } 1872 1873 #ifndef PRODUCT 1874 class VerifyNoCSetOops VALUE_OBJ_CLASS_SPEC { 1875 private: 1876 G1CollectedHeap* _g1h; 1877 const char* _phase; 1878 int _info; 1879 1880 public: 1881 VerifyNoCSetOops(const char* phase, int info = -1) : 1882 _g1h(G1CollectedHeap::heap()), 1883 _phase(phase), 1884 _info(info) 1885 { } 1886 1887 void operator()(G1TaskQueueEntry task_entry) const { 1888 if (task_entry.is_array_slice()) { 1889 guarantee(_g1h->is_in_reserved(task_entry.slice()), "Slice " PTR_FORMAT " must be in heap.", p2i(task_entry.slice())); 1890 return; 1891 } 1892 guarantee(oopDesc::is_oop(task_entry.obj()), 1893 "Non-oop " PTR_FORMAT ", phase: %s, info: %d", 1894 p2i(task_entry.obj()), _phase, _info); 1895 guarantee(!_g1h->is_in_cset(task_entry.obj()), 1896 "obj: " PTR_FORMAT " in CSet, phase: %s, info: %d", 1897 p2i(task_entry.obj()), _phase, _info); 1898 } 1899 }; 1900 1901 void G1ConcurrentMark::verify_no_cset_oops() { 1902 assert(SafepointSynchronize::is_at_safepoint(), "should be at a safepoint"); 1903 if (!G1CollectedHeap::heap()->collector_state()->mark_in_progress()) { 1904 return; 1905 } 1906 1907 // Verify entries on the global mark stack 1908 _global_mark_stack.iterate(VerifyNoCSetOops("Stack")); 1909 1910 // Verify entries on the task queues 1911 for (uint i = 0; i < _max_num_tasks; ++i) { 1912 G1CMTaskQueue* queue = _task_queues->queue(i); 1913 queue->iterate(VerifyNoCSetOops("Queue", i)); 1914 } 1915 1916 // Verify the global finger 1917 HeapWord* global_finger = finger(); 1918 if (global_finger != NULL && global_finger < _heap_end) { 1919 // Since we always iterate over all regions, we might get a NULL HeapRegion 1920 // here. 1921 HeapRegion* global_hr = _g1h->heap_region_containing(global_finger); 1922 guarantee(global_hr == NULL || global_finger == global_hr->bottom(), 1923 "global finger: " PTR_FORMAT " region: " HR_FORMAT, 1924 p2i(global_finger), HR_FORMAT_PARAMS(global_hr)); 1925 } 1926 1927 // Verify the task fingers 1928 assert(_num_concurrent_workers <= _max_num_tasks, "sanity"); 1929 for (uint i = 0; i < _num_concurrent_workers; ++i) { 1930 G1CMTask* task = _tasks[i]; 1931 HeapWord* task_finger = task->finger(); 1932 if (task_finger != NULL && task_finger < _heap_end) { 1933 // See above note on the global finger verification. 
1934 HeapRegion* task_hr = _g1h->heap_region_containing(task_finger); 1935 guarantee(task_hr == NULL || task_finger == task_hr->bottom() || 1936 !task_hr->in_collection_set(), 1937 "task finger: " PTR_FORMAT " region: " HR_FORMAT, 1938 p2i(task_finger), HR_FORMAT_PARAMS(task_hr)); 1939 } 1940 } 1941 } 1942 #endif // PRODUCT 1943 void G1ConcurrentMark::create_live_data() { 1944 _g1h->g1_rem_set()->create_card_live_data(_concurrent_workers, _next_mark_bitmap); 1945 } 1946 1947 void G1ConcurrentMark::finalize_live_data() { 1948 _g1h->g1_rem_set()->finalize_card_live_data(_g1h->workers(), _next_mark_bitmap); 1949 } 1950 1951 void G1ConcurrentMark::verify_live_data() { 1952 _g1h->g1_rem_set()->verify_card_live_data(_g1h->workers(), _next_mark_bitmap); 1953 } 1954 1955 void G1ConcurrentMark::clear_live_data(WorkGang* workers) { 1956 _g1h->g1_rem_set()->clear_card_live_data(workers); 1957 } 1958 1959 #ifdef ASSERT 1960 void G1ConcurrentMark::verify_live_data_clear() { 1961 _g1h->g1_rem_set()->verify_card_live_data_is_clear(); 1962 } 1963 #endif 1964 1965 void G1ConcurrentMark::print_stats() { 1966 if (!log_is_enabled(Debug, gc, stats)) { 1967 return; 1968 } 1969 log_debug(gc, stats)("---------------------------------------------------------------------"); 1970 for (size_t i = 0; i < _num_active_tasks; ++i) { 1971 _tasks[i]->print_stats(); 1972 log_debug(gc, stats)("---------------------------------------------------------------------"); 1973 } 1974 } 1975 1976 void G1ConcurrentMark::abort() { 1977 if (!cm_thread()->during_cycle() || _has_aborted) { 1978 // We haven't started a concurrent cycle or we have already aborted it. No need to do anything. 1979 return; 1980 } 1981 1982 // Clear all marks in the next bitmap for the next marking cycle. This will allow us to skip the next 1983 // concurrent bitmap clearing. 1984 { 1985 GCTraceTime(Debug, gc)("Clear Next Bitmap"); 1986 clear_bitmap(_next_mark_bitmap, _g1h->workers(), false); 1987 } 1988 // Note we cannot clear the previous marking bitmap here 1989 // since VerifyDuringGC verifies the objects marked during 1990 // a full GC against the previous bitmap. 1991 1992 { 1993 GCTraceTime(Debug, gc)("Clear Live Data"); 1994 clear_live_data(_g1h->workers()); 1995 } 1996 DEBUG_ONLY({ 1997 GCTraceTime(Debug, gc)("Verify Live Data Clear"); 1998 verify_live_data_clear(); 1999 }) 2000 // Empty mark stack 2001 reset_marking_state(); 2002 for (uint i = 0; i < _max_num_tasks; ++i) { 2003 _tasks[i]->clear_region_fields(); 2004 } 2005 _first_overflow_barrier_sync.abort(); 2006 _second_overflow_barrier_sync.abort(); 2007 _has_aborted = true; 2008 2009 SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set(); 2010 satb_mq_set.abandon_partial_marking(); 2011 // This can be called either during or outside marking, we'll read 2012 // the expected_active value from the SATB queue set. 2013 satb_mq_set.set_active_all_threads( 2014 false, /* new active value */ 2015 satb_mq_set.is_active() /* expected_active */); 2016 } 2017 2018 static void print_ms_time_info(const char* prefix, const char* name, 2019 NumberSeq& ns) { 2020 log_trace(gc, marking)("%s%5d %12s: total time = %8.2f s (avg = %8.2f ms).", 2021 prefix, ns.num(), name, ns.sum()/1000.0, ns.avg()); 2022 if (ns.num() > 0) { 2023 log_trace(gc, marking)("%s [std. 
dev = %8.2f ms, max = %8.2f ms]", 2024 prefix, ns.sd(), ns.maximum()); 2025 } 2026 } 2027 2028 void G1ConcurrentMark::print_summary_info() { 2029 Log(gc, marking) log; 2030 if (!log.is_trace()) { 2031 return; 2032 } 2033 2034 log.trace(" Concurrent marking:"); 2035 print_ms_time_info(" ", "init marks", _init_times); 2036 print_ms_time_info(" ", "remarks", _remark_times); 2037 { 2038 print_ms_time_info(" ", "final marks", _remark_mark_times); 2039 print_ms_time_info(" ", "weak refs", _remark_weak_ref_times); 2040 2041 } 2042 print_ms_time_info(" ", "cleanups", _cleanup_times); 2043 log.trace(" Finalize live data total time = %8.2f s (avg = %8.2f ms).", 2044 _total_counting_time, (_cleanup_times.num() > 0 ? _total_counting_time * 1000.0 / (double)_cleanup_times.num() : 0.0)); 2045 if (G1ScrubRemSets) { 2046 log.trace(" RS scrub total time = %8.2f s (avg = %8.2f ms).", 2047 _total_rs_scrub_time, (_cleanup_times.num() > 0 ? _total_rs_scrub_time * 1000.0 / (double)_cleanup_times.num() : 0.0)); 2048 } 2049 log.trace(" Total stop_world time = %8.2f s.", 2050 (_init_times.sum() + _remark_times.sum() + _cleanup_times.sum())/1000.0); 2051 log.trace(" Total concurrent time = %8.2f s (%8.2f s marking).", 2052 cm_thread()->vtime_accum(), cm_thread()->vtime_mark_accum()); 2053 } 2054 2055 void G1ConcurrentMark::print_worker_threads_on(outputStream* st) const { 2056 _concurrent_workers->print_worker_threads_on(st); 2057 } 2058 2059 void G1ConcurrentMark::threads_do(ThreadClosure* tc) const { 2060 _concurrent_workers->threads_do(tc); 2061 } 2062 2063 void G1ConcurrentMark::print_on_error(outputStream* st) const { 2064 st->print_cr("Marking Bits (Prev, Next): (CMBitMap*) " PTR_FORMAT ", (CMBitMap*) " PTR_FORMAT, 2065 p2i(_prev_mark_bitmap), p2i(_next_mark_bitmap)); 2066 _prev_mark_bitmap->print_on_error(st, " Prev Bits: "); 2067 _next_mark_bitmap->print_on_error(st, " Next Bits: "); 2068 } 2069 2070 static ReferenceProcessor* get_cm_oop_closure_ref_processor(G1CollectedHeap* g1h) { 2071 ReferenceProcessor* result = g1h->ref_processor_cm(); 2072 assert(result != NULL, "CM reference processor should not be NULL"); 2073 return result; 2074 } 2075 2076 G1CMOopClosure::G1CMOopClosure(G1CollectedHeap* g1h, 2077 G1ConcurrentMark* cm, 2078 G1CMTask* task) 2079 : MetadataAwareOopClosure(get_cm_oop_closure_ref_processor(g1h)), 2080 _g1h(g1h), _cm(cm), _task(task) 2081 { } 2082 2083 void G1CMTask::setup_for_region(HeapRegion* hr) { 2084 assert(hr != NULL, 2085 "claim_region() should have filtered out NULL regions"); 2086 _curr_region = hr; 2087 _finger = hr->bottom(); 2088 update_region_limit(); 2089 } 2090 2091 void G1CMTask::update_region_limit() { 2092 HeapRegion* hr = _curr_region; 2093 HeapWord* bottom = hr->bottom(); 2094 HeapWord* limit = hr->next_top_at_mark_start(); 2095 2096 if (limit == bottom) { 2097 // The region was collected underneath our feet. 2098 // We set the finger to bottom to ensure that the bitmap 2099 // iteration that will follow this will not do anything. 2100 // (this is not a condition that holds when we set the region up, 2101 // as the region is not supposed to be empty in the first place) 2102 _finger = bottom; 2103 } else if (limit >= _region_limit) { 2104 assert(limit >= _finger, "peace of mind"); 2105 } else { 2106 assert(limit < _region_limit, "only way to get here"); 2107 // This can happen under some pretty unusual circumstances. An 2108 // evacuation pause empties the region underneath our feet (NTAMS 2109 // at bottom). 
We then do some allocation in the region (NTAMS
2110 // stays at bottom), followed by the region being used as a GC
2111 // alloc region (NTAMS will move to top() and the objects
2112 // originally below it will be grayed). All objects now marked in
2113 // the region are explicitly grayed, if below the global finger,
2114 // and in fact we do not need to scan anything else. So, we simply
2115 // set _finger to be limit to ensure that the bitmap iteration
2116 // doesn't do anything.
2117 _finger = limit;
2118 }
2119
2120 _region_limit = limit;
2121 }
2122
2123 void G1CMTask::giveup_current_region() {
2124 assert(_curr_region != NULL, "invariant");
2125 clear_region_fields();
2126 }
2127
2128 void G1CMTask::clear_region_fields() {
2129 // Values for these three fields that indicate that we're not
2130 // holding on to a region.
2131 _curr_region = NULL;
2132 _finger = NULL;
2133 _region_limit = NULL;
2134 }
2135
2136 void G1CMTask::set_cm_oop_closure(G1CMOopClosure* cm_oop_closure) {
2137 if (cm_oop_closure == NULL) {
2138 assert(_cm_oop_closure != NULL, "invariant");
2139 } else {
2140 assert(_cm_oop_closure == NULL, "invariant");
2141 }
2142 _cm_oop_closure = cm_oop_closure;
2143 }
2144
2145 void G1CMTask::reset(G1CMBitMap* next_mark_bitmap) {
2146 guarantee(next_mark_bitmap != NULL, "invariant");
2147 _next_mark_bitmap = next_mark_bitmap;
2148 clear_region_fields();
2149
2150 _calls = 0;
2151 _elapsed_time_ms = 0.0;
2152 _termination_time_ms = 0.0;
2153 _termination_start_time_ms = 0.0;
2154 }
2155
2156 bool G1CMTask::should_exit_termination() {
2157 regular_clock_call();
2158 // This is called when we are in the termination protocol. We should
2159 // quit if, for some reason, this task wants to abort or the global
2160 // stack is not empty (this means that we can get work from it).
2161 return !_cm->mark_stack_empty() || has_aborted();
2162 }
2163
2164 void G1CMTask::reached_limit() {
2165 assert(_words_scanned >= _words_scanned_limit ||
2166 _refs_reached >= _refs_reached_limit,
2167 "shouldn't have been called otherwise");
2168 regular_clock_call();
2169 }
2170
2171 void G1CMTask::regular_clock_call() {
2172 if (has_aborted()) return;
2173
2174 // First, we need to recalculate the words scanned and refs reached
2175 // limits for the next clock call.
2176 recalculate_limits();
2177
2178 // During the regular clock call we do the following:
2179
2180 // (1) If an overflow has been flagged, then we abort.
2181 if (_cm->has_overflown()) {
2182 set_has_aborted();
2183 return;
2184 }
2185
2186 // If we are not concurrent (i.e. we're doing remark) we don't need
2187 // to check anything else. The other steps are only needed during
2188 // the concurrent marking phase.
2189 if (!_concurrent) {
2190 return;
2191 }
2192
2193 // (2) If marking has been aborted for Full GC, then we also abort.
2194 if (_cm->has_aborted()) {
2195 set_has_aborted();
2196 return;
2197 }
2198
2199 double curr_time_ms = os::elapsedVTime() * 1000.0;
2200
2201 // (3) We check whether we should yield. If we have to, then we abort.
2202 if (SuspendibleThreadSet::should_yield()) {
2203 // We should yield. To do this we abort the task. The caller is
2204 // responsible for yielding.
2205 set_has_aborted();
2206 return;
2207 }
2208
2209 // (4) We check whether we've reached our time quota. If we have,
2210 // then we abort.
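// (Recall that do_marking_step() already subtracted a prediction of the
// usual overshoot, diff_prediction_ms, from _time_target_ms, so the
// comparison below is against an already-tightened target.)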
2211 double elapsed_time_ms = curr_time_ms - _start_time_ms;
2212 if (elapsed_time_ms > _time_target_ms) {
2213 set_has_aborted();
2214 _has_timed_out = true;
2215 return;
2216 }
2217
2218 // (5) Finally, we check whether there are enough completed SATB
2219 // buffers available for processing. If there are, we abort.
2220 SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set();
2221 if (!_draining_satb_buffers && satb_mq_set.process_completed_buffers()) {
2222 // We do need to process SATB buffers; we'll abort and restart
2223 // the marking task to do so.
2224 set_has_aborted();
2225 return;
2226 }
2227 }
2228
2229 void G1CMTask::recalculate_limits() {
2230 _real_words_scanned_limit = _words_scanned + words_scanned_period;
2231 _words_scanned_limit = _real_words_scanned_limit;
2232
2233 _real_refs_reached_limit = _refs_reached + refs_reached_period;
2234 _refs_reached_limit = _real_refs_reached_limit;
2235 }
2236
2237 void G1CMTask::decrease_limits() {
2238 // This is called when we believe that we're going to do an infrequent
2239 // operation which will increase the per-byte scanned cost (i.e. move
2240 // entries to/from the global stack). It basically tries to decrease the
2241 // scanning limit so that the clock is called earlier.
2242
2243 _words_scanned_limit = _real_words_scanned_limit - 3 * words_scanned_period / 4;
2244 _refs_reached_limit = _real_refs_reached_limit - 3 * refs_reached_period / 4;
2245 }
2246
2247 void G1CMTask::move_entries_to_global_stack() {
2248 // Local array where we'll store the entries that will be popped
2249 // from the local queue.
2250 G1TaskQueueEntry buffer[G1CMMarkStack::EntriesPerChunk];
2251
2252 size_t n = 0;
2253 G1TaskQueueEntry task_entry;
2254 while (n < G1CMMarkStack::EntriesPerChunk && _task_queue->pop_local(task_entry)) {
2255 buffer[n] = task_entry;
2256 ++n;
2257 }
2258 if (n < G1CMMarkStack::EntriesPerChunk) {
2259 buffer[n] = G1TaskQueueEntry();
2260 }
2261
2262 if (n > 0) {
2263 if (!_cm->mark_stack_push(buffer)) {
2264 set_has_aborted();
2265 }
2266 }
2267
2268 // This operation was quite expensive, so decrease the limits.
2269 decrease_limits();
2270 }
2271
2272 bool G1CMTask::get_entries_from_global_stack() {
2273 // Local array where we'll store the entries that will be popped
2274 // from the global stack.
2275 G1TaskQueueEntry buffer[G1CMMarkStack::EntriesPerChunk];
2276
2277 if (!_cm->mark_stack_pop(buffer)) {
2278 return false;
2279 }
2280
2281 // We did actually pop at least one entry.
2282 for (size_t i = 0; i < G1CMMarkStack::EntriesPerChunk; ++i) {
2283 G1TaskQueueEntry task_entry = buffer[i];
2284 if (task_entry.is_null()) {
2285 break;
2286 }
2287 assert(task_entry.is_array_slice() || oopDesc::is_oop(task_entry.obj()), "Element " PTR_FORMAT " must be an array slice or oop", p2i(task_entry.obj()));
2288 bool success = _task_queue->push(task_entry);
2289 // We only call this when the local queue is empty or under a
2290 // given target limit. So, we do not expect this push to fail.
2291 assert(success, "invariant");
2292 }
2293
2294 // This operation was quite expensive, so decrease the limits.
2295 decrease_limits();
2296 return true;
2297 }
2298
2299 void G1CMTask::drain_local_queue(bool partially) {
2300 if (has_aborted()) {
2301 return;
2302 }
2303
2304 // Decide what the target size is, depending on whether we're going to
2305 // drain it partially (so that other tasks can steal if they run out
2306 // of things to do) or totally (at the very end).
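// Illustrative numbers (an assumption, not from this source): with
// max_elems() == 16384 and GCDrainStackTargetSize at its default of 64,
// the partial target below is MIN2(16384 / 3, 64) = 64 entries, leaving
// plenty of entries in the queue for other tasks to steal.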
2307 size_t target_size;
2308 if (partially) {
2309 target_size = MIN2((size_t)_task_queue->max_elems()/3, GCDrainStackTargetSize);
2310 } else {
2311 target_size = 0;
2312 }
2313
2314 if (_task_queue->size() > target_size) {
2315 G1TaskQueueEntry entry;
2316 bool ret = _task_queue->pop_local(entry);
2317 while (ret) {
2318 scan_task_entry(entry);
2319 if (_task_queue->size() <= target_size || has_aborted()) {
2320 ret = false;
2321 } else {
2322 ret = _task_queue->pop_local(entry);
2323 }
2324 }
2325 }
2326 }
2327
2328 void G1CMTask::drain_global_stack(bool partially) {
2329 if (has_aborted()) return;
2330
2331 // We have a policy to drain the local queue before we attempt to
2332 // drain the global stack.
2333 assert(partially || _task_queue->size() == 0, "invariant");
2334
2335 // Decide what the target size is, depending on whether we're going to
2336 // drain it partially (so that other tasks can steal if they run out
2337 // of things to do) or totally (at the very end).
2338 // Notice that when draining the global mark stack partially, due to the raciness
2339 // of the mark stack size update we might in fact drop below the target. But,
2340 // this is not a problem.
2341 // In case of total draining, we simply process until the global mark stack is
2342 // totally empty, disregarding the size counter.
2343 if (partially) {
2344 size_t const target_size = _cm->partial_mark_stack_size_target();
2345 while (!has_aborted() && _cm->mark_stack_size() > target_size) {
2346 if (get_entries_from_global_stack()) {
2347 drain_local_queue(partially);
2348 }
2349 }
2350 } else {
2351 while (!has_aborted() && get_entries_from_global_stack()) {
2352 drain_local_queue(partially);
2353 }
2354 }
2355 }
2356
2357 // The SATB queue has several assumptions about whether to call the par or
2358 // non-par versions of the methods. This is why some of the code is
2359 // replicated. We should really get rid of the single-threaded version
2360 // of the code to simplify things.
2361 void G1CMTask::drain_satb_buffers() {
2362 if (has_aborted()) return;
2363
2364 // We set this so that the regular clock knows that we're in the
2365 // middle of draining buffers and doesn't set the abort flag when it
2366 // notices that SATB buffers are available for draining. It'd be
2367 // very counterproductive if it did that. :-)
2368 _draining_satb_buffers = true;
2369
2370 G1CMSATBBufferClosure satb_cl(this, _g1h);
2371 SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set();
2372
2373 // This keeps claiming and applying the closure to completed buffers
2374 // until we run out of buffers or we need to abort.
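// Note that regular_clock_call() runs only between whole buffers, so
// the latency for noticing an abort condition here is bounded by the
// time it takes to process a single SATB buffer.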
2375 while (!has_aborted() &&
2376 satb_mq_set.apply_closure_to_completed_buffer(&satb_cl)) {
2377 regular_clock_call();
2378 }
2379
2380 _draining_satb_buffers = false;
2381
2382 assert(has_aborted() ||
2383 _concurrent ||
2384 satb_mq_set.completed_buffers_num() == 0, "invariant");
2385
2386 // Again, this was a potentially expensive operation; decrease the
2387 // limits to get the regular clock call early.
2388 decrease_limits();
2389 }
2390
2391 void G1CMTask::print_stats() {
2392 log_debug(gc, stats)("Marking Stats, task = %u, calls = %u",
2393 _worker_id, _calls);
2394 log_debug(gc, stats)("  Elapsed time = %1.2lfms, Termination time = %1.2lfms",
2395 _elapsed_time_ms, _termination_time_ms);
2396 log_debug(gc, stats)("  Step Times (cum): num = %d, avg = %1.2lfms, sd = %1.2lfms",
2397 _step_times_ms.num(), _step_times_ms.avg(),
2398 _step_times_ms.sd());
2399 log_debug(gc, stats)("                    max = %1.2lfms, total = %1.2lfms",
2400 _step_times_ms.maximum(), _step_times_ms.sum());
2401 }
2402
2403 bool G1ConcurrentMark::try_stealing(uint worker_id, int* hash_seed, G1TaskQueueEntry& task_entry) {
2404 return _task_queues->steal(worker_id, hash_seed, task_entry);
2405 }
2406
2407 /*****************************************************************************
2408
2409 The do_marking_step(time_target_ms, ...) method is the building
2410 block of the parallel marking framework. It can be called in parallel
2411 with other invocations of do_marking_step() on different tasks
2412 (but only one per task, obviously) and concurrently with the
2413 mutator threads, or during remark, hence it eliminates the need
2414 for two versions of the code. When called during remark, it will
2415 pick up from where the task left off during the concurrent marking
2416 phase. Interestingly, tasks are also claimable during evacuation
2417 pauses, since do_marking_step() ensures that it aborts before
2418 it needs to yield.
2419
2420 The data structures that it uses to do marking work are the
2421 following:
2422
2423 (1) Marking Bitmap. If there are gray objects that appear only
2424 on the bitmap (this happens either when dealing with an overflow
2425 or when the initial marking phase has simply marked the roots
2426 and didn't push them on the stack), then tasks claim heap
2427 regions whose bitmap they then scan to find gray objects. A
2428 global finger indicates where the end of the last claimed region
2429 is. A local finger indicates how far into the region a task has
2430 scanned. The two fingers are used to determine how to gray an
2431 object (i.e. whether simply marking it is OK, as it will be
2432 visited by a task in the future, or whether it needs to be also
2433 pushed on a stack).
2434
2435 (2) Local Queue. The local queue of the task which is accessed
2436 reasonably efficiently by the task. Other tasks can steal from
2437 it when they run out of work. Throughout the marking phase, a
2438 task attempts to keep its local queue short but not totally
2439 empty, so that entries are available for stealing by other
2440 tasks. Only when there is no more work, a task will totally
2441 drain its local queue.
2442
2443 (3) Global Mark Stack. This handles local queue overflow. During
2444 marking only sets of entries are moved between it and the local
2445 queues, as access to it requires a mutex and finer-grained
2446 interaction with it might cause contention. If it
2447 overflows, then the marking phase should restart and iterate
2448 over the bitmap to identify gray objects.
Throughout the marking
2449 phase, tasks attempt to keep the global mark stack at a small
2450 length but not totally empty, so that entries are available for
2451 popping by other tasks. Only when there is no more work, tasks
2452 will totally drain the global mark stack.
2453
2454 (4) SATB Buffer Queue. This is where completed SATB buffers are
2455 made available. Buffers are regularly removed from this queue
2456 and scanned for roots, so that the queue doesn't get too
2457 long. During remark, all completed buffers are processed, as
2458 well as the filled-in parts of any uncompleted buffers.
2459
2460 The do_marking_step() method tries to abort when the time target
2461 has been reached. There are a few other cases when the
2462 do_marking_step() method also aborts:
2463
2464 (1) When the marking phase has been aborted (after a Full GC).
2465
2466 (2) When a global overflow (on the global stack) has been
2467 triggered. Before the task aborts, it will actually sync up with
2468 the other tasks to ensure that all the marking data structures
2469 (local queues, stacks, fingers etc.) are re-initialized so that
2470 when do_marking_step() completes, the marking phase can
2471 immediately restart.
2472
2473 (3) When enough completed SATB buffers are available. The
2474 do_marking_step() method only tries to drain SATB buffers right
2475 at the beginning. So, if enough buffers are available, the
2476 marking step aborts and the SATB buffers are processed at
2477 the beginning of the next invocation.
2478
2479 (4) To yield. When we have to yield, we abort and yield
2480 right at the end of do_marking_step(). This saves us from a lot
2481 of hassle as, by yielding, we might allow a Full GC. If this
2482 happens then objects will be compacted underneath our feet, the
2483 heap might shrink, etc. We avoid having to check for this by just
2484 aborting and doing the yield right at the end.
2485
2486 From the above it follows that the do_marking_step() method should
2487 be called in a loop (or, otherwise, regularly) until it completes.
2488
2489 If a marking step completes without its has_aborted() flag being
2490 true, it means it has completed the current marking phase (and
2491 also all other marking tasks have done so and have all synced up).
2492
2493 A method called regular_clock_call() is invoked "regularly" (in
2494 sub-ms intervals) throughout marking. It is this clock method that
2495 checks all the abort conditions which were mentioned above and
2496 decides when the task should abort. A work-based scheme is used to
2497 trigger this clock method: when the number of object words the
2498 marking phase has scanned or the number of references the marking
2499 phase has visited reaches a given limit. Additional invocations of
2500 the clock method have been planted in a few other strategic places
2501 too. The initial reason for the clock method was to avoid calling
2502 vtime too regularly, as it is quite expensive. So, once it was in
2503 place, it was natural to piggy-back all the other conditions on it
2504 too and not constantly check them throughout the code.
2505
2506 If do_termination is true then do_marking_step will enter its
2507 termination protocol.
2508
2509 The value of is_serial must be true when do_marking_step is being
2510 called serially (i.e. by the VMThread) and do_marking_step should
2511 skip any synchronization in the termination and overflow code.
2512 Examples include the serial remark code and the serial reference
2513 processing closures.
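   In both cases the caller is expected to re-invoke do_marking_step()
   until it completes without aborting; an illustrative sketch of the
   usual retry loop (cf. G1CMRemarkTask::work() above; 'task', 'cm' and
   'target_ms' are stand-in names):

     do {
       task->do_marking_step(target_ms,
                             true /* do_termination */,
                             is_serial);
     } while (task->has_aborted() && !cm->has_overflown());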
2514 2515 The value of is_serial must be false when do_marking_step is 2516 being called by any of the worker threads in a work gang. 2517 Examples include the concurrent marking code (CMMarkingTask), 2518 the MT remark code, and the MT reference processing closures. 2519 2520 *****************************************************************************/ 2521 2522 void G1CMTask::do_marking_step(double time_target_ms, 2523 bool do_termination, 2524 bool is_serial) { 2525 assert(time_target_ms >= 1.0, "minimum granularity is 1ms"); 2526 assert(_concurrent == _cm->concurrent(), "they should be the same"); 2527 2528 _start_time_ms = os::elapsedVTime() * 1000.0; 2529 2530 // If do_stealing is true then do_marking_step will attempt to 2531 // steal work from the other G1CMTasks. It only makes sense to 2532 // enable stealing when the termination protocol is enabled 2533 // and do_marking_step() is not being called serially. 2534 bool do_stealing = do_termination && !is_serial; 2535 2536 double diff_prediction_ms = _g1h->g1_policy()->predictor().get_new_prediction(&_marking_step_diffs_ms); 2537 _time_target_ms = time_target_ms - diff_prediction_ms; 2538 2539 // set up the variables that are used in the work-based scheme to 2540 // call the regular clock method 2541 _words_scanned = 0; 2542 _refs_reached = 0; 2543 recalculate_limits(); 2544 2545 // clear all flags 2546 clear_has_aborted(); 2547 _has_timed_out = false; 2548 _draining_satb_buffers = false; 2549 2550 ++_calls; 2551 2552 // Set up the bitmap and oop closures. Anything that uses them is 2553 // eventually called from this method, so it is OK to allocate these 2554 // statically. 2555 G1CMBitMapClosure bitmap_closure(this, _cm); 2556 G1CMOopClosure cm_oop_closure(_g1h, _cm, this); 2557 set_cm_oop_closure(&cm_oop_closure); 2558 2559 if (_cm->has_overflown()) { 2560 // This can happen if the mark stack overflows during a GC pause 2561 // and this task, after a yield point, restarts. We have to abort 2562 // as we need to get into the overflow protocol which happens 2563 // right at the end of this task. 2564 set_has_aborted(); 2565 } 2566 2567 // First drain any available SATB buffers. After this, we will not 2568 // look at SATB buffers before the next invocation of this method. 2569 // If enough completed SATB buffers are queued up, the regular clock 2570 // will abort this task so that it restarts. 2571 drain_satb_buffers(); 2572 // ...then partially drain the local queue and the global stack 2573 drain_local_queue(true); 2574 drain_global_stack(true); 2575 2576 do { 2577 if (!has_aborted() && _curr_region != NULL) { 2578 // This means that we're already holding on to a region. 2579 assert(_finger != NULL, "if region is not NULL, then the finger " 2580 "should not be NULL either"); 2581 2582 // We might have restarted this task after an evacuation pause 2583 // which might have evacuated the region we're holding on to 2584 // underneath our feet. Let's read its limit again to make sure 2585 // that we do not iterate over a region of the heap that 2586 // contains garbage (update_region_limit() will also move 2587 // _finger to the start of the region if it is found empty). 2588 update_region_limit(); 2589 // We will start from _finger not from the start of the region, 2590 // as we might be restarting this task after aborting half-way 2591 // through scanning this region. In this case, _finger points to 2592 // the address where we last found a marked object. If this is a 2593 // fresh region, _finger points to start(). 
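// (If the pause emptied the region, update_region_limit() above set
// _finger == _region_limit == bottom(), so the mr below is empty and
// the is_empty() case a few lines down gives the region up at once.)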
2594 MemRegion mr = MemRegion(_finger, _region_limit);
2595
2596 assert(!_curr_region->is_humongous() || mr.start() == _curr_region->bottom(),
2597 "humongous regions should go around loop once only");
2598
2599 // Some special cases:
2600 // If the memory region is empty, we can just give up the region.
2601 // If the current region is humongous then we only need to check
2602 // the bitmap for the bit associated with the start of the object,
2603 // scan the object if it's live, and give up the region.
2604 // Otherwise, let's iterate over the bitmap of the part of the region
2605 // that is left.
2606 // If the iteration is successful, give up the region.
2607 if (mr.is_empty()) {
2608 giveup_current_region();
2609 regular_clock_call();
2610 } else if (_curr_region->is_humongous() && mr.start() == _curr_region->bottom()) {
2611 if (_next_mark_bitmap->is_marked(mr.start())) {
2612 // The object is marked - apply the closure
2613 bitmap_closure.do_addr(mr.start());
2614 }
2615 // Even if this task aborted while scanning the humongous object
2616 // we can (and should) give up the current region.
2617 giveup_current_region();
2618 regular_clock_call();
2619 } else if (_next_mark_bitmap->iterate(&bitmap_closure, mr)) {
2620 giveup_current_region();
2621 regular_clock_call();
2622 } else {
2623 assert(has_aborted(), "currently the only way to do so");
2624 // The only way to abort the bitmap iteration is to return
2625 // false from the do_addr() method. However, inside the
2626 // do_addr() method we move the _finger to point to the
2627 // object currently being looked at. So, if we bail out, we
2628 // have definitely set _finger to something non-null.
2629 assert(_finger != NULL, "invariant");
2630
2631 // Region iteration was actually aborted. So now _finger
2632 // points to the address of the object we last scanned. If we
2633 // leave it there, when we restart this task, we will rescan
2634 // the object. It is easy to avoid this. We move the finger by
2635 // enough to point to the next possible object header.
2636 assert(_finger < _region_limit, "invariant");
2637 HeapWord* const new_finger = _finger + ((oop)_finger)->size();
2638 // Check if bitmap iteration was aborted while scanning the last object
2639 if (new_finger >= _region_limit) {
2640 giveup_current_region();
2641 } else {
2642 move_finger_to(new_finger);
2643 }
2644 }
2645 }
2646 // At this point we have either completed iterating over the
2647 // region we were holding on to, or we have aborted.
2648
2649 // We then partially drain the local queue and the global stack.
2650 // (Do we really need this?)
2651 drain_local_queue(true);
2652 drain_global_stack(true);
2653
2654 // Read the note on the claim_region() method on why it might
2655 // return NULL with potentially more regions available for
2656 // claiming and why we have to check out_of_regions() to determine
2657 // whether we're done or not.
2658 while (!has_aborted() && _curr_region == NULL && !_cm->out_of_regions()) {
2659 // We are going to try to claim a new region. We should have
2660 // given up on the previous one.
2661 // Separated the asserts so that we know which one fires.
2662 assert(_curr_region == NULL, "invariant");
2663 assert(_finger == NULL, "invariant");
2664 assert(_region_limit == NULL, "invariant");
2665 HeapRegion* claimed_region = _cm->claim_region(_worker_id);
2666 if (claimed_region != NULL) {
2667 // Yes, we managed to claim one
2668 setup_for_region(claimed_region);
2669 assert(_curr_region == claimed_region, "invariant");
2670 }
2671 // It is important to call the regular clock here. It might take
2672 // a while to claim a region if, for example, we hit a large
2673 // block of empty regions. So we need to call the regular clock
2674 // method once round the loop to make sure it's called
2675 // frequently enough.
2676 regular_clock_call();
2677 }
2678
2679 if (!has_aborted() && _curr_region == NULL) {
2680 assert(_cm->out_of_regions(),
2681 "at this point we should be out of regions");
2682 }
2683 } while (_curr_region != NULL && !has_aborted());
2684
2685 if (!has_aborted()) {
2686 // We cannot check whether the global stack is empty, since other
2687 // tasks might be pushing objects to it concurrently.
2688 assert(_cm->out_of_regions(),
2689 "at this point we should be out of regions");
2690 // Try to reduce the number of available SATB buffers so that
2691 // remark has less work to do.
2692 drain_satb_buffers();
2693 }
2694
2695 // Since we've done everything else, we can now totally drain the
2696 // local queue and global stack.
2697 drain_local_queue(false);
2698 drain_global_stack(false);
2699
2700 // Attempt at work stealing from other tasks' queues.
2701 if (do_stealing && !has_aborted()) {
2702 // We have not aborted. This means that we have finished all that
2703 // we could. Let's try to do some stealing...
2704
2705 // We cannot check whether the global stack is empty, since other
2706 // tasks might be pushing objects to it concurrently.
2707 assert(_cm->out_of_regions() && _task_queue->size() == 0,
2708 "only way to reach here");
2709 while (!has_aborted()) {
2710 G1TaskQueueEntry entry;
2711 if (_cm->try_stealing(_worker_id, &_hash_seed, entry)) {
2712 scan_task_entry(entry);
2713
2714 // And since we're towards the end, let's totally drain the
2715 // local queue and global stack.
2716 drain_local_queue(false);
2717 drain_global_stack(false);
2718 } else {
2719 break;
2720 }
2721 }
2722 }
2723
2724 // We still haven't aborted. Now, let's try to get into the
2725 // termination protocol.
2726 if (do_termination && !has_aborted()) {
2727 // We cannot check whether the global stack is empty, since other
2728 // tasks might be concurrently pushing objects on it.
2729 // Separated the asserts so that we know which one fires.
2730 assert(_cm->out_of_regions(), "only way to reach here");
2731 assert(_task_queue->size() == 0, "only way to reach here");
2732 _termination_start_time_ms = os::elapsedVTime() * 1000.0;
2733
2734 // The G1CMTask class also extends the TerminatorTerminator class,
2735 // hence its should_exit_termination() method will also decide
2736 // whether to exit the termination protocol or not.
2737 bool finished = (is_serial ||
2738 _cm->terminator()->offer_termination(this));
2739 double termination_end_time_ms = os::elapsedVTime() * 1000.0;
2740 _termination_time_ms +=
2741 termination_end_time_ms - _termination_start_time_ms;
2742
2743 if (finished) {
2744 // We're all done.
2745
2746 if (_worker_id == 0) {
2747 // Let's allow task 0 to do this
2748 if (_concurrent) {
2749 assert(_cm->concurrent_marking_in_progress(), "invariant");
2750 // We need to set this to false before the next
2751 // safepoint.
This way we ensure that the marking phase 2752 // doesn't observe any more heap expansions. 2753 _cm->clear_concurrent_marking_in_progress(); 2754 } 2755 } 2756 2757 // We can now guarantee that the global stack is empty, since 2758 // all other tasks have finished. We separated the guarantees so 2759 // that, if a condition is false, we can immediately find out 2760 // which one. 2761 guarantee(_cm->out_of_regions(), "only way to reach here"); 2762 guarantee(_cm->mark_stack_empty(), "only way to reach here"); 2763 guarantee(_task_queue->size() == 0, "only way to reach here"); 2764 guarantee(!_cm->has_overflown(), "only way to reach here"); 2765 } else { 2766 // Apparently there's more work to do. Let's abort this task. It 2767 // will restart it and we can hopefully find more things to do. 2768 set_has_aborted(); 2769 } 2770 } 2771 2772 // Mainly for debugging purposes to make sure that a pointer to the 2773 // closure which was statically allocated in this frame doesn't 2774 // escape it by accident. 2775 set_cm_oop_closure(NULL); 2776 double end_time_ms = os::elapsedVTime() * 1000.0; 2777 double elapsed_time_ms = end_time_ms - _start_time_ms; 2778 // Update the step history. 2779 _step_times_ms.add(elapsed_time_ms); 2780 2781 if (has_aborted()) { 2782 // The task was aborted for some reason. 2783 if (_has_timed_out) { 2784 double diff_ms = elapsed_time_ms - _time_target_ms; 2785 // Keep statistics of how well we did with respect to hitting 2786 // our target only if we actually timed out (if we aborted for 2787 // other reasons, then the results might get skewed). 2788 _marking_step_diffs_ms.add(diff_ms); 2789 } 2790 2791 if (_cm->has_overflown()) { 2792 // This is the interesting one. We aborted because a global 2793 // overflow was raised. This means we have to restart the 2794 // marking phase and start iterating over regions. However, in 2795 // order to do this we have to make sure that all tasks stop 2796 // what they are doing and re-initialize in a safe manner. We 2797 // will achieve this with the use of two barrier sync points. 2798 2799 if (!is_serial) { 2800 // We only need to enter the sync barrier if being called 2801 // from a parallel context 2802 _cm->enter_first_sync_barrier(_worker_id); 2803 2804 // When we exit this sync barrier we know that all tasks have 2805 // stopped doing marking work. So, it's now safe to 2806 // re-initialize our data structures. At the end of this method, 2807 // task 0 will clear the global data structures. 2808 } 2809 2810 // We clear the local state of this task... 2811 clear_region_fields(); 2812 2813 if (!is_serial) { 2814 // ...and enter the second barrier. 2815 _cm->enter_second_sync_barrier(_worker_id); 2816 } 2817 // At this point, if we're during the concurrent phase of 2818 // marking, everything has been re-initialized and we're 2819 // ready to restart. 
2820 } 2821 } 2822 } 2823 2824 G1CMTask::G1CMTask(uint worker_id, G1ConcurrentMark* cm, G1CMTaskQueue* task_queue) : 2825 _objArray_processor(this), 2826 _worker_id(worker_id), 2827 _g1h(G1CollectedHeap::heap()), 2828 _cm(cm), 2829 _next_mark_bitmap(NULL), 2830 _task_queue(task_queue), 2831 _calls(0), 2832 _time_target_ms(0.0), 2833 _start_time_ms(0.0), 2834 _cm_oop_closure(NULL), 2835 _curr_region(NULL), 2836 _finger(NULL), 2837 _region_limit(NULL), 2838 _words_scanned(0), 2839 _words_scanned_limit(0), 2840 _real_words_scanned_limit(0), 2841 _refs_reached(0), 2842 _refs_reached_limit(0), 2843 _real_refs_reached_limit(0), 2844 _hash_seed(17), 2845 _has_aborted(false), 2846 _has_timed_out(false), 2847 _draining_satb_buffers(false), 2848 _step_times_ms(), 2849 _elapsed_time_ms(0.0), 2850 _termination_time_ms(0.0), 2851 _termination_start_time_ms(0.0), 2852 _concurrent(false), 2853 _marking_step_diffs_ms() 2854 { 2855 guarantee(task_queue != NULL, "invariant"); 2856 2857 _marking_step_diffs_ms.add(0.5); 2858 } 2859 2860 // These are formatting macros that are used below to ensure 2861 // consistent formatting. The *_H_* versions are used to format the 2862 // header for a particular value and they should be kept consistent 2863 // with the corresponding macro. Also note that most of the macros add 2864 // the necessary white space (as a prefix) which makes them a bit 2865 // easier to compose. 2866 2867 // All the output lines are prefixed with this string to be able to 2868 // identify them easily in a large log file. 2869 #define G1PPRL_LINE_PREFIX "###" 2870 2871 #define G1PPRL_ADDR_BASE_FORMAT " " PTR_FORMAT "-" PTR_FORMAT 2872 #ifdef _LP64 2873 #define G1PPRL_ADDR_BASE_H_FORMAT " %37s" 2874 #else // _LP64 2875 #define G1PPRL_ADDR_BASE_H_FORMAT " %21s" 2876 #endif // _LP64 2877 2878 // For per-region info 2879 #define G1PPRL_TYPE_FORMAT " %-4s" 2880 #define G1PPRL_TYPE_H_FORMAT " %4s" 2881 #define G1PPRL_BYTE_FORMAT " " SIZE_FORMAT_W(9) 2882 #define G1PPRL_BYTE_H_FORMAT " %9s" 2883 #define G1PPRL_DOUBLE_FORMAT " %14.1f" 2884 #define G1PPRL_DOUBLE_H_FORMAT " %14s" 2885 2886 // For summary info 2887 #define G1PPRL_SUM_ADDR_FORMAT(tag) " " tag ":" G1PPRL_ADDR_BASE_FORMAT 2888 #define G1PPRL_SUM_BYTE_FORMAT(tag) " " tag ": " SIZE_FORMAT 2889 #define G1PPRL_SUM_MB_FORMAT(tag) " " tag ": %1.2f MB" 2890 #define G1PPRL_SUM_MB_PERC_FORMAT(tag) G1PPRL_SUM_MB_FORMAT(tag) " / %1.2f %%" 2891 2892 G1PrintRegionLivenessInfoClosure::G1PrintRegionLivenessInfoClosure(const char* phase_name) : 2893 _total_used_bytes(0), _total_capacity_bytes(0), 2894 _total_prev_live_bytes(0), _total_next_live_bytes(0), 2895 _total_remset_bytes(0), _total_strong_code_roots_bytes(0) 2896 { 2897 G1CollectedHeap* g1h = G1CollectedHeap::heap(); 2898 MemRegion g1_reserved = g1h->g1_reserved(); 2899 double now = os::elapsedTime(); 2900 2901 // Print the header of the output. 
2902 log_trace(gc, liveness)(G1PPRL_LINE_PREFIX" PHASE %s @ %1.3f", phase_name, now); 2903 log_trace(gc, liveness)(G1PPRL_LINE_PREFIX" HEAP" 2904 G1PPRL_SUM_ADDR_FORMAT("reserved") 2905 G1PPRL_SUM_BYTE_FORMAT("region-size"), 2906 p2i(g1_reserved.start()), p2i(g1_reserved.end()), 2907 HeapRegion::GrainBytes); 2908 log_trace(gc, liveness)(G1PPRL_LINE_PREFIX); 2909 log_trace(gc, liveness)(G1PPRL_LINE_PREFIX 2910 G1PPRL_TYPE_H_FORMAT 2911 G1PPRL_ADDR_BASE_H_FORMAT 2912 G1PPRL_BYTE_H_FORMAT 2913 G1PPRL_BYTE_H_FORMAT 2914 G1PPRL_BYTE_H_FORMAT 2915 G1PPRL_DOUBLE_H_FORMAT 2916 G1PPRL_BYTE_H_FORMAT 2917 G1PPRL_BYTE_H_FORMAT, 2918 "type", "address-range", 2919 "used", "prev-live", "next-live", "gc-eff", 2920 "remset", "code-roots"); 2921 log_trace(gc, liveness)(G1PPRL_LINE_PREFIX 2922 G1PPRL_TYPE_H_FORMAT 2923 G1PPRL_ADDR_BASE_H_FORMAT 2924 G1PPRL_BYTE_H_FORMAT 2925 G1PPRL_BYTE_H_FORMAT 2926 G1PPRL_BYTE_H_FORMAT 2927 G1PPRL_DOUBLE_H_FORMAT 2928 G1PPRL_BYTE_H_FORMAT 2929 G1PPRL_BYTE_H_FORMAT, 2930 "", "", 2931 "(bytes)", "(bytes)", "(bytes)", "(bytes/ms)", 2932 "(bytes)", "(bytes)"); 2933 } 2934 2935 bool G1PrintRegionLivenessInfoClosure::doHeapRegion(HeapRegion* r) { 2936 const char* type = r->get_type_str(); 2937 HeapWord* bottom = r->bottom(); 2938 HeapWord* end = r->end(); 2939 size_t capacity_bytes = r->capacity(); 2940 size_t used_bytes = r->used(); 2941 size_t prev_live_bytes = r->live_bytes(); 2942 size_t next_live_bytes = r->next_live_bytes(); 2943 double gc_eff = r->gc_efficiency(); 2944 size_t remset_bytes = r->rem_set()->mem_size(); 2945 size_t strong_code_roots_bytes = r->rem_set()->strong_code_roots_mem_size(); 2946 2947 _total_used_bytes += used_bytes; 2948 _total_capacity_bytes += capacity_bytes; 2949 _total_prev_live_bytes += prev_live_bytes; 2950 _total_next_live_bytes += next_live_bytes; 2951 _total_remset_bytes += remset_bytes; 2952 _total_strong_code_roots_bytes += strong_code_roots_bytes; 2953 2954 // Print a line for this particular region. 2955 log_trace(gc, liveness)(G1PPRL_LINE_PREFIX 2956 G1PPRL_TYPE_FORMAT 2957 G1PPRL_ADDR_BASE_FORMAT 2958 G1PPRL_BYTE_FORMAT 2959 G1PPRL_BYTE_FORMAT 2960 G1PPRL_BYTE_FORMAT 2961 G1PPRL_DOUBLE_FORMAT 2962 G1PPRL_BYTE_FORMAT 2963 G1PPRL_BYTE_FORMAT, 2964 type, p2i(bottom), p2i(end), 2965 used_bytes, prev_live_bytes, next_live_bytes, gc_eff, 2966 remset_bytes, strong_code_roots_bytes); 2967 2968 return false; 2969 } 2970 2971 G1PrintRegionLivenessInfoClosure::~G1PrintRegionLivenessInfoClosure() { 2972 // add static memory usages to remembered set sizes 2973 _total_remset_bytes += HeapRegionRemSet::fl_mem_size() + HeapRegionRemSet::static_mem_size(); 2974 // Print the footer of the output. 2975 log_trace(gc, liveness)(G1PPRL_LINE_PREFIX); 2976 log_trace(gc, liveness)(G1PPRL_LINE_PREFIX 2977 " SUMMARY" 2978 G1PPRL_SUM_MB_FORMAT("capacity") 2979 G1PPRL_SUM_MB_PERC_FORMAT("used") 2980 G1PPRL_SUM_MB_PERC_FORMAT("prev-live") 2981 G1PPRL_SUM_MB_PERC_FORMAT("next-live") 2982 G1PPRL_SUM_MB_FORMAT("remset") 2983 G1PPRL_SUM_MB_FORMAT("code-roots"), 2984 bytes_to_mb(_total_capacity_bytes), 2985 bytes_to_mb(_total_used_bytes), 2986 percent_of(_total_used_bytes, _total_capacity_bytes), 2987 bytes_to_mb(_total_prev_live_bytes), 2988 percent_of(_total_prev_live_bytes, _total_capacity_bytes), 2989 bytes_to_mb(_total_next_live_bytes), 2990 percent_of(_total_next_live_bytes, _total_capacity_bytes), 2991 bytes_to_mb(_total_remset_bytes), 2992 bytes_to_mb(_total_strong_code_roots_bytes)); 2993 }