/*
 * Copyright (c) 2014, 2020, Red Hat, Inc. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"

#include "code/codeCache.hpp"
#include "gc/shared/gcTraceTime.inline.hpp"
#include "gc/shared/preservedMarks.inline.hpp"
#include "gc/shenandoah/shenandoahForwarding.inline.hpp"
#include "gc/shenandoah/shenandoahConcurrentMark.inline.hpp"
#include "gc/shenandoah/shenandoahConcurrentRoots.hpp"
#include "gc/shenandoah/shenandoahCollectionSet.hpp"
#include "gc/shenandoah/shenandoahFreeSet.hpp"
#include "gc/shenandoah/shenandoahPhaseTimings.hpp"
#include "gc/shenandoah/shenandoahMarkCompact.hpp"
#include "gc/shenandoah/shenandoahHeapRegionSet.hpp"
#include "gc/shenandoah/shenandoahHeap.inline.hpp"
#include "gc/shenandoah/shenandoahHeuristics.hpp"
#include "gc/shenandoah/shenandoahMarkingContext.inline.hpp"
#include "gc/shenandoah/shenandoahRootProcessor.inline.hpp"
#include "gc/shenandoah/shenandoahTraversalGC.hpp"
#include "gc/shenandoah/shenandoahTaskqueue.inline.hpp"
#include "gc/shenandoah/shenandoahUtils.hpp"
#include "gc/shenandoah/shenandoahVerifier.hpp"
#include "gc/shenandoah/shenandoahVMOperations.hpp"
#include "gc/shenandoah/shenandoahWorkerPolicy.hpp"
#include "memory/metaspace.hpp"
#include "memory/universe.hpp"
#include "oops/compressedOops.inline.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/biasedLocking.hpp"
#include "runtime/orderAccess.hpp"
#include "runtime/thread.hpp"
#include "utilities/copy.hpp"
#include "utilities/growableArray.hpp"
#include "gc/shared/workgroup.hpp"

ShenandoahMarkCompact::ShenandoahMarkCompact() :
  _gc_timer(NULL),
  _preserved_marks(new PreservedMarksSet(true)) {}

void ShenandoahMarkCompact::initialize(GCTimer* gc_timer) {
  _gc_timer = gc_timer;
}

void ShenandoahMarkCompact::do_it(GCCause::Cause gc_cause) {
  ShenandoahHeap* heap = ShenandoahHeap::heap();

  if (ShenandoahVerify) {
    heap->verifier()->verify_before_fullgc();
  }

  if (VerifyBeforeGC) {
    Universe::verify();
  }

  // Degenerated GC may carry the concurrent_root_in_progress flag when upgrading
  // to full GC. We need to reset it before mutators resume.
  if (ShenandoahConcurrentRoots::can_do_concurrent_class_unloading()) {
    heap->set_concurrent_root_in_progress(false);
  }

  heap->set_full_gc_in_progress(true);

  assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "must be at a safepoint");
  assert(Thread::current()->is_VM_thread(), "Do full GC only while world is stopped");

  {
    ShenandoahGCPhase phase(ShenandoahPhaseTimings::full_gc_heapdumps);
    heap->pre_full_gc_dump(_gc_timer);
  }

  {
    ShenandoahGCPhase prepare_phase(ShenandoahPhaseTimings::full_gc_prepare);
    // Full GC is supposed to recover from any GC state:

    // a0. Remember if we have forwarded objects
    bool has_forwarded_objects = heap->has_forwarded_objects();

    // a1. Cancel evacuation, if in progress
    if (heap->is_evacuation_in_progress()) {
      heap->set_evacuation_in_progress(false);
    }
    assert(!heap->is_evacuation_in_progress(), "sanity");

    // a2. Cancel update-refs, if in progress
    if (heap->is_update_refs_in_progress()) {
      heap->set_update_refs_in_progress(false);
    }
    assert(!heap->is_update_refs_in_progress(), "sanity");

    // a3. Cancel concurrent traversal GC, if in progress
    if (heap->is_concurrent_traversal_in_progress()) {
      heap->traversal_gc()->reset();
      heap->set_concurrent_traversal_in_progress(false);
    }

    // b. Cancel concurrent mark, if in progress
    if (heap->is_concurrent_mark_in_progress()) {
      heap->concurrent_mark()->cancel();
      heap->set_concurrent_mark_in_progress(false);
    }
    assert(!heap->is_concurrent_mark_in_progress(), "sanity");

    // c. Reset the bitmaps for new marking
    heap->reset_mark_bitmap();
    assert(heap->marking_context()->is_bitmap_clear(), "sanity");
    assert(!heap->marking_context()->is_complete(), "sanity");

    // d. Abandon reference discovery and clear all discovered references.
    ReferenceProcessor* rp = heap->ref_processor();
    rp->disable_discovery();
    rp->abandon_partial_discovery();
    rp->verify_no_references_recorded();

    // e. Set the has-forwarded-objects bit back, in case some steps above dropped it.
    heap->set_has_forwarded_objects(has_forwarded_objects);

    // f. Sync pinned region status from the CP marks
    heap->sync_pinned_region_status();

    // The rest of prologue:
    BiasedLocking::preserve_marks();
    _preserved_marks->init(heap->workers()->active_workers());
  }

  heap->make_parsable(true);

  OrderAccess::fence();

  phase1_mark_heap();

  // Once marking is done, which may have fixed up forwarded objects, we can drop it.
  // Coming out of Full GC, we would not have any forwarded objects.
  // This also prevents resolves with fwdptr from kicking in while adjusting pointers in phase3.
  heap->set_has_forwarded_objects(false);

  heap->set_full_gc_move_in_progress(true);

  // Setup workers for the rest
  OrderAccess::fence();

  // Initialize worker slices
  ShenandoahHeapRegionSet** worker_slices = NEW_C_HEAP_ARRAY(ShenandoahHeapRegionSet*, heap->max_workers(), mtGC);
  for (uint i = 0; i < heap->max_workers(); i++) {
    worker_slices[i] = new ShenandoahHeapRegionSet();
  }

  {
    // The rest of the code performs region moves, where region status is undefined
    // until all phases run together.
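    // Taking the heap lock over the whole span keeps other heap lock users from
    // observing regions in that transient state.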
    ShenandoahHeapLocker lock(heap->lock());

    phase2_calculate_target_addresses(worker_slices);

    OrderAccess::fence();

    phase3_update_references();

    phase4_compact_objects(worker_slices);
  }

  {
    // Epilogue
    _preserved_marks->restore(heap->workers());
    BiasedLocking::restore_marks();
    _preserved_marks->reclaim();
  }

  // Resize metaspace
  MetaspaceGC::compute_new_size();

  // Free worker slices
  for (uint i = 0; i < heap->max_workers(); i++) {
    delete worker_slices[i];
  }
  FREE_C_HEAP_ARRAY(ShenandoahHeapRegionSet*, worker_slices);

  heap->set_full_gc_move_in_progress(false);
  heap->set_full_gc_in_progress(false);

  if (ShenandoahVerify) {
    heap->verifier()->verify_after_fullgc();
  }

  if (VerifyAfterGC) {
    Universe::verify();
  }

  {
    ShenandoahGCPhase phase(ShenandoahPhaseTimings::full_gc_heapdumps);
    heap->post_full_gc_dump(_gc_timer);
  }
}

class ShenandoahPrepareForMarkClosure: public ShenandoahHeapRegionClosure {
private:
  ShenandoahMarkingContext* const _ctx;

public:
  ShenandoahPrepareForMarkClosure() : _ctx(ShenandoahHeap::heap()->marking_context()) {}

  void heap_region_do(ShenandoahHeapRegion* r) {
    _ctx->capture_top_at_mark_start(r);
    r->clear_live_data();
    r->set_concurrent_iteration_safe_limit(r->top());
  }
};

void ShenandoahMarkCompact::phase1_mark_heap() {
  GCTraceTime(Info, gc, phases) time("Phase 1: Mark live objects", _gc_timer);
  ShenandoahGCPhase mark_phase(ShenandoahPhaseTimings::full_gc_mark);

  ShenandoahHeap* heap = ShenandoahHeap::heap();

  ShenandoahPrepareForMarkClosure cl;
  heap->heap_region_iterate(&cl);

  ShenandoahConcurrentMark* cm = heap->concurrent_mark();

  heap->set_process_references(heap->heuristics()->can_process_references());
  heap->set_unload_classes(heap->heuristics()->can_unload_classes());

  ReferenceProcessor* rp = heap->ref_processor();
  // enable ("weak") refs discovery
  rp->enable_discovery(true /*verify_no_refs*/);
  rp->setup_policy(true); // forcefully purge all soft references
  rp->set_active_mt_degree(heap->workers()->active_workers());

  cm->update_roots(ShenandoahPhaseTimings::full_gc_roots);
  cm->mark_roots(ShenandoahPhaseTimings::full_gc_roots);
  cm->finish_mark_from_roots(/* full_gc = */ true);
  heap->mark_complete_marking_context();
  heap->parallel_cleaning(true /* full_gc */);
}

class ShenandoahPrepareForCompactionObjectClosure : public ObjectClosure {
private:
  PreservedMarks* const _preserved_marks;
  ShenandoahHeap* const _heap;
  GrowableArray<ShenandoahHeapRegion*>& _empty_regions;
  int _empty_regions_pos;
  ShenandoahHeapRegion* _to_region;
  ShenandoahHeapRegion* _from_region;
  HeapWord* _compact_point;

public:
  ShenandoahPrepareForCompactionObjectClosure(PreservedMarks* preserved_marks,
                                              GrowableArray<ShenandoahHeapRegion*>& empty_regions,
                                              ShenandoahHeapRegion* to_region) :
    _preserved_marks(preserved_marks),
    _heap(ShenandoahHeap::heap()),
    _empty_regions(empty_regions),
    _empty_regions_pos(0),
    _to_region(to_region),
    _from_region(NULL),
    _compact_point(to_region->bottom()) {}

  void set_from_region(ShenandoahHeapRegion* from_region) {
    _from_region = from_region;
  }

  void finish_region() {
    assert(_to_region != NULL, "should not happen");
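    // Record the final fill level of the to-region: this is where compaction
    // will leave its top.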
    _to_region->set_new_top(_compact_point);
  }

  bool is_compact_same_region() {
    return _from_region == _to_region;
  }

  int empty_regions_pos() {
    return _empty_regions_pos;
  }

  void do_object(oop p) {
    assert(_from_region != NULL, "must set before work");
    assert(_heap->complete_marking_context()->is_marked(p), "must be marked");
    assert(!_heap->complete_marking_context()->allocated_after_mark_start(cast_from_oop<HeapWord*>(p)), "must be truly marked");

    size_t obj_size = p->size();
    if (_compact_point + obj_size > _to_region->end()) {
      finish_region();

      // Object doesn't fit. Pick next empty region and start compacting there.
      ShenandoahHeapRegion* new_to_region;
      if (_empty_regions_pos < _empty_regions.length()) {
        new_to_region = _empty_regions.at(_empty_regions_pos);
        _empty_regions_pos++;
      } else {
        // Out of empty regions? Compact within the same region.
        new_to_region = _from_region;
      }

      assert(new_to_region != _to_region, "must not reuse same to-region");
      assert(new_to_region != NULL, "must not be NULL");
      _to_region = new_to_region;
      _compact_point = _to_region->bottom();
    }

    // Object fits into current region, record new location:
    assert(_compact_point + obj_size <= _to_region->end(), "must fit");
    shenandoah_assert_not_forwarded(NULL, p);
    _preserved_marks->push_if_necessary(p, p->mark_raw());
    p->forward_to(oop(_compact_point));
    _compact_point += obj_size;
  }
};

class ShenandoahPrepareForCompactionTask : public AbstractGangTask {
private:
  PreservedMarksSet* const _preserved_marks;
  ShenandoahHeap* const _heap;
  ShenandoahHeapRegionSet** const _worker_slices;
  ShenandoahRegionIterator _heap_regions;

  ShenandoahHeapRegion* next_from_region(ShenandoahHeapRegionSet* slice) {
    ShenandoahHeapRegion* from_region = _heap_regions.next();

    // Look for next candidate for this slice:
    while (from_region != NULL) {
      // Empty region: get it into the slice to defragment the slice itself.
      // We could have skipped this without violating correctness, but we really
      // want to compact all live regions to the start of the heap, which sometimes
      // means moving them into the fully empty regions.
      if (from_region->is_empty()) break;

      // This region can be moved, and it is not a humongous region. Humongous
      // moves are special-cased and handled separately.
      if (from_region->is_stw_move_allowed() && !from_region->is_humongous()) break;

      from_region = _heap_regions.next();
    }

    if (from_region != NULL) {
      assert(slice != NULL, "sanity");
      assert(!from_region->is_humongous(), "this path cannot handle humongous regions");
      assert(from_region->is_empty() || from_region->is_stw_move_allowed(), "only regions that can be moved in mark-compact");
      slice->add_region(from_region);
    }

    return from_region;
  }

public:
  ShenandoahPrepareForCompactionTask(PreservedMarksSet* preserved_marks, ShenandoahHeapRegionSet** worker_slices) :
    AbstractGangTask("Shenandoah Prepare For Compaction Task"),
    _preserved_marks(preserved_marks),
    _heap(ShenandoahHeap::heap()), _worker_slices(worker_slices) {
  }

  void work(uint worker_id) {
    ShenandoahHeapRegionSet* slice = _worker_slices[worker_id];
    ShenandoahHeapRegion* from_region = next_from_region(slice);
    // No work?
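    // (The shared region iterator is exhausted: other workers have claimed
    // all the regions.)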
    if (from_region == NULL) {
      return;
    }

    // Sliding compaction. Walk all regions in the slice, and compact them.
    // Remember empty regions and reuse them as needed.
    ResourceMark rm;
    GrowableArray<ShenandoahHeapRegion*> empty_regions((int)_heap->num_regions());
    ShenandoahPrepareForCompactionObjectClosure cl(_preserved_marks->get(worker_id), empty_regions, from_region);
    while (from_region != NULL) {
      cl.set_from_region(from_region);
      if (from_region->has_live()) {
        _heap->marked_object_iterate(from_region, &cl);
      }

      // Compacted the region to somewhere else? From-region is empty then.
      if (!cl.is_compact_same_region()) {
        empty_regions.append(from_region);
      }
      from_region = next_from_region(slice);
    }
    cl.finish_region();

    // Mark all remaining regions as empty
    for (int pos = cl.empty_regions_pos(); pos < empty_regions.length(); ++pos) {
      ShenandoahHeapRegion* r = empty_regions.at(pos);
      r->set_new_top(r->bottom());
    }
  }
};

void ShenandoahMarkCompact::calculate_target_humongous_objects() {
  ShenandoahHeap* heap = ShenandoahHeap::heap();

  // Compute the new addresses for humongous objects. We need to do this after addresses
  // for regular objects are calculated, and we know which regions in the heap suffix are
  // available for humongous moves.
  //
  // Scan the heap backwards, because we are compacting humongous regions towards the end.
  // Maintain the contiguous compaction window in [to_begin; to_end), so that we can slide
  // humongous starts there.
  //
  // The complication is potential non-movable regions during the scan. If such a region is
  // detected, then sliding restarts towards that non-movable region.

  size_t to_begin = heap->num_regions();
  size_t to_end = heap->num_regions();

  for (size_t c = heap->num_regions(); c > 0; c--) {
    ShenandoahHeapRegion* r = heap->get_region(c - 1);
    if (r->is_humongous_continuation() || (r->new_top() == r->bottom())) {
      // To-region candidate: record this, and continue scan
      to_begin = r->region_number();
      continue;
    }

    if (r->is_humongous_start() && r->is_stw_move_allowed()) {
      // From-region candidate: movable humongous region
      oop old_obj = oop(r->bottom());
      size_t words_size = old_obj->size();
      size_t num_regions = ShenandoahHeapRegion::required_regions(words_size * HeapWordSize);

      size_t start = to_end - num_regions;

      if (start >= to_begin && start != r->region_number()) {
        // Fits into current window, and the move is non-trivial. Record the move,
        // then continue the scan.
        _preserved_marks->get(0)->push_if_necessary(old_obj, old_obj->mark_raw());
        old_obj->forward_to(oop(heap->get_region(start)->bottom()));
        to_end = start;
        continue;
      }
    }

    // Failed to fit. Scan starting from current region.
    to_begin = r->region_number();
    to_end = r->region_number();
  }
}

class ShenandoahEnsureHeapActiveClosure: public ShenandoahHeapRegionClosure {
private:
  ShenandoahHeap* const _heap;

public:
  ShenandoahEnsureHeapActiveClosure() : _heap(ShenandoahHeap::heap()) {}
  void heap_region_do(ShenandoahHeapRegion* r) {
    if (r->is_trash()) {
      r->recycle();
    }
    if (r->is_cset()) {
      r->make_regular_bypass();
    }
    if (r->is_empty_uncommitted()) {
      r->make_committed_bypass();
    }
    assert (r->is_committed(), "only committed regions in heap now, see region " SIZE_FORMAT, r->region_number());

    // Record current region occupancy: this communicates to the rest of the
    // Full GC code which regions are free.
    r->set_new_top(r->top());
  }
};

class ShenandoahTrashImmediateGarbageClosure: public ShenandoahHeapRegionClosure {
private:
  ShenandoahHeap* const _heap;
  ShenandoahMarkingContext* const _ctx;

public:
  ShenandoahTrashImmediateGarbageClosure() :
    _heap(ShenandoahHeap::heap()),
    _ctx(ShenandoahHeap::heap()->complete_marking_context()) {}

  void heap_region_do(ShenandoahHeapRegion* r) {
    if (r->is_humongous_start()) {
      oop humongous_obj = oop(r->bottom());
      if (!_ctx->is_marked(humongous_obj)) {
        assert(!r->has_live(),
               "Region " SIZE_FORMAT " is not marked, should not have live", r->region_number());
        _heap->trash_humongous_region_at(r);
      } else {
        assert(r->has_live(),
               "Region " SIZE_FORMAT " should have live", r->region_number());
      }
    } else if (r->is_humongous_continuation()) {
      // If we hit continuation, the non-live humongous starts should have been trashed already
      assert(r->humongous_start_region()->has_live(),
             "Region " SIZE_FORMAT " should have live", r->region_number());
    } else if (r->is_regular()) {
      if (!r->has_live()) {
        r->make_trash_immediate();
      }
    }
  }
};

void ShenandoahMarkCompact::phase2_calculate_target_addresses(ShenandoahHeapRegionSet** worker_slices) {
  GCTraceTime(Info, gc, phases) time("Phase 2: Compute new object addresses", _gc_timer);
  ShenandoahGCPhase calculate_address_phase(ShenandoahPhaseTimings::full_gc_calculate_addresses);

  ShenandoahHeap* heap = ShenandoahHeap::heap();

  // We are about to figure out which regions can be compacted; make sure the
  // pinning status has been updated in the GC prologue.
  heap->assert_pinned_region_status();

  {
    // Trash the immediately collectible regions before computing addresses
    ShenandoahTrashImmediateGarbageClosure tigcl;
    heap->heap_region_iterate(&tigcl);

    // Make sure regions are in good state: committed, active, clean.
    // This is needed because we are potentially sliding the data through them.
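    // Even regions without live data can become sliding targets, which is why
    // uncommitted regions must be committed before the move phases run.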
    ShenandoahEnsureHeapActiveClosure ecl;
    heap->heap_region_iterate(&ecl);
  }

  // Compute the new addresses for regular objects
  {
    ShenandoahGCPhase phase(ShenandoahPhaseTimings::full_gc_calculate_addresses_regular);
    ShenandoahPrepareForCompactionTask prepare_task(_preserved_marks, worker_slices);
    heap->workers()->run_task(&prepare_task);
  }

  // Compute the new addresses for humongous objects
  {
    ShenandoahGCPhase phase(ShenandoahPhaseTimings::full_gc_calculate_addresses_humong);
    calculate_target_humongous_objects();
  }
}

class ShenandoahAdjustPointersClosure : public MetadataVisitingOopIterateClosure {
private:
  ShenandoahHeap* const _heap;
  ShenandoahMarkingContext* const _ctx;

  template <class T>
  inline void do_oop_work(T* p) {
    T o = RawAccess<>::oop_load(p);
    if (!CompressedOops::is_null(o)) {
      oop obj = CompressedOops::decode_not_null(o);
      assert(_ctx->is_marked(obj), "must be marked");
      if (obj->is_forwarded()) {
        oop forw = obj->forwardee();
        RawAccess<IS_NOT_NULL>::oop_store(p, forw);
      }
    }
  }

public:
  ShenandoahAdjustPointersClosure() :
    _heap(ShenandoahHeap::heap()),
    _ctx(ShenandoahHeap::heap()->complete_marking_context()) {}

  void do_oop(oop* p)       { do_oop_work(p); }
  void do_oop(narrowOop* p) { do_oop_work(p); }
};

class ShenandoahAdjustPointersObjectClosure : public ObjectClosure {
private:
  ShenandoahHeap* const _heap;
  ShenandoahAdjustPointersClosure _cl;

public:
  ShenandoahAdjustPointersObjectClosure() :
    _heap(ShenandoahHeap::heap()) {
  }
  void do_object(oop p) {
    assert(_heap->complete_marking_context()->is_marked(p), "must be marked");
    p->oop_iterate(&_cl);
  }
};

class ShenandoahAdjustPointersTask : public AbstractGangTask {
private:
  ShenandoahHeap* const _heap;
  ShenandoahRegionIterator _regions;

public:
  ShenandoahAdjustPointersTask() :
    AbstractGangTask("Shenandoah Adjust Pointers Task"),
    _heap(ShenandoahHeap::heap()) {
  }

  void work(uint worker_id) {
    ShenandoahAdjustPointersObjectClosure obj_cl;
    ShenandoahHeapRegion* r = _regions.next();
    while (r != NULL) {
      if (!r->is_humongous_continuation() && r->has_live()) {
        _heap->marked_object_iterate(r, &obj_cl);
      }
      r = _regions.next();
    }
  }
};

class ShenandoahAdjustRootPointersTask : public AbstractGangTask {
private:
  ShenandoahRootAdjuster* _rp;
  PreservedMarksSet* _preserved_marks;
public:
  ShenandoahAdjustRootPointersTask(ShenandoahRootAdjuster* rp, PreservedMarksSet* preserved_marks) :
    AbstractGangTask("Shenandoah Adjust Root Pointers Task"),
    _rp(rp),
    _preserved_marks(preserved_marks) {}

  void work(uint worker_id) {
    ShenandoahAdjustPointersClosure cl;
    _rp->roots_do(worker_id, &cl);
    _preserved_marks->get(worker_id)->adjust_during_full_gc();
  }
};

void ShenandoahMarkCompact::phase3_update_references() {
  GCTraceTime(Info, gc, phases) time("Phase 3: Adjust pointers", _gc_timer);
  ShenandoahGCPhase adjust_pointer_phase(ShenandoahPhaseTimings::full_gc_adjust_pointers);

  ShenandoahHeap* heap = ShenandoahHeap::heap();

  WorkGang* workers = heap->workers();
  uint nworkers = workers->active_workers();
  {
#if COMPILER2_OR_JVMCI
    DerivedPointerTable::clear();
#endif
    ShenandoahRootAdjuster rp(nworkers, ShenandoahPhaseTimings::full_gc_roots);
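    // Adjust the root set and the preserved marks in a single parallel pass.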
    ShenandoahAdjustRootPointersTask task(&rp, _preserved_marks);
    workers->run_task(&task);
#if COMPILER2_OR_JVMCI
    DerivedPointerTable::update_pointers();
#endif
  }

  ShenandoahAdjustPointersTask adjust_pointers_task;
  workers->run_task(&adjust_pointers_task);
}

class ShenandoahCompactObjectsClosure : public ObjectClosure {
private:
  ShenandoahHeap* const _heap;
  uint const _worker_id;

public:
  ShenandoahCompactObjectsClosure(uint worker_id) :
    _heap(ShenandoahHeap::heap()), _worker_id(worker_id) {}

  void do_object(oop p) {
    assert(_heap->complete_marking_context()->is_marked(p), "must be marked");
    size_t size = (size_t)p->size();
    if (p->is_forwarded()) {
      HeapWord* compact_from = cast_from_oop<HeapWord*>(p);
      HeapWord* compact_to = cast_from_oop<HeapWord*>(p->forwardee());
      Copy::aligned_conjoint_words(compact_from, compact_to, size);
      oop new_obj = oop(compact_to);
      new_obj->init_mark_raw();
    }
  }
};

class ShenandoahCompactObjectsTask : public AbstractGangTask {
private:
  ShenandoahHeap* const _heap;
  ShenandoahHeapRegionSet** const _worker_slices;

public:
  ShenandoahCompactObjectsTask(ShenandoahHeapRegionSet** worker_slices) :
    AbstractGangTask("Shenandoah Compact Objects Task"),
    _heap(ShenandoahHeap::heap()),
    _worker_slices(worker_slices) {
  }

  void work(uint worker_id) {
    ShenandoahHeapRegionSetIterator slice(_worker_slices[worker_id]);

    ShenandoahCompactObjectsClosure cl(worker_id);
    ShenandoahHeapRegion* r = slice.next();
    while (r != NULL) {
      assert(!r->is_humongous(), "must not get humongous regions here");
      if (r->has_live()) {
        _heap->marked_object_iterate(r, &cl);
      }
      r->set_top(r->new_top());
      r = slice.next();
    }
  }
};

class ShenandoahPostCompactClosure : public ShenandoahHeapRegionClosure {
private:
  ShenandoahHeap* const _heap;
  size_t _live;

public:
  ShenandoahPostCompactClosure() : _heap(ShenandoahHeap::heap()), _live(0) {
    _heap->free_set()->clear();
  }

  void heap_region_do(ShenandoahHeapRegion* r) {
    assert (!r->is_cset(), "cset regions should have been demoted already");

    // Need to reset the complete-top-at-mark-start pointer here because
    // the complete marking bitmap is no longer valid. This ensures
    // size-based iteration in marked_object_iterate().
    // NOTE: See blurb at ShenandoahMCResetCompleteBitmapTask on why we need to skip
    // pinned regions.
    if (!r->is_pinned()) {
      _heap->complete_marking_context()->reset_top_at_mark_start(r);
    }

    size_t live = r->used();

    // Make empty regions that have been allocated into regular
    if (r->is_empty() && live > 0) {
      r->make_regular_bypass();
    }

    // Reclaim regular regions that became empty
    if (r->is_regular() && live == 0) {
      r->make_trash();
    }

    // Recycle all trash regions
    if (r->is_trash()) {
      live = 0;
      r->recycle();
    }

    r->set_live_data(live);
    r->reset_alloc_metadata_to_shared();
    _live += live;
  }

  size_t get_live() {
    return _live;
  }
};

void ShenandoahMarkCompact::compact_humongous_objects() {
  // Compact humongous regions, based on their fwdptr objects.
  //
  // This code is serial, because doing the in-slice parallel sliding is tricky.
  // In most cases, humongous regions are already compacted, and do not require
  // further moves, which alleviates sliding costs. We may consider doing this
  // in parallel in the future.

  ShenandoahHeap* heap = ShenandoahHeap::heap();

  for (size_t c = heap->num_regions(); c > 0; c--) {
    ShenandoahHeapRegion* r = heap->get_region(c - 1);
    if (r->is_humongous_start()) {
      oop old_obj = oop(r->bottom());
      if (!old_obj->is_forwarded()) {
        // No need to move the object, it stays at the same slot
        continue;
      }
      size_t words_size = old_obj->size();
      size_t num_regions = ShenandoahHeapRegion::required_regions(words_size * HeapWordSize);

      size_t old_start = r->region_number();
      size_t old_end   = old_start + num_regions - 1;
      size_t new_start = heap->heap_region_index_containing(old_obj->forwardee());
      size_t new_end   = new_start + num_regions - 1;
      assert(old_start != new_start, "must be real move");
      assert(r->is_stw_move_allowed(), "Region " SIZE_FORMAT " should be movable", r->region_number());

      Copy::aligned_conjoint_words(heap->get_region(old_start)->bottom(),
                                   heap->get_region(new_start)->bottom(),
                                   ShenandoahHeapRegion::region_size_words()*num_regions);

      oop new_obj = oop(heap->get_region(new_start)->bottom());
      new_obj->init_mark_raw();

      {
        for (size_t c = old_start; c <= old_end; c++) {
          ShenandoahHeapRegion* r = heap->get_region(c);
          r->make_regular_bypass();
          r->set_top(r->bottom());
        }

        for (size_t c = new_start; c <= new_end; c++) {
          ShenandoahHeapRegion* r = heap->get_region(c);
          if (c == new_start) {
            r->make_humongous_start_bypass();
          } else {
            r->make_humongous_cont_bypass();
          }

          // Trailing region may be non-full, record the remainder there
          size_t remainder = words_size & ShenandoahHeapRegion::region_size_words_mask();
          if ((c == new_end) && (remainder != 0)) {
            r->set_top(r->bottom() + remainder);
          } else {
            r->set_top(r->end());
          }

          r->reset_alloc_metadata_to_shared();
        }
      }
    }
  }
}

// This is slightly different from ShenandoahHeap::reset_next_mark_bitmap:
// we need to remain able to walk pinned regions.
// Since pinned regions do not move and don't get compacted, we will get holes with
// unreachable objects in them (which may have pointers to unloaded Klasses and thus
// cannot be iterated over using oop->size()). The only way to safely iterate over those
// is using a valid marking bitmap and a valid TAMS pointer. This class only resets marking
// bitmaps for un-pinned regions, and later we only reset TAMS for unpinned regions.
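// (The matching TAMS reset for unpinned regions happens in
// ShenandoahPostCompactClosure::heap_region_do above.)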
class ShenandoahMCResetCompleteBitmapTask : public AbstractGangTask {
private:
  ShenandoahRegionIterator _regions;

public:
  ShenandoahMCResetCompleteBitmapTask() :
    AbstractGangTask("Parallel Reset Bitmap Task") {
  }

  void work(uint worker_id) {
    ShenandoahHeapRegion* region = _regions.next();
    ShenandoahHeap* heap = ShenandoahHeap::heap();
    ShenandoahMarkingContext* const ctx = heap->complete_marking_context();
    while (region != NULL) {
      if (heap->is_bitmap_slice_committed(region) && !region->is_pinned() && region->has_live()) {
        ctx->clear_bitmap(region);
      }
      region = _regions.next();
    }
  }
};

void ShenandoahMarkCompact::phase4_compact_objects(ShenandoahHeapRegionSet** worker_slices) {
  GCTraceTime(Info, gc, phases) time("Phase 4: Move objects", _gc_timer);
  ShenandoahGCPhase compaction_phase(ShenandoahPhaseTimings::full_gc_copy_objects);

  ShenandoahHeap* heap = ShenandoahHeap::heap();

  // Compact regular objects first
  {
    ShenandoahGCPhase phase(ShenandoahPhaseTimings::full_gc_copy_objects_regular);
    ShenandoahCompactObjectsTask compact_task(worker_slices);
    heap->workers()->run_task(&compact_task);
  }

  // Compact humongous objects after regular object moves
  {
    ShenandoahGCPhase phase(ShenandoahPhaseTimings::full_gc_copy_objects_humong);
    compact_humongous_objects();
  }

  // Reset complete bitmap. We're about to reset the complete-top-at-mark-start pointer
  // and must ensure the bitmap is in sync.
  {
    ShenandoahGCPhase phase(ShenandoahPhaseTimings::full_gc_copy_objects_reset_complete);
    ShenandoahMCResetCompleteBitmapTask task;
    heap->workers()->run_task(&task);
  }

  // Bring regions into proper states after the collection, and set heap properties.
  {
    ShenandoahGCPhase phase(ShenandoahPhaseTimings::full_gc_copy_objects_rebuild);

    ShenandoahPostCompactClosure post_compact;
    heap->heap_region_iterate(&post_compact);
    heap->set_used(post_compact.get_live());

    heap->collection_set()->clear();
    heap->free_set()->rebuild();
  }

  heap->clear_cancelled_gc();
}