/*
 * Copyright (c) 2013, 2019, Red Hat, Inc. All rights reserved.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"

#include "classfile/symbolTable.hpp"
#include "classfile/systemDictionary.hpp"
#include "code/codeCache.hpp"

#include "gc_implementation/shared/parallelCleaning.hpp"
#include "gc_implementation/shenandoah/shenandoahBarrierSet.inline.hpp"
#include "gc_implementation/shenandoah/shenandoahClosures.inline.hpp"
#include "gc_implementation/shenandoah/shenandoahConcurrentMark.inline.hpp"
#include "gc_implementation/shenandoah/shenandoahOopClosures.inline.hpp"
#include "gc_implementation/shenandoah/shenandoahHeap.inline.hpp"
#include "gc_implementation/shenandoah/shenandoahPhaseTimings.hpp"
#include "gc_implementation/shenandoah/shenandoahRootProcessor.hpp"
#include "gc_implementation/shenandoah/shenandoahTaskqueue.inline.hpp"
#include "gc_implementation/shenandoah/shenandoahWorkGroup.hpp"
#include "gc_implementation/shenandoah/shenandoahUtils.hpp"
#include "gc_implementation/shenandoah/shenandoah_specialized_oop_closures.hpp"

#include "memory/referenceProcessor.hpp"
#include "memory/iterator.inline.hpp"
#include "memory/metaspace.hpp"
#include "memory/resourceArea.hpp"
#include "oops/oop.inline.hpp"

template<UpdateRefsMode UPDATE_REFS>
class ShenandoahInitMarkRootsClosure : public OopClosure {
private:
  ShenandoahObjToScanQueue* _queue;
  ShenandoahHeap* _heap;
  ShenandoahStrDedupQueue* _dedup_queue;
  ShenandoahMarkingContext* const _mark_context;

  template <class T>
  inline void do_oop_nv(T* p) {
    ShenandoahConcurrentMark::mark_through_ref<T, UPDATE_REFS, NO_DEDUP>(p, _heap, _queue, _mark_context, _dedup_queue);
  }

public:
  ShenandoahInitMarkRootsClosure(ShenandoahObjToScanQueue* q, ShenandoahStrDedupQueue* dq) :
    _queue(q),
    _heap(ShenandoahHeap::heap()),
    _dedup_queue(dq),
    _mark_context(_heap->marking_context()) {}

  void do_oop(narrowOop* p) { do_oop_nv(p); }
  void do_oop(oop* p)       { do_oop_nv(p); }
};

ShenandoahMarkRefsSuperClosure::ShenandoahMarkRefsSuperClosure(ShenandoahObjToScanQueue* q, ReferenceProcessor* rp) :
  MetadataAwareOopClosure(rp),
  _queue(q),
  _dedup_queue(NULL),
  _heap(ShenandoahHeap::heap()),
  _mark_context(_heap->marking_context())
{ }

ShenandoahMarkRefsSuperClosure::ShenandoahMarkRefsSuperClosure(ShenandoahObjToScanQueue* q, ShenandoahStrDedupQueue* dq, ReferenceProcessor* rp) :
  MetadataAwareOopClosure(rp),
  _queue(q),
  _dedup_queue(dq),
  _heap(ShenandoahHeap::heap()),
  _mark_context(_heap->marking_context())
{ }

template<UpdateRefsMode UPDATE_REFS>
class ShenandoahInitMarkRootsTask : public AbstractGangTask {
private:
  ShenandoahRootProcessor* _rp;
public:
  ShenandoahInitMarkRootsTask(ShenandoahRootProcessor* rp) :
    AbstractGangTask("Shenandoah init mark roots task"),
    _rp(rp) {
  }

  void work(uint worker_id) {
    assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Must be at a safepoint");
    ShenandoahParallelWorkerSession worker_session(worker_id);

    ShenandoahHeap* heap = ShenandoahHeap::heap();
    ShenandoahObjToScanQueueSet* queues = heap->concurrent_mark()->task_queues();
    assert(queues->get_reserved() > worker_id, err_msg("Queue has not been reserved for worker id: %d", worker_id));

    ShenandoahObjToScanQueue* q = queues->queue(worker_id);
    ShenandoahInitMarkRootsClosure<UPDATE_REFS> mark_cl(q, NULL);
    do_work(heap, &mark_cl, worker_id);
  }

private:
  void do_work(ShenandoahHeap* heap, OopClosure* oops, uint worker_id) {
    // The rationale for selecting the roots to scan is as follows:
    //   a. With unload_classes = true, we only want to scan the actual strong roots from the
    //      code cache. This will allow us to identify the dead classes, unload them, *and*
    //      invalidate the relevant code cache blobs. This can only be done together with
    //      class unloading.
    //   b. With unload_classes = false, we have to nominally retain all the references from the
    //      code cache, because there could be embedded class/oop references in the generated
    //      code, which we will never visit during mark. Without the code cache invalidation,
    //      as in (a), we risk executing that code cache blob, and crashing.
    //   c. With ShenandoahConcurrentScanCodeRoots, we avoid scanning the entire code cache here,
    //      and instead do that in the concurrent phase under the relevant lock. This saves init
    //      mark pause time.

    CLDToOopClosure clds_cl(oops);
    MarkingCodeBlobClosure blobs_cl(oops, !CodeBlobToOopClosure::FixRelocations);

    ResourceMark m;
    if (heap->unload_classes()) {
      _rp->process_strong_roots(oops, &clds_cl, NULL, &blobs_cl, NULL, worker_id);
    } else {
      if (ShenandoahConcurrentScanCodeRoots) {
        CodeBlobClosure* code_blobs = NULL;
#ifdef ASSERT
        ShenandoahAssertToSpaceClosure assert_to_space_oops;
        CodeBlobToOopClosure assert_to_space(&assert_to_space_oops, !CodeBlobToOopClosure::FixRelocations);
        // If concurrent code cache evacuation is disabled, the code cache should have only
        // to-space pointers. Otherwise, it should have to-space pointers only if mark does
        // not update refs.
        if (!heap->has_forwarded_objects()) {
          code_blobs = &assert_to_space;
        }
#endif
        _rp->process_all_roots(oops, &clds_cl, code_blobs, NULL, worker_id);
      } else {
        _rp->process_all_roots(oops, &clds_cl, &blobs_cl, NULL, worker_id);
      }
    }
  }
};
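
// In outline, the three cases in do_work() above reduce to the following root-set
// choices (a rough summary of the code, not additional logic):
//
//   unload_classes == true            -> process_strong_roots(..., &blobs_cl, ...)
//                                        (only strong code roots; the rest is invalidated)
//   ShenandoahConcurrentScanCodeRoots -> process_all_roots(..., NULL, ...)
//                                        (code cache deferred to the concurrent phase)
//   otherwise                         -> process_all_roots(..., &blobs_cl, ...)
//                                        (scan the whole code cache in this pause)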

class ShenandoahUpdateRootsTask : public AbstractGangTask {
private:
  ShenandoahRootProcessor* _rp;
  const bool _update_code_cache;
public:
  ShenandoahUpdateRootsTask(ShenandoahRootProcessor* rp, bool update_code_cache) :
    AbstractGangTask("Shenandoah update roots task"),
    _rp(rp),
    _update_code_cache(update_code_cache) {
  }

  void work(uint worker_id) {
    assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Must be at a safepoint");
    ShenandoahParallelWorkerSession worker_session(worker_id);

    ShenandoahHeap* heap = ShenandoahHeap::heap();
    ShenandoahUpdateRefsClosure cl;
    CLDToOopClosure cldCl(&cl);

    CodeBlobClosure* code_blobs;
    CodeBlobToOopClosure update_blobs(&cl, CodeBlobToOopClosure::FixRelocations);
#ifdef ASSERT
    ShenandoahAssertToSpaceClosure assert_to_space_oops;
    CodeBlobToOopClosure assert_to_space(&assert_to_space_oops, !CodeBlobToOopClosure::FixRelocations);
#endif
    if (_update_code_cache) {
      code_blobs = &update_blobs;
    } else {
      code_blobs =
        DEBUG_ONLY(&assert_to_space)
        NOT_DEBUG(NULL);
    }
    _rp->process_all_roots(&cl, &cldCl, code_blobs, NULL, worker_id);
  }
};

class ShenandoahConcurrentMarkingTask : public AbstractGangTask {
private:
  ShenandoahConcurrentMark* _cm;
  ShenandoahTaskTerminator* _terminator;

public:
  ShenandoahConcurrentMarkingTask(ShenandoahConcurrentMark* cm, ShenandoahTaskTerminator* terminator) :
    AbstractGangTask("Root Region Scan"), _cm(cm), _terminator(terminator) {
  }

  void work(uint worker_id) {
    ShenandoahHeap* heap = ShenandoahHeap::heap();
    ShenandoahConcurrentWorkerSession worker_session(worker_id);
    ShenandoahObjToScanQueue* q = _cm->get_queue(worker_id);
    ReferenceProcessor* rp;
    if (heap->process_references()) {
      rp = heap->ref_processor();
      shenandoah_assert_rp_isalive_installed();
    } else {
      rp = NULL;
    }

    _cm->concurrent_scan_code_roots(worker_id, rp);
    _cm->mark_loop(worker_id, _terminator, rp,
                   true,  // cancellable
                   ShenandoahStringDedup::is_enabled()); // perform string dedup
  }
};

class ShenandoahSATBAndRemarkCodeRootsThreadsClosure : public ThreadClosure {
private:
  ShenandoahSATBBufferClosure* _satb_cl;
  OopClosure* const _cl;
  MarkingCodeBlobClosure* _code_cl;
  int _thread_parity;

public:
  ShenandoahSATBAndRemarkCodeRootsThreadsClosure(ShenandoahSATBBufferClosure* satb_cl, OopClosure* cl, MarkingCodeBlobClosure* code_cl) :
    _satb_cl(satb_cl), _cl(cl), _code_cl(code_cl),
    _thread_parity(SharedHeap::heap()->strong_roots_parity()) {}

  void do_thread(Thread* thread) {
    if (thread->is_Java_thread()) {
      if (thread->claim_oops_do(true, _thread_parity)) {
        JavaThread* jt = (JavaThread*)thread;
        jt->satb_mark_queue().apply_closure_and_empty(_satb_cl);
        if (_cl != NULL) {
          ResourceMark rm;
          jt->oops_do(_cl, NULL, _code_cl);
        } else if (_code_cl != NULL) {
          // In theory it should not be necessary to explicitly walk the nmethods to find roots
          // for concurrent marking; however, oops reachable from nmethods have very complex
          // lifecycles:
          //  * Alive if on the stack of an executing method
          //  * Weakly reachable otherwise
          // Some objects reachable from nmethods, such as the class loader (or klass_holder)
          // of the receiver, should be live by the SATB invariant, but other oops recorded in
          // nmethods may behave differently.
          jt->nmethods_do(_code_cl);
        }
      }
    } else if (thread->is_VM_thread()) {
      if (thread->claim_oops_do(true, _thread_parity)) {
        JavaThread::satb_mark_queue_set().shared_satb_queue()->apply_closure_and_empty(_satb_cl);
      }
    }
  }
};
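
// The closure above relies on the generic thread-claiming protocol: each worker calls
// claim_oops_do(true, parity) and only the winner visits a given thread, so every Java
// thread and the VM thread is processed exactly once per strong-roots phase no matter
// how many workers run the task. Roughly:
//
//   if (thread->claim_oops_do(true, _thread_parity)) {
//     ... // this worker now owns `thread` for the current parity round
//   }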

class ShenandoahFinalMarkingTask : public AbstractGangTask {
private:
  ShenandoahConcurrentMark* _cm;
  ShenandoahTaskTerminator* _terminator;
  bool _dedup_string;

public:
  ShenandoahFinalMarkingTask(ShenandoahConcurrentMark* cm, ShenandoahTaskTerminator* terminator, bool dedup_string) :
    AbstractGangTask("Shenandoah Final Marking"), _cm(cm), _terminator(terminator), _dedup_string(dedup_string) {
  }

  void work(uint worker_id) {
    ShenandoahHeap* heap = ShenandoahHeap::heap();

    ReferenceProcessor* rp;
    if (heap->process_references()) {
      rp = heap->ref_processor();
      shenandoah_assert_rp_isalive_installed();
    } else {
      rp = NULL;
    }

    // First drain the remaining SATB buffers.
    // Notice that this is not strictly necessary for mark-compact. But since it requires a
    // StrongRootsScope around the task, we need to claim the threads, and performance-wise
    // it doesn't really matter. Adds about 1ms to full-gc.
    {
      ShenandoahObjToScanQueue* q = _cm->get_queue(worker_id);
      ShenandoahStrDedupQueue* dq = NULL;
      if (ShenandoahStringDedup::is_enabled()) {
        dq = ShenandoahStringDedup::queue(worker_id);
      }
      ShenandoahSATBBufferClosure cl(q, dq);
      SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set();
      while (satb_mq_set.apply_closure_to_completed_buffer(&cl));
      bool do_nmethods = heap->unload_classes();
      if (heap->has_forwarded_objects()) {
        ShenandoahMarkResolveRefsClosure resolve_mark_cl(q, rp);
        MarkingCodeBlobClosure blobsCl(&resolve_mark_cl, !CodeBlobToOopClosure::FixRelocations);
        ShenandoahSATBAndRemarkCodeRootsThreadsClosure tc(&cl,
                                                          ShenandoahStoreValEnqueueBarrier ? &resolve_mark_cl : NULL,
                                                          do_nmethods ? &blobsCl : NULL);
        Threads::threads_do(&tc);
      } else {
        ShenandoahMarkRefsClosure mark_cl(q, rp);
        MarkingCodeBlobClosure blobsCl(&mark_cl, !CodeBlobToOopClosure::FixRelocations);
        ShenandoahSATBAndRemarkCodeRootsThreadsClosure tc(&cl,
                                                          ShenandoahStoreValEnqueueBarrier ? &mark_cl : NULL,
                                                          do_nmethods ? &blobsCl : NULL);
        Threads::threads_do(&tc);
      }
    }

    if (heap->is_degenerated_gc_in_progress()) {
      // A degenerated cycle may bypass the concurrent cycle, so code roots might not have
      // been scanned. Check and catch up here.
      _cm->concurrent_scan_code_roots(worker_id, rp);
    }

    _cm->mark_loop(worker_id, _terminator, rp,
                   false, // not cancellable
                   _dedup_string);

    assert(_cm->task_queues()->is_empty(), "Should be empty");
  }
};
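
// In short, final-mark work() above proceeds in three steps: drain the SATB buffers
// (and, where needed, thread stacks and nmethods via the threads closure), catch up
// on code roots if a degenerated cycle skipped the concurrent scan, and finish the
// mark loop until the terminator offers termination.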

void ShenandoahConcurrentMark::mark_roots(ShenandoahPhaseTimings::Phase root_phase) {
  assert(Thread::current()->is_VM_thread(), "can only do this in VMThread");
  assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Must be at a safepoint");

  ShenandoahGCPhase phase(root_phase);

  WorkGang* workers = _heap->workers();
  uint nworkers = workers->active_workers();

  assert(nworkers <= task_queues()->size(), "Just check");

  ShenandoahRootProcessor root_proc(_heap, nworkers, root_phase);
  TASKQUEUE_STATS_ONLY(task_queues()->reset_taskqueue_stats());
  task_queues()->reserve(nworkers);

  if (_heap->has_forwarded_objects()) {
    ShenandoahInitMarkRootsTask<RESOLVE> mark_roots(&root_proc);
    workers->run_task(&mark_roots);
  } else {
    // No need to update references, which means the heap is stable.
    // Can save time not walking through forwarding pointers.
    ShenandoahInitMarkRootsTask<NONE> mark_roots(&root_proc);
    workers->run_task(&mark_roots);
  }

  if (ShenandoahConcurrentScanCodeRoots) {
    clear_claim_codecache();
  }
}

void ShenandoahConcurrentMark::update_roots(ShenandoahPhaseTimings::Phase root_phase) {
  assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Must be at a safepoint");

  bool update_code_cache = true; // initialize to safer value
  switch (root_phase) {
    case ShenandoahPhaseTimings::update_roots:
    case ShenandoahPhaseTimings::final_update_refs_roots:
      update_code_cache = false;
      break;
    case ShenandoahPhaseTimings::full_gc_roots:
    case ShenandoahPhaseTimings::degen_gc_update_roots:
      update_code_cache = true;
      break;
    default:
      ShouldNotReachHere();
  }

  ShenandoahHeap* heap = ShenandoahHeap::heap();

  ShenandoahGCPhase phase(root_phase);

  COMPILER2_PRESENT(DerivedPointerTable::clear());

  uint nworkers = heap->workers()->active_workers();

  ShenandoahRootProcessor root_proc(heap, nworkers, root_phase);
  ShenandoahUpdateRootsTask update_roots(&root_proc, update_code_cache);
  heap->workers()->run_task(&update_roots);

  COMPILER2_PRESENT(DerivedPointerTable::update_pointers());
}
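
// A rough summary of the phase -> code cache policy encoded in update_roots() above:
//
//   update_roots, final_update_refs_roots -> code cache left alone
//                                            (to-space invariant asserted in debug builds)
//   full_gc_roots, degen_gc_update_roots  -> code cache pointers updated, relocations fixed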

class ShenandoahUpdateThreadRootsTask : public AbstractGangTask {
private:
  SharedHeap::StrongRootsScope _srs;
  ShenandoahPhaseTimings::Phase _phase;
  ShenandoahGCWorkerPhase _worker_phase;
public:
  ShenandoahUpdateThreadRootsTask(bool is_par, ShenandoahPhaseTimings::Phase phase) :
    AbstractGangTask("Shenandoah Update Thread Roots"),
    _srs(ShenandoahHeap::heap(), true),
    _phase(phase),
    _worker_phase(phase) {}

  void work(uint worker_id) {
    ShenandoahUpdateRefsClosure cl;
    ShenandoahWorkerTimingsTracker timer(ShenandoahPhaseTimings::ThreadRoots, worker_id);
    ResourceMark rm;
    Threads::possibly_parallel_oops_do(&cl, NULL, NULL);
  }
};

void ShenandoahConcurrentMark::update_thread_roots(ShenandoahPhaseTimings::Phase root_phase) {
  assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Must be at a safepoint");

  ShenandoahGCPhase phase(root_phase);

  COMPILER2_PRESENT(DerivedPointerTable::clear());

  WorkGang* workers = _heap->workers();
  bool is_par = workers->active_workers() > 1;

  ShenandoahUpdateThreadRootsTask task(is_par, root_phase);
  workers->run_task(&task);

  COMPILER2_PRESENT(DerivedPointerTable::update_pointers());
}

void ShenandoahConcurrentMark::initialize(uint workers) {
  _heap = ShenandoahHeap::heap();

  uint num_queues = MAX2(workers, 1U);

  _task_queues = new ShenandoahObjToScanQueueSet((int) num_queues);

  for (uint i = 0; i < num_queues; ++i) {
    ShenandoahObjToScanQueue* task_queue = new ShenandoahObjToScanQueue();
    task_queue->initialize();
    _task_queues->register_queue(i, task_queue);
  }

  JavaThread::satb_mark_queue_set().set_buffer_size(ShenandoahSATBBufferSize);
}

void ShenandoahConcurrentMark::concurrent_scan_code_roots(uint worker_id, ReferenceProcessor* rp) {
  if (ShenandoahConcurrentScanCodeRoots && claim_codecache()) {
    ShenandoahObjToScanQueue* q = task_queues()->queue(worker_id);
    if (!_heap->unload_classes()) {
      MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
      // TODO: We cannot honor StringDeduplication here, due to lock ranking
      // inversion. So, we may miss some deduplication candidates.
      if (_heap->has_forwarded_objects()) {
        ShenandoahMarkResolveRefsClosure cl(q, rp);
        CodeBlobToOopClosure blobs(&cl, !CodeBlobToOopClosure::FixRelocations);
        CodeCache::blobs_do(&blobs);
      } else {
        ShenandoahMarkRefsClosure cl(q, rp);
        CodeBlobToOopClosure blobs(&cl, !CodeBlobToOopClosure::FixRelocations);
        CodeCache::blobs_do(&blobs);
      }
    }
  }
}

void ShenandoahConcurrentMark::mark_from_roots() {
  WorkGang* workers = _heap->workers();
  uint nworkers = workers->active_workers();

  ShenandoahGCPhase conc_mark_phase(ShenandoahPhaseTimings::conc_mark);

  if (_heap->process_references()) {
    ReferenceProcessor* rp = _heap->ref_processor();
    rp->set_active_mt_degree(nworkers);

    // Enable ("weak") refs discovery.
    rp->enable_discovery(true /*verify_no_refs*/, true);
    rp->setup_policy(_heap->collector_policy()->should_clear_all_soft_refs());
  }

  shenandoah_assert_rp_isalive_not_installed();
  ShenandoahIsAliveSelector is_alive;
  ReferenceProcessorIsAliveMutator fix_isalive(_heap->ref_processor(), is_alive.is_alive_closure());

  task_queues()->reserve(nworkers);

  {
    ShenandoahTaskTerminator terminator(nworkers, task_queues());
    ShenandoahConcurrentMarkingTask task(this, &terminator);
    workers->run_task(&task);
  }

  assert(task_queues()->is_empty() || _heap->cancelled_gc(), "Should be empty when not cancelled");
  if (!_heap->cancelled_gc()) {
    TASKQUEUE_STATS_ONLY(task_queues()->print_taskqueue_stats());
  }

  TASKQUEUE_STATS_ONLY(task_queues()->reset_taskqueue_stats());
}
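
// Note on mark_from_roots() above: reference discovery must be armed before any
// marking task runs, and the is-alive closure stays installed for the whole of
// concurrent mark; ReferenceProcessorIsAliveMutator restores the previous closure
// automatically when it goes out of scope.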

void ShenandoahConcurrentMark::finish_mark_from_roots(bool full_gc) {
  assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Must be at a safepoint");

  uint nworkers = _heap->workers()->active_workers();

  // Finally mark everything else we've got in our queues during the previous steps.
  // It does two different things for concurrent vs. mark-compact GC:
  // - For concurrent GC, it starts with empty task queues, drains the remaining
  //   SATB buffers, and then completes the marking closure.
  // - For mark-compact GC, it starts out with the task queues seeded by initial
  //   root scan, and completes the closure, thus marking through all live objects.
  // The implementation is the same, so it's shared here.
  {
    ShenandoahGCPhase phase(full_gc ?
                            ShenandoahPhaseTimings::full_gc_mark_finish_queues :
                            ShenandoahPhaseTimings::finish_queues);
    task_queues()->reserve(nworkers);

    shenandoah_assert_rp_isalive_not_installed();
    ShenandoahIsAliveSelector is_alive;
    ReferenceProcessorIsAliveMutator fix_isalive(_heap->ref_processor(), is_alive.is_alive_closure());

    SharedHeap::StrongRootsScope scope(_heap, true);
    ShenandoahTaskTerminator terminator(nworkers, task_queues());
    ShenandoahFinalMarkingTask task(this, &terminator, ShenandoahStringDedup::is_enabled());
    _heap->workers()->run_task(&task);
  }

  assert(task_queues()->is_empty(), "Should be empty");

  // When we're done marking everything, we process weak references.
  if (_heap->process_references()) {
    weak_refs_work(full_gc);
  } else {
    cleanup_jni_refs();
  }

  // And finally finish class unloading.
  if (_heap->unload_classes()) {
    _heap->unload_classes_and_cleanup_tables(full_gc);
  } else if (ShenandoahStringDedup::is_enabled()) {
    ShenandoahStringDedup::parallel_cleanup();
  }
  assert(task_queues()->is_empty(), "Should be empty");
  TASKQUEUE_STATS_ONLY(task_queues()->print_taskqueue_stats());
  TASKQUEUE_STATS_ONLY(task_queues()->reset_taskqueue_stats());

  // Resize Metaspace.
  MetaspaceGC::compute_new_size();
}

// Weak Reference Closures
class ShenandoahCMDrainMarkingStackClosure: public VoidClosure {
  uint _worker_id;
  ShenandoahTaskTerminator* _terminator;
  bool _reset_terminator;

public:
  ShenandoahCMDrainMarkingStackClosure(uint worker_id, ShenandoahTaskTerminator* t, bool reset_terminator = false):
    _worker_id(worker_id),
    _terminator(t),
    _reset_terminator(reset_terminator) {
  }

  void do_void() {
    assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Must be at a safepoint");

    ShenandoahHeap* sh = ShenandoahHeap::heap();
    ShenandoahConcurrentMark* scm = sh->concurrent_mark();
    assert(sh->process_references(), "why else would we be here?");
    ReferenceProcessor* rp = sh->ref_processor();

    shenandoah_assert_rp_isalive_installed();

    scm->mark_loop(_worker_id, _terminator, rp,
                   false,  // not cancellable
                   false); // do not do strdedup

    if (_reset_terminator) {
      _terminator->reset_for_reuse();
    }
  }
};

class ShenandoahCMKeepAliveClosure : public OopClosure {
private:
  ShenandoahObjToScanQueue* _queue;
  ShenandoahHeap* _heap;
  ShenandoahMarkingContext* const _mark_context;

  template <class T>
  inline void do_oop_nv(T* p) {
    ShenandoahConcurrentMark::mark_through_ref<T, NONE, NO_DEDUP>(p, _heap, _queue, _mark_context);
  }

public:
  ShenandoahCMKeepAliveClosure(ShenandoahObjToScanQueue* q) :
    _queue(q),
    _heap(ShenandoahHeap::heap()),
    _mark_context(_heap->marking_context()) {}

  void do_oop(narrowOop* p) { do_oop_nv(p); }
  void do_oop(oop* p)       { do_oop_nv(p); }
};

class ShenandoahCMKeepAliveUpdateClosure : public OopClosure {
private:
  ShenandoahObjToScanQueue* _queue;
  ShenandoahHeap* _heap;
  ShenandoahMarkingContext* const _mark_context;

  template <class T>
  inline void do_oop_nv(T* p) {
    ShenandoahConcurrentMark::mark_through_ref<T, SIMPLE, NO_DEDUP>(p, _heap, _queue, _mark_context);
  }

public:
  ShenandoahCMKeepAliveUpdateClosure(ShenandoahObjToScanQueue* q) :
    _queue(q),
    _heap(ShenandoahHeap::heap()),
    _mark_context(_heap->marking_context()) {}

  void do_oop(narrowOop* p) { do_oop_nv(p); }
  void do_oop(oop* p)       { do_oop_nv(p); }
};
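
// The two keep-alive closures above differ only in the UpdateRefsMode template
// argument: NONE assumes a stable heap with no forwarded objects, while SIMPLE also
// updates the visited slot to point to the to-space copy. The choice between them is
// made in ShenandoahRefProcTaskProxy::work() below, keyed off
// heap->has_forwarded_objects().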

class ShenandoahRefProcTaskProxy : public AbstractGangTask {
private:
  AbstractRefProcTaskExecutor::ProcessTask& _proc_task;
  ShenandoahTaskTerminator* _terminator;

public:
  ShenandoahRefProcTaskProxy(AbstractRefProcTaskExecutor::ProcessTask& proc_task,
                             ShenandoahTaskTerminator* t) :
    AbstractGangTask("Process reference objects in parallel"),
    _proc_task(proc_task),
    _terminator(t) {
  }

  void work(uint worker_id) {
    assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Must be at a safepoint");
    ShenandoahHeap* heap = ShenandoahHeap::heap();
    ShenandoahCMDrainMarkingStackClosure complete_gc(worker_id, _terminator);
    if (heap->has_forwarded_objects()) {
      ShenandoahForwardedIsAliveClosure is_alive;
      ShenandoahCMKeepAliveUpdateClosure keep_alive(heap->concurrent_mark()->get_queue(worker_id));
      _proc_task.work(worker_id, is_alive, keep_alive, complete_gc);
    } else {
      ShenandoahIsAliveClosure is_alive;
      ShenandoahCMKeepAliveClosure keep_alive(heap->concurrent_mark()->get_queue(worker_id));
      _proc_task.work(worker_id, is_alive, keep_alive, complete_gc);
    }
  }
};

class ShenandoahRefEnqueueTaskProxy : public AbstractGangTask {
private:
  AbstractRefProcTaskExecutor::EnqueueTask& _enqueue_task;

public:
  ShenandoahRefEnqueueTaskProxy(AbstractRefProcTaskExecutor::EnqueueTask& enqueue_task) :
    AbstractGangTask("Enqueue reference objects in parallel"),
    _enqueue_task(enqueue_task) {
  }

  void work(uint worker_id) {
    _enqueue_task.work(worker_id);
  }
};

class ShenandoahRefProcTaskExecutor : public AbstractRefProcTaskExecutor {
private:
  WorkGang* _workers;

public:
  ShenandoahRefProcTaskExecutor(WorkGang* workers) :
    _workers(workers) {
  }

  // Executes a task using worker threads.
  void execute(ProcessTask& task) {
    assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Must be at a safepoint");

    // Shortcut execution if the task is empty.
    // This should be replaced with the generic ReferenceProcessor shortcut,
    // see JDK-8181214, JDK-8043575, JDK-6938732.
    if (task.is_empty()) {
      return;
    }

    ShenandoahHeap* heap = ShenandoahHeap::heap();
    ShenandoahConcurrentMark* cm = heap->concurrent_mark();
    uint nworkers = _workers->active_workers();
    cm->task_queues()->reserve(nworkers);

    ShenandoahTaskTerminator terminator(nworkers, cm->task_queues());
    ShenandoahRefProcTaskProxy proc_task_proxy(task, &terminator);
    _workers->run_task(&proc_task_proxy);
  }

  void execute(EnqueueTask& task) {
    ShenandoahRefEnqueueTaskProxy enqueue_task_proxy(task);
    _workers->run_task(&enqueue_task_proxy);
  }
};
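
// Taken together, the proxies above let ReferenceProcessor drive multi-threaded
// processing: execute(ProcessTask&) fans the work out to the worker gang with a
// fresh terminator, while the serial fallback path inside the RP reuses queue 0 via
// the closures set up in weak_refs_work_doit() below.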

void ShenandoahConcurrentMark::weak_refs_work(bool full_gc) {
  assert(_heap->process_references(), "sanity");

  ShenandoahPhaseTimings::Phase phase_root =
    full_gc ?
    ShenandoahPhaseTimings::full_gc_weakrefs :
    ShenandoahPhaseTimings::weakrefs;

  ShenandoahGCPhase phase(phase_root);

  ReferenceProcessor* rp = _heap->ref_processor();
  weak_refs_work_doit(full_gc);

  rp->verify_no_references_recorded();
  assert(!rp->discovery_enabled(), "Post condition");
}

void ShenandoahConcurrentMark::weak_refs_work_doit(bool full_gc) {
  ReferenceProcessor* rp = _heap->ref_processor();

  ShenandoahPhaseTimings::Phase phase_process =
    full_gc ?
    ShenandoahPhaseTimings::full_gc_weakrefs_process :
    ShenandoahPhaseTimings::weakrefs_process;

  ShenandoahPhaseTimings::Phase phase_enqueue =
    full_gc ?
    ShenandoahPhaseTimings::full_gc_weakrefs_enqueue :
    ShenandoahPhaseTimings::weakrefs_enqueue;

  shenandoah_assert_rp_isalive_not_installed();
  ShenandoahIsAliveSelector is_alive;
  ReferenceProcessorIsAliveMutator fix_isalive(rp, is_alive.is_alive_closure());

  WorkGang* workers = _heap->workers();
  uint nworkers = workers->active_workers();

  rp->setup_policy(_heap->collector_policy()->should_clear_all_soft_refs());
  rp->set_active_mt_degree(nworkers);

  assert(task_queues()->is_empty(), "Should be empty");

  // The complete_gc and keep_alive closures instantiated here are only needed for
  // the single-threaded path in RP. They share queue 0 for tracking work, which
  // simplifies implementation. Since RP may decide to call complete_gc several
  // times, we need to be able to reuse the terminator.
  uint serial_worker_id = 0;
  ShenandoahTaskTerminator terminator(1, task_queues());
  ShenandoahCMDrainMarkingStackClosure complete_gc(serial_worker_id, &terminator, /* reset_terminator = */ true);

  ShenandoahRefProcTaskExecutor executor(workers);

  {
    ShenandoahGCPhase phase(phase_process);

    if (_heap->has_forwarded_objects()) {
      ShenandoahForwardedIsAliveClosure is_alive;
      ShenandoahCMKeepAliveUpdateClosure keep_alive(get_queue(serial_worker_id));
      rp->process_discovered_references(&is_alive, &keep_alive,
                                        &complete_gc, &executor,
                                        NULL, _heap->shenandoah_policy()->tracer()->gc_id());
    } else {
      ShenandoahIsAliveClosure is_alive;
      ShenandoahCMKeepAliveClosure keep_alive(get_queue(serial_worker_id));
      rp->process_discovered_references(&is_alive, &keep_alive,
                                        &complete_gc, &executor,
                                        NULL, _heap->shenandoah_policy()->tracer()->gc_id());
    }

    assert(task_queues()->is_empty(), "Should be empty");
  }

  {
    ShenandoahGCPhase phase(phase_enqueue);
    rp->enqueue_discovered_references(&executor);
  }
}
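
// A condensed view of the weak-refs pipeline above:
//
//   weak_refs_work()
//     -> weak_refs_work_doit()
//          -> rp->process_discovered_references(...)  // possibly MT, via executor
//          -> rp->enqueue_discovered_references(...)  // publish to the pending list
//     -> rp->verify_no_references_recorded()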

// No-op closure. Weak JNI refs are cleaned by iterating them.
// Nothing else to do here.
class ShenandoahCleanupWeakRootsClosure : public OopClosure {
  virtual void do_oop(oop* o) {}
  virtual void do_oop(narrowOop* o) {}
};

void ShenandoahConcurrentMark::cleanup_jni_refs() {
  ShenandoahIsAliveSelector is_alive;
  ShenandoahCleanupWeakRootsClosure cl;
  JNIHandles::weak_oops_do(is_alive.is_alive_closure(), &cl);
}

class ShenandoahCancelledGCYieldClosure : public YieldClosure {
private:
  ShenandoahHeap* const _heap;
public:
  ShenandoahCancelledGCYieldClosure() : _heap(ShenandoahHeap::heap()) {}
  virtual bool should_return() { return _heap->cancelled_gc(); }
};

class ShenandoahPrecleanCompleteGCClosure : public VoidClosure {
public:
  void do_void() {
    ShenandoahHeap* sh = ShenandoahHeap::heap();
    ShenandoahConcurrentMark* scm = sh->concurrent_mark();
    assert(sh->process_references(), "why else would we be here?");
    ShenandoahTaskTerminator terminator(1, scm->task_queues());

    ReferenceProcessor* rp = sh->ref_processor();
    shenandoah_assert_rp_isalive_installed();

    scm->mark_loop(0, &terminator, rp,
                   false,  // not cancellable
                   false); // do not do strdedup
  }
};

class ShenandoahPrecleanTask : public AbstractGangTask {
private:
  ReferenceProcessor* _rp;

public:
  ShenandoahPrecleanTask(ReferenceProcessor* rp) :
    AbstractGangTask("Precleaning task"),
    _rp(rp) {}

  void work(uint worker_id) {
    assert(worker_id == 0, "The code below is single-threaded, only one worker is expected");
    ShenandoahParallelWorkerSession worker_session(worker_id);

    ShenandoahHeap* sh = ShenandoahHeap::heap();
    assert(!sh->has_forwarded_objects(), "No forwarded objects expected here");

    ShenandoahObjToScanQueue* q = sh->concurrent_mark()->get_queue(worker_id);

    ShenandoahCancelledGCYieldClosure yield;
    ShenandoahPrecleanCompleteGCClosure complete_gc;

    ShenandoahIsAliveClosure is_alive;
    ShenandoahCMKeepAliveClosure keep_alive(q);
    ResourceMark rm;
    _rp->preclean_discovered_references(&is_alive, &keep_alive,
                                        &complete_gc, &yield,
                                        NULL, sh->shenandoah_policy()->tracer()->gc_id());
  }
};
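
// Precleaning is deliberately single-threaded in this implementation: the task
// asserts worker_id == 0, and preclean_weak_refs() below reserves exactly one queue.
// The yield closure aborts precleaning as soon as the GC is cancelled, since there
// is no point in filtering references for a cycle that will not complete.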

void ShenandoahConcurrentMark::preclean_weak_refs() {
  // Pre-cleaning weak references before diving into STW makes sense at the
  // end of concurrent mark. This will filter out the references whose referents
  // are alive. Note that ReferenceProcessor already filters these out during
  // reference discovery, and the bulk of the work is done there. This phase
  // processes the leftovers that missed the initial filtering, i.e. when the
  // referent was marked alive after the reference was discovered by RP.

  assert(_heap->process_references(), "sanity");

  ReferenceProcessor* rp = _heap->ref_processor();

  assert(task_queues()->is_empty(), "Should be empty");

  ReferenceProcessorMTDiscoveryMutator fix_mt_discovery(rp, false);

  shenandoah_assert_rp_isalive_not_installed();
  ShenandoahIsAliveSelector is_alive;
  ReferenceProcessorIsAliveMutator fix_isalive(rp, is_alive.is_alive_closure());

  // Execute precleaning in the worker thread: it will give us GCLABs, String dedup
  // queues and other goodies. When the upstream ReferenceProcessor starts supporting
  // parallel precleans, we can extend this to more threads.
  WorkGang* workers = _heap->workers();
  uint nworkers = workers->active_workers();
  assert(nworkers == 1, "This code uses only a single worker");
  task_queues()->reserve(nworkers);

  ShenandoahPrecleanTask task(rp);
  workers->run_task(&task);

  assert(task_queues()->is_empty(), "Should be empty");
}

void ShenandoahConcurrentMark::cancel() {
  // Clean up marking stacks.
  ShenandoahObjToScanQueueSet* queues = task_queues();
  queues->clear();

  // Cancel SATB buffers.
  JavaThread::satb_mark_queue_set().abandon_partial_marking();
}

ShenandoahObjToScanQueue* ShenandoahConcurrentMark::get_queue(uint worker_id) {
  assert(task_queues()->get_reserved() > worker_id, err_msg("No reserved queue for worker id: %d", worker_id));
  return _task_queues->queue(worker_id);
}

template <bool CANCELLABLE>
void ShenandoahConcurrentMark::mark_loop_prework(uint w, ShenandoahTaskTerminator* t, ReferenceProcessor* rp,
                                                 bool strdedup) {
  ShenandoahObjToScanQueue* q = get_queue(w);

  jushort* ld = _heap->get_liveness_cache(w);

  // TODO: We can clean this up if we figure out how to do templated oop closures that
  // play nice with specialized_oop_iterators.
  if (_heap->unload_classes()) {
    if (_heap->has_forwarded_objects()) {
      if (strdedup) {
        ShenandoahStrDedupQueue* dq = ShenandoahStringDedup::queue(w);
        ShenandoahMarkUpdateRefsMetadataDedupClosure cl(q, dq, rp);
        mark_loop_work<ShenandoahMarkUpdateRefsMetadataDedupClosure, CANCELLABLE>(&cl, ld, w, t);
      } else {
        ShenandoahMarkUpdateRefsMetadataClosure cl(q, rp);
        mark_loop_work<ShenandoahMarkUpdateRefsMetadataClosure, CANCELLABLE>(&cl, ld, w, t);
      }
    } else {
      if (strdedup) {
        ShenandoahStrDedupQueue* dq = ShenandoahStringDedup::queue(w);
        ShenandoahMarkRefsMetadataDedupClosure cl(q, dq, rp);
        mark_loop_work<ShenandoahMarkRefsMetadataDedupClosure, CANCELLABLE>(&cl, ld, w, t);
      } else {
        ShenandoahMarkRefsMetadataClosure cl(q, rp);
        mark_loop_work<ShenandoahMarkRefsMetadataClosure, CANCELLABLE>(&cl, ld, w, t);
      }
    }
  } else {
    if (_heap->has_forwarded_objects()) {
      if (strdedup) {
        ShenandoahStrDedupQueue* dq = ShenandoahStringDedup::queue(w);
        ShenandoahMarkUpdateRefsDedupClosure cl(q, dq, rp);
        mark_loop_work<ShenandoahMarkUpdateRefsDedupClosure, CANCELLABLE>(&cl, ld, w, t);
      } else {
        ShenandoahMarkUpdateRefsClosure cl(q, rp);
        mark_loop_work<ShenandoahMarkUpdateRefsClosure, CANCELLABLE>(&cl, ld, w, t);
      }
    } else {
      if (strdedup) {
        ShenandoahStrDedupQueue* dq = ShenandoahStringDedup::queue(w);
        ShenandoahMarkRefsDedupClosure cl(q, dq, rp);
        mark_loop_work<ShenandoahMarkRefsDedupClosure, CANCELLABLE>(&cl, ld, w, t);
      } else {
        ShenandoahMarkRefsClosure cl(q, rp);
        mark_loop_work<ShenandoahMarkRefsClosure, CANCELLABLE>(&cl, ld, w, t);
      }
    }
  }

  _heap->flush_liveness_cache(w);
}
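
// The nest above is a hand-written 2x2x2 dispatch over compile-time closure types:
//
//   unload_classes x has_forwarded_objects x strdedup
//     -> ShenandoahMark[UpdateRefs|Refs][Metadata][Dedup]Closure
//
// Each leaf instantiates mark_loop_work<Closure, CANCELLABLE> with a concrete closure
// type, so the hot marking loop stays monomorphic; see the TODO above about folding
// this into templated closures.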

template <class T, bool CANCELLABLE>
void ShenandoahConcurrentMark::mark_loop_work(T* cl, jushort* live_data, uint worker_id, ShenandoahTaskTerminator* terminator) {
  int seed = 17;
  uintx stride = ShenandoahMarkLoopStride;

  ShenandoahHeap* heap = ShenandoahHeap::heap();
  ShenandoahObjToScanQueueSet* queues = task_queues();
  ShenandoahObjToScanQueue* q;
  ShenandoahMarkTask t;

  /*
   * Process outstanding queues, if any.
   *
   * There can be more queues than workers. To deal with the imbalance, we claim
   * extra queues first. Since marking can push new tasks into the queue associated
   * with this worker id, we come back to process this queue in the normal loop.
   */
  assert(queues->get_reserved() == heap->workers()->active_workers(),
         "Need to reserve proper number of queues");

  q = queues->claim_next();
  while (q != NULL) {
    if (CANCELLABLE && heap->cancelled_gc()) {
      return;
    }

    for (uint i = 0; i < stride; i++) {
      if (q->pop(t)) {
        do_task<T>(q, cl, live_data, &t);
      } else {
        assert(q->is_empty(), "Must be empty");
        q = queues->claim_next();
        break;
      }
    }
  }

  q = get_queue(worker_id);

  ShenandoahStrDedupQueue* dq = NULL;
  if (ShenandoahStringDedup::is_enabled()) {
    dq = ShenandoahStringDedup::queue(worker_id);
  }

  ShenandoahSATBBufferClosure drain_satb(q, dq);
  SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set();

  /*
   * Normal marking loop:
   */
  while (true) {
    if (CANCELLABLE && heap->cancelled_gc()) {
      return;
    }

    while (satb_mq_set.completed_buffers_num() > 0) {
      satb_mq_set.apply_closure_to_completed_buffer(&drain_satb);
    }

    uint work = 0;
    for (uint i = 0; i < stride; i++) {
      if (q->pop(t) ||
          queues->steal(worker_id, &seed, t)) {
        do_task<T>(q, cl, live_data, &t);
        work++;
      } else {
        break;
      }
    }

    if (work == 0) {
      // No work encountered in the current stride, try to terminate.
      ShenandoahTerminatorTerminator tt(heap);
      if (terminator->offer_termination(&tt)) return;
    }
  }
}

bool ShenandoahConcurrentMark::claim_codecache() {
  assert(ShenandoahConcurrentScanCodeRoots, "must not be called otherwise");
  return _claimed_codecache.try_set();
}

void ShenandoahConcurrentMark::clear_claim_codecache() {
  assert(ShenandoahConcurrentScanCodeRoots, "must not be called otherwise");
  _claimed_codecache.unset();
}