/*
 * Copyright (c) 2013, 2020, Red Hat, Inc. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"

#include "classfile/symbolTable.hpp"
#include "classfile/systemDictionary.hpp"
#include "code/codeCache.hpp"

#include "gc/shared/weakProcessor.inline.hpp"
#include "gc/shared/gcTimer.hpp"
#include "gc/shared/gcTrace.hpp"
#include "gc/shared/referenceProcessor.hpp"
#include "gc/shared/referenceProcessorPhaseTimes.hpp"
#include "gc/shared/strongRootsScope.hpp"

#include "gc/shenandoah/shenandoahBarrierSet.inline.hpp"
#include "gc/shenandoah/shenandoahClosures.inline.hpp"
#include "gc/shenandoah/shenandoahConcurrentMark.inline.hpp"
#include "gc/shenandoah/shenandoahMarkCompact.hpp"
#include "gc/shenandoah/shenandoahHeap.inline.hpp"
#include "gc/shenandoah/shenandoahRootProcessor.inline.hpp"
#include "gc/shenandoah/shenandoahOopClosures.inline.hpp"
#include "gc/shenandoah/shenandoahPhaseTimings.hpp"
#include "gc/shenandoah/shenandoahTaskqueue.inline.hpp"
#include "gc/shenandoah/shenandoahUtils.hpp"

#include "memory/iterator.inline.hpp"
#include "memory/metaspace.hpp"
#include "memory/resourceArea.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/handles.inline.hpp"

template<UpdateRefsMode UPDATE_REFS>
class ShenandoahInitMarkRootsClosure : public OopClosure {
private:
  ShenandoahObjToScanQueue* _queue;
  ShenandoahHeap* _heap;
  ShenandoahMarkingContext* const _mark_context;

  template <class T>
  inline void do_oop_work(T* p) {
    ShenandoahConcurrentMark::mark_through_ref<T, UPDATE_REFS, NO_DEDUP>(p, _heap, _queue, _mark_context);
  }

public:
  ShenandoahInitMarkRootsClosure(ShenandoahObjToScanQueue* q) :
    _queue(q),
    _heap(ShenandoahHeap::heap()),
    _mark_context(_heap->marking_context()) {}

  void do_oop(narrowOop* p) { do_oop_work(p); }
  void do_oop(oop* p)       { do_oop_work(p); }
};

ShenandoahMarkRefsSuperClosure::ShenandoahMarkRefsSuperClosure(ShenandoahObjToScanQueue* q, ReferenceProcessor* rp) :
  MetadataVisitingOopIterateClosure(rp),
  _queue(q),
  _heap(ShenandoahHeap::heap()),
  _mark_context(_heap->marking_context())
{ }
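
// Root-scanning task for the init-mark pause. UPDATE_REFS selects whether root
// oops are resolved through forwarding pointers while scanning; mark_roots()
// below picks RESOLVE or NONE depending on whether forwarded objects may exist.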

template<UpdateRefsMode UPDATE_REFS>
class ShenandoahInitMarkRootsTask : public AbstractGangTask {
private:
  ShenandoahRootScanner* _rp;
public:
  ShenandoahInitMarkRootsTask(ShenandoahRootScanner* rp) :
    AbstractGangTask("Shenandoah init mark roots task"),
    _rp(rp) {
  }

  void work(uint worker_id) {
    assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Must be at a safepoint");
    ShenandoahParallelWorkerSession worker_session(worker_id);

    ShenandoahHeap* heap = ShenandoahHeap::heap();
    ShenandoahObjToScanQueueSet* queues = heap->concurrent_mark()->task_queues();
    assert(queues->get_reserved() > worker_id, "Queue has not been reserved for worker id: %d", worker_id);

    ShenandoahObjToScanQueue* q = queues->queue(worker_id);

    ShenandoahInitMarkRootsClosure<UPDATE_REFS> mark_cl(q);
    do_work(heap, &mark_cl, worker_id);
  }

private:
  void do_work(ShenandoahHeap* heap, OopClosure* oops, uint worker_id) {
    // The rationale for selecting the roots to scan is as follows:
    //   a. With unload_classes = true, we only want to scan the actual strong roots from the
    //      code cache. This allows us to identify the dead classes, unload them, *and*
    //      invalidate the relevant code cache blobs. This can only be done together with
    //      class unloading.
    //   b. With unload_classes = false, we have to nominally retain all the references from the
    //      code cache, because generated code may embed classes/oops that we would otherwise
    //      never visit during marking. Without the code cache invalidation from (a),
    //      we risk executing such a code cache blob, and crashing.
    if (heap->unload_classes()) {
      _rp->strong_roots_do(worker_id, oops);
    } else {
      _rp->roots_do(worker_id, oops);
    }
  }
};

class ShenandoahUpdateRootsTask : public AbstractGangTask {
private:
  ShenandoahRootUpdater* _root_updater;
  bool _check_alive;
public:
  ShenandoahUpdateRootsTask(ShenandoahRootUpdater* root_updater, bool check_alive) :
    AbstractGangTask("Shenandoah update roots task"),
    _root_updater(root_updater),
    _check_alive(check_alive) {
  }

  void work(uint worker_id) {
    assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Must be at a safepoint");
    ShenandoahParallelWorkerSession worker_session(worker_id);

    ShenandoahHeap* heap = ShenandoahHeap::heap();
    ShenandoahUpdateRefsClosure cl;
    if (_check_alive) {
      ShenandoahForwardedIsAliveClosure is_alive;
      _root_updater->roots_do<ShenandoahForwardedIsAliveClosure, ShenandoahUpdateRefsClosure>(worker_id, &is_alive, &cl);
    } else {
      AlwaysTrueClosure always_true;
      _root_updater->roots_do<AlwaysTrueClosure, ShenandoahUpdateRefsClosure>(worker_id, &always_true, &cl);
    }
  }
};

class ShenandoahConcurrentMarkingTask : public AbstractGangTask {
private:
  ShenandoahConcurrentMark* _cm;
  TaskTerminator* _terminator;

public:
  ShenandoahConcurrentMarkingTask(ShenandoahConcurrentMark* cm, TaskTerminator* terminator) :
    AbstractGangTask("Root Region Scan"), _cm(cm), _terminator(terminator) {
  }

  void work(uint worker_id) {
    ShenandoahHeap* heap = ShenandoahHeap::heap();
    ShenandoahConcurrentWorkerSession worker_session(worker_id);
    ShenandoahSuspendibleThreadSetJoiner stsj(ShenandoahSuspendibleWorkers);
    ShenandoahObjToScanQueue* q = _cm->get_queue(worker_id);
    ReferenceProcessor* rp;
    if (heap->process_references()) {
      rp = heap->ref_processor();
      shenandoah_assert_rp_isalive_installed();
    } else {
      rp = NULL;
    }

    _cm->mark_loop(worker_id, _terminator, rp,
                   true,                                  // cancellable
                   ShenandoahStringDedup::is_enabled());  // perform string dedup
  }
};
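
// Drains the per-thread SATB buffers at the final-mark pause. Depending on
// configuration, it also walks thread oops and/or on-stack nmethods, so that
// roots hiding in thread state are not missed by the remark.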

class ShenandoahSATBAndRemarkCodeRootsThreadsClosure : public ThreadClosure {
private:
  ShenandoahSATBBufferClosure* _satb_cl;
  OopClosure* const _cl;
  MarkingCodeBlobClosure* _code_cl;
  uintx _claim_token;

public:
  ShenandoahSATBAndRemarkCodeRootsThreadsClosure(ShenandoahSATBBufferClosure* satb_cl, OopClosure* cl, MarkingCodeBlobClosure* code_cl) :
    _satb_cl(satb_cl), _cl(cl), _code_cl(code_cl),
    _claim_token(Threads::thread_claim_token()) {}

  void do_thread(Thread* thread) {
    if (thread->claim_threads_do(true, _claim_token)) {
      ShenandoahThreadLocalData::satb_mark_queue(thread).apply_closure_and_empty(_satb_cl);
      if (thread->is_Java_thread()) {
        if (_cl != NULL) {
          ResourceMark rm;
          thread->oops_do(_cl, _code_cl);
        } else if (_code_cl != NULL) {
          // In theory it should not be necessary to explicitly walk the nmethods to find roots
          // for concurrent marking. However, oops reachable from nmethods have very complex
          // lifecycles:
          //   * Alive if on the stack of an executing method
          //   * Weakly reachable otherwise
          // Some objects reachable from nmethods, such as the class loader (or klass_holder)
          // of the receiver, should be live by the SATB invariant, but other oops recorded
          // in nmethods may behave differently.
          JavaThread* jt = (JavaThread*)thread;
          jt->nmethods_do(_code_cl);
        }
      }
    }
  }
};

template <bool CONCURRENT, bool SINGLE_THREADED>
class ShenandoahConcurrentRootsIterator {
private:
  ShenandoahVMRoots<CONCURRENT>                               _vm_roots;
  ShenandoahClassLoaderDataRoots<CONCURRENT, SINGLE_THREADED> _cld_roots;
  ShenandoahNMethodTableSnapshot*                             _codecache_snapshot;
  ShenandoahPhaseTimings::Phase                               _phase;

public:
  ShenandoahConcurrentRootsIterator(ShenandoahPhaseTimings::Phase phase);
  ~ShenandoahConcurrentRootsIterator();

  void oops_do(OopClosure* oops, uint worker_id);
};

template <bool CONCURRENT, bool SINGLE_THREADED>
ShenandoahConcurrentRootsIterator<CONCURRENT, SINGLE_THREADED>::ShenandoahConcurrentRootsIterator(ShenandoahPhaseTimings::Phase phase) :
  _vm_roots(phase),
  _cld_roots(phase),
  _codecache_snapshot(NULL),
  _phase(phase) {
  if (!ShenandoahHeap::heap()->unload_classes()) {
    if (CONCURRENT) {
      CodeCache_lock->lock_without_safepoint_check();
    } else {
      assert(SafepointSynchronize::is_at_safepoint(), "Must be at a safepoint");
    }
    _codecache_snapshot = ShenandoahCodeRoots::table()->snapshot_for_iteration();
  }
  assert(!CONCURRENT || !ShenandoahHeap::heap()->has_forwarded_objects(), "Not expecting forwarded pointers during concurrent marking");
}

template <bool CONCURRENT, bool SINGLE_THREADED>
ShenandoahConcurrentRootsIterator<CONCURRENT, SINGLE_THREADED>::~ShenandoahConcurrentRootsIterator() {
  if (!ShenandoahHeap::heap()->unload_classes()) {
    ShenandoahCodeRoots::table()->finish_iteration(_codecache_snapshot);
    if (CONCURRENT) {
      CodeCache_lock->unlock();
    }
  }
}
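
// oops_do visits VM roots, CLD roots, and (when classes are not being unloaded)
// the code cache snapshot. Under concurrent iteration the CLD closure claims
// each CLD strongly, so every CLD is processed by exactly one worker.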

template <bool CONCURRENT, bool SINGLE_THREADED>
void ShenandoahConcurrentRootsIterator<CONCURRENT, SINGLE_THREADED>::oops_do(OopClosure* oops, uint worker_id) {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  CLDToOopClosure clds_cl(oops, CONCURRENT ? ClassLoaderData::_claim_strong : ClassLoaderData::_claim_none);
  _vm_roots.oops_do(oops, worker_id);

  if (!heap->unload_classes()) {
    _cld_roots.cld_do(&clds_cl, worker_id);

    ShenandoahWorkerTimingsTracker timer(_phase, ShenandoahPhaseTimings::CodeCacheRoots, worker_id);
    CodeBlobToOopClosure blobs(oops, !CodeBlobToOopClosure::FixRelocations);
    _codecache_snapshot->parallel_blobs_do(&blobs);
  } else {
    _cld_roots.always_strong_cld_do(&clds_cl, worker_id);
  }
}

// Process concurrent roots at safepoints
template <typename CLOSURE>
class ShenandoahProcessConcurrentRootsTask : public AbstractGangTask {
private:
  ShenandoahConcurrentRootsIterator<false /* concurrent */, false /* single_thread */> _itr;
  ShenandoahConcurrentMark* const _cm;
  ReferenceProcessor*             _rp;
public:
  ShenandoahProcessConcurrentRootsTask(ShenandoahConcurrentMark* cm,
                                       ShenandoahPhaseTimings::Phase phase);
  void work(uint worker_id);
};

template <typename CLOSURE>
ShenandoahProcessConcurrentRootsTask<CLOSURE>::ShenandoahProcessConcurrentRootsTask(ShenandoahConcurrentMark* cm,
                                                                                    ShenandoahPhaseTimings::Phase phase) :
  AbstractGangTask("Shenandoah STW Concurrent Mark Task"),
  _itr(phase),
  _cm(cm),
  _rp(NULL) {
  ShenandoahHeap* heap = ShenandoahHeap::heap();
  if (heap->process_references()) {
    _rp = heap->ref_processor();
    shenandoah_assert_rp_isalive_installed();
  }
}

template <typename CLOSURE>
void ShenandoahProcessConcurrentRootsTask<CLOSURE>::work(uint worker_id) {
  ShenandoahParallelWorkerSession worker_session(worker_id);
  ShenandoahObjToScanQueue* q = _cm->task_queues()->queue(worker_id);
  CLOSURE cl(q, _rp);
  _itr.oops_do(&cl, worker_id);
}
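
// Final-mark task: first drains leftover SATB buffers (claiming all threads),
// then finishes marking by draining the task queues. The closure variants with
// resolve semantics are used when forwarded objects may be present, typically
// on the degenerated/full GC paths.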

class ShenandoahFinalMarkingTask : public AbstractGangTask {
private:
  ShenandoahConcurrentMark* _cm;
  TaskTerminator*           _terminator;
  bool                      _dedup_string;

public:
  ShenandoahFinalMarkingTask(ShenandoahConcurrentMark* cm, TaskTerminator* terminator, bool dedup_string) :
    AbstractGangTask("Shenandoah Final Marking"), _cm(cm), _terminator(terminator), _dedup_string(dedup_string) {
  }

  void work(uint worker_id) {
    ShenandoahHeap* heap = ShenandoahHeap::heap();

    ShenandoahParallelWorkerSession worker_session(worker_id);
    ReferenceProcessor* rp;
    if (heap->process_references()) {
      rp = heap->ref_processor();
      shenandoah_assert_rp_isalive_installed();
    } else {
      rp = NULL;
    }

    // First drain the remaining SATB buffers.
    // Notice that this is not strictly necessary for mark-compact. But since
    // it requires a StrongRootsScope around the task, we need to claim the
    // threads, and performance-wise it does not really matter: it adds about
    // 1ms to a full GC.
    {
      ShenandoahObjToScanQueue* q = _cm->get_queue(worker_id);

      ShenandoahSATBBufferClosure cl(q);
      SATBMarkQueueSet& satb_mq_set = ShenandoahBarrierSet::satb_mark_queue_set();
      while (satb_mq_set.apply_closure_to_completed_buffer(&cl)) /* drain all buffers */;
      bool do_nmethods = heap->unload_classes() && !ShenandoahConcurrentRoots::can_do_concurrent_class_unloading();
      if (heap->has_forwarded_objects()) {
        ShenandoahMarkResolveRefsClosure resolve_mark_cl(q, rp);
        MarkingCodeBlobClosure blobsCl(&resolve_mark_cl, !CodeBlobToOopClosure::FixRelocations);
        ShenandoahSATBAndRemarkCodeRootsThreadsClosure tc(&cl,
                                                          ShenandoahStoreValEnqueueBarrier ? &resolve_mark_cl : NULL,
                                                          do_nmethods ? &blobsCl : NULL);
        Threads::threads_do(&tc);
      } else {
        ShenandoahMarkRefsClosure mark_cl(q, rp);
        MarkingCodeBlobClosure blobsCl(&mark_cl, !CodeBlobToOopClosure::FixRelocations);
        ShenandoahSATBAndRemarkCodeRootsThreadsClosure tc(&cl,
                                                          ShenandoahStoreValEnqueueBarrier ? &mark_cl : NULL,
                                                          do_nmethods ? &blobsCl : NULL);
        Threads::threads_do(&tc);
      }
    }

    _cm->mark_loop(worker_id, _terminator, rp,
                   false, // not cancellable
                   _dedup_string);

    assert(_cm->task_queues()->is_empty(), "Should be empty");
  }
};

void ShenandoahConcurrentMark::mark_roots(ShenandoahPhaseTimings::Phase root_phase) {
  assert(Thread::current()->is_VM_thread(), "can only do this in VMThread");
  assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Must be at a safepoint");

  ShenandoahHeap* heap = ShenandoahHeap::heap();

  ShenandoahGCPhase phase(root_phase);

  WorkGang* workers = heap->workers();
  uint nworkers = workers->active_workers();

  assert(nworkers <= task_queues()->size(), "Just check");

  ShenandoahRootScanner root_proc(nworkers, root_phase);
  TASKQUEUE_STATS_ONLY(task_queues()->reset_taskqueue_stats());
  task_queues()->reserve(nworkers);

  if (heap->has_forwarded_objects()) {
    ShenandoahInitMarkRootsTask<RESOLVE> mark_roots(&root_proc);
    workers->run_task(&mark_roots);
  } else {
    // No need to update references, which means the heap is stable.
    // We can save time by not walking through forwarding pointers.
    ShenandoahInitMarkRootsTask<NONE> mark_roots(&root_proc);
    workers->run_task(&mark_roots);
  }
}

void ShenandoahConcurrentMark::update_roots(ShenandoahPhaseTimings::Phase root_phase) {
  assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Must be at a safepoint");
  assert(root_phase == ShenandoahPhaseTimings::full_gc_update_roots ||
         root_phase == ShenandoahPhaseTimings::degen_gc_update_roots,
         "Only for these phases");

  ShenandoahGCPhase phase(root_phase);

  bool check_alive = root_phase == ShenandoahPhaseTimings::degen_gc_update_roots;

#if COMPILER2_OR_JVMCI
  DerivedPointerTable::clear();
#endif

  uint nworkers = _heap->workers()->active_workers();

  ShenandoahRootUpdater root_updater(nworkers, root_phase);
  ShenandoahUpdateRootsTask update_roots(&root_updater, check_alive);
  _heap->workers()->run_task(&update_roots);

#if COMPILER2_OR_JVMCI
  DerivedPointerTable::update_pointers();
#endif
}
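
// Updates oops in thread roots (stacks) only. update_thread_roots() below wraps
// this task with DerivedPointerTable handling on compiler-enabled builds.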

class ShenandoahUpdateThreadRootsTask : public AbstractGangTask {
private:
  ShenandoahThreadRoots         _thread_roots;
  ShenandoahPhaseTimings::Phase _phase;
  ShenandoahGCWorkerPhase       _worker_phase;
public:
  ShenandoahUpdateThreadRootsTask(bool is_par, ShenandoahPhaseTimings::Phase phase) :
    AbstractGangTask("Shenandoah Update Thread Roots"),
    _thread_roots(phase, is_par),
    _phase(phase),
    _worker_phase(phase) {}

  void work(uint worker_id) {
    ShenandoahParallelWorkerSession worker_session(worker_id);
    ShenandoahUpdateRefsClosure cl;
    _thread_roots.oops_do(&cl, NULL, worker_id);
  }
};

void ShenandoahConcurrentMark::update_thread_roots(ShenandoahPhaseTimings::Phase root_phase) {
  assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Must be at a safepoint");

  ShenandoahGCPhase phase(root_phase);

#if COMPILER2_OR_JVMCI
  DerivedPointerTable::clear();
#endif

  WorkGang* workers = _heap->workers();
  bool is_par = workers->active_workers() > 1;

  ShenandoahUpdateThreadRootsTask task(is_par, root_phase);
  workers->run_task(&task);

#if COMPILER2_OR_JVMCI
  DerivedPointerTable::update_pointers();
#endif
}

void ShenandoahConcurrentMark::initialize(uint workers) {
  _heap = ShenandoahHeap::heap();

  uint num_queues = MAX2(workers, 1U);

  _task_queues = new ShenandoahObjToScanQueueSet((int) num_queues);

  for (uint i = 0; i < num_queues; ++i) {
    ShenandoahObjToScanQueue* task_queue = new ShenandoahObjToScanQueue();
    task_queue->initialize();
    _task_queues->register_queue(i, task_queue);
  }
}

// Mark concurrent roots during concurrent phases
class ShenandoahMarkConcurrentRootsTask : public AbstractGangTask {
private:
  SuspendibleThreadSetJoiner _sts_joiner;
  ShenandoahConcurrentRootsIterator<true /* concurrent */, false /* single-threaded */> _itr;
  ShenandoahObjToScanQueueSet* const _queue_set;
  ReferenceProcessor* const          _rp;

public:
  ShenandoahMarkConcurrentRootsTask(ShenandoahObjToScanQueueSet* qs,
                                    ReferenceProcessor* rp,
                                    ShenandoahPhaseTimings::Phase phase);
  void work(uint worker_id);
};

ShenandoahMarkConcurrentRootsTask::ShenandoahMarkConcurrentRootsTask(ShenandoahObjToScanQueueSet* qs,
                                                                     ReferenceProcessor* rp,
                                                                     ShenandoahPhaseTimings::Phase phase) :
  AbstractGangTask("Shenandoah Concurrent Mark Task"),
  _itr(phase),
  _queue_set(qs),
  _rp(rp) {
  assert(!ShenandoahHeap::heap()->has_forwarded_objects(), "Not expected");
}

void ShenandoahMarkConcurrentRootsTask::work(uint worker_id) {
  ShenandoahConcurrentWorkerSession worker_session(worker_id);
  ShenandoahObjToScanQueue* q = _queue_set->queue(worker_id);
  ShenandoahMarkResolveRefsClosure cl(q, _rp);
  _itr.oops_do(&cl, worker_id);
}

void ShenandoahConcurrentMark::mark_from_roots() {
  WorkGang* workers = _heap->workers();
  uint nworkers = workers->active_workers();

  ReferenceProcessor* rp = NULL;
  if (_heap->process_references()) {
    rp = _heap->ref_processor();
    rp->set_active_mt_degree(nworkers);

    // enable ("weak") refs discovery
    rp->enable_discovery(true /*verify_no_refs*/);
    rp->setup_policy(_heap->soft_ref_policy()->should_clear_all_soft_refs());
  }

  shenandoah_assert_rp_isalive_not_installed();
  ShenandoahIsAliveSelector is_alive;
  ReferenceProcessorIsAliveMutator fix_isalive(_heap->ref_processor(), is_alive.is_alive_closure());

  task_queues()->reserve(nworkers);

  {
    ShenandoahGCPhase phase(ShenandoahPhaseTimings::conc_mark_roots);
    // Use a separate task to mark concurrent roots, since it may hold ClassLoaderData_lock and CodeCache_lock
    ShenandoahMarkConcurrentRootsTask task(task_queues(), rp, ShenandoahPhaseTimings::conc_mark_roots);
    workers->run_task(&task);
  }

  {
    TaskTerminator terminator(nworkers, task_queues());
    ShenandoahConcurrentMarkingTask task(this, &terminator);
    workers->run_task(&task);
  }

  assert(task_queues()->is_empty() || _heap->cancelled_gc(), "Should be empty when not cancelled");
}
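
// Finishes marking at a safepoint: scans leftover concurrent roots if the
// concurrent phase was bypassed (degenerated/full GC), drains the remaining
// SATB buffers and task queues, and finally processes weak references.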

void ShenandoahConcurrentMark::finish_mark_from_roots(bool full_gc) {
  assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Must be at a safepoint");

  uint nworkers = _heap->workers()->active_workers();

  {
    shenandoah_assert_rp_isalive_not_installed();
    ShenandoahIsAliveSelector is_alive;
    ReferenceProcessorIsAliveMutator fix_isalive(_heap->ref_processor(), is_alive.is_alive_closure());

    // Full GC does not execute a concurrent cycle, and a degenerated cycle may
    // bypass it. In those cases the concurrent roots might not have been scanned,
    // so scan them here. Ideally, this would be piggybacked on
    // ShenandoahFinalMarkingTask, but that makes time tracking very hard. Since
    // full GC and degenerated GC should be rare, use a separate task.
    if (_heap->is_degenerated_gc_in_progress() || _heap->is_full_gc_in_progress()) {
      ShenandoahPhaseTimings::Phase phase = _heap->is_full_gc_in_progress() ?
                                            ShenandoahPhaseTimings::full_gc_scan_conc_roots :
                                            ShenandoahPhaseTimings::degen_gc_scan_conc_roots;
      ShenandoahGCPhase gc_phase(phase);
      if (_heap->has_forwarded_objects()) {
        ShenandoahProcessConcurrentRootsTask<ShenandoahMarkResolveRefsClosure> task(this, phase);
        _heap->workers()->run_task(&task);
      } else {
        ShenandoahProcessConcurrentRootsTask<ShenandoahMarkRefsClosure> task(this, phase);
        _heap->workers()->run_task(&task);
      }
    }

    // Finally mark everything else we've got in our queues during the previous steps.
    // It does two different things for concurrent vs. mark-compact GC:
    //  - For concurrent GC, it starts with empty task queues, drains the remaining
    //    SATB buffers, and then completes the marking closure.
    //  - For mark-compact GC, it starts out with the task queues seeded by the initial
    //    root scan, and completes the closure, thus marking through all live objects.
    // The implementation is the same, so it is shared here.
    {
      ShenandoahGCPhase phase(full_gc ?
                              ShenandoahPhaseTimings::full_gc_mark_finish_queues :
                              ShenandoahPhaseTimings::finish_queues);
      task_queues()->reserve(nworkers);

      StrongRootsScope scope(nworkers);
      TaskTerminator terminator(nworkers, task_queues());
      ShenandoahFinalMarkingTask task(this, &terminator, ShenandoahStringDedup::is_enabled());
      _heap->workers()->run_task(&task);
    }

    assert(task_queues()->is_empty(), "Should be empty");
  }

  // When we're done marking everything, we process weak references.
  if (_heap->process_references()) {
    weak_refs_work(full_gc);
  }

  assert(task_queues()->is_empty(), "Should be empty");
  TASKQUEUE_STATS_ONLY(task_queues()->print_taskqueue_stats());
  TASKQUEUE_STATS_ONLY(task_queues()->reset_taskqueue_stats());
}

// Weak Reference Closures
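// The closures below provide the is_alive / keep_alive / complete_gc triple
// that ReferenceProcessor::process_discovered_references() expects.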
class ShenandoahCMDrainMarkingStackClosure: public VoidClosure {
  uint _worker_id;
  TaskTerminator* _terminator;
  bool _reset_terminator;

public:
  ShenandoahCMDrainMarkingStackClosure(uint worker_id, TaskTerminator* t, bool reset_terminator = false):
    _worker_id(worker_id),
    _terminator(t),
    _reset_terminator(reset_terminator) {
  }

  void do_void() {
    assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Must be at a safepoint");

    ShenandoahHeap* sh = ShenandoahHeap::heap();
    ShenandoahConcurrentMark* scm = sh->concurrent_mark();
    assert(sh->process_references(), "why else would we be here?");
    ReferenceProcessor* rp = sh->ref_processor();

    shenandoah_assert_rp_isalive_installed();

    scm->mark_loop(_worker_id, _terminator, rp,
                   false,   // not cancellable
                   false);  // do not do strdedup

    if (_reset_terminator) {
      _terminator->reset_for_reuse();
    }
  }
};

class ShenandoahCMKeepAliveClosure : public OopClosure {
private:
  ShenandoahObjToScanQueue* _queue;
  ShenandoahHeap* _heap;
  ShenandoahMarkingContext* const _mark_context;

  template <class T>
  inline void do_oop_work(T* p) {
    ShenandoahConcurrentMark::mark_through_ref<T, NONE, NO_DEDUP>(p, _heap, _queue, _mark_context);
  }

public:
  ShenandoahCMKeepAliveClosure(ShenandoahObjToScanQueue* q) :
    _queue(q),
    _heap(ShenandoahHeap::heap()),
    _mark_context(_heap->marking_context()) {}

  void do_oop(narrowOop* p) { do_oop_work(p); }
  void do_oop(oop* p)       { do_oop_work(p); }
};

class ShenandoahCMKeepAliveUpdateClosure : public OopClosure {
private:
  ShenandoahObjToScanQueue* _queue;
  ShenandoahHeap* _heap;
  ShenandoahMarkingContext* const _mark_context;

  template <class T>
  inline void do_oop_work(T* p) {
    ShenandoahConcurrentMark::mark_through_ref<T, SIMPLE, NO_DEDUP>(p, _heap, _queue, _mark_context);
  }

public:
  ShenandoahCMKeepAliveUpdateClosure(ShenandoahObjToScanQueue* q) :
    _queue(q),
    _heap(ShenandoahHeap::heap()),
    _mark_context(_heap->marking_context()) {}

  void do_oop(narrowOop* p) { do_oop_work(p); }
  void do_oop(oop* p)       { do_oop_work(p); }
};

class ShenandoahWeakUpdateClosure : public OopClosure {
private:
  ShenandoahHeap* const _heap;

  template <class T>
  inline void do_oop_work(T* p) {
    oop o = _heap->maybe_update_with_forwarded(p);
    shenandoah_assert_marked_except(p, o, o == NULL);
  }

public:
  ShenandoahWeakUpdateClosure() : _heap(ShenandoahHeap::heap()) {}

  void do_oop(narrowOop* p) { do_oop_work(p); }
  void do_oop(oop* p)       { do_oop_work(p); }
};
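
// Runs a ReferenceProcessor ProcessTask across the worker gang. Each worker
// picks the is-alive/keep-alive closure pair based on whether forwarded
// objects may be present.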

class ShenandoahRefProcTaskProxy : public AbstractGangTask {
private:
  AbstractRefProcTaskExecutor::ProcessTask& _proc_task;
  TaskTerminator* _terminator;

public:
  ShenandoahRefProcTaskProxy(AbstractRefProcTaskExecutor::ProcessTask& proc_task,
                             TaskTerminator* t) :
    AbstractGangTask("Process reference objects in parallel"),
    _proc_task(proc_task),
    _terminator(t) {
  }

  void work(uint worker_id) {
    ResourceMark rm;
    HandleMark hm;
    assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Must be at a safepoint");
    ShenandoahHeap* heap = ShenandoahHeap::heap();
    ShenandoahParallelWorkerSession worker_session(worker_id);
    ShenandoahCMDrainMarkingStackClosure complete_gc(worker_id, _terminator);
    if (heap->has_forwarded_objects()) {
      ShenandoahForwardedIsAliveClosure is_alive;
      ShenandoahCMKeepAliveUpdateClosure keep_alive(heap->concurrent_mark()->get_queue(worker_id));
      _proc_task.work(worker_id, is_alive, keep_alive, complete_gc);
    } else {
      ShenandoahIsAliveClosure is_alive;
      ShenandoahCMKeepAliveClosure keep_alive(heap->concurrent_mark()->get_queue(worker_id));
      _proc_task.work(worker_id, is_alive, keep_alive, complete_gc);
    }
  }
};

class ShenandoahRefProcTaskExecutor : public AbstractRefProcTaskExecutor {
private:
  WorkGang* _workers;

public:
  ShenandoahRefProcTaskExecutor(WorkGang* workers) :
    _workers(workers) {
  }

  // Executes a task using worker threads.
  void execute(ProcessTask& task, uint ergo_workers) {
    assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Must be at a safepoint");

    ShenandoahHeap* heap = ShenandoahHeap::heap();
    ShenandoahConcurrentMark* cm = heap->concurrent_mark();
    ShenandoahPushWorkerQueuesScope scope(_workers, cm->task_queues(),
                                          ergo_workers,
                                          /* do_check = */ false);
    uint nworkers = _workers->active_workers();
    cm->task_queues()->reserve(nworkers);
    TaskTerminator terminator(nworkers, cm->task_queues());
    ShenandoahRefProcTaskProxy proc_task_proxy(task, &terminator);
    _workers->run_task(&proc_task_proxy);
  }
};

void ShenandoahConcurrentMark::weak_refs_work(bool full_gc) {
  assert(_heap->process_references(), "sanity");

  ShenandoahPhaseTimings::Phase phase_root =
          full_gc ?
          ShenandoahPhaseTimings::full_gc_weakrefs :
          ShenandoahPhaseTimings::weakrefs;

  ShenandoahGCPhase phase(phase_root);

  ReferenceProcessor* rp = _heap->ref_processor();

  // NOTE: We cannot shortcut on has_discovered_references() here, because
  // we would then miss marking JNI weak refs; see the implementation in
  // ReferenceProcessor::process_discovered_references.
  weak_refs_work_doit(full_gc);

  rp->verify_no_references_recorded();
  assert(!rp->discovery_enabled(), "Post condition");
}

void ShenandoahConcurrentMark::weak_refs_work_doit(bool full_gc) {
  ReferenceProcessor* rp = _heap->ref_processor();

  ShenandoahPhaseTimings::Phase phase_process =
          full_gc ?
          ShenandoahPhaseTimings::full_gc_weakrefs_process :
          ShenandoahPhaseTimings::weakrefs_process;

  shenandoah_assert_rp_isalive_not_installed();
  ShenandoahIsAliveSelector is_alive;
  ReferenceProcessorIsAliveMutator fix_isalive(rp, is_alive.is_alive_closure());

  WorkGang* workers = _heap->workers();
  uint nworkers = workers->active_workers();

  rp->setup_policy(_heap->soft_ref_policy()->should_clear_all_soft_refs());
  rp->set_active_mt_degree(nworkers);

  assert(task_queues()->is_empty(), "Should be empty");

  // The complete_gc and keep_alive closures instantiated here are only needed
  // for the single-threaded path in RP. They share queue 0 for tracking work,
  // which simplifies the implementation. Since RP may decide to call complete_gc
  // several times, we need to be able to reuse the terminator.
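  // The multi-threaded path instead goes through ShenandoahRefProcTaskExecutor,
  // which sets up per-worker closures in ShenandoahRefProcTaskProxy::work().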
  uint serial_worker_id = 0;
  TaskTerminator terminator(1, task_queues());
  ShenandoahCMDrainMarkingStackClosure complete_gc(serial_worker_id, &terminator, /* reset_terminator = */ true);

  ShenandoahRefProcTaskExecutor executor(workers);

  ReferenceProcessorPhaseTimes pt(_heap->gc_timer(), rp->num_queues());

  {
    // Note: Do not emit a JFR event for this phase: ReferenceProcessor emits
    // two levels of JFR events itself, which could push us over the JFR event
    // nesting limit if a degenerated GC gets upgraded to a full GC.
    ShenandoahTimingsTracker phase_timing(phase_process);

    if (_heap->has_forwarded_objects()) {
      ShenandoahCMKeepAliveUpdateClosure keep_alive(get_queue(serial_worker_id));
      const ReferenceProcessorStats& stats =
        rp->process_discovered_references(is_alive.is_alive_closure(), &keep_alive,
                                          &complete_gc, &executor,
                                          &pt);
      _heap->tracer()->report_gc_reference_stats(stats);
    } else {
      ShenandoahCMKeepAliveClosure keep_alive(get_queue(serial_worker_id));
      const ReferenceProcessorStats& stats =
        rp->process_discovered_references(is_alive.is_alive_closure(), &keep_alive,
                                          &complete_gc, &executor,
                                          &pt);
      _heap->tracer()->report_gc_reference_stats(stats);
    }

    pt.print_all_references();

    assert(task_queues()->is_empty(), "Should be empty");
  }
}

class ShenandoahCancelledGCYieldClosure : public YieldClosure {
private:
  ShenandoahHeap* const _heap;
public:
  ShenandoahCancelledGCYieldClosure() : _heap(ShenandoahHeap::heap()) {}
  virtual bool should_return() { return _heap->cancelled_gc(); }
};

class ShenandoahPrecleanCompleteGCClosure : public VoidClosure {
public:
  void do_void() {
    ShenandoahHeap* sh = ShenandoahHeap::heap();
    ShenandoahConcurrentMark* scm = sh->concurrent_mark();
    assert(sh->process_references(), "why else would we be here?");
    TaskTerminator terminator(1, scm->task_queues());

    ReferenceProcessor* rp = sh->ref_processor();
    shenandoah_assert_rp_isalive_installed();

    scm->mark_loop(0, &terminator, rp,
                   false,   // not cancellable
                   false);  // do not do strdedup
  }
};

class ShenandoahPrecleanTask : public AbstractGangTask {
private:
  ReferenceProcessor* _rp;

public:
  ShenandoahPrecleanTask(ReferenceProcessor* rp) :
    AbstractGangTask("Precleaning task"),
    _rp(rp) {}

  void work(uint worker_id) {
    assert(worker_id == 0, "The code below is single-threaded, only one worker is expected");
    ShenandoahParallelWorkerSession worker_session(worker_id);

    ShenandoahHeap* sh = ShenandoahHeap::heap();
    assert(!sh->has_forwarded_objects(), "No forwarded objects expected here");

    ShenandoahObjToScanQueue* q = sh->concurrent_mark()->get_queue(worker_id);

    ShenandoahCancelledGCYieldClosure yield;
    ShenandoahPrecleanCompleteGCClosure complete_gc;

    ShenandoahIsAliveClosure is_alive;
    ShenandoahCMKeepAliveClosure keep_alive(q);
    ResourceMark rm;
    _rp->preclean_discovered_references(&is_alive, &keep_alive,
                                        &complete_gc, &yield,
                                        NULL);
  }
};

void ShenandoahConcurrentMark::preclean_weak_refs() {
  // Pre-cleaning weak references before diving into STW makes sense at the
  // end of concurrent mark. This filters out references whose referents are
  // already alive. Note that ReferenceProcessor already filters these out
  // during reference discovery, so the bulk of the work happens there. This
  // phase processes the leftovers that missed the initial filtering, i.e.
  // references whose referent was marked alive after the reference was
  // discovered by RP.

  assert(_heap->process_references(), "sanity");

  // Shortcut if no references were discovered to avoid winding up threads.
  ReferenceProcessor* rp = _heap->ref_processor();
  if (!rp->has_discovered_references()) {
    return;
  }

  assert(task_queues()->is_empty(), "Should be empty");

  ReferenceProcessorMTDiscoveryMutator fix_mt_discovery(rp, false);

  shenandoah_assert_rp_isalive_not_installed();
  ShenandoahIsAliveSelector is_alive;
  ReferenceProcessorIsAliveMutator fix_isalive(rp, is_alive.is_alive_closure());

  // Execute precleaning in the worker thread: it will give us GCLABs, String dedup
  // queues and other goodies. When upstream ReferenceProcessor starts supporting
  // parallel precleans, we can extend this to more threads.
  WorkGang* workers = _heap->workers();
  uint nworkers = workers->active_workers();
  assert(nworkers == 1, "This code uses only a single worker");
  task_queues()->reserve(nworkers);

  ShenandoahPrecleanTask task(rp);
  workers->run_task(&task);

  assert(task_queues()->is_empty(), "Should be empty");
}

void ShenandoahConcurrentMark::cancel() {
  // Clean up marking stacks.
  ShenandoahObjToScanQueueSet* queues = task_queues();
  queues->clear();

  // Cancel SATB buffers.
  ShenandoahBarrierSet::satb_mark_queue_set().abandon_partial_marking();
}

ShenandoahObjToScanQueue* ShenandoahConcurrentMark::get_queue(uint worker_id) {
  assert(task_queues()->get_reserved() > worker_id, "No reserved queue for worker id: %d", worker_id);
  return _task_queues->queue(worker_id);
}

template <bool CANCELLABLE>
void ShenandoahConcurrentMark::mark_loop_prework(uint w, TaskTerminator *t, ReferenceProcessor *rp,
                                                 bool strdedup) {
  ShenandoahObjToScanQueue* q = get_queue(w);

  ShenandoahLiveData* ld = _heap->get_liveness_cache(w);

  // TODO: We can clean this up if we figure out how to do templated oop closures that
  // play nice with specialized_oop_iterators.
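  // Dispatch over the three mode axes (class unloading, forwarded objects,
  // string dedup) to a statically specialized closure, so the hot marking
  // loop runs without per-reference mode checks.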
  if (_heap->unload_classes()) {
    if (_heap->has_forwarded_objects()) {
      if (strdedup) {
        ShenandoahMarkUpdateRefsMetadataDedupClosure cl(q, rp);
        mark_loop_work<ShenandoahMarkUpdateRefsMetadataDedupClosure, CANCELLABLE>(&cl, ld, w, t);
      } else {
        ShenandoahMarkUpdateRefsMetadataClosure cl(q, rp);
        mark_loop_work<ShenandoahMarkUpdateRefsMetadataClosure, CANCELLABLE>(&cl, ld, w, t);
      }
    } else {
      if (strdedup) {
        ShenandoahMarkRefsMetadataDedupClosure cl(q, rp);
        mark_loop_work<ShenandoahMarkRefsMetadataDedupClosure, CANCELLABLE>(&cl, ld, w, t);
      } else {
        ShenandoahMarkRefsMetadataClosure cl(q, rp);
        mark_loop_work<ShenandoahMarkRefsMetadataClosure, CANCELLABLE>(&cl, ld, w, t);
      }
    }
  } else {
    if (_heap->has_forwarded_objects()) {
      if (strdedup) {
        ShenandoahMarkUpdateRefsDedupClosure cl(q, rp);
        mark_loop_work<ShenandoahMarkUpdateRefsDedupClosure, CANCELLABLE>(&cl, ld, w, t);
      } else {
        ShenandoahMarkUpdateRefsClosure cl(q, rp);
        mark_loop_work<ShenandoahMarkUpdateRefsClosure, CANCELLABLE>(&cl, ld, w, t);
      }
    } else {
      if (strdedup) {
        ShenandoahMarkRefsDedupClosure cl(q, rp);
        mark_loop_work<ShenandoahMarkRefsDedupClosure, CANCELLABLE>(&cl, ld, w, t);
      } else {
        ShenandoahMarkRefsClosure cl(q, rp);
        mark_loop_work<ShenandoahMarkRefsClosure, CANCELLABLE>(&cl, ld, w, t);
      }
    }
  }

  _heap->flush_liveness_cache(w);
}

template <class T, bool CANCELLABLE>
void ShenandoahConcurrentMark::mark_loop_work(T* cl, ShenandoahLiveData* live_data, uint worker_id, TaskTerminator *terminator) {
  uintx stride = ShenandoahMarkLoopStride;

  ShenandoahHeap* heap = ShenandoahHeap::heap();
  ShenandoahObjToScanQueueSet* queues = task_queues();
  ShenandoahObjToScanQueue* q;
  ShenandoahMarkTask t;

  /*
   * Process outstanding queues, if any.
   *
   * There can be more queues than workers. To deal with the imbalance, we claim
   * extra queues first. Since marking can push new tasks into the queue associated
   * with this worker id, we come back to process this queue in the normal loop.
   */
  assert(queues->get_reserved() == heap->workers()->active_workers(),
         "Need to reserve proper number of queues: reserved: %u, active: %u", queues->get_reserved(), heap->workers()->active_workers());

  q = queues->claim_next();
  while (q != NULL) {
    if (CANCELLABLE && heap->check_cancelled_gc_and_yield()) {
      return;
    }

    for (uint i = 0; i < stride; i++) {
      if (q->pop(t)) {
        do_task<T>(q, cl, live_data, &t);
      } else {
        assert(q->is_empty(), "Must be empty");
        q = queues->claim_next();
        break;
      }
    }
  }

  q = get_queue(worker_id);

  ShenandoahSATBBufferClosure drain_satb(q);
  SATBMarkQueueSet& satb_mq_set = ShenandoahBarrierSet::satb_mark_queue_set();

  /*
   * Normal marking loop:
   */
  while (true) {
    if (CANCELLABLE && heap->check_cancelled_gc_and_yield()) {
      return;
    }

    while (satb_mq_set.completed_buffers_num() > 0) {
      satb_mq_set.apply_closure_to_completed_buffer(&drain_satb);
    }

    uint work = 0;
    for (uint i = 0; i < stride; i++) {
      if (q->pop(t) ||
          queues->steal(worker_id, t)) {
        do_task<T>(q, cl, live_data, &t);
        work++;
      } else {
        break;
      }
    }

    if (work == 0) {
      // No work encountered in current stride, try to terminate.
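      // ShenandoahTerminatorTerminator also watches for GC cancellation while
      // workers wait inside offer_termination().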
      // Need to leave the STS here otherwise it might block safepoints.
      ShenandoahSuspendibleThreadSetLeaver stsl(CANCELLABLE && ShenandoahSuspendibleWorkers);
      ShenandoahTerminatorTerminator tt(heap);
      if (terminator->offer_termination(&tt)) return;
    }
  }
}