rev 59458 : 8245827: Shenandoah: Cleanup Shenandoah code root iterators and root scanner
rev 59459 : 8245880: Shenandoah: should not mark all code roots if class unloading is enabled

/*
 * Copyright (c) 2013, 2020, Red Hat, Inc. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"

#include "classfile/symbolTable.hpp"
#include "classfile/systemDictionary.hpp"
#include "code/codeCache.hpp"

#include "gc/shared/weakProcessor.inline.hpp"
#include "gc/shared/gcTimer.hpp"
#include "gc/shared/gcTrace.hpp"
#include "gc/shared/referenceProcessor.hpp"
#include "gc/shared/referenceProcessorPhaseTimes.hpp"
#include "gc/shared/strongRootsScope.hpp"

#include "gc/shenandoah/shenandoahBarrierSet.inline.hpp"
#include "gc/shenandoah/shenandoahClosures.inline.hpp"
#include "gc/shenandoah/shenandoahConcurrentMark.inline.hpp"
#include "gc/shenandoah/shenandoahMarkCompact.hpp"
#include "gc/shenandoah/shenandoahHeap.inline.hpp"
#include "gc/shenandoah/shenandoahRootProcessor.inline.hpp"
#include "gc/shenandoah/shenandoahOopClosures.inline.hpp"
#include "gc/shenandoah/shenandoahPhaseTimings.hpp"
#include "gc/shenandoah/shenandoahTaskqueue.inline.hpp"
#include "gc/shenandoah/shenandoahUtils.hpp"

#include "memory/iterator.inline.hpp"
#include "memory/metaspace.hpp"
#include "memory/resourceArea.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/handles.inline.hpp"

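// Pushes root oops discovered during init-mark onto the worker's task queue,
// resolving or ignoring forwarding pointers according to UPDATE_REFS.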
template<UpdateRefsMode UPDATE_REFS>
class ShenandoahInitMarkRootsClosure : public OopClosure {
private:
  ShenandoahObjToScanQueue* _queue;
  ShenandoahHeap* _heap;
  ShenandoahMarkingContext* const _mark_context;

  template <class T>
  inline void do_oop_work(T* p) {
    ShenandoahConcurrentMark::mark_through_ref<T, UPDATE_REFS, NO_DEDUP>(p, _heap, _queue, _mark_context);
  }

public:
  ShenandoahInitMarkRootsClosure(ShenandoahObjToScanQueue* q) :
    _queue(q),
    _heap(ShenandoahHeap::heap()),
    _mark_context(_heap->marking_context()) {}

  void do_oop(narrowOop* p) { do_oop_work(p); }
  void do_oop(oop* p)       { do_oop_work(p); }
};

ShenandoahMarkRefsSuperClosure::ShenandoahMarkRefsSuperClosure(ShenandoahObjToScanQueue* q, ReferenceProcessor* rp) :
  MetadataVisitingOopIterateClosure(rp),
  _queue(q),
  _heap(ShenandoahHeap::heap()),
  _mark_context(_heap->marking_context())
{ }

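// Scans the GC roots at the init-mark safepoint, seeding the task queues
// for the subsequent concurrent marking phase.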
template<UpdateRefsMode UPDATE_REFS>
class ShenandoahInitMarkRootsTask : public AbstractGangTask {
private:
  ShenandoahRootScanner* _rp;
public:
  ShenandoahInitMarkRootsTask(ShenandoahRootScanner* rp) :
    AbstractGangTask("Shenandoah init mark roots task"),
    _rp(rp) {
  }

  void work(uint worker_id) {
    assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Must be at a safepoint");
    ShenandoahParallelWorkerSession worker_session(worker_id);

    ShenandoahHeap* heap = ShenandoahHeap::heap();
    ShenandoahObjToScanQueueSet* queues = heap->concurrent_mark()->task_queues();
    assert(queues->get_reserved() > worker_id, "Queue has not been reserved for worker id: %d", worker_id);

    ShenandoahObjToScanQueue* q = queues->queue(worker_id);

    ShenandoahInitMarkRootsClosure<UPDATE_REFS> mark_cl(q);
    do_work(heap, &mark_cl, worker_id);
  }

private:
  void do_work(ShenandoahHeap* heap, OopClosure* oops, uint worker_id) {
    // The rationale for selecting the roots to scan is as follows:
    //   a. With unload_classes = true, we only want to scan the actual strong roots from the
    //      code cache. This allows us to identify the dead classes, unload them, *and*
    //      invalidate the relevant code cache blobs. This can only be done together with
    //      class unloading.
    //   b. With unload_classes = false, we have to nominally retain all the references from the
    //      code cache, because there may be embedded classes/oops in the generated code,
    //      which we will never visit during mark. Without code cache invalidation, as in (a),
    //      we risk executing that code cache blob, and crashing.
    if (heap->unload_classes()) {
      _rp->strong_roots_do(worker_id, oops);
    } else {
      _rp->roots_do(worker_id, oops);
    }
  }
};

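// Updates all root references to the new locations of evacuated objects.
// For degenerated GC, roots are filtered through an is-alive check first,
// since some roots may still refer to objects that died in the aborted cycle.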
class ShenandoahUpdateRootsTask : public AbstractGangTask {
private:
  ShenandoahRootUpdater*  _root_updater;
  bool                    _check_alive;
public:
  ShenandoahUpdateRootsTask(ShenandoahRootUpdater* root_updater, bool check_alive) :
    AbstractGangTask("Shenandoah update roots task"),
    _root_updater(root_updater),
    _check_alive(check_alive) {
  }

  void work(uint worker_id) {
    assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Must be at a safepoint");
    ShenandoahParallelWorkerSession worker_session(worker_id);

    ShenandoahHeap* heap = ShenandoahHeap::heap();
    ShenandoahUpdateRefsClosure cl;
    if (_check_alive) {
      ShenandoahForwardedIsAliveClosure is_alive;
      _root_updater->roots_do<ShenandoahForwardedIsAliveClosure, ShenandoahUpdateRefsClosure>(worker_id, &is_alive, &cl);
    } else {
      AlwaysTrueClosure always_true;
      _root_updater->roots_do<AlwaysTrueClosure, ShenandoahUpdateRefsClosure>(worker_id, &always_true, &cl);
    }
  }
};

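// Drives the concurrent marking phase proper: each worker drains and steals
// from the task queues, optionally scanning code cache roots first when
// classes are not being unloaded.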
class ShenandoahConcurrentMarkingTask : public AbstractGangTask {
private:
  ShenandoahConcurrentMark* _cm;
  TaskTerminator* _terminator;

public:
  ShenandoahConcurrentMarkingTask(ShenandoahConcurrentMark* cm, TaskTerminator* terminator) :
    AbstractGangTask("Root Region Scan"), _cm(cm), _terminator(terminator) {
  }

  void work(uint worker_id) {
    ShenandoahHeap* heap = ShenandoahHeap::heap();
    ShenandoahConcurrentWorkerSession worker_session(worker_id);
    ShenandoahSuspendibleThreadSetJoiner stsj(ShenandoahSuspendibleWorkers);
    ShenandoahObjToScanQueue* q = _cm->get_queue(worker_id);
    ReferenceProcessor* rp;
    if (heap->process_references()) {
      rp = heap->ref_processor();
      shenandoah_assert_rp_isalive_installed();
    } else {
      rp = NULL;
    }

    if (!heap->unload_classes()) {
      _cm->concurrent_scan_code_roots(worker_id, rp);
    }

    _cm->mark_loop(worker_id, _terminator, rp,
                   true, // cancellable
                   ShenandoahStringDedup::is_enabled()); // perform string dedup
  }
};

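// Drains per-thread SATB buffers at the final-mark safepoint, and optionally
// visits thread stacks and/or nmethods, depending on which closures are supplied.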
class ShenandoahSATBAndRemarkCodeRootsThreadsClosure : public ThreadClosure {
private:
  ShenandoahSATBBufferClosure* _satb_cl;
  OopClosure*            const _cl;
  MarkingCodeBlobClosure*      _code_cl;
  uintx _claim_token;

public:
  ShenandoahSATBAndRemarkCodeRootsThreadsClosure(ShenandoahSATBBufferClosure* satb_cl, OopClosure* cl, MarkingCodeBlobClosure* code_cl) :
    _satb_cl(satb_cl), _cl(cl), _code_cl(code_cl),
    _claim_token(Threads::thread_claim_token()) {}

  void do_thread(Thread* thread) {
    if (thread->claim_threads_do(true, _claim_token)) {
      ShenandoahThreadLocalData::satb_mark_queue(thread).apply_closure_and_empty(_satb_cl);
      if (thread->is_Java_thread()) {
        if (_cl != NULL) {
          ResourceMark rm;
          thread->oops_do(_cl, _code_cl);
        } else if (_code_cl != NULL) {
          // In theory it should not be necessary to explicitly walk the nmethods to find roots
          // for concurrent marking; however, oops reachable from nmethods have very complex
          // lifecycles:
          // * Alive if on the stack of an executing method
          // * Weakly reachable otherwise
          // Some objects reachable from nmethods, such as the class loader (or klass_holder)
          // of the receiver, should be live by the SATB invariant, but other oops recorded in
          // nmethods may behave differently.
          JavaThread* jt = (JavaThread*)thread;
          jt->nmethods_do(_code_cl);
        }
      }
    }
  }
};

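// Completes marking at the final-mark safepoint: drains leftover SATB buffers,
// remarks thread and code roots as needed, and then finishes the marking loop.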
class ShenandoahFinalMarkingTask : public AbstractGangTask {
private:
  ShenandoahConcurrentMark* _cm;
  TaskTerminator*           _terminator;
  bool _dedup_string;

public:
  ShenandoahFinalMarkingTask(ShenandoahConcurrentMark* cm, TaskTerminator* terminator, bool dedup_string) :
    AbstractGangTask("Shenandoah Final Marking"), _cm(cm), _terminator(terminator), _dedup_string(dedup_string) {
  }

  void work(uint worker_id) {
    ShenandoahHeap* heap = ShenandoahHeap::heap();

    ShenandoahParallelWorkerSession worker_session(worker_id);
    ReferenceProcessor* rp;
    if (heap->process_references()) {
      rp = heap->ref_processor();
      shenandoah_assert_rp_isalive_installed();
    } else {
      rp = NULL;
    }

    // First drain remaining SATB buffers.
    // Note that this is not strictly necessary for mark-compact. But since it
    // requires a StrongRootsScope around the task, we need to claim the threads,
    // and performance-wise it does not really matter. Adds about 1ms to full GC.
    {
      ShenandoahObjToScanQueue* q = _cm->get_queue(worker_id);

      ShenandoahSATBBufferClosure cl(q);
      SATBMarkQueueSet& satb_mq_set = ShenandoahBarrierSet::satb_mark_queue_set();
      while (satb_mq_set.apply_closure_to_completed_buffer(&cl));
      bool do_nmethods = heap->unload_classes() && !ShenandoahConcurrentRoots::can_do_concurrent_class_unloading();
      if (heap->has_forwarded_objects()) {
        ShenandoahMarkResolveRefsClosure resolve_mark_cl(q, rp);
        MarkingCodeBlobClosure blobsCl(&resolve_mark_cl, !CodeBlobToOopClosure::FixRelocations);
        ShenandoahSATBAndRemarkCodeRootsThreadsClosure tc(&cl,
                                                          ShenandoahStoreValEnqueueBarrier ? &resolve_mark_cl : NULL,
                                                          do_nmethods ? &blobsCl : NULL);
        Threads::threads_do(&tc);
      } else {
        ShenandoahMarkRefsClosure mark_cl(q, rp);
        MarkingCodeBlobClosure blobsCl(&mark_cl, !CodeBlobToOopClosure::FixRelocations);
        ShenandoahSATBAndRemarkCodeRootsThreadsClosure tc(&cl,
                                                          ShenandoahStoreValEnqueueBarrier ? &mark_cl : NULL,
                                                          do_nmethods ? &blobsCl : NULL);
        Threads::threads_do(&tc);
      }
    }

    if (heap->is_degenerated_gc_in_progress() &&
        !heap->unload_classes()) {
      // A degenerated cycle may bypass the concurrent cycle, so code roots might
      // not have been scanned. Make sure they are scanned here.
      _cm->concurrent_scan_code_roots(worker_id, rp);
    }

    _cm->mark_loop(worker_id, _terminator, rp,
                   false, // not cancellable
                   _dedup_string);

    assert(_cm->task_queues()->is_empty(), "Should be empty");
  }
};

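// Scans GC roots at the init-mark safepoint, selecting the closure flavor based
// on whether forwarded objects may still be present in the heap.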
void ShenandoahConcurrentMark::mark_roots(ShenandoahPhaseTimings::Phase root_phase) {
  assert(Thread::current()->is_VM_thread(), "can only do this in VMThread");
  assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Must be at a safepoint");

  ShenandoahHeap* heap = ShenandoahHeap::heap();

  ShenandoahGCPhase phase(root_phase);

  WorkGang* workers = heap->workers();
  uint nworkers = workers->active_workers();

  assert(nworkers <= task_queues()->size(), "Cannot use more workers than there are task queues");

  ShenandoahRootScanner root_proc(nworkers, root_phase);
  TASKQUEUE_STATS_ONLY(task_queues()->reset_taskqueue_stats());
  task_queues()->reserve(nworkers);

  if (heap->has_forwarded_objects()) {
    ShenandoahInitMarkRootsTask<RESOLVE> mark_roots(&root_proc);
    workers->run_task(&mark_roots);
  } else {
    // The heap is stable: no references need updating, so we can save time by
    // not walking through forwarding pointers.
    ShenandoahInitMarkRootsTask<NONE> mark_roots(&root_proc);
    workers->run_task(&mark_roots);
  }

  clear_claim_codecache();
}

void ShenandoahConcurrentMark::update_roots(ShenandoahPhaseTimings::Phase root_phase) {
  assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Must be at a safepoint");
  assert(root_phase == ShenandoahPhaseTimings::full_gc_update_roots ||
         root_phase == ShenandoahPhaseTimings::degen_gc_update_roots,
         "Only for these phases");

  ShenandoahGCPhase phase(root_phase);

  bool check_alive = root_phase == ShenandoahPhaseTimings::degen_gc_update_roots;

#if COMPILER2_OR_JVMCI
  DerivedPointerTable::clear();
#endif

  uint nworkers = _heap->workers()->active_workers();

  ShenandoahRootUpdater root_updater(nworkers, root_phase);
  ShenandoahUpdateRootsTask update_roots(&root_updater, check_alive);
  _heap->workers()->run_task(&update_roots);

#if COMPILER2_OR_JVMCI
  DerivedPointerTable::update_pointers();
#endif
}

class ShenandoahUpdateThreadRootsTask : public AbstractGangTask {
private:
  ShenandoahThreadRoots           _thread_roots;
  ShenandoahPhaseTimings::Phase   _phase;
  ShenandoahGCWorkerPhase         _worker_phase;
public:
  ShenandoahUpdateThreadRootsTask(bool is_par, ShenandoahPhaseTimings::Phase phase) :
    AbstractGangTask("Shenandoah Update Thread Roots"),
    _thread_roots(phase, is_par),
    _phase(phase),
    _worker_phase(phase) {}

  void work(uint worker_id) {
    ShenandoahParallelWorkerSession worker_session(worker_id);
    ShenandoahUpdateRefsClosure cl;
    _thread_roots.oops_do(&cl, NULL, worker_id);
  }
};

void ShenandoahConcurrentMark::update_thread_roots(ShenandoahPhaseTimings::Phase root_phase) {
  assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Must be at a safepoint");

  ShenandoahGCPhase phase(root_phase);

#if COMPILER2_OR_JVMCI
  DerivedPointerTable::clear();
#endif

  WorkGang* workers = _heap->workers();
  bool is_par = workers->active_workers() > 1;

  ShenandoahUpdateThreadRootsTask task(is_par, root_phase);
  workers->run_task(&task);

#if COMPILER2_OR_JVMCI
  DerivedPointerTable::update_pointers();
#endif
}

void ShenandoahConcurrentMark::initialize(uint workers) {
  _heap = ShenandoahHeap::heap();

  uint num_queues = MAX2(workers, 1U);

  _task_queues = new ShenandoahObjToScanQueueSet((int) num_queues);

  for (uint i = 0; i < num_queues; ++i) {
    ShenandoahObjToScanQueue* task_queue = new ShenandoahObjToScanQueue();
    task_queue->initialize();
    _task_queues->register_queue(i, task_queue);
  }
}

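// Scans code cache roots at most once per marking cycle: the first worker to
// claim the code cache does the scan, everyone else skips. This is only needed
// when classes are not unloaded; with class unloading, only the strong code
// roots are scanned, via the root scanner.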
void ShenandoahConcurrentMark::concurrent_scan_code_roots(uint worker_id, ReferenceProcessor* rp) {
  if (claim_codecache()) {
    ShenandoahObjToScanQueue* q = task_queues()->queue(worker_id);
    if (!_heap->unload_classes()) {
      MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
      // TODO: We cannot honor StringDeduplication here, due to lock ranking
      // inversion. So, we may miss some deduplication candidates.
      if (_heap->has_forwarded_objects()) {
        ShenandoahMarkResolveRefsClosure cl(q, rp);
        CodeBlobToOopClosure blobs(&cl, !CodeBlobToOopClosure::FixRelocations);
        CodeCache::blobs_do(&blobs);
      } else {
        ShenandoahMarkRefsClosure cl(q, rp);
        CodeBlobToOopClosure blobs(&cl, !CodeBlobToOopClosure::FixRelocations);
        CodeCache::blobs_do(&blobs);
      }
    }
  }
}

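// Runs the concurrent marking phase: sets up reference discovery if needed,
// then lets the workers drain the task queues until termination or cancellation.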
void ShenandoahConcurrentMark::mark_from_roots() {
  WorkGang* workers = _heap->workers();
  uint nworkers = workers->active_workers();

  if (_heap->process_references()) {
    ReferenceProcessor* rp = _heap->ref_processor();
    rp->set_active_mt_degree(nworkers);

    // Enable ("weak") refs discovery.
    rp->enable_discovery(true /*verify_no_refs*/);
    rp->setup_policy(_heap->soft_ref_policy()->should_clear_all_soft_refs());
  }

  shenandoah_assert_rp_isalive_not_installed();
  ShenandoahIsAliveSelector is_alive;
  ReferenceProcessorIsAliveMutator fix_isalive(_heap->ref_processor(), is_alive.is_alive_closure());

  task_queues()->reserve(nworkers);

  {
    TaskTerminator terminator(nworkers, task_queues());
    ShenandoahConcurrentMarkingTask task(this, &terminator);
    workers->run_task(&task);
  }

  assert(task_queues()->is_empty() || _heap->cancelled_gc(), "Should be empty when not cancelled");
}

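// Completes marking at the final-mark safepoint and then processes weak
// references. Shared between the concurrent and mark-compact paths.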
void ShenandoahConcurrentMark::finish_mark_from_roots(bool full_gc) {
  assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Must be at a safepoint");

  uint nworkers = _heap->workers()->active_workers();

  // Finally mark everything else we've got in our queues during the previous steps.
  // This step behaves differently for concurrent vs. mark-compact GC:
  // - For concurrent GC, it starts with empty task queues, drains the remaining
  //   SATB buffers, and then completes the marking closure.
  // - For mark-compact GC, it starts out with the task queues seeded by initial
  //   root scan, and completes the closure, thus marking through all live objects.
  // The implementation is the same, so it's shared here.
  {
    ShenandoahGCPhase phase(full_gc ?
                            ShenandoahPhaseTimings::full_gc_mark_finish_queues :
                            ShenandoahPhaseTimings::finish_queues);
    task_queues()->reserve(nworkers);

    shenandoah_assert_rp_isalive_not_installed();
    ShenandoahIsAliveSelector is_alive;
    ReferenceProcessorIsAliveMutator fix_isalive(_heap->ref_processor(), is_alive.is_alive_closure());

    StrongRootsScope scope(nworkers);
    TaskTerminator terminator(nworkers, task_queues());
    ShenandoahFinalMarkingTask task(this, &terminator, ShenandoahStringDedup::is_enabled());
    _heap->workers()->run_task(&task);
  }

  assert(task_queues()->is_empty(), "Should be empty");

  // When we're done marking everything, we process weak references.
  if (_heap->process_references()) {
    weak_refs_work(full_gc);
  }

  assert(task_queues()->is_empty(), "Should be empty");
  TASKQUEUE_STATS_ONLY(task_queues()->print_taskqueue_stats());
  TASKQUEUE_STATS_ONLY(task_queues()->reset_taskqueue_stats());
}

// Weak Reference Closures
class ShenandoahCMDrainMarkingStackClosure: public VoidClosure {
  uint _worker_id;
  TaskTerminator* _terminator;
  bool _reset_terminator;

public:
  ShenandoahCMDrainMarkingStackClosure(uint worker_id, TaskTerminator* t, bool reset_terminator = false):
    _worker_id(worker_id),
    _terminator(t),
    _reset_terminator(reset_terminator) {
  }

  void do_void() {
    assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Must be at a safepoint");

    ShenandoahHeap* sh = ShenandoahHeap::heap();
    ShenandoahConcurrentMark* scm = sh->concurrent_mark();
    assert(sh->process_references(), "why else would we be here?");
    ReferenceProcessor* rp = sh->ref_processor();

    shenandoah_assert_rp_isalive_installed();

    scm->mark_loop(_worker_id, _terminator, rp,
                   false,   // not cancellable
                   false);  // do not do strdedup

    if (_reset_terminator) {
      _terminator->reset_for_reuse();
    }
  }
};

class ShenandoahCMKeepAliveClosure : public OopClosure {
private:
  ShenandoahObjToScanQueue* _queue;
  ShenandoahHeap* _heap;
  ShenandoahMarkingContext* const _mark_context;

  template <class T>
  inline void do_oop_work(T* p) {
    ShenandoahConcurrentMark::mark_through_ref<T, NONE, NO_DEDUP>(p, _heap, _queue, _mark_context);
  }

public:
  ShenandoahCMKeepAliveClosure(ShenandoahObjToScanQueue* q) :
    _queue(q),
    _heap(ShenandoahHeap::heap()),
    _mark_context(_heap->marking_context()) {}

  void do_oop(narrowOop* p) { do_oop_work(p); }
  void do_oop(oop* p)       { do_oop_work(p); }
};

class ShenandoahCMKeepAliveUpdateClosure : public OopClosure {
private:
  ShenandoahObjToScanQueue* _queue;
  ShenandoahHeap* _heap;
  ShenandoahMarkingContext* const _mark_context;

  template <class T>
  inline void do_oop_work(T* p) {
    ShenandoahConcurrentMark::mark_through_ref<T, SIMPLE, NO_DEDUP>(p, _heap, _queue, _mark_context);
  }

public:
  ShenandoahCMKeepAliveUpdateClosure(ShenandoahObjToScanQueue* q) :
    _queue(q),
    _heap(ShenandoahHeap::heap()),
    _mark_context(_heap->marking_context()) {}

  void do_oop(narrowOop* p) { do_oop_work(p); }
  void do_oop(oop* p)       { do_oop_work(p); }
};

class ShenandoahWeakUpdateClosure : public OopClosure {
private:
  ShenandoahHeap* const _heap;

  template <class T>
  inline void do_oop_work(T* p) {
    oop o = _heap->maybe_update_with_forwarded(p);
    shenandoah_assert_marked_except(p, o, o == NULL);
  }

public:
  ShenandoahWeakUpdateClosure() : _heap(ShenandoahHeap::heap()) {}

  void do_oop(narrowOop* p) { do_oop_work(p); }
  void do_oop(oop* p)       { do_oop_work(p); }
};

class ShenandoahRefProcTaskProxy : public AbstractGangTask {
private:
  AbstractRefProcTaskExecutor::ProcessTask& _proc_task;
  TaskTerminator* _terminator;

public:
  ShenandoahRefProcTaskProxy(AbstractRefProcTaskExecutor::ProcessTask& proc_task,
                             TaskTerminator* t) :
    AbstractGangTask("Process reference objects in parallel"),
    _proc_task(proc_task),
    _terminator(t) {
  }

  void work(uint worker_id) {
    ResourceMark rm;
    HandleMark hm;
    assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Must be at a safepoint");
    ShenandoahHeap* heap = ShenandoahHeap::heap();
    ShenandoahParallelWorkerSession worker_session(worker_id);
    ShenandoahCMDrainMarkingStackClosure complete_gc(worker_id, _terminator);
    if (heap->has_forwarded_objects()) {
      ShenandoahForwardedIsAliveClosure is_alive;
      ShenandoahCMKeepAliveUpdateClosure keep_alive(heap->concurrent_mark()->get_queue(worker_id));
      _proc_task.work(worker_id, is_alive, keep_alive, complete_gc);
    } else {
      ShenandoahIsAliveClosure is_alive;
      ShenandoahCMKeepAliveClosure keep_alive(heap->concurrent_mark()->get_queue(worker_id));
      _proc_task.work(worker_id, is_alive, keep_alive, complete_gc);
    }
  }
};

class ShenandoahRefProcTaskExecutor : public AbstractRefProcTaskExecutor {
private:
  WorkGang* _workers;

public:
  ShenandoahRefProcTaskExecutor(WorkGang* workers) :
    _workers(workers) {
  }

  // Executes a task using worker threads.
  void execute(ProcessTask& task, uint ergo_workers) {
    assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Must be at a safepoint");

    ShenandoahHeap* heap = ShenandoahHeap::heap();
    ShenandoahConcurrentMark* cm = heap->concurrent_mark();
    ShenandoahPushWorkerQueuesScope scope(_workers, cm->task_queues(),
                                          ergo_workers,
                                          /* do_check = */ false);
    uint nworkers = _workers->active_workers();
    cm->task_queues()->reserve(nworkers);
    TaskTerminator terminator(nworkers, cm->task_queues());
    ShenandoahRefProcTaskProxy proc_task_proxy(task, &terminator);
    _workers->run_task(&proc_task_proxy);
  }
};

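// Entry point for weak reference processing after marking is complete.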
void ShenandoahConcurrentMark::weak_refs_work(bool full_gc) {
  assert(_heap->process_references(), "sanity");

  ShenandoahPhaseTimings::Phase phase_root =
          full_gc ?
          ShenandoahPhaseTimings::full_gc_weakrefs :
          ShenandoahPhaseTimings::weakrefs;

  ShenandoahGCPhase phase(phase_root);

  ReferenceProcessor* rp = _heap->ref_processor();

  // NOTE: We cannot shortcut on has_discovered_references() here, because
  // we would then miss marking JNI Weak refs; see the implementation in
  // ReferenceProcessor::process_discovered_references.
  weak_refs_work_doit(full_gc);

  rp->verify_no_references_recorded();
  assert(!rp->discovery_enabled(), "Post condition");
}

void ShenandoahConcurrentMark::weak_refs_work_doit(bool full_gc) {
  ReferenceProcessor* rp = _heap->ref_processor();

  ShenandoahPhaseTimings::Phase phase_process =
          full_gc ?
          ShenandoahPhaseTimings::full_gc_weakrefs_process :
          ShenandoahPhaseTimings::weakrefs_process;

  shenandoah_assert_rp_isalive_not_installed();
  ShenandoahIsAliveSelector is_alive;
  ReferenceProcessorIsAliveMutator fix_isalive(rp, is_alive.is_alive_closure());

  WorkGang* workers = _heap->workers();
  uint nworkers = workers->active_workers();

  rp->setup_policy(_heap->soft_ref_policy()->should_clear_all_soft_refs());
  rp->set_active_mt_degree(nworkers);

  assert(task_queues()->is_empty(), "Should be empty");

  // The complete_gc and keep_alive closures instantiated here are only needed
  // for the single-threaded path in RP. They share queue 0 for tracking work,
  // which simplifies implementation. Since RP may decide to call complete_gc
  // several times, we need to be able to reuse the terminator.
  uint serial_worker_id = 0;
  TaskTerminator terminator(1, task_queues());
  ShenandoahCMDrainMarkingStackClosure complete_gc(serial_worker_id, &terminator, /* reset_terminator = */ true);

  ShenandoahRefProcTaskExecutor executor(workers);

  ReferenceProcessorPhaseTimes pt(_heap->gc_timer(), rp->num_queues());

  {
    // Note: Do not emit a JFR event for this phase, to avoid overflowing the
    // phase nesting level. The Reference Processor emits two levels of JFR
    // events, which can push us over the JFR event nesting limit when a
    // degenerated GC gets upgraded to a full GC.
    ShenandoahTimingsTracker phase_timing(phase_process);

    if (_heap->has_forwarded_objects()) {
      ShenandoahCMKeepAliveUpdateClosure keep_alive(get_queue(serial_worker_id));
      const ReferenceProcessorStats& stats =
        rp->process_discovered_references(is_alive.is_alive_closure(), &keep_alive,
                                          &complete_gc, &executor,
                                          &pt);
      _heap->tracer()->report_gc_reference_stats(stats);
    } else {
      ShenandoahCMKeepAliveClosure keep_alive(get_queue(serial_worker_id));
      const ReferenceProcessorStats& stats =
        rp->process_discovered_references(is_alive.is_alive_closure(), &keep_alive,
                                          &complete_gc, &executor,
                                          &pt);
      _heap->tracer()->report_gc_reference_stats(stats);
    }

    pt.print_all_references();

    assert(task_queues()->is_empty(), "Should be empty");
  }
}

class ShenandoahCancelledGCYieldClosure : public YieldClosure {
private:
  ShenandoahHeap* const _heap;
public:
  ShenandoahCancelledGCYieldClosure() : _heap(ShenandoahHeap::heap()) {}
  virtual bool should_return() { return _heap->cancelled_gc(); }
};

class ShenandoahPrecleanCompleteGCClosure : public VoidClosure {
public:
  void do_void() {
    ShenandoahHeap* sh = ShenandoahHeap::heap();
    ShenandoahConcurrentMark* scm = sh->concurrent_mark();
    assert(sh->process_references(), "why else would we be here?");
    TaskTerminator terminator(1, scm->task_queues());

    ReferenceProcessor* rp = sh->ref_processor();
    shenandoah_assert_rp_isalive_installed();

    scm->mark_loop(0, &terminator, rp,
                   false, // not cancellable
                   false); // do not do strdedup
  }
};

class ShenandoahPrecleanTask : public AbstractGangTask {
private:
  ReferenceProcessor* _rp;

public:
  ShenandoahPrecleanTask(ReferenceProcessor* rp) :
          AbstractGangTask("Precleaning task"),
          _rp(rp) {}

  void work(uint worker_id) {
    assert(worker_id == 0, "The code below is single-threaded, only one worker is expected");
    ShenandoahParallelWorkerSession worker_session(worker_id);

    ShenandoahHeap* sh = ShenandoahHeap::heap();
    assert(!sh->has_forwarded_objects(), "No forwarded objects expected here");

    ShenandoahObjToScanQueue* q = sh->concurrent_mark()->get_queue(worker_id);

    ShenandoahCancelledGCYieldClosure yield;
    ShenandoahPrecleanCompleteGCClosure complete_gc;

    ShenandoahIsAliveClosure is_alive;
    ShenandoahCMKeepAliveClosure keep_alive(q);
    ResourceMark rm;
    _rp->preclean_discovered_references(&is_alive, &keep_alive,
                                        &complete_gc, &yield,
                                        NULL);
  }
};

void ShenandoahConcurrentMark::preclean_weak_refs() {
  // Pre-cleaning weak references before diving into STW makes sense at the
  // end of concurrent mark. This filters out the references whose referents
  // are alive. Note that ReferenceProcessor already filters these out on
  // reference discovery, and that is where the bulk of the work is done. This
  // phase processes the leftovers that missed the initial filtering, i.e. the
  // cases where the referent was marked alive after the reference was
  // discovered by RP.

  assert(_heap->process_references(), "sanity");

  // Shortcut if no references were discovered to avoid winding up threads.
  ReferenceProcessor* rp = _heap->ref_processor();
  if (!rp->has_discovered_references()) {
    return;
  }

  assert(task_queues()->is_empty(), "Should be empty");

  ReferenceProcessorMTDiscoveryMutator fix_mt_discovery(rp, false);

  shenandoah_assert_rp_isalive_not_installed();
  ShenandoahIsAliveSelector is_alive;
  ReferenceProcessorIsAliveMutator fix_isalive(rp, is_alive.is_alive_closure());

  // Execute precleaning in the worker thread: it will give us GCLABs, String dedup
  // queues and other goodies. When upstream ReferenceProcessor starts supporting
  // parallel precleans, we can extend this to more threads.
  WorkGang* workers = _heap->workers();
  uint nworkers = workers->active_workers();
  assert(nworkers == 1, "This code uses only a single worker");
  task_queues()->reserve(nworkers);

  ShenandoahPrecleanTask task(rp);
  workers->run_task(&task);

  assert(task_queues()->is_empty(), "Should be empty");
}

void ShenandoahConcurrentMark::cancel() {
  // Clean up marking stacks.
  ShenandoahObjToScanQueueSet* queues = task_queues();
  queues->clear();

  // Cancel SATB buffers.
  ShenandoahBarrierSet::satb_mark_queue_set().abandon_partial_marking();
}

ShenandoahObjToScanQueue* ShenandoahConcurrentMark::get_queue(uint worker_id) {
  assert(task_queues()->get_reserved() > worker_id, "No reserved queue for worker id: %d", worker_id);
  return _task_queues->queue(worker_id);
}

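// Selects the right marking closure flavor for this cycle (class unloading x
// forwarded objects x string dedup) and runs the marking loop with it.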
template <bool CANCELLABLE>
void ShenandoahConcurrentMark::mark_loop_prework(uint w, TaskTerminator *t, ReferenceProcessor *rp,
                                                 bool strdedup) {
  ShenandoahObjToScanQueue* q = get_queue(w);

  ShenandoahLiveData* ld = _heap->get_liveness_cache(w);

  // TODO: We can clean this up if we figure out how to do templated oop closures that
  // play nice with specialized_oop_iterators.
  if (_heap->unload_classes()) {
    if (_heap->has_forwarded_objects()) {
      if (strdedup) {
        ShenandoahMarkUpdateRefsMetadataDedupClosure cl(q, rp);
        mark_loop_work<ShenandoahMarkUpdateRefsMetadataDedupClosure, CANCELLABLE>(&cl, ld, w, t);
      } else {
        ShenandoahMarkUpdateRefsMetadataClosure cl(q, rp);
        mark_loop_work<ShenandoahMarkUpdateRefsMetadataClosure, CANCELLABLE>(&cl, ld, w, t);
      }
    } else {
      if (strdedup) {
        ShenandoahMarkRefsMetadataDedupClosure cl(q, rp);
        mark_loop_work<ShenandoahMarkRefsMetadataDedupClosure, CANCELLABLE>(&cl, ld, w, t);
      } else {
        ShenandoahMarkRefsMetadataClosure cl(q, rp);
        mark_loop_work<ShenandoahMarkRefsMetadataClosure, CANCELLABLE>(&cl, ld, w, t);
      }
    }
  } else {
    if (_heap->has_forwarded_objects()) {
      if (strdedup) {
        ShenandoahMarkUpdateRefsDedupClosure cl(q, rp);
        mark_loop_work<ShenandoahMarkUpdateRefsDedupClosure, CANCELLABLE>(&cl, ld, w, t);
      } else {
        ShenandoahMarkUpdateRefsClosure cl(q, rp);
        mark_loop_work<ShenandoahMarkUpdateRefsClosure, CANCELLABLE>(&cl, ld, w, t);
      }
    } else {
      if (strdedup) {
        ShenandoahMarkRefsDedupClosure cl(q, rp);
        mark_loop_work<ShenandoahMarkRefsDedupClosure, CANCELLABLE>(&cl, ld, w, t);
      } else {
        ShenandoahMarkRefsClosure cl(q, rp);
        mark_loop_work<ShenandoahMarkRefsClosure, CANCELLABLE>(&cl, ld, w, t);
      }
    }
  }

  _heap->flush_liveness_cache(w);
}

template <class T, bool CANCELLABLE>
void ShenandoahConcurrentMark::mark_loop_work(T* cl, ShenandoahLiveData* live_data, uint worker_id, TaskTerminator *terminator) {
  uintx stride = ShenandoahMarkLoopStride;

  ShenandoahHeap* heap = ShenandoahHeap::heap();
  ShenandoahObjToScanQueueSet* queues = task_queues();
  ShenandoahObjToScanQueue* q;
  ShenandoahMarkTask t;

  /*
   * Process outstanding queues, if any.
   *
   * There can be more queues than workers. To deal with the imbalance, we claim
   * extra queues first. Since marking can push new tasks into the queue associated
   * with this worker id, we come back to process this queue in the normal loop.
   */
  assert(queues->get_reserved() == heap->workers()->active_workers(),
         "Need to reserve proper number of queues: reserved: %u, active: %u", queues->get_reserved(), heap->workers()->active_workers());

  q = queues->claim_next();
  while (q != NULL) {
    if (CANCELLABLE && heap->check_cancelled_gc_and_yield()) {
      return;
    }

    for (uint i = 0; i < stride; i++) {
      if (q->pop(t)) {
        do_task<T>(q, cl, live_data, &t);
      } else {
        assert(q->is_empty(), "Must be empty");
        q = queues->claim_next();
        break;
      }
    }
  }
  q = get_queue(worker_id);

  ShenandoahSATBBufferClosure drain_satb(q);
  SATBMarkQueueSet& satb_mq_set = ShenandoahBarrierSet::satb_mark_queue_set();

  /*
   * Normal marking loop:
   */
  while (true) {
    if (CANCELLABLE && heap->check_cancelled_gc_and_yield()) {
      return;
    }

    while (satb_mq_set.completed_buffers_num() > 0) {
      satb_mq_set.apply_closure_to_completed_buffer(&drain_satb);
    }

    uint work = 0;
    for (uint i = 0; i < stride; i++) {
      if (q->pop(t) ||
          queues->steal(worker_id, t)) {
        do_task<T>(q, cl, live_data, &t);
        work++;
      } else {
        break;
      }
    }

    if (work == 0) {
      // No work encountered in current stride, try to terminate.
      // Need to leave the STS here, otherwise it might block safepoints.
      ShenandoahSuspendibleThreadSetLeaver stsl(CANCELLABLE && ShenandoahSuspendibleWorkers);
      ShenandoahTerminatorTerminator tt(heap);
      if (terminator->offer_termination(&tt)) return;
    }
  }
}

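// One worker claims the code cache per cycle; the claim is reset at init-mark
// (see mark_roots()) so the next cycle scans it again.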
bool ShenandoahConcurrentMark::claim_codecache() {
  return _claimed_codecache.try_set();
}

void ShenandoahConcurrentMark::clear_claim_codecache() {
  _claimed_codecache.unset();
}