157 TaskTerminator* _terminator;
158
159 public:
160 ShenandoahConcurrentMarkingTask(ShenandoahConcurrentMark* cm, TaskTerminator* terminator) :
161 AbstractGangTask("Root Region Scan"), _cm(cm), _terminator(terminator) {
162 }
163
// Per-worker body of the concurrent marking task. Joins the suspendible
// thread set so this worker can yield at safepoints, grabs this worker's
// mark queue, then scans code-cache roots and runs the cancellable
// marking loop (with string dedup if enabled).
164 void work(uint worker_id) {
165 ShenandoahHeap* heap = ShenandoahHeap::heap();
166 ShenandoahConcurrentWorkerSession worker_session(worker_id);
167 ShenandoahSuspendibleThreadSetJoiner stsj(ShenandoahSuspendibleWorkers);
168 ShenandoahObjToScanQueue* q = _cm->get_queue(worker_id);
169 ReferenceProcessor* rp;
170 if (heap->process_references()) {
171 rp = heap->ref_processor();
// The is-alive closure must have been installed by the coordinator
// (see mark_from_roots) before workers run.
172 shenandoah_assert_rp_isalive_installed();
173 } else {
174 rp = NULL;
175 }
176 
177 _cm->concurrent_scan_code_roots(worker_id, rp);
178 _cm->mark_loop(worker_id, _terminator, rp,
179 true, // cancellable
180 ShenandoahStringDedup::is_enabled()); // perform string dedup
181 }
182 };
183
// Thread closure used at final mark: for each thread (claimed exactly once
// across workers via the claim token), drain its thread-local SATB buffer,
// and for Java threads additionally scan thread oops and/or nmethod roots
// depending on which closures were supplied.
184 class ShenandoahSATBAndRemarkCodeRootsThreadsClosure : public ThreadClosure {
185 private:
186 ShenandoahSATBBufferClosure* _satb_cl;
187 OopClosure* const _cl;
188 MarkingCodeBlobClosure* _code_cl;
// Claim token snapshot taken at construction; threads claimed against it
// are processed by exactly one worker.
189 uintx _claim_token;
190 
191 public:
192 ShenandoahSATBAndRemarkCodeRootsThreadsClosure(ShenandoahSATBBufferClosure* satb_cl, OopClosure* cl, MarkingCodeBlobClosure* code_cl) :
193 _satb_cl(satb_cl), _cl(cl), _code_cl(code_cl),
194 _claim_token(Threads::thread_claim_token()) {}
195 
196 void do_thread(Thread* thread) {
197 if (thread->claim_threads_do(true, _claim_token)) {
// Always drain this thread's SATB queue, even for non-Java threads.
198 ShenandoahThreadLocalData::satb_mark_queue(thread).apply_closure_and_empty(_satb_cl);
199 if (thread->is_Java_thread()) {
200 if (_cl != NULL) {
201 ResourceMark rm;
// Full thread scan: stack oops plus code blobs via _code_cl.
202 thread->oops_do(_cl, _code_cl);
203 } else if (_code_cl != NULL) {
204 // In theory it should not be neccessary to explicitly walk the nmethods to find roots for concurrent marking
205 // however the liveness of oops reachable from nmethods have very complex lifecycles:
206 // * Alive if on the stack of an executing method
207 // * Weakly reachable otherwise
208 // Some objects reachable from nmethods, such as the class loader (or klass_holder) of the receiver should be
209 // live by the SATB invariant but other oops recorded in nmethods may behave differently.
210 JavaThread* jt = (JavaThread*)thread;
211 jt->nmethods_do(_code_cl);
212 }
213 }
214 }
215 }
216 };
217
// STW final-mark task: drains remaining SATB buffers, re-scans thread and
// (when needed) nmethod roots, then finishes the marking loop.
// NOTE(review): the embedded numbering jumps 237 -> 250 below — this listing
// elides lines (the else-branch of the rp setup and the declarations of
// `q` and `cl` used later); read against the full file before editing.
218 class ShenandoahFinalMarkingTask : public AbstractGangTask {
219 private:
220 ShenandoahConcurrentMark* _cm;
221 TaskTerminator* _terminator;
222 bool _dedup_string;
223 
224 public:
225 ShenandoahFinalMarkingTask(ShenandoahConcurrentMark* cm, TaskTerminator* terminator, bool dedup_string) :
226 AbstractGangTask("Shenandoah Final Marking"), _cm(cm), _terminator(terminator), _dedup_string(dedup_string) {
227 }
228 
229 void work(uint worker_id) {
230 ShenandoahHeap* heap = ShenandoahHeap::heap();
231 
232 ShenandoahParallelWorkerSession worker_session(worker_id);
233 ReferenceProcessor* rp;
234 if (heap->process_references()) {
235 rp = heap->ref_processor();
236 shenandoah_assert_rp_isalive_installed();
237 } else {
250 SATBMarkQueueSet& satb_mq_set = ShenandoahBarrierSet::satb_mark_queue_set();
// Drain all completed SATB buffers; loop body intentionally empty.
251 while (satb_mq_set.apply_closure_to_completed_buffer(&cl));
// Walk nmethods from threads only when classes are unloaded and
// concurrent class unloading is not available.
252 bool do_nmethods = heap->unload_classes() && !ShenandoahConcurrentRoots::can_do_concurrent_class_unloading();
253 if (heap->has_forwarded_objects()) {
254 ShenandoahMarkResolveRefsClosure resolve_mark_cl(q, rp);
255 MarkingCodeBlobClosure blobsCl(&resolve_mark_cl, !CodeBlobToOopClosure::FixRelocations);
256 ShenandoahSATBAndRemarkCodeRootsThreadsClosure tc(&cl,
257 ShenandoahStoreValEnqueueBarrier ? &resolve_mark_cl : NULL,
258 do_nmethods ? &blobsCl : NULL);
259 Threads::threads_do(&tc);
260 } else {
261 ShenandoahMarkRefsClosure mark_cl(q, rp);
262 MarkingCodeBlobClosure blobsCl(&mark_cl, !CodeBlobToOopClosure::FixRelocations);
263 ShenandoahSATBAndRemarkCodeRootsThreadsClosure tc(&cl,
264 ShenandoahStoreValEnqueueBarrier ? &mark_cl : NULL,
265 do_nmethods ? &blobsCl : NULL);
266 Threads::threads_do(&tc);
267 }
268 }
269 
270 if (heap->is_degenerated_gc_in_progress() || heap->is_full_gc_in_progress()) {
271 // Full GC does not execute concurrent cycle.
272 // Degenerated cycle may bypass concurrent cycle.
273 // So code roots might not be scanned, let's scan here.
274 _cm->concurrent_scan_code_roots(worker_id, rp);
275 }
276 
277 _cm->mark_loop(worker_id, _terminator, rp,
278 false, // not cancellable
279 _dedup_string);
280 
281 assert(_cm->task_queues()->is_empty(), "Should be empty");
282 }
283 };
284
// Init-mark root scan, run by the VM thread at a safepoint. Chooses the
// RESOLVE root task when forwarded objects may exist (must chase forwarding
// pointers), NONE otherwise. Resets codecache claim afterwards so concurrent
// code-root scanning can claim it again.
285 void ShenandoahConcurrentMark::mark_roots(ShenandoahPhaseTimings::Phase root_phase) {
286 assert(Thread::current()->is_VM_thread(), "can only do this in VMThread");
287 assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Must be at a safepoint");
288 
289 ShenandoahHeap* heap = ShenandoahHeap::heap();
290 
291 ShenandoahGCPhase phase(root_phase);
292 
293 WorkGang* workers = heap->workers();
294 uint nworkers = workers->active_workers();
295 
296 assert(nworkers <= task_queues()->size(), "Just check");
297 
298 ShenandoahRootScanner root_proc(nworkers, root_phase);
299 TASKQUEUE_STATS_ONLY(task_queues()->reset_taskqueue_stats());
300 task_queues()->reserve(nworkers);
301 
302 if (heap->has_forwarded_objects()) {
303 ShenandoahInitMarkRootsTask<RESOLVE> mark_roots(&root_proc);
304 workers->run_task(&mark_roots);
305 } else {
306 // No need to update references, which means the heap is stable.
307 // Can save time not walking through forwarding pointers.
308 ShenandoahInitMarkRootsTask<NONE> mark_roots(&root_proc);
309 workers->run_task(&mark_roots);
310 }
311 
// Allow the next concurrent_scan_code_roots() to claim the code cache.
312 clear_claim_codecache();
313 }
314
// Update roots at a safepoint for full/degenerated GC. Derived pointers are
// cleared before and updated after the root walk (C2/JVMCI only).
// NOTE(review): embedded numbering jumps 332 -> 373 — this listing elides the
// lines that actually run the update_roots task; consult the full file.
315 void ShenandoahConcurrentMark::update_roots(ShenandoahPhaseTimings::Phase root_phase) {
316 assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Must be at a safepoint");
317 assert(root_phase == ShenandoahPhaseTimings::full_gc_update_roots ||
318 root_phase == ShenandoahPhaseTimings::degen_gc_update_roots,
319 "Only for these phases");
320 
321 ShenandoahGCPhase phase(root_phase);
322 
// Degenerated GC must filter dead oops; full GC does not need the check.
323 bool check_alive = root_phase == ShenandoahPhaseTimings::degen_gc_update_roots;
324 
325 #if COMPILER2_OR_JVMCI
326 DerivedPointerTable::clear();
327 #endif
328 
329 uint nworkers = _heap->workers()->active_workers();
330 
331 ShenandoahRootUpdater root_updater(nworkers, root_phase);
332 ShenandoahUpdateRootsTask update_roots(&root_updater, check_alive);
373 
374 #if COMPILER2_OR_JVMCI
375 DerivedPointerTable::update_pointers();
376 #endif
377 }
378
// One-time setup: allocate and register one object-to-scan queue per worker
// (at least one, even if workers == 0).
379 void ShenandoahConcurrentMark::initialize(uint workers) {
380 _heap = ShenandoahHeap::heap();
381 
382 uint num_queues = MAX2(workers, 1U);
383 
384 _task_queues = new ShenandoahObjToScanQueueSet((int) num_queues);
385 
386 for (uint i = 0; i < num_queues; ++i) {
387 ShenandoahObjToScanQueue* task_queue = new ShenandoahObjToScanQueue();
388 task_queue->initialize();
389 _task_queues->register_queue(i, task_queue);
390 }
391 }
392
// Scan code-cache roots during concurrent mark. Skipped entirely when class
// unloading will handle code roots. Exactly one worker wins the claim and
// walks all code blobs under CodeCache_lock, choosing a resolving closure
// when forwarded objects may be present.
393 void ShenandoahConcurrentMark::concurrent_scan_code_roots(uint worker_id, ReferenceProcessor* rp) {
394 if (_heap->unload_classes()) {
395 return;
396 }
397 
398 if (claim_codecache()) {
399 ShenandoahObjToScanQueue* q = task_queues()->queue(worker_id);
400 MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
401 // TODO: We can not honor StringDeduplication here, due to lock ranking
402 // inversion. So, we may miss some deduplication candidates.
403 if (_heap->has_forwarded_objects()) {
404 ShenandoahMarkResolveRefsClosure cl(q, rp);
405 CodeBlobToOopClosure blobs(&cl, !CodeBlobToOopClosure::FixRelocations);
406 CodeCache::blobs_do(&blobs);
407 } else {
408 ShenandoahMarkRefsClosure cl(q, rp);
409 CodeBlobToOopClosure blobs(&cl, !CodeBlobToOopClosure::FixRelocations);
410 CodeCache::blobs_do(&blobs);
411 }
412 }
413 }
414
// Drives concurrent marking: configures weak reference discovery (when
// enabled), installs the is-alive closure for the duration of marking, and
// runs the parallel concurrent marking task to termination.
415 void ShenandoahConcurrentMark::mark_from_roots() {
416 WorkGang* workers = _heap->workers();
417 uint nworkers = workers->active_workers();
418 
419 if (_heap->process_references()) {
420 ReferenceProcessor* rp = _heap->ref_processor();
421 rp->set_active_mt_degree(nworkers);
422 
423 // enable ("weak") refs discovery
424 rp->enable_discovery(true /*verify_no_refs*/);
425 rp->setup_policy(_heap->soft_ref_policy()->should_clear_all_soft_refs());
426 }
427 
// Workers assert the reverse condition (rp_isalive_installed); the mutator
// below installs it for the scope of this function.
428 shenandoah_assert_rp_isalive_not_installed();
429 ShenandoahIsAliveSelector is_alive;
430 ReferenceProcessorIsAliveMutator fix_isalive(_heap->ref_processor(), is_alive.is_alive_closure());
431 
432 task_queues()->reserve(nworkers);
433 
434 {
435 TaskTerminator terminator(nworkers, task_queues());
436 ShenandoahConcurrentMarkingTask task(this, &terminator);
437 workers->run_task(&task);
438 }
439 
// Queues may legitimately be non-empty only if the GC cycle was cancelled.
440 assert(task_queues()->is_empty() || _heap->cancelled_gc(), "Should be empty when not cancelled");
441 }
442
// STW completion of marking: finishes queues (draining SATB and completing
// the mark closure), then processes weak references if enabled.
443 void ShenandoahConcurrentMark::finish_mark_from_roots(bool full_gc) {
444 assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Must be at a safepoint");
445 
446 uint nworkers = _heap->workers()->active_workers();
447 
448 // Finally mark everything else we've got in our queues during the previous steps.
449 // It does two different things for concurrent vs. mark-compact GC:
450 // - For concurrent GC, it starts with empty task queues, drains the remaining
451 // SATB buffers, and then completes the marking closure.
452 // - For mark-compact GC, it starts out with the task queues seeded by initial
453 // root scan, and completes the closure, thus marking through all live objects
454 // The implementation is the same, so it's shared here.
455 {
456 ShenandoahGCPhase phase(full_gc ?
457 ShenandoahPhaseTimings::full_gc_mark_finish_queues :
458 ShenandoahPhaseTimings::finish_queues);
459 task_queues()->reserve(nworkers);
460 
461 shenandoah_assert_rp_isalive_not_installed();
462 ShenandoahIsAliveSelector is_alive;
463 ReferenceProcessorIsAliveMutator fix_isalive(_heap->ref_processor(), is_alive.is_alive_closure());
464 
465 StrongRootsScope scope(nworkers);
466 TaskTerminator terminator(nworkers, task_queues());
467 ShenandoahFinalMarkingTask task(this, &terminator, ShenandoahStringDedup::is_enabled());
468 _heap->workers()->run_task(&task);
469 }
470 
471 assert(task_queues()->is_empty(), "Should be empty");
472 
473 // When we're done marking everything, we process weak references.
474 if (_heap->process_references()) {
475 weak_refs_work(full_gc);
476 }
477 
478 assert(task_queues()->is_empty(), "Should be empty");
479 TASKQUEUE_STATS_ONLY(task_queues()->print_taskqueue_stats());
480 TASKQUEUE_STATS_ONLY(task_queues()->reset_taskqueue_stats());
481 }
482
483 // Weak Reference Closures
484 class ShenandoahCMDrainMarkingStackClosure: public VoidClosure {
485 uint _worker_id;
486 TaskTerminator* _terminator;
487 bool _reset_terminator;
488
489 public:
490 ShenandoahCMDrainMarkingStackClosure(uint worker_id, TaskTerminator* t, bool reset_terminator = false):
491 _worker_id(worker_id),
924
925 uint work = 0;
926 for (uint i = 0; i < stride; i++) {
927 if (q->pop(t) ||
928 queues->steal(worker_id, t)) {
929 do_task<T>(q, cl, live_data, &t);
930 work++;
931 } else {
932 break;
933 }
934 }
935
936 if (work == 0) {
937 // No work encountered in current stride, try to terminate.
938 // Need to leave the STS here otherwise it might block safepoints.
939 ShenandoahSuspendibleThreadSetLeaver stsl(CANCELLABLE && ShenandoahSuspendibleWorkers);
940 ShenandoahTerminatorTerminator tt(heap);
941 if (terminator->offer_termination(&tt)) return;
942 }
943 }
944 }
945
// Returns true for exactly one caller until the claim flag is cleared;
// used so only one worker scans the code cache per cycle.
946 bool ShenandoahConcurrentMark::claim_codecache() {
947 return _claimed_codecache.try_set();
948 }
949
// Resets the code-cache claim so the next cycle can claim it again.
950 void ShenandoahConcurrentMark::clear_claim_codecache() {
951 _claimed_codecache.unset();
952 }
|
157 TaskTerminator* _terminator;
158
159 public:
160 ShenandoahConcurrentMarkingTask(ShenandoahConcurrentMark* cm, TaskTerminator* terminator) :
161 AbstractGangTask("Root Region Scan"), _cm(cm), _terminator(terminator) {
162 }
163
// Per-worker body of the concurrent marking task (newer variant: code roots
// are handled by the separate concurrent-roots task, so this only runs the
// cancellable marking loop after joining the suspendible thread set).
164 void work(uint worker_id) {
165 ShenandoahHeap* heap = ShenandoahHeap::heap();
166 ShenandoahConcurrentWorkerSession worker_session(worker_id);
167 ShenandoahSuspendibleThreadSetJoiner stsj(ShenandoahSuspendibleWorkers);
168 ShenandoahObjToScanQueue* q = _cm->get_queue(worker_id);
169 ReferenceProcessor* rp;
170 if (heap->process_references()) {
171 rp = heap->ref_processor();
172 shenandoah_assert_rp_isalive_installed();
173 } else {
174 rp = NULL;
175 }
176 
177 _cm->mark_loop(worker_id, _terminator, rp,
178 true, // cancellable
179 ShenandoahStringDedup::is_enabled()); // perform string dedup
180 }
181 };
182
// Thread closure used at final mark: claims each thread once (claim token),
// drains its SATB buffer, and for Java threads scans thread oops and/or
// nmethod roots depending on the closures supplied.
183 class ShenandoahSATBAndRemarkCodeRootsThreadsClosure : public ThreadClosure {
184 private:
185 ShenandoahSATBBufferClosure* _satb_cl;
186 OopClosure* const _cl;
187 MarkingCodeBlobClosure* _code_cl;
// Claim token taken at construction; ensures one-worker-per-thread.
188 uintx _claim_token;
189 
190 public:
191 ShenandoahSATBAndRemarkCodeRootsThreadsClosure(ShenandoahSATBBufferClosure* satb_cl, OopClosure* cl, MarkingCodeBlobClosure* code_cl) :
192 _satb_cl(satb_cl), _cl(cl), _code_cl(code_cl),
193 _claim_token(Threads::thread_claim_token()) {}
194 
195 void do_thread(Thread* thread) {
196 if (thread->claim_threads_do(true, _claim_token)) {
197 ShenandoahThreadLocalData::satb_mark_queue(thread).apply_closure_and_empty(_satb_cl);
198 if (thread->is_Java_thread()) {
199 if (_cl != NULL) {
200 ResourceMark rm;
201 thread->oops_do(_cl, _code_cl);
202 } else if (_code_cl != NULL) {
203 // In theory it should not be neccessary to explicitly walk the nmethods to find roots for concurrent marking
204 // however the liveness of oops reachable from nmethods have very complex lifecycles:
205 // * Alive if on the stack of an executing method
206 // * Weakly reachable otherwise
207 // Some objects reachable from nmethods, such as the class loader (or klass_holder) of the receiver should be
208 // live by the SATB invariant but other oops recorded in nmethods may behave differently.
209 JavaThread* jt = (JavaThread*)thread;
210 jt->nmethods_do(_code_cl);
211 }
212 }
213 }
214 }
215 };
216
// Iterator over roots that can be processed concurrently (VM roots, CLDs,
// and a snapshot of the nmethod table). CONCURRENT selects lock-taking
// behavior; SINGLE_THREADED is forwarded to the CLD root walker.
217 template <bool CONCURRENT, bool SINGLE_THREADED>
218 class ShenandoahConcurrentRootsIterator {
219 private:
220 ShenandoahVMRoots<CONCURRENT> _vm_roots;
221 ShenandoahClassLoaderDataRoots<CONCURRENT, SINGLE_THREADED>
222 _cld_roots;
// Snapshot of code roots; NULL when classes are being unloaded.
223 ShenandoahNMethodTableSnapshot* _codecache_snapshot;
224 ShenandoahPhaseTimings::Phase _phase;
225 
226 public:
227 ShenandoahConcurrentRootsIterator(ShenandoahPhaseTimings::Phase phase);
228 ~ShenandoahConcurrentRootsIterator();
229 
230 void oops_do(OopClosure* oops, uint worker_id);
231 };
232
// Ctor: unless classes are being unloaded, snapshot the nmethod table.
// In the concurrent case CodeCache_lock is taken here (without safepoint
// check) and held until the destructor releases it; in the STW case we
// must already be at a safepoint.
233 template <bool CONCURRENT, bool SINGLE_THREADED>
234 ShenandoahConcurrentRootsIterator<CONCURRENT, SINGLE_THREADED>::ShenandoahConcurrentRootsIterator(ShenandoahPhaseTimings::Phase phase) :
235 _vm_roots(phase),
236 _cld_roots(phase),
237 _codecache_snapshot(NULL),
238 _phase(phase) {
239 if (!ShenandoahHeap::heap()->unload_classes()) {
240 if (CONCURRENT) {
241 CodeCache_lock->lock_without_safepoint_check();
242 } else {
243 assert(SafepointSynchronize::is_at_safepoint(), "Must be at a safepoint");
244 }
245 _codecache_snapshot = ShenandoahCodeRoots::table()->snapshot_for_iteration();
246 }
247 assert(!CONCURRENT || !ShenandoahHeap::heap()->has_forwarded_objects(), "Not expecting forwarded pointers during concurrent marking");
248 }
249
// Dtor: releases the nmethod-table snapshot and, in the concurrent case,
// the CodeCache_lock acquired by the constructor.
250 template <bool CONCURRENT, bool SINGLE_THREADED>
251 ShenandoahConcurrentRootsIterator<CONCURRENT, SINGLE_THREADED>::~ShenandoahConcurrentRootsIterator() {
252 if (!ShenandoahHeap::heap()->unload_classes()) {
253 ShenandoahCodeRoots::table()->finish_iteration(_codecache_snapshot);
254 if (CONCURRENT) {
255 CodeCache_lock->unlock();
256 }
257 }
258 }
259
// Applies `oops` to VM roots, class-loader-data roots (strong claim when
// concurrent), and — when classes are not unloaded — the code-cache
// snapshot. With class unloading only strong CLDs are walked.
260 template <bool CONCURRENT, bool SINGLE_THREADED>
261 void ShenandoahConcurrentRootsIterator<CONCURRENT, SINGLE_THREADED>::oops_do(OopClosure* oops, uint worker_id) {
262 ShenandoahHeap* const heap = ShenandoahHeap::heap();
263 CLDToOopClosure clds_cl(oops, CONCURRENT ? ClassLoaderData::_claim_strong : ClassLoaderData::_claim_none);
264 _vm_roots.oops_do(oops, worker_id);
265 
266 if (!heap->unload_classes()) {
267 _cld_roots.cld_do(&clds_cl, worker_id);
268 
// Time code-cache root scanning separately under the current phase.
269 ShenandoahWorkerTimingsTracker timer(_phase, ShenandoahPhaseTimings::CodeCacheRoots, worker_id);
270 CodeBlobToOopClosure blobs(oops, !CodeBlobToOopClosure::FixRelocations);
271 _codecache_snapshot->parallel_blobs_do(&blobs);
272 } else {
273 _cld_roots.always_strong_cld_do(&clds_cl, worker_id);
274 }
275 }
276
277 // Process concurrent roots at safepoints
// Gang task templated on the marking closure type; iterates the
// "concurrent" root set in STW mode (degenerated / full GC paths).
278 template <typename CLOSURE>
279 class ShenandoahProcessConcurrentRootsTask : public AbstractGangTask {
280 private:
// STW iteration: concurrent=false, single-threaded=false.
281 ShenandoahConcurrentRootsIterator<false /* concurrent */, false /* single_thread */> _itr;
282 ShenandoahConcurrentMark* const _cm;
283 ReferenceProcessor* _rp;
284 public:
285 
286 ShenandoahProcessConcurrentRootsTask(ShenandoahConcurrentMark* cm,
287 ShenandoahPhaseTimings::Phase phase);
288 void work(uint worker_id);
289 };
290
// Ctor: picks up the reference processor when reference processing is
// enabled (the is-alive closure must already be installed by the caller).
291 template <typename CLOSURE>
292 ShenandoahProcessConcurrentRootsTask<CLOSURE>::ShenandoahProcessConcurrentRootsTask(ShenandoahConcurrentMark* cm,
293 ShenandoahPhaseTimings::Phase phase) :
294 AbstractGangTask("Shenandoah STW Concurrent Mark Task"),
295 _itr(phase),
296 _cm(cm),
297 _rp(NULL) {
298 ShenandoahHeap* heap = ShenandoahHeap::heap();
299 if (heap->process_references()) {
300 _rp = heap->ref_processor();
301 shenandoah_assert_rp_isalive_installed();
302 }
303 }
304
// Per-worker body: instantiate the templated closure over this worker's
// queue and walk the root iterator with it.
305 template <typename CLOSURE>
306 void ShenandoahProcessConcurrentRootsTask<CLOSURE>::work(uint worker_id) {
307 ShenandoahParallelWorkerSession worker_session(worker_id);
308 ShenandoahObjToScanQueue* q = _cm->task_queues()->queue(worker_id);
309 CLOSURE cl(q, _rp);
310 _itr.oops_do(&cl, worker_id);
311 }
312
313
// STW final-mark task (newer variant): drains remaining SATB buffers,
// re-scans thread/nmethod roots as needed, then finishes the marking loop.
// NOTE(review): embedded numbering jumps 333 -> 346 — this listing elides
// lines (the else-branch of rp setup and the declarations of `q` and `cl`
// used below); read against the full file before editing.
314 class ShenandoahFinalMarkingTask : public AbstractGangTask {
315 private:
316 ShenandoahConcurrentMark* _cm;
317 TaskTerminator* _terminator;
318 bool _dedup_string;
319 
320 public:
321 ShenandoahFinalMarkingTask(ShenandoahConcurrentMark* cm, TaskTerminator* terminator, bool dedup_string) :
322 AbstractGangTask("Shenandoah Final Marking"), _cm(cm), _terminator(terminator), _dedup_string(dedup_string) {
323 }
324 
325 void work(uint worker_id) {
326 ShenandoahHeap* heap = ShenandoahHeap::heap();
327 
328 ShenandoahParallelWorkerSession worker_session(worker_id);
329 ReferenceProcessor* rp;
330 if (heap->process_references()) {
331 rp = heap->ref_processor();
332 shenandoah_assert_rp_isalive_installed();
333 } else {
346 SATBMarkQueueSet& satb_mq_set = ShenandoahBarrierSet::satb_mark_queue_set();
// Drain all completed SATB buffers; loop body intentionally empty.
347 while (satb_mq_set.apply_closure_to_completed_buffer(&cl));
348 bool do_nmethods = heap->unload_classes() && !ShenandoahConcurrentRoots::can_do_concurrent_class_unloading();
349 if (heap->has_forwarded_objects()) {
350 ShenandoahMarkResolveRefsClosure resolve_mark_cl(q, rp);
351 MarkingCodeBlobClosure blobsCl(&resolve_mark_cl, !CodeBlobToOopClosure::FixRelocations);
352 ShenandoahSATBAndRemarkCodeRootsThreadsClosure tc(&cl,
353 ShenandoahStoreValEnqueueBarrier ? &resolve_mark_cl : NULL,
354 do_nmethods ? &blobsCl : NULL);
355 Threads::threads_do(&tc);
356 } else {
357 ShenandoahMarkRefsClosure mark_cl(q, rp);
358 MarkingCodeBlobClosure blobsCl(&mark_cl, !CodeBlobToOopClosure::FixRelocations);
359 ShenandoahSATBAndRemarkCodeRootsThreadsClosure tc(&cl,
360 ShenandoahStoreValEnqueueBarrier ? &mark_cl : NULL,
361 do_nmethods ? &blobsCl : NULL);
362 Threads::threads_do(&tc);
363 }
364 }
365 
366 _cm->mark_loop(worker_id, _terminator, rp,
367 false, // not cancellable
368 _dedup_string);
369 
370 assert(_cm->task_queues()->is_empty(), "Should be empty");
371 }
372 };
373
// Init-mark root scan (newer variant), run by the VM thread at a safepoint.
// RESOLVE task when forwarded objects may exist, NONE otherwise.
374 void ShenandoahConcurrentMark::mark_roots(ShenandoahPhaseTimings::Phase root_phase) {
375 assert(Thread::current()->is_VM_thread(), "can only do this in VMThread");
376 assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Must be at a safepoint");
377 
378 ShenandoahHeap* heap = ShenandoahHeap::heap();
379 
380 ShenandoahGCPhase phase(root_phase);
381 
382 WorkGang* workers = heap->workers();
383 uint nworkers = workers->active_workers();
384 
385 assert(nworkers <= task_queues()->size(), "Just check");
386 
387 ShenandoahRootScanner root_proc(nworkers, root_phase);
388 TASKQUEUE_STATS_ONLY(task_queues()->reset_taskqueue_stats());
389 task_queues()->reserve(nworkers);
390 
391 if (heap->has_forwarded_objects()) {
392 ShenandoahInitMarkRootsTask<RESOLVE> mark_roots(&root_proc);
393 workers->run_task(&mark_roots);
394 } else {
395 // No need to update references, which means the heap is stable.
396 // Can save time not walking through forwarding pointers.
397 ShenandoahInitMarkRootsTask<NONE> mark_roots(&root_proc);
398 workers->run_task(&mark_roots);
399 }
400 }
401
// Update roots at a safepoint for full/degenerated GC; derived pointers are
// cleared before and updated after the walk (C2/JVMCI only).
// NOTE(review): embedded numbering jumps 419 -> 460 — this listing elides
// the lines that actually run the update_roots task; consult the full file.
402 void ShenandoahConcurrentMark::update_roots(ShenandoahPhaseTimings::Phase root_phase) {
403 assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Must be at a safepoint");
404 assert(root_phase == ShenandoahPhaseTimings::full_gc_update_roots ||
405 root_phase == ShenandoahPhaseTimings::degen_gc_update_roots,
406 "Only for these phases");
407 
408 ShenandoahGCPhase phase(root_phase);
409 
410 bool check_alive = root_phase == ShenandoahPhaseTimings::degen_gc_update_roots;
411 
412 #if COMPILER2_OR_JVMCI
413 DerivedPointerTable::clear();
414 #endif
415 
416 uint nworkers = _heap->workers()->active_workers();
417 
418 ShenandoahRootUpdater root_updater(nworkers, root_phase);
419 ShenandoahUpdateRootsTask update_roots(&root_updater, check_alive);
460 
461 #if COMPILER2_OR_JVMCI
462 DerivedPointerTable::update_pointers();
463 #endif
464 }
465
// One-time setup: allocate and register one object-to-scan queue per worker
// (at least one, even if workers == 0).
466 void ShenandoahConcurrentMark::initialize(uint workers) {
467 _heap = ShenandoahHeap::heap();
468 
469 uint num_queues = MAX2(workers, 1U);
470 
471 _task_queues = new ShenandoahObjToScanQueueSet((int) num_queues);
472 
473 for (uint i = 0; i < num_queues; ++i) {
474 ShenandoahObjToScanQueue* task_queue = new ShenandoahObjToScanQueue();
475 task_queue->initialize();
476 _task_queues->register_queue(i, task_queue);
477 }
478 }
479
480 // Mark concurrent roots during concurrent phases
// Gang task that marks the concurrent root set while Java threads run;
// the STS joiner member lets it yield to safepoints.
481 class ShenandoahMarkConcurrentRootsTask : public AbstractGangTask {
482 private:
483 SuspendibleThreadSetJoiner _sts_joiner;
// Concurrent iteration: concurrent=true, single-threaded=false.
484 ShenandoahConcurrentRootsIterator<true /* concurrent */, false /* single-threaded */> _itr;
485 ShenandoahObjToScanQueueSet* const _queue_set;
486 ReferenceProcessor* const _rp;
487 
488 public:
489 ShenandoahMarkConcurrentRootsTask(ShenandoahObjToScanQueueSet* qs,
490 ReferenceProcessor* rp,
491 ShenandoahPhaseTimings::Phase phase);
492 void work(uint worker_id);
493 };
494
// Ctor: concurrent root marking never runs with forwarded objects present.
495 ShenandoahMarkConcurrentRootsTask::ShenandoahMarkConcurrentRootsTask(ShenandoahObjToScanQueueSet* qs,
496 ReferenceProcessor* rp,
497 ShenandoahPhaseTimings::Phase phase) :
498 AbstractGangTask("Shenandoah Concurrent Mark Task"),
499 _itr(phase),
500 _queue_set(qs),
501 _rp(rp) {
502 assert(!ShenandoahHeap::heap()->has_forwarded_objects(), "Not expected");
503 }
504
// Per-worker body: mark concurrent roots into this worker's queue.
505 void ShenandoahMarkConcurrentRootsTask::work(uint worker_id) {
506 ShenandoahConcurrentWorkerSession worker_session(worker_id);
507 ShenandoahObjToScanQueue* q = _queue_set->queue(worker_id);
508 ShenandoahMarkResolveRefsClosure cl(q, _rp);
509 _itr.oops_do(&cl, worker_id);
510 }
511
// Drives concurrent marking (newer variant): configures weak reference
// discovery, installs the is-alive closure, first marks concurrent roots in
// a dedicated task (it may hold ClassLoaderData_lock / CodeCache_lock), then
// runs the concurrent marking task to termination.
512 void ShenandoahConcurrentMark::mark_from_roots() {
513 WorkGang* workers = _heap->workers();
514 uint nworkers = workers->active_workers();
515 
516 ReferenceProcessor* rp = NULL;
517 if (_heap->process_references()) {
518 rp = _heap->ref_processor();
519 rp->set_active_mt_degree(nworkers);
520 
521 // enable ("weak") refs discovery
522 rp->enable_discovery(true /*verify_no_refs*/);
523 rp->setup_policy(_heap->soft_ref_policy()->should_clear_all_soft_refs());
524 }
525 
526 shenandoah_assert_rp_isalive_not_installed();
527 ShenandoahIsAliveSelector is_alive;
528 ReferenceProcessorIsAliveMutator fix_isalive(_heap->ref_processor(), is_alive.is_alive_closure());
529 
530 task_queues()->reserve(nworkers);
531 
532 {
533 ShenandoahGCPhase phase(ShenandoahPhaseTimings::conc_mark_roots);
534 // Use separate task to mark concurrent roots, since it may hold ClassLoaderData_lock and CodeCache_lock
535 ShenandoahMarkConcurrentRootsTask task(task_queues(), rp, ShenandoahPhaseTimings::conc_mark_roots);
536 workers->run_task(&task);
537 }
538 
539 {
540 TaskTerminator terminator(nworkers, task_queues());
541 ShenandoahConcurrentMarkingTask task(this, &terminator);
542 workers->run_task(&task);
543 }
544 
// Queues may legitimately be non-empty only if the GC cycle was cancelled.
545 assert(task_queues()->is_empty() || _heap->cancelled_gc(), "Should be empty when not cancelled");
546 }
547
// STW completion of marking (newer variant): installs the is-alive closure
// for the whole scope, re-scans concurrent roots for degenerated/full GC
// (which may have bypassed the concurrent cycle), finishes queues, then
// processes weak references if enabled.
548 void ShenandoahConcurrentMark::finish_mark_from_roots(bool full_gc) {
549 assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Must be at a safepoint");
550 
551 uint nworkers = _heap->workers()->active_workers();
552 
553 {
554 shenandoah_assert_rp_isalive_not_installed();
555 ShenandoahIsAliveSelector is_alive;
556 ReferenceProcessorIsAliveMutator fix_isalive(_heap->ref_processor(), is_alive.is_alive_closure());
557 
558 
559 // Full GC does not execute concurrent cycle.
560 // Degenerated cycle may bypass concurrent cycle.
561 // So concurrent roots might not be scanned, scan them here.
562 // Ideally, this should be piggyback to ShenandoahFinalMarkingTask, but it makes time tracking
563 // very hard. Given full GC and degenerated GC should be rare, let's use separate task.
564 if (_heap->is_degenerated_gc_in_progress() || _heap->is_full_gc_in_progress()) {
565 ShenandoahPhaseTimings::Phase phase = _heap->is_full_gc_in_progress() ?
566 ShenandoahPhaseTimings::full_gc_scan_conc_roots :
567 ShenandoahPhaseTimings::degen_gc_scan_conc_roots;
568 ShenandoahGCPhase gc_phase(phase);
569 if (_heap->has_forwarded_objects()) {
570 ShenandoahProcessConcurrentRootsTask<ShenandoahMarkResolveRefsClosure> task(this, phase);
571 _heap->workers()->run_task(&task);
572 } else {
573 ShenandoahProcessConcurrentRootsTask<ShenandoahMarkRefsClosure> task(this, phase);
574 _heap->workers()->run_task(&task);
575 }
576 }
577 
578 
579 // Finally mark everything else we've got in our queues during the previous steps.
580 // It does two different things for concurrent vs. mark-compact GC:
581 // - For concurrent GC, it starts with empty task queues, drains the remaining
582 // SATB buffers, and then completes the marking closure.
583 // - For mark-compact GC, it starts out with the task queues seeded by initial
584 // root scan, and completes the closure, thus marking through all live objects
585 // The implementation is the same, so it's shared here.
586 {
587 ShenandoahGCPhase phase(full_gc ?
588 ShenandoahPhaseTimings::full_gc_mark_finish_queues :
589 ShenandoahPhaseTimings::finish_queues);
590 task_queues()->reserve(nworkers);
591 
592 StrongRootsScope scope(nworkers);
593 TaskTerminator terminator(nworkers, task_queues());
594 ShenandoahFinalMarkingTask task(this, &terminator, ShenandoahStringDedup::is_enabled());
595 _heap->workers()->run_task(&task);
596 }
597 
598 assert(task_queues()->is_empty(), "Should be empty");
599 }
600 
601 // When we're done marking everything, we process weak references.
602 if (_heap->process_references()) {
603 weak_refs_work(full_gc);
604 }
605 
606 assert(task_queues()->is_empty(), "Should be empty");
607 TASKQUEUE_STATS_ONLY(task_queues()->print_taskqueue_stats());
608 TASKQUEUE_STATS_ONLY(task_queues()->reset_taskqueue_stats());
609 }
610
611 // Weak Reference Closures
612 class ShenandoahCMDrainMarkingStackClosure: public VoidClosure {
613 uint _worker_id;
614 TaskTerminator* _terminator;
615 bool _reset_terminator;
616
617 public:
618 ShenandoahCMDrainMarkingStackClosure(uint worker_id, TaskTerminator* t, bool reset_terminator = false):
619 _worker_id(worker_id),
1052
1053 uint work = 0;
1054 for (uint i = 0; i < stride; i++) {
1055 if (q->pop(t) ||
1056 queues->steal(worker_id, t)) {
1057 do_task<T>(q, cl, live_data, &t);
1058 work++;
1059 } else {
1060 break;
1061 }
1062 }
1063
1064 if (work == 0) {
1065 // No work encountered in current stride, try to terminate.
1066 // Need to leave the STS here otherwise it might block safepoints.
1067 ShenandoahSuspendibleThreadSetLeaver stsl(CANCELLABLE && ShenandoahSuspendibleWorkers);
1068 ShenandoahTerminatorTerminator tt(heap);
1069 if (terminator->offer_termination(&tt)) return;
1070 }
1071 }
1072 }
|