387 uint nworkers = workers->active_workers();
388
389 ShenandoahGCPhase conc_mark_phase(ShenandoahPhaseTimings::conc_mark);
390
391 if (_heap->process_references()) {
392 ReferenceProcessor* rp = _heap->ref_processor();
393 rp->set_active_mt_degree(nworkers);
394
395 // enable ("weak") refs discovery
396 rp->enable_discovery(true /*verify_no_refs*/);
397 rp->setup_policy(_heap->soft_ref_policy()->should_clear_all_soft_refs());
398 }
399
400 shenandoah_assert_rp_isalive_not_installed();
401 ShenandoahIsAliveSelector is_alive;
402 ReferenceProcessorIsAliveMutator fix_isalive(_heap->ref_processor(), is_alive.is_alive_closure());
403
404 task_queues()->reserve(nworkers);
405
406 {
407 ShenandoahTerminationTracker term(ShenandoahPhaseTimings::conc_termination);
408 TaskTerminator terminator(nworkers, task_queues());
409 ShenandoahConcurrentMarkingTask task(this, &terminator);
410 workers->run_task(&task);
411 }
412
413 assert(task_queues()->is_empty() || _heap->cancelled_gc(), "Should be empty when not cancelled");
414 }
415
// Safepoint phase that completes marking: runs ShenandoahFinalMarkingTask on all
// active workers to drain the remaining task queues (and SATB buffers in the
// concurrent case), then performs weak reference processing if enabled.
// NOTE(review): line-number prefixes on each line are an artifact of the diff
// extraction, not part of the code.
416 void ShenandoahConcurrentMark::finish_mark_from_roots(bool full_gc) {
417 assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Must be at a safepoint");
418
419 uint nworkers = _heap->workers()->active_workers();
420
421 // Finally mark everything else we've got in our queues during the previous steps.
422 // It does two different things for concurrent vs. mark-compact GC:
423 // - For concurrent GC, it starts with empty task queues, drains the remaining
424 // SATB buffers, and then completes the marking closure.
425 // - For mark-compact GC, it starts out with the task queues seeded by initial
426 // root scan, and completes the closure, thus marking through all live objects
427 // The implementation is the same, so it's shared here.
428 {
// Scope bounds the RAII phase/termination timers and the root scope so they
// end before the post-conditions below are checked.
429 ShenandoahGCPhase phase(full_gc ?
430 ShenandoahPhaseTimings::full_gc_mark_finish_queues :
431 ShenandoahPhaseTimings::finish_queues);
432 task_queues()->reserve(nworkers);
433
// Install the is-alive closure into the reference processor for the duration
// of this scope (the assert checks none is installed yet).
434 shenandoah_assert_rp_isalive_not_installed();
435 ShenandoahIsAliveSelector is_alive;
436 ReferenceProcessorIsAliveMutator fix_isalive(_heap->ref_processor(), is_alive.is_alive_closure());
437
// Tracker is parameterized by the matching (full vs. concurrent) termination
// timing phase — presumably attributes termination wait time; confirm in
// ShenandoahTerminationTracker.
438 ShenandoahTerminationTracker termination_tracker(full_gc ?
439 ShenandoahPhaseTimings::full_gc_mark_termination :
440 ShenandoahPhaseTimings::termination);
441
442 StrongRootsScope scope(nworkers);
443 TaskTerminator terminator(nworkers, task_queues());
444 ShenandoahFinalMarkingTask task(this, &terminator, ShenandoahStringDedup::is_enabled());
445 _heap->workers()->run_task(&task);
446 }
447
// Final marking runs at a safepoint and cannot be cancelled, so the queues
// must be fully drained here (unlike the concurrent phase above).
448 assert(task_queues()->is_empty(), "Should be empty");
449
450 // When we're done marking everything, we process weak references.
451 if (_heap->process_references()) {
452 weak_refs_work(full_gc);
453 }
454
455 assert(task_queues()->is_empty(), "Should be empty");
456 TASKQUEUE_STATS_ONLY(task_queues()->print_taskqueue_stats());
457 TASKQUEUE_STATS_ONLY(task_queues()->reset_taskqueue_stats());
458 }
459
460 // Weak Reference Closures
461 class ShenandoahCMDrainMarkingStackClosure: public VoidClosure {
619 ReferenceProcessor* rp = _heap->ref_processor();
620
621 // NOTE: We cannot shortcut on has_discovered_references() here, because
622 // we will miss marking JNI Weak refs then, see implementation in
623 // ReferenceProcessor::process_discovered_references.
624 weak_refs_work_doit(full_gc);
625
626 rp->verify_no_references_recorded();
627 assert(!rp->discovery_enabled(), "Post condition");
628
629 }
630
// Core of weak reference processing: selects phase timings by full_gc, installs
// the is-alive closure, configures the reference processor for the active worker
// count, and invokes process_discovered_references() with a keep-alive closure
// chosen by whether the heap may contain forwarded objects.
631 void ShenandoahConcurrentMark::weak_refs_work_doit(bool full_gc) {
632 ReferenceProcessor* rp = _heap->ref_processor();
633
// Pick the timing buckets that match the collection mode.
634 ShenandoahPhaseTimings::Phase phase_process =
635 full_gc ?
636 ShenandoahPhaseTimings::full_gc_weakrefs_process :
637 ShenandoahPhaseTimings::weakrefs_process;
638
639 ShenandoahPhaseTimings::Phase phase_process_termination =
640 full_gc ?
641 ShenandoahPhaseTimings::full_gc_weakrefs_termination :
642 ShenandoahPhaseTimings::weakrefs_termination;
643
// Install the is-alive closure for the duration of this function.
644 shenandoah_assert_rp_isalive_not_installed();
645 ShenandoahIsAliveSelector is_alive;
646 ReferenceProcessorIsAliveMutator fix_isalive(rp, is_alive.is_alive_closure());
647
648 WorkGang* workers = _heap->workers();
649 uint nworkers = workers->active_workers();
650
// Re-apply soft-ref policy and MT degree here: active worker count may differ
// from when discovery was enabled at mark start.
651 rp->setup_policy(_heap->soft_ref_policy()->should_clear_all_soft_refs());
652 rp->set_active_mt_degree(nworkers);
653
654 assert(task_queues()->is_empty(), "Should be empty");
655
656 // complete_gc and keep_alive closures instantiated here are only needed for
657 // single-threaded path in RP. They share the queue 0 for tracking work, which
658 // simplifies implementation. Since RP may decide to call complete_gc several
659 // times, we need to be able to reuse the terminator.
660 uint serial_worker_id = 0;
661 TaskTerminator terminator(1, task_queues());
662 ShenandoahCMDrainMarkingStackClosure complete_gc(serial_worker_id, &terminator, /* reset_terminator = */ true);
663
// Executor used by RP for the multi-threaded processing path.
664 ShenandoahRefProcTaskExecutor executor(workers);
665
666 ReferenceProcessorPhaseTimes pt(_heap->gc_timer(), rp->num_queues());
667
668 {
669 ShenandoahGCPhase phase(phase_process);
670 ShenandoahTerminationTracker phase_term(phase_process_termination);
671
// When objects may be forwarded (name suggests the Update variant also fixes
// up references while keeping referents alive — confirm in closure impl).
672 if (_heap->has_forwarded_objects()) {
673 ShenandoahCMKeepAliveUpdateClosure keep_alive(get_queue(serial_worker_id));
674 rp->process_discovered_references(is_alive.is_alive_closure(), &keep_alive,
675 &complete_gc, &executor,
676 &pt);
677
678 } else {
679 ShenandoahCMKeepAliveClosure keep_alive(get_queue(serial_worker_id));
680 rp->process_discovered_references(is_alive.is_alive_closure(), &keep_alive,
681 &complete_gc, &executor,
682 &pt);
683
684 }
685
686 pt.print_all_references();
687
// Reference processing must have drained everything it pushed.
688 assert(task_queues()->is_empty(), "Should be empty");
689 }
690 }
925
926 while (satb_mq_set.completed_buffers_num() > 0) {
927 satb_mq_set.apply_closure_to_completed_buffer(&drain_satb);
928 }
929
930 uint work = 0;
931 for (uint i = 0; i < stride; i++) {
932 if (q->pop(t) ||
933 queues->steal(worker_id, t)) {
934 do_task<T>(q, cl, live_data, &t);
935 work++;
936 } else {
937 break;
938 }
939 }
940
941 if (work == 0) {
942 // No work encountered in current stride, try to terminate.
943 // Need to leave the STS here otherwise it might block safepoints.
944 ShenandoahSuspendibleThreadSetLeaver stsl(CANCELLABLE && ShenandoahSuspendibleWorkers);
945 ShenandoahTerminationTimingsTracker term_tracker(worker_id);
946 ShenandoahTerminatorTerminator tt(heap);
947 if (terminator->offer_termination(&tt)) return;
948 }
949 }
950 }
951
// Atomically claims code cache scanning; returns true only for the single
// caller that wins the try_set() race. Valid only when concurrent code root
// scanning is enabled.
952 bool ShenandoahConcurrentMark::claim_codecache() {
953 assert(ShenandoahConcurrentScanCodeRoots, "must not be called otherwise");
954 return _claimed_codecache.try_set();
955 }
956
// Resets the code cache claim flag so a later claim_codecache() can succeed
// again (presumably once per marking cycle — confirm at call sites).
957 void ShenandoahConcurrentMark::clear_claim_codecache() {
958 assert(ShenandoahConcurrentScanCodeRoots, "must not be called otherwise");
959 _claimed_codecache.unset();
960 }
|
387 uint nworkers = workers->active_workers();
388
389 ShenandoahGCPhase conc_mark_phase(ShenandoahPhaseTimings::conc_mark);
390
391 if (_heap->process_references()) {
392 ReferenceProcessor* rp = _heap->ref_processor();
393 rp->set_active_mt_degree(nworkers);
394
395 // enable ("weak") refs discovery
396 rp->enable_discovery(true /*verify_no_refs*/);
397 rp->setup_policy(_heap->soft_ref_policy()->should_clear_all_soft_refs());
398 }
399
400 shenandoah_assert_rp_isalive_not_installed();
401 ShenandoahIsAliveSelector is_alive;
402 ReferenceProcessorIsAliveMutator fix_isalive(_heap->ref_processor(), is_alive.is_alive_closure());
403
404 task_queues()->reserve(nworkers);
405
406 {
407 TaskTerminator terminator(nworkers, task_queues());
408 ShenandoahConcurrentMarkingTask task(this, &terminator);
409 workers->run_task(&task);
410 }
411
412 assert(task_queues()->is_empty() || _heap->cancelled_gc(), "Should be empty when not cancelled");
413 }
414
// Safepoint phase that completes marking: runs ShenandoahFinalMarkingTask on all
// active workers to drain the remaining task queues (and SATB buffers in the
// concurrent case), then performs weak reference processing if enabled.
// NOTE(review): this is the post-change copy in the diff dump — the
// ShenandoahTerminationTracker present in the pre-change copy has been removed.
415 void ShenandoahConcurrentMark::finish_mark_from_roots(bool full_gc) {
416 assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Must be at a safepoint");
417
418 uint nworkers = _heap->workers()->active_workers();
419
420 // Finally mark everything else we've got in our queues during the previous steps.
421 // It does two different things for concurrent vs. mark-compact GC:
422 // - For concurrent GC, it starts with empty task queues, drains the remaining
423 // SATB buffers, and then completes the marking closure.
424 // - For mark-compact GC, it starts out with the task queues seeded by initial
425 // root scan, and completes the closure, thus marking through all live objects
426 // The implementation is the same, so it's shared here.
427 {
// Scope bounds the RAII phase timer and root scope so they end before the
// post-conditions below are checked.
428 ShenandoahGCPhase phase(full_gc ?
429 ShenandoahPhaseTimings::full_gc_mark_finish_queues :
430 ShenandoahPhaseTimings::finish_queues);
431 task_queues()->reserve(nworkers);
432
// Install the is-alive closure into the reference processor for the duration
// of this scope (the assert checks none is installed yet).
433 shenandoah_assert_rp_isalive_not_installed();
434 ShenandoahIsAliveSelector is_alive;
435 ReferenceProcessorIsAliveMutator fix_isalive(_heap->ref_processor(), is_alive.is_alive_closure());
436
437 StrongRootsScope scope(nworkers);
438 TaskTerminator terminator(nworkers, task_queues());
439 ShenandoahFinalMarkingTask task(this, &terminator, ShenandoahStringDedup::is_enabled());
440 _heap->workers()->run_task(&task);
441 }
442
// Final marking runs at a safepoint and cannot be cancelled, so the queues
// must be fully drained here (unlike the concurrent phase above).
443 assert(task_queues()->is_empty(), "Should be empty");
444
445 // When we're done marking everything, we process weak references.
446 if (_heap->process_references()) {
447 weak_refs_work(full_gc);
448 }
449
450 assert(task_queues()->is_empty(), "Should be empty");
451 TASKQUEUE_STATS_ONLY(task_queues()->print_taskqueue_stats());
452 TASKQUEUE_STATS_ONLY(task_queues()->reset_taskqueue_stats());
453 }
454
455 // Weak Reference Closures
456 class ShenandoahCMDrainMarkingStackClosure: public VoidClosure {
614 ReferenceProcessor* rp = _heap->ref_processor();
615
616 // NOTE: We cannot shortcut on has_discovered_references() here, because
617 // we will miss marking JNI Weak refs then, see implementation in
618 // ReferenceProcessor::process_discovered_references.
619 weak_refs_work_doit(full_gc);
620
621 rp->verify_no_references_recorded();
622 assert(!rp->discovery_enabled(), "Post condition");
623
624 }
625
// Core of weak reference processing: selects the phase timing by full_gc,
// installs the is-alive closure, configures the reference processor for the
// active worker count, and invokes process_discovered_references() with a
// keep-alive closure chosen by whether the heap may contain forwarded objects.
// NOTE(review): post-change copy — the separate termination timing phase and
// ShenandoahTerminationTracker from the pre-change copy are gone.
626 void ShenandoahConcurrentMark::weak_refs_work_doit(bool full_gc) {
627 ReferenceProcessor* rp = _heap->ref_processor();
628
// Pick the timing bucket that matches the collection mode.
629 ShenandoahPhaseTimings::Phase phase_process =
630 full_gc ?
631 ShenandoahPhaseTimings::full_gc_weakrefs_process :
632 ShenandoahPhaseTimings::weakrefs_process;
633
// Install the is-alive closure for the duration of this function.
634 shenandoah_assert_rp_isalive_not_installed();
635 ShenandoahIsAliveSelector is_alive;
636 ReferenceProcessorIsAliveMutator fix_isalive(rp, is_alive.is_alive_closure());
637
638 WorkGang* workers = _heap->workers();
639 uint nworkers = workers->active_workers();
640
// Re-apply soft-ref policy and MT degree here: active worker count may differ
// from when discovery was enabled at mark start.
641 rp->setup_policy(_heap->soft_ref_policy()->should_clear_all_soft_refs());
642 rp->set_active_mt_degree(nworkers);
643
644 assert(task_queues()->is_empty(), "Should be empty");
645
646 // complete_gc and keep_alive closures instantiated here are only needed for
647 // single-threaded path in RP. They share the queue 0 for tracking work, which
648 // simplifies implementation. Since RP may decide to call complete_gc several
649 // times, we need to be able to reuse the terminator.
650 uint serial_worker_id = 0;
651 TaskTerminator terminator(1, task_queues());
652 ShenandoahCMDrainMarkingStackClosure complete_gc(serial_worker_id, &terminator, /* reset_terminator = */ true);
653
// Executor used by RP for the multi-threaded processing path.
654 ShenandoahRefProcTaskExecutor executor(workers);
655
656 ReferenceProcessorPhaseTimes pt(_heap->gc_timer(), rp->num_queues());
657
658 {
659 ShenandoahGCPhase phase(phase_process);
660
// When objects may be forwarded (name suggests the Update variant also fixes
// up references while keeping referents alive — confirm in closure impl).
661 if (_heap->has_forwarded_objects()) {
662 ShenandoahCMKeepAliveUpdateClosure keep_alive(get_queue(serial_worker_id));
663 rp->process_discovered_references(is_alive.is_alive_closure(), &keep_alive,
664 &complete_gc, &executor,
665 &pt);
666
667 } else {
668 ShenandoahCMKeepAliveClosure keep_alive(get_queue(serial_worker_id));
669 rp->process_discovered_references(is_alive.is_alive_closure(), &keep_alive,
670 &complete_gc, &executor,
671 &pt);
672
673 }
674
675 pt.print_all_references();
676
// Reference processing must have drained everything it pushed.
677 assert(task_queues()->is_empty(), "Should be empty");
678 }
679 }
914
915 while (satb_mq_set.completed_buffers_num() > 0) {
916 satb_mq_set.apply_closure_to_completed_buffer(&drain_satb);
917 }
918
919 uint work = 0;
920 for (uint i = 0; i < stride; i++) {
921 if (q->pop(t) ||
922 queues->steal(worker_id, t)) {
923 do_task<T>(q, cl, live_data, &t);
924 work++;
925 } else {
926 break;
927 }
928 }
929
930 if (work == 0) {
931 // No work encountered in current stride, try to terminate.
932 // Need to leave the STS here otherwise it might block safepoints.
933 ShenandoahSuspendibleThreadSetLeaver stsl(CANCELLABLE && ShenandoahSuspendibleWorkers);
934 ShenandoahTerminatorTerminator tt(heap);
935 if (terminator->offer_termination(&tt)) return;
936 }
937 }
938 }
939
// Atomically claims code cache scanning; returns true only for the single
// caller that wins the try_set() race. Valid only when concurrent code root
// scanning is enabled.
940 bool ShenandoahConcurrentMark::claim_codecache() {
941 assert(ShenandoahConcurrentScanCodeRoots, "must not be called otherwise");
942 return _claimed_codecache.try_set();
943 }
944
// Resets the code cache claim flag so a later claim_codecache() can succeed
// again (presumably once per marking cycle — confirm at call sites).
945 void ShenandoahConcurrentMark::clear_claim_codecache() {
946 assert(ShenandoahConcurrentScanCodeRoots, "must not be called otherwise");
947 _claimed_codecache.unset();
948 }
|