  TaskQueueStats totals;
  const int n = workers() != NULL ? workers()->total_workers() : 1;
  for (int i = 0; i < n; ++i) {
    st->print("%3d ", i); task_queue(i)->stats.print(st); st->cr();
    totals += task_queue(i)->stats;
  }
  st->print_raw("tot "); totals.print(st); st->cr();

  DEBUG_ONLY(totals.verify());
}

void G1CollectedHeap::reset_taskqueue_stats() {
  const int n = workers() != NULL ? workers()->total_workers() : 1;
  for (int i = 0; i < n; ++i) {
    task_queue(i)->stats.reset();
  }
}
#endif // TASKQUEUE_STATS

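// Helpers that emit the beginning and the end of the "[GC pause ...]" log
// line for an evacuation pause; any per-phase output produced during the
// pause is printed in between the two.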
void G1CollectedHeap::log_gc_header() {
  if (!G1Log::fine()) {
    return;
  }

  gclog_or_tty->date_stamp(PrintGCDateStamps);
  gclog_or_tty->stamp(PrintGCTimeStamps);

  GCCauseString gc_cause_str = GCCauseString("GC pause", gc_cause())
    .append(g1_policy()->gcs_are_young() ? " (young)" : " (mixed)")
    .append(g1_policy()->during_initial_mark_pause() ? " (initial-mark)" : "");

  gclog_or_tty->print("[%s", (const char*)gc_cause_str);
}

void G1CollectedHeap::log_gc_footer(double pause_time_sec) {
  if (!G1Log::fine()) {
    return;
  }

  if (G1Log::finer()) {
    if (evacuation_failed()) {
      gclog_or_tty->print(" (to-space exhausted)");
    }
    gclog_or_tty->print_cr(", %3.7f secs]", pause_time_sec);
    g1_policy()->phase_times()->note_gc_end();
    g1_policy()->phase_times()->print(pause_time_sec);
    g1_policy()->print_detailed_heap_transition();
  } else {
    if (evacuation_failed()) {
      gclog_or_tty->print("--");
    }
    g1_policy()->print_heap_transition();
    gclog_or_tty->print_cr(", %3.7f secs]", pause_time_sec);
  }
}

bool
G1CollectedHeap::do_collection_pause_at_safepoint(double target_pause_time_ms) {
  assert_at_safepoint(true /* should_be_vm_thread */);
  guarantee(!is_gc_active(), "collection is not reentrant");

  if (GC_locker::check_active_before_gc()) {
    return false;
  }

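  // SvcGCMarker notifies serviceability agents (e.g. JVMTI) of the start and
  // end of this minor collection; ResourceMark releases any resource-area
  // allocations made during the pause when it goes out of scope.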
  SvcGCMarker sgcm(SvcGCMarker::MINOR);
  ResourceMark rm;

  print_heap_before_gc();

  HRSPhaseSetter x(HRSPhaseEvacuation);
  verify_region_sets_optional();
  verify_dirty_young_regions();

  // This call will decide whether this pause is an initial-mark
  // pause. If it is, during_initial_mark_pause() will return true
  // for the duration of this pause.
  g1_policy()->decide_on_conc_mark_initiation();

  // We do not allow initial-mark to be piggy-backed on a mixed GC.
  assert(!g1_policy()->during_initial_mark_pause() ||
         g1_policy()->gcs_are_young(), "sanity");

  // We also do not allow mixed GCs during marking.
  assert(!mark_in_progress() || g1_policy()->gcs_are_young(), "sanity");

  // Record whether this pause is an initial mark. By the time the current
  // thread has completed its logging output and it is safe to signal
  // the CM thread, the flag's value in the policy will have been reset.
  bool should_start_conc_mark = g1_policy()->during_initial_mark_pause();

  // Inner scope for scope based logging, timers, and stats collection
  {
    if (g1_policy()->during_initial_mark_pause()) {
      // We are about to start a marking cycle, so we increment the
      // full collection counter.
      increment_old_marking_cycles_started();
    }
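    // Measures CPU usage for this scope and, when "finer" logging is enabled,
    // prints the user/sys/real times when the pause is over.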
    TraceCPUTime tcpu(G1Log::finer(), true, gclog_or_tty);

    int active_workers = (G1CollectedHeap::use_parallel_gc_threads() ?
                          workers()->active_workers() : 1);
    double pause_start_sec = os::elapsedTime();
    g1_policy()->phase_times()->note_gc_start(active_workers);
    log_gc_header();

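    // Scoped updaters for the incremental-collection performance counters and
    // for the memory manager / memory pool statistics exposed through the
    // monitoring (JMX) interfaces.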
    TraceCollectorStats tcs(g1mm()->incremental_collection_counters());
    TraceMemoryManagerStats tms(false /* fullGC */, gc_cause());

    // If the secondary_free_list is not empty, append it to the
    // free_list. No need to wait for the cleanup operation to finish;
    // the region allocation code will check the secondary_free_list
    // and wait if necessary. If the G1StressConcRegionFreeing flag is
    // set, skip this step so that the region allocation code has to
    // get entries from the secondary_free_list.
    if (!G1StressConcRegionFreeing) {
      append_secondary_free_list_if_not_empty_with_lock();
    }

    assert(check_young_list_well_formed(),
           "young list should be well formed");

    // Don't dynamically change the number of GC threads this early. A value of
    // 0 is used to indicate serial work. When parallel work is done,
    // it will be set.
    // ...

      // CM reference discovery will be re-enabled if necessary.
    }

    // We should do this after we potentially expand the heap so
    // that all the COMMIT events are generated before the end GC
    // event, and after we retire the GC alloc regions so that all
    // RETIRE events are generated before the end GC event.
    _hr_printer.end_gc(false /* full */, (size_t) total_collections());

    if (mark_in_progress()) {
      concurrent_mark()->update_g1_committed();
    }

#ifdef TRACESPINNING
    ParallelTaskTerminator::print_termination_counts();
#endif

    gc_epilogue(false);

    log_gc_footer(os::elapsedTime() - pause_start_sec);
  }

  // It is not yet safe to tell the concurrent mark thread to
  // start, as we have some optional output below. We don't want the
  // output from the concurrent mark thread interfering with this
  // logging output either.

  _hrs.verify_optional();
  verify_region_sets_optional();

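  // In TASKQUEUE_STATS builds, optionally dump (guarded by ParallelGCVerbose)
  // and then reset the per-worker task queue statistics gathered during the pause.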
  TASKQUEUE_STATS_ONLY(if (ParallelGCVerbose) print_taskqueue_stats());
  TASKQUEUE_STATS_ONLY(reset_taskqueue_stats());

  print_heap_after_gc();

  // We must call G1MonitoringSupport::update_sizes() in the same scoping level
  // as an active TraceMemoryManagerStats object (i.e. before the destructor for the
  // TraceMemoryManagerStats is called) so that the G1 memory pools are updated
  // before any GC notifications are raised.
  g1mm()->update_sizes();