4197 _hr_printer.end_gc(false /* full */, (size_t) total_collections());
4198
// Only compiled in when TRACESPINNING is defined: dump the work-stealing
// termination spin counters accumulated during the pause.
4199 #ifdef TRACESPINNING
4200 ParallelTaskTerminator::print_termination_counts();
4201 #endif
4202
// false => epilogue for an incremental (non-full) collection, matching the
// "false /* full */" passed to the printer above.
4203 gc_epilogue(false);
4204 }
4205
4206 // Print the remainder of the GC log output.
4207 log_gc_footer(os::elapsedTime() - pause_start_sec);
4208
4209 // It is not yet safe to tell the concurrent mark to
4210 // start as we have some optional output below. We don't want the
4211 // output from the concurrent mark thread interfering with this
4212 // logging output either.
4213
4214 _hrm.verify_optional();
4215 verify_region_sets_optional();
4216
// Task-queue statistics only exist when built with TASKQUEUE_STATS;
// printing is additionally gated on ParallelGCVerbose.
4217 TASKQUEUE_STATS_ONLY(if (ParallelGCVerbose) print_taskqueue_stats());
4218 TASKQUEUE_STATS_ONLY(reset_taskqueue_stats());
4219
4220 print_heap_after_gc();
4221 trace_heap_after_gc(_gc_tracer_stw);
4222
4223 // We must call G1MonitoringSupport::update_sizes() in the same scoping level
4224 // as an active TraceMemoryManagerStats object (i.e. before the destructor for the
4225 // TraceMemoryManagerStats is called) so that the G1 memory pools are updated
4226 // before any GC notifications are raised.
4227 g1mm()->update_sizes();
4228
// Report pause-wide evacuation details and close out the STW timer/tracer.
4229 _gc_tracer_stw->report_evacuation_info(&evacuation_info);
4230 _gc_tracer_stw->report_tenuring_threshold(_g1_policy->tenuring_threshold());
4231 _gc_timer_stw->register_gc_end();
4232 _gc_tracer_stw->report_gc_end(_gc_timer_stw->gc_end(), _gc_timer_stw->time_partitions());
4233 }
4234 // It should now be safe to tell the concurrent mark thread to start
4235 // without its logging output interfering with the logging output
4236 // that came from the pause.
4237
// (fragment) Tail of the argument list for the per-worker root-processing
// call; the call itself begins above this chunk — confirm against the full
// file. The closures evacuate roots into this worker's scan state (pss).
4781 &push_heap_rs_cl,
4782 strong_cld_cl,
4783 weak_cld_cl,
4784 strong_code_cl,
4785 worker_id);
4786
4787 pss.end_strong_roots();
4788
4789 {
// Time the evacuate-followers phase; the termination-wait portion is
// measured separately so it can be subtracted from object-copy time.
4790 double start = os::elapsedTime();
4791 G1ParEvacuateFollowersClosure evac(_g1h, &pss, _queues, &_terminator);
4792 evac.do_void();
4793 double elapsed_ms = (os::elapsedTime()-start)*1000.0;
4794 double term_ms = pss.term_time()*1000.0;
// Object-copy time = total followers time minus time spent in termination.
4795 _g1h->g1_policy()->phase_times()->add_obj_copy_time(worker_id, elapsed_ms-term_ms);
4796 _g1h->g1_policy()->phase_times()->record_termination(worker_id, term_ms, pss.term_attempts());
4797 }
4798 _g1h->g1_policy()->record_thread_age_table(pss.age_table());
// NOTE(review): the "+1" offsets into the surviving-young-words array;
// presumably slot 0 is a reserved/unused entry — confirm against
// G1ParScanThreadState's array layout.
4799 _g1h->update_surviving_young_words(pss.surviving_young_words()+1);
4800
// Per-worker termination-stat output is serialized via stats_lock().
4801 if (ParallelGCVerbose) {
4802 MutexLocker x(stats_lock());
4803 pss.print_termination_stats(worker_id);
4804 }
4805
4806 assert(pss.queue_is_empty(), "should be empty");
4807
4808 // Close the inner scope so that the ResourceMark and HandleMark
4809 // destructors are executed here and are included as part of the
4810 // "GC Worker Time".
4811 }
4812
4813 double end_time_ms = os::elapsedTime() * 1000.0;
4814 _g1h->g1_policy()->phase_times()->record_gc_worker_end_time(worker_id, end_time_ms);
4815 }
4816 };
4817
4818 // *** Common G1 Evacuation Stuff
4819
4820 // This method is run in a GC worker.
4821
5872
5873 G1ParTask g1_par_task(this, _task_queues);
5874
// NULL: no single shared evac-failure closure is installed here — the
// individual worker threads set their own (see the comment in the
// parallel branch below).
5875 init_for_evac_failure(NULL);
5876
5877 rem_set()->prepare_for_younger_refs_iterate(true);
5878
5879 assert(dirty_card_queue_set().completed_buffers_num() == 0, "Should be empty");
// Bracket the parallel root-processing/evacuation work so that only the
// truly parallel portion is reported (see the scope-closing comment below).
5880 double start_par_time_sec = os::elapsedTime();
5881 double end_par_time_sec;
5882
5883 {
5884 StrongRootsScope srs(this);
5885 // InitialMark needs claim bits to keep track of the marked-through CLDs.
5886 if (g1_policy()->during_initial_mark_pause()) {
5887 ClassLoaderDataGraph::clear_claimed_marks();
5888 }
5889
5890 if (G1CollectedHeap::use_parallel_gc_threads()) {
5891 // The individual threads will set their evac-failure closures.
5892 if (ParallelGCVerbose) G1ParScanThreadState::print_termination_stats_hdr();
5893 // These tasks use SharedHeap::_process_strong_tasks
5894 assert(UseDynamicNumberOfGCThreads ||
5895 workers()->active_workers() == workers()->total_workers(),
5896 "If not dynamic should be using all the workers");
5897 workers()->run_task(&g1_par_task);
5898 } else {
// Serial fallback: run the whole task on this thread as worker 0.
5899 g1_par_task.set_for_termination(n_workers);
5900 g1_par_task.work(0);
5901 }
5902 end_par_time_sec = os::elapsedTime();
5903
5904 // Closing the inner scope will execute the destructor
5905 // for the StrongRootsScope object. We record the current
5906 // elapsed time before closing the scope so that time
5907 // taken for the SRS destructor is NOT included in the
5908 // reported parallel time.
5909 }
5910
5911 double par_time_ms = (end_par_time_sec - start_par_time_sec) * 1000.0;
5912 g1_policy()->phase_times()->record_par_time(par_time_ms);
|
// NOTE(review): this chunk duplicates the one earlier in the paste, with
// the ParallelGCVerbose gate replaced by PrintTaskqueue — it looks like
// the "after" side of a before/after diff.
4197 _hr_printer.end_gc(false /* full */, (size_t) total_collections());
4198
// Only compiled in when TRACESPINNING is defined: dump the work-stealing
// termination spin counters accumulated during the pause.
4199 #ifdef TRACESPINNING
4200 ParallelTaskTerminator::print_termination_counts();
4201 #endif
4202
// false => epilogue for an incremental (non-full) collection, matching the
// "false /* full */" passed to the printer above.
4203 gc_epilogue(false);
4204 }
4205
4206 // Print the remainder of the GC log output.
4207 log_gc_footer(os::elapsedTime() - pause_start_sec);
4208
4209 // It is not yet safe to tell the concurrent mark to
4210 // start as we have some optional output below. We don't want the
4211 // output from the concurrent mark thread interfering with this
4212 // logging output either.
4213
4214 _hrm.verify_optional();
4215 verify_region_sets_optional();
4216
// Task-queue statistics only exist when built with TASKQUEUE_STATS;
// printing is additionally gated on PrintTaskqueue.
4217 TASKQUEUE_STATS_ONLY(if (PrintTaskqueue) print_taskqueue_stats());
4218 TASKQUEUE_STATS_ONLY(reset_taskqueue_stats());
4219
4220 print_heap_after_gc();
4221 trace_heap_after_gc(_gc_tracer_stw);
4222
4223 // We must call G1MonitoringSupport::update_sizes() in the same scoping level
4224 // as an active TraceMemoryManagerStats object (i.e. before the destructor for the
4225 // TraceMemoryManagerStats is called) so that the G1 memory pools are updated
4226 // before any GC notifications are raised.
4227 g1mm()->update_sizes();
4228
// Report pause-wide evacuation details and close out the STW timer/tracer.
4229 _gc_tracer_stw->report_evacuation_info(&evacuation_info);
4230 _gc_tracer_stw->report_tenuring_threshold(_g1_policy->tenuring_threshold());
4231 _gc_timer_stw->register_gc_end();
4232 _gc_tracer_stw->report_gc_end(_gc_timer_stw->gc_end(), _gc_timer_stw->time_partitions());
4233 }
4234 // It should now be safe to tell the concurrent mark thread to start
4235 // without its logging output interfering with the logging output
4236 // that came from the pause.
4237
// (fragment) Tail of the argument list for the per-worker root-processing
// call; the call itself begins above this chunk — confirm against the full
// file. The closures evacuate roots into this worker's scan state (pss).
4781 &push_heap_rs_cl,
4782 strong_cld_cl,
4783 weak_cld_cl,
4784 strong_code_cl,
4785 worker_id);
4786
4787 pss.end_strong_roots();
4788
4789 {
// Time the evacuate-followers phase; the termination-wait portion is
// measured separately so it can be subtracted from object-copy time.
4790 double start = os::elapsedTime();
4791 G1ParEvacuateFollowersClosure evac(_g1h, &pss, _queues, &_terminator);
4792 evac.do_void();
4793 double elapsed_ms = (os::elapsedTime()-start)*1000.0;
4794 double term_ms = pss.term_time()*1000.0;
// Object-copy time = total followers time minus time spent in termination.
4795 _g1h->g1_policy()->phase_times()->add_obj_copy_time(worker_id, elapsed_ms-term_ms);
4796 _g1h->g1_policy()->phase_times()->record_termination(worker_id, term_ms, pss.term_attempts());
4797 }
4798 _g1h->g1_policy()->record_thread_age_table(pss.age_table());
// NOTE(review): the "+1" offsets into the surviving-young-words array;
// presumably slot 0 is a reserved/unused entry — confirm against
// G1ParScanThreadState's array layout.
4799 _g1h->update_surviving_young_words(pss.surviving_young_words()+1);
4800
// Per-worker termination-stat output is serialized via stats_lock().
// (This version gates on PrintTerminationStats instead of ParallelGCVerbose.)
4801 if (PrintTerminationStats) {
4802 MutexLocker x(stats_lock());
4803 pss.print_termination_stats(worker_id);
4804 }
4805
4806 assert(pss.queue_is_empty(), "should be empty");
4807
4808 // Close the inner scope so that the ResourceMark and HandleMark
4809 // destructors are executed here and are included as part of the
4810 // "GC Worker Time".
4811 }
4812
4813 double end_time_ms = os::elapsedTime() * 1000.0;
4814 _g1h->g1_policy()->phase_times()->record_gc_worker_end_time(worker_id, end_time_ms);
4815 }
4816 };
4817
4818 // *** Common G1 Evacuation Stuff
4819
4820 // This method is run in a GC worker.
4821
5872
5873 G1ParTask g1_par_task(this, _task_queues);
5874
// NULL: no single shared evac-failure closure is installed here — the
// individual worker threads set their own (see the comment in the
// parallel branch below).
5875 init_for_evac_failure(NULL);
5876
5877 rem_set()->prepare_for_younger_refs_iterate(true);
5878
5879 assert(dirty_card_queue_set().completed_buffers_num() == 0, "Should be empty");
// Bracket the parallel root-processing/evacuation work so that only the
// truly parallel portion is reported (see the scope-closing comment below).
5880 double start_par_time_sec = os::elapsedTime();
5881 double end_par_time_sec;
5882
5883 {
5884 StrongRootsScope srs(this);
5885 // InitialMark needs claim bits to keep track of the marked-through CLDs.
5886 if (g1_policy()->during_initial_mark_pause()) {
5887 ClassLoaderDataGraph::clear_claimed_marks();
5888 }
5889
5890 if (G1CollectedHeap::use_parallel_gc_threads()) {
5891 // The individual threads will set their evac-failure closures.
5892 if (PrintTerminationStats) G1ParScanThreadState::print_termination_stats_hdr();
5893 // These tasks use SharedHeap::_process_strong_tasks
5894 assert(UseDynamicNumberOfGCThreads ||
5895 workers()->active_workers() == workers()->total_workers(),
5896 "If not dynamic should be using all the workers");
5897 workers()->run_task(&g1_par_task);
5898 } else {
// Serial fallback: run the whole task on this thread as worker 0.
5899 g1_par_task.set_for_termination(n_workers);
5900 g1_par_task.work(0);
5901 }
5902 end_par_time_sec = os::elapsedTime();
5903
5904 // Closing the inner scope will execute the destructor
5905 // for the StrongRootsScope object. We record the current
5906 // elapsed time before closing the scope so that time
5907 // taken for the SRS destructor is NOT included in the
5908 // reported parallel time.
5909 }
5910
5911 double par_time_ms = (end_par_time_sec - start_par_time_sec) * 1000.0;
5912 g1_policy()->phase_times()->record_par_time(par_time_ms);
|