
src/hotspot/share/gc/g1/g1Policy.cpp

rev 60584 : imported patch 8245511-ihop


  40 #include "gc/g1/g1Policy.hpp"
  41 #include "gc/g1/g1SurvivorRegions.hpp"
  42 #include "gc/g1/g1YoungGenSizer.hpp"
  43 #include "gc/g1/heapRegion.inline.hpp"
  44 #include "gc/g1/heapRegionRemSet.hpp"
  45 #include "gc/shared/concurrentGCBreakpoints.hpp"
  46 #include "gc/shared/gcPolicyCounters.hpp"
  47 #include "logging/log.hpp"
  48 #include "runtime/arguments.hpp"
  49 #include "runtime/java.hpp"
  50 #include "runtime/mutexLocker.hpp"
  51 #include "utilities/debug.hpp"
  52 #include "utilities/growableArray.hpp"
  53 #include "utilities/pair.hpp"
  54 
  55 G1Policy::G1Policy(STWGCTimer* gc_timer) :
  56   _predictor(G1ConfidencePercent / 100.0),
  57   _analytics(new G1Analytics(&_predictor)),
  58   _remset_tracker(),
  59   _mmu_tracker(new G1MMUTrackerQueue(GCPauseIntervalMillis / 1000.0, MaxGCPauseMillis / 1000.0)),
  60   _ihop_control(create_ihop_control(&_predictor)),

  61   _policy_counters(new GCPolicyCounters("GarbageFirst", 1, 2)),
  62   _full_collection_start_sec(0.0),
  63   _young_list_target_length(0),
  64   _young_list_fixed_length(0),
  65   _young_list_max_length(0),
  66   _eden_surv_rate_group(new G1SurvRateGroup()),
  67   _survivor_surv_rate_group(new G1SurvRateGroup()),
  68   _reserve_factor((double) G1ReservePercent / 100.0),
  69   _reserve_regions(0),
  70   _young_gen_sizer(G1YoungGenSizer::create_gen_sizer()),
  71   _free_regions_at_end_of_collection(0),
  72   _rs_length(0),
  73   _rs_length_prediction(0),
  74   _pending_cards_at_gc_start(0),
  75   _old_gen_alloc_tracker(),
  76   _concurrent_start_to_mixed(),
  77   _collection_set(NULL),
  78   _g1h(NULL),
  79   _phase_times_timer(gc_timer),
  80   _phase_times(NULL),
  81   _mark_remark_start_sec(0),
  82   _mark_cleanup_start_sec(0),
  83   _tenuring_threshold(MaxTenuringThreshold),
  84   _max_survivor_regions(0),
  85   _survivors_age_table(true)
  86 {
  87 }
  88 
  89 G1Policy::~G1Policy() {
  90   delete _ihop_control;
  91   delete _young_gen_sizer;
  92 }
  93 
  94 G1Policy* G1Policy::create_policy(STWGCTimer* gc_timer_stw) {
  95   if (G1Arguments::is_heterogeneous_heap()) {


 452 
 453   collector_state()->set_in_full_gc(false);
 454 
 455   // "Nuke" the heuristics that control the young/mixed GC
 456   // transitions and make sure we start with young GCs after the Full GC.
 457   collector_state()->set_in_young_only_phase(true);
 458   collector_state()->set_in_young_gc_before_mixed(false);
 459   collector_state()->set_initiate_conc_mark_if_possible(need_to_start_conc_mark("end of Full GC", 0));
 460   collector_state()->set_in_concurrent_start_gc(false);
 461   collector_state()->set_mark_or_rebuild_in_progress(false);
 462   collector_state()->set_clearing_next_bitmap(false);
 463 
 464   _eden_surv_rate_group->start_adding_regions();
 465   // also call this on any additional surv rate groups
 466 
 467   _free_regions_at_end_of_collection = _g1h->num_free_regions();
 468   _survivor_surv_rate_group->reset();
 469   update_young_list_max_and_target_length();
 470   update_rs_length_prediction();
 471 
 472   _old_gen_alloc_tracker.reset_after_full_gc();
 473 
 474   record_pause(FullGC, _full_collection_start_sec, end_sec);
 475 }
 476 
 477 static void log_refinement_stats(const char* kind, const G1ConcurrentRefineStats& stats) {
 478   log_debug(gc, refine, stats)
 479            ("%s refinement: %.2fms, refined: " SIZE_FORMAT
 480             ", precleaned: " SIZE_FORMAT ", dirtied: " SIZE_FORMAT,
 481             kind,
 482             stats.refinement_time().seconds() * MILLIUNITS,
 483             stats.refined_cards(),
 484             stats.precleaned_cards(),
 485             stats.dirtied_cards());
 486 }
 487 
 488 void G1Policy::record_concurrent_refinement_stats() {
 489   G1DirtyCardQueueSet& dcqs = G1BarrierSet::dirty_card_queue_set();
 490   _pending_cards_at_gc_start = dcqs.num_cards();
 491 
 492   // Collect per-thread stats, mostly from mutator activity.


 787 
 788   assert(!(is_concurrent_start_pause(this_pause) && collector_state()->mark_or_rebuild_in_progress()),
 789          "If the last pause has been concurrent start, we should not have been in the marking window");
 790   if (is_concurrent_start_pause(this_pause)) {
 791     collector_state()->set_mark_or_rebuild_in_progress(true);
 792   }
 793 
 794   _free_regions_at_end_of_collection = _g1h->num_free_regions();
 795 
 796   update_rs_length_prediction();
 797 
 798   // Do not update dynamic IHOP due to G1 periodic collection as it is highly likely
 799   // that in this case we are not running in a "normal" operating mode.
 800   if (_g1h->gc_cause() != GCCause::_g1_periodic_collection) {
 801     // IHOP control wants to know the expected young gen length if it were not
 802     // restrained by the heap reserve. Using the actual length would make the
  803     // prediction too small and thus limit the young gen every time we get to the
 804     // predicted target occupancy.
 805     size_t last_unrestrained_young_length = update_young_list_max_and_target_length();
 806 
 807     _old_gen_alloc_tracker.reset_after_young_gc(app_time_ms / 1000.0);
 808     update_ihop_prediction(_old_gen_alloc_tracker.last_cycle_duration(),
 809                            _old_gen_alloc_tracker.last_cycle_old_bytes(),
 810                            last_unrestrained_young_length * HeapRegion::GrainBytes,
 811                            is_young_only_pause(this_pause));
 812 
 813     _ihop_control->send_trace_event(_g1h->gc_tracer_stw());
 814   } else {
 815     // Any garbage collection triggered as periodic collection resets the time-to-mixed
 816     // measurement. Periodic collection typically means that the application is "inactive", i.e.
  817     // the marking threads may have received an uncharacteristic amount of cpu time
  818     // for completing the marking, i.e. they are faster than expected.
  819     // This skews the predicted marking length towards smaller values, which might cause
  820     // the mark to start too late.
 821     _concurrent_start_to_mixed.reset();
 822   }
 823 
 824   // Note that _mmu_tracker->max_gc_time() returns the time in seconds.
 825   double scan_logged_cards_time_goal_ms = _mmu_tracker->max_gc_time() * MILLIUNITS * G1RSetUpdatingPauseTimePercent / 100.0;
 826 
 827   if (scan_logged_cards_time_goal_ms < merge_hcc_time_ms) {
  828     log_debug(gc, ergo, refine)("Adjust concurrent refinement thresholds (scanning the HCC expected to take longer than Update RS time goal). "
 829                                 "Logged Cards Scan time goal: %1.2fms Scan HCC time: %1.2fms",
 830                                 scan_logged_cards_time_goal_ms, merge_hcc_time_ms);
 831 
 832     scan_logged_cards_time_goal_ms = 0;
 833   } else {
 834     scan_logged_cards_time_goal_ms -= merge_hcc_time_ms;
 835   }
 836 
 837   double const logged_cards_time = logged_cards_processing_time();
 838 
 839   log_debug(gc, ergo, refine)("Concurrent refinement times: Logged Cards Scan time goal: %1.2fms Logged Cards Scan time: %1.2fms HCC time: %1.2fms",
 840                               scan_logged_cards_time_goal_ms, logged_cards_time, merge_hcc_time_ms);
 841 
 842   _g1h->concurrent_refine()->adjust(logged_cards_time,
 843                                     phase_times()->sum_thread_work_items(G1GCPhaseTimes::MergeLB, G1GCPhaseTimes::MergeLBDirtyCards),
 844                                     scan_logged_cards_time_goal_ms);
 845 }
 846 
  847 G1IHOPControl* G1Policy::create_ihop_control(const G1Predictions* predictor) {

 848   if (G1UseAdaptiveIHOP) {
 849     return new G1AdaptiveIHOPControl(InitiatingHeapOccupancyPercent,

 850                                      predictor,
 851                                      G1ReservePercent,
 852                                      G1HeapWastePercent);
 853   } else {
 854     return new G1StaticIHOPControl(InitiatingHeapOccupancyPercent);
 855   }
 856 }
 857 
 858 void G1Policy::update_ihop_prediction(double mutator_time_s,
 859                                       size_t mutator_alloc_bytes,
 860                                       size_t young_gen_size,
 861                                       bool this_gc_was_young_only) {
 862   // Always try to update IHOP prediction. Even evacuation failures give information
 863   // about e.g. whether to start IHOP earlier next time.
 864 
 865   // Avoid using really small application times that might create samples with
 866   // very high or very low values. They may be caused by e.g. back-to-back gcs.
 867   double const min_valid_time = 1e-6;
 868 
 869   bool report = false;
 870 
 871   double marking_to_mixed_time = -1.0;
 872   if (!this_gc_was_young_only && _concurrent_start_to_mixed.has_result()) {
 873     marking_to_mixed_time = _concurrent_start_to_mixed.last_marking_time();
 874     assert(marking_to_mixed_time > 0.0,
 875            "Concurrent start to mixed time must be larger than zero but is %.3f",
 876            marking_to_mixed_time);
 877     if (marking_to_mixed_time > min_valid_time) {
 878       _ihop_control->update_marking_length(marking_to_mixed_time);
 879       report = true;
 880     }
 881   }
 882 
  883   // As an approximation for the young gc promotion rates during marking we use samples
  884   // from all young gcs, not only those within the marking window. In many applications
  885   // there are only a few, if any, young gcs during marking, which would make a prediction
  886   // based on those alone useless; using all samples increases the prediction's accuracy.
 887   if (this_gc_was_young_only && mutator_time_s > min_valid_time) {
 888     _ihop_control->update_allocation_info(mutator_time_s, mutator_alloc_bytes, young_gen_size);
 889     report = true;
 890   }
 891 
 892   if (report) {
 893     report_ihop_statistics();
 894   }
 895 }
 896 
 897 void G1Policy::report_ihop_statistics() {
 898   _ihop_control->print();
 899 }
 900 
 901 void G1Policy::print_phases() {
 902   phase_times()->print();
 903 }
 904 
 905 double G1Policy::predict_base_elapsed_time_ms(size_t pending_cards,
 906                                               size_t rs_length) const {
 907   size_t effective_scanned_cards = _analytics->predict_scan_card_num(rs_length, collector_state()->in_young_only_phase());
 908   return




  40 #include "gc/g1/g1Policy.hpp"
  41 #include "gc/g1/g1SurvivorRegions.hpp"
  42 #include "gc/g1/g1YoungGenSizer.hpp"
  43 #include "gc/g1/heapRegion.inline.hpp"
  44 #include "gc/g1/heapRegionRemSet.hpp"
  45 #include "gc/shared/concurrentGCBreakpoints.hpp"
  46 #include "gc/shared/gcPolicyCounters.hpp"
  47 #include "logging/log.hpp"
  48 #include "runtime/arguments.hpp"
  49 #include "runtime/java.hpp"
  50 #include "runtime/mutexLocker.hpp"
  51 #include "utilities/debug.hpp"
  52 #include "utilities/growableArray.hpp"
  53 #include "utilities/pair.hpp"
  54 
  55 G1Policy::G1Policy(STWGCTimer* gc_timer) :
  56   _predictor(G1ConfidencePercent / 100.0),
  57   _analytics(new G1Analytics(&_predictor)),
  58   _remset_tracker(),
  59   _mmu_tracker(new G1MMUTrackerQueue(GCPauseIntervalMillis / 1000.0, MaxGCPauseMillis / 1000.0)),
  60   _old_gen_alloc_tracker(),
  61   _ihop_control(create_ihop_control(&_old_gen_alloc_tracker, &_predictor)),
  62   _policy_counters(new GCPolicyCounters("GarbageFirst", 1, 2)),
  63   _full_collection_start_sec(0.0),
  64   _young_list_target_length(0),
  65   _young_list_fixed_length(0),
  66   _young_list_max_length(0),
  67   _eden_surv_rate_group(new G1SurvRateGroup()),
  68   _survivor_surv_rate_group(new G1SurvRateGroup()),
  69   _reserve_factor((double) G1ReservePercent / 100.0),
  70   _reserve_regions(0),
  71   _young_gen_sizer(G1YoungGenSizer::create_gen_sizer()),
  72   _free_regions_at_end_of_collection(0),
  73   _rs_length(0),
  74   _rs_length_prediction(0),
  75   _pending_cards_at_gc_start(0),

  76   _concurrent_start_to_mixed(),
  77   _collection_set(NULL),
  78   _g1h(NULL),
  79   _phase_times_timer(gc_timer),
  80   _phase_times(NULL),
  81   _mark_remark_start_sec(0),
  82   _mark_cleanup_start_sec(0),
  83   _tenuring_threshold(MaxTenuringThreshold),
  84   _max_survivor_regions(0),
  85   _survivors_age_table(true)
  86 {
  87 }
  88 
  89 G1Policy::~G1Policy() {
  90   delete _ihop_control;
  91   delete _young_gen_sizer;
  92 }
  93 
  94 G1Policy* G1Policy::create_policy(STWGCTimer* gc_timer_stw) {
  95   if (G1Arguments::is_heterogeneous_heap()) {


 452 
 453   collector_state()->set_in_full_gc(false);
 454 
 455   // "Nuke" the heuristics that control the young/mixed GC
 456   // transitions and make sure we start with young GCs after the Full GC.
 457   collector_state()->set_in_young_only_phase(true);
 458   collector_state()->set_in_young_gc_before_mixed(false);
 459   collector_state()->set_initiate_conc_mark_if_possible(need_to_start_conc_mark("end of Full GC", 0));
 460   collector_state()->set_in_concurrent_start_gc(false);
 461   collector_state()->set_mark_or_rebuild_in_progress(false);
 462   collector_state()->set_clearing_next_bitmap(false);
 463 
 464   _eden_surv_rate_group->start_adding_regions();
 465   // also call this on any additional surv rate groups
 466 
 467   _free_regions_at_end_of_collection = _g1h->num_free_regions();
 468   _survivor_surv_rate_group->reset();
 469   update_young_list_max_and_target_length();
 470   update_rs_length_prediction();
 471 
 472   _old_gen_alloc_tracker.reset_after_gc(_g1h->humongous_regions_count() * HeapRegion::GrainBytes);
 473 
 474   record_pause(FullGC, _full_collection_start_sec, end_sec);
 475 }
 476 
 477 static void log_refinement_stats(const char* kind, const G1ConcurrentRefineStats& stats) {
 478   log_debug(gc, refine, stats)
 479            ("%s refinement: %.2fms, refined: " SIZE_FORMAT
 480             ", precleaned: " SIZE_FORMAT ", dirtied: " SIZE_FORMAT,
 481             kind,
 482             stats.refinement_time().seconds() * MILLIUNITS,
 483             stats.refined_cards(),
 484             stats.precleaned_cards(),
 485             stats.dirtied_cards());
 486 }
 487 
 488 void G1Policy::record_concurrent_refinement_stats() {
 489   G1DirtyCardQueueSet& dcqs = G1BarrierSet::dirty_card_queue_set();
 490   _pending_cards_at_gc_start = dcqs.num_cards();
 491 
 492   // Collect per-thread stats, mostly from mutator activity.


 787 
 788   assert(!(is_concurrent_start_pause(this_pause) && collector_state()->mark_or_rebuild_in_progress()),
 789          "If the last pause has been concurrent start, we should not have been in the marking window");
 790   if (is_concurrent_start_pause(this_pause)) {
 791     collector_state()->set_mark_or_rebuild_in_progress(true);
 792   }
 793 
 794   _free_regions_at_end_of_collection = _g1h->num_free_regions();
 795 
 796   update_rs_length_prediction();
 797 
 798   // Do not update dynamic IHOP due to G1 periodic collection as it is highly likely
 799   // that in this case we are not running in a "normal" operating mode.
 800   if (_g1h->gc_cause() != GCCause::_g1_periodic_collection) {
 801     // IHOP control wants to know the expected young gen length if it were not
 802     // restrained by the heap reserve. Using the actual length would make the
  803     // prediction too small and thus limit the young gen every time we get to the
 804     // predicted target occupancy.
 805     size_t last_unrestrained_young_length = update_young_list_max_and_target_length();
 806 
 807     _old_gen_alloc_tracker.reset_after_gc(_g1h->humongous_regions_count() * HeapRegion::GrainBytes);
 808     update_ihop_prediction(app_time_ms / 1000.0,

 809                            last_unrestrained_young_length * HeapRegion::GrainBytes,
 810                            is_young_only_pause(this_pause));
 811 
 812     _ihop_control->send_trace_event(_g1h->gc_tracer_stw());
 813   } else {
 814     // Any garbage collection triggered as periodic collection resets the time-to-mixed
 815     // measurement. Periodic collection typically means that the application is "inactive", i.e.
  816     // the marking threads may have received an uncharacteristic amount of cpu time
  817     // for completing the marking, i.e. they are faster than expected.
  818     // This skews the predicted marking length towards smaller values, which might cause
  819     // the mark to start too late.
 820     _concurrent_start_to_mixed.reset();
 821   }
 822 
 823   // Note that _mmu_tracker->max_gc_time() returns the time in seconds.
 824   double scan_logged_cards_time_goal_ms = _mmu_tracker->max_gc_time() * MILLIUNITS * G1RSetUpdatingPauseTimePercent / 100.0;
 825 
 826   if (scan_logged_cards_time_goal_ms < merge_hcc_time_ms) {
  827     log_debug(gc, ergo, refine)("Adjust concurrent refinement thresholds (scanning the HCC expected to take longer than Update RS time goal). "
 828                                 "Logged Cards Scan time goal: %1.2fms Scan HCC time: %1.2fms",
 829                                 scan_logged_cards_time_goal_ms, merge_hcc_time_ms);
 830 
 831     scan_logged_cards_time_goal_ms = 0;
 832   } else {
 833     scan_logged_cards_time_goal_ms -= merge_hcc_time_ms;
 834   }
 835 
 836   double const logged_cards_time = logged_cards_processing_time();
 837 
 838   log_debug(gc, ergo, refine)("Concurrent refinement times: Logged Cards Scan time goal: %1.2fms Logged Cards Scan time: %1.2fms HCC time: %1.2fms",
 839                               scan_logged_cards_time_goal_ms, logged_cards_time, merge_hcc_time_ms);
 840 
 841   _g1h->concurrent_refine()->adjust(logged_cards_time,
 842                                     phase_times()->sum_thread_work_items(G1GCPhaseTimes::MergeLB, G1GCPhaseTimes::MergeLBDirtyCards),
 843                                     scan_logged_cards_time_goal_ms);
 844 }
 845 
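A quick worked example of the scan time goal computed above, using the JDK default settings (MaxGCPauseMillis = 200, so _mmu_tracker->max_gc_time() is 0.2 s, and G1RSetUpdatingPauseTimePercent = 10): the goal is 0.2 * 1000 * 10 / 100 = 20 ms. If merging the hot card cache (merge_hcc_time_ms) took 5 ms, 15 ms remain as the target handed to concurrent_refine()->adjust(); if merging the HCC alone took more than 20 ms, the goal is clamped to 0.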
 846 G1IHOPControl* G1Policy::create_ihop_control(const G1OldGenAllocationTracker* old_gen_alloc_tracker,
 847                                              const G1Predictions* predictor) {
 848   if (G1UseAdaptiveIHOP) {
 849     return new G1AdaptiveIHOPControl(InitiatingHeapOccupancyPercent,
 850                                      old_gen_alloc_tracker,
 851                                      predictor,
 852                                      G1ReservePercent,
 853                                      G1HeapWastePercent);
 854   } else {
 855     return new G1StaticIHOPControl(InitiatingHeapOccupancyPercent, old_gen_alloc_tracker);
 856   }
 857 }
 858 
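For orientation, the adaptive controller constructed above roughly works backwards from a target occupancy: it subtracts the space expected to be consumed while marking runs (old gen allocation at the predicted rate over the predicted marking time, plus the unrestrained young gen size fed in by update_ihop_prediction()) from the usable part of the heap, and asks for marking to start once occupancy reaches what is left. The standalone sketch below only illustrates that arithmetic; every name in it is hypothetical and it is not the G1AdaptiveIHOPControl implementation.

#include <cstddef>
#include <cstdio>

// Hypothetical, self-contained sketch of the adaptive IHOP idea: start
// marking early enough that it can finish before the heap reaches the
// target occupancy. Names and the exact formula are illustrative
// assumptions, not the actual G1AdaptiveIHOPControl code.
static size_t sketch_adaptive_ihop_threshold(size_t target_occupancy_bytes,
                                             double heap_waste_percent,
                                             double predicted_marking_time_s,
                                             double predicted_old_alloc_rate_bps,
                                             size_t predicted_young_gen_bytes) {
  // Treat only this fraction of the target occupancy as usable (mirrors the
  // G1HeapWastePercent value passed to the controller above).
  size_t usable = (size_t)((double)target_occupancy_bytes * (100.0 - heap_waste_percent) / 100.0);
  // Bytes expected to be needed while marking runs: old gen allocation at the
  // predicted rate over the predicted marking time, plus the (unrestrained)
  // young gen that must still fit during that time.
  size_t needed_during_marking =
      (size_t)(predicted_marking_time_s * predicted_old_alloc_rate_bps) + predicted_young_gen_bytes;
  // Start marking once occupancy reaches whatever is left below the usable limit.
  return needed_during_marking < usable ? usable - needed_during_marking : 0;
}

int main() {
  // Example: 8 GB target occupancy, 5% waste, 2 s predicted marking time,
  // 50 MB/s old gen allocation, 1 GB unrestrained young gen.
  size_t threshold = sketch_adaptive_ihop_threshold(8ull * 1024 * 1024 * 1024,
                                                    5.0,
                                                    2.0,
                                                    50.0 * 1024 * 1024,
                                                    1ull * 1024 * 1024 * 1024);
  printf("sketched concurrent start threshold: %zu bytes\n", threshold);
  return 0;
}

In the real policy the marking time and allocation predictions come from the G1Predictions object, and with this patch the G1OldGenAllocationTracker is handed to create_ihop_control() as well, presumably so the controller can read old gen allocation data directly instead of having it passed in through update_allocation_info().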
 859 void G1Policy::update_ihop_prediction(double mutator_time_s,

 860                                       size_t young_gen_size,
 861                                       bool this_gc_was_young_only) {
 862   // Always try to update IHOP prediction. Even evacuation failures give information
 863   // about e.g. whether to start IHOP earlier next time.
 864 
 865   // Avoid using really small application times that might create samples with
 866   // very high or very low values. They may be caused by e.g. back-to-back gcs.
 867   double const min_valid_time = 1e-6;
 868 
 869   bool report = false;
 870 
 871   double marking_to_mixed_time = -1.0;
 872   if (!this_gc_was_young_only && _concurrent_start_to_mixed.has_result()) {
 873     marking_to_mixed_time = _concurrent_start_to_mixed.last_marking_time();
 874     assert(marking_to_mixed_time > 0.0,
 875            "Concurrent start to mixed time must be larger than zero but is %.3f",
 876            marking_to_mixed_time);
 877     if (marking_to_mixed_time > min_valid_time) {
 878       _ihop_control->update_marking_length(marking_to_mixed_time);
 879       report = true;
 880     }
 881   }
 882 
  883   // As an approximation for the young gc promotion rates during marking we use samples
  884   // from all young gcs, not only those within the marking window. In many applications
  885   // there are only a few, if any, young gcs during marking, which would make a prediction
  886   // based on those alone useless; using all samples increases the prediction's accuracy.
 887   if (this_gc_was_young_only && mutator_time_s > min_valid_time) {
 888     _ihop_control->update_allocation_info(mutator_time_s, young_gen_size);
 889     report = true;
 890   }
 891 
 892   if (report) {
 893     report_ihop_statistics();
 894   }
 895 }
 896 
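As a concrete example of the updated flow above: after a young-only pause where the mutator ran for 0.5 s and the unrestrained young gen was 512 MB, the code calls _ihop_control->update_allocation_info(0.5, young_gen_size) with young_gen_size of 512 MB and then report_ihop_statistics(); a pause following back-to-back GCs, with mutator_time_s below the 1e-6 s cutoff, updates and reports nothing (unless the marking-to-mixed branch fired). Note that the allocated byte count is no longer passed along here; the controller presumably obtains the allocation volume from the G1OldGenAllocationTracker it is now constructed with.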
 897 void G1Policy::report_ihop_statistics() {
 898   _ihop_control->print();
 899 }
 900 
 901 void G1Policy::print_phases() {
 902   phase_times()->print();
 903 }
 904 
 905 double G1Policy::predict_base_elapsed_time_ms(size_t pending_cards,
 906                                               size_t rs_length) const {
 907   size_t effective_scanned_cards = _analytics->predict_scan_card_num(rs_length, collector_state()->in_young_only_phase());
 908   return

