1 /*
   2  * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 #include "gc_implementation/g1/concurrentG1Refine.hpp"
  27 #include "gc_implementation/g1/concurrentMark.hpp"
  28 #include "gc_implementation/g1/concurrentMarkThread.inline.hpp"
  29 #include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
  30 #include "gc_implementation/g1/g1CollectorPolicy.hpp"
  31 #include "gc_implementation/g1/g1ErgoVerbose.hpp"
  32 #include "gc_implementation/g1/g1Log.hpp"
  33 #include "gc_implementation/g1/heapRegionRemSet.hpp"
  34 #include "gc_implementation/shared/gcPolicyCounters.hpp"
  35 #include "runtime/arguments.hpp"
  36 #include "runtime/java.hpp"
  37 #include "runtime/mutexLocker.hpp"
  38 #include "utilities/debug.hpp"
  39 
  40 // Different defaults for different number of GC threads
  41 // They were chosen by running GCOld and SPECjbb on debris with different
  42 //   numbers of GC threads and choosing them based on the results
  43 
  44 // all the same
  45 static double rs_length_diff_defaults[] = {
  46   0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0
  47 };
  48 
  49 static double cost_per_card_ms_defaults[] = {
  50   0.01, 0.005, 0.005, 0.003, 0.003, 0.002, 0.002, 0.0015
  51 };
  52 
  53 // all the same
  54 static double young_cards_per_entry_ratio_defaults[] = {
  55   1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0
  56 };
  57 
  58 static double cost_per_entry_ms_defaults[] = {
  59   0.015, 0.01, 0.01, 0.008, 0.008, 0.0055, 0.0055, 0.005
  60 };
  61 
  62 static double cost_per_byte_ms_defaults[] = {
  63   0.00006, 0.00003, 0.00003, 0.000015, 0.000015, 0.00001, 0.00001, 0.000009
  64 };
  65 
  66 // these should be pretty consistent
  67 static double constant_other_time_ms_defaults[] = {
  68   5.0, 5.0, 5.0, 5.0, 5.0, 5.0, 5.0, 5.0
  69 };
  70 
  71 
  72 static double young_other_cost_per_region_ms_defaults[] = {
  73   0.3, 0.2, 0.2, 0.15, 0.15, 0.12, 0.12, 0.1
  74 };
  75 
  76 static double non_young_other_cost_per_region_ms_defaults[] = {
  77   1.0, 0.7, 0.7, 0.5, 0.5, 0.42, 0.42, 0.30
  78 };
  79 
  80 // Help class for avoiding interleaved logging
  81 class LineBuffer: public StackObj {
  82 
  83 private:
  84   static const int BUFFER_LEN = 1024;
  85   static const int INDENT_CHARS = 3;
  86   char _buffer[BUFFER_LEN];
  87   int _indent_level;
  88   int _cur;
  89 
  90   void vappend(const char* format, va_list ap) {
  91     int res = vsnprintf(&_buffer[_cur], BUFFER_LEN - _cur, format, ap);
  92     if (res != -1) {
  93       _cur += res;
  94     } else {
  95       DEBUG_ONLY(warning("buffer too small in LineBuffer");)
  96       _buffer[BUFFER_LEN -1] = 0;
  97       _cur = BUFFER_LEN; // vsnprintf above should not add to _buffer if we are called again
  98     }
  99   }
 100 
 101 public:
 102   explicit LineBuffer(int indent_level): _indent_level(indent_level), _cur(0) {
 103     for (; (_cur < BUFFER_LEN && _cur < (_indent_level * INDENT_CHARS)); _cur++) {
 104       _buffer[_cur] = ' ';
 105     }
 106   }
 107 
 108 #ifndef PRODUCT
 109   ~LineBuffer() {
 110     assert(_cur == _indent_level * INDENT_CHARS, "pending data in buffer - append_and_print_cr() not called?");
 111   }
 112 #endif
 113 
 114   void append(const char* format, ...) {
 115     va_list ap;
 116     va_start(ap, format);
 117     vappend(format, ap);
 118     va_end(ap);
 119   }
 120 
 121   void append_and_print_cr(const char* format, ...) {
 122     va_list ap;
 123     va_start(ap, format);
 124     vappend(format, ap);
 125     va_end(ap);
 126     gclog_or_tty->print_cr("%s", _buffer);
 127     _cur = _indent_level * INDENT_CHARS;
 128   }
 129 };
 130 
 131 G1CollectorPolicy::G1CollectorPolicy() :
 132   _parallel_gc_threads(G1CollectedHeap::use_parallel_gc_threads()
 133                         ? ParallelGCThreads : 1),
 134 
 135   _recent_gc_times_ms(new TruncatedSeq(NumPrevPausesForHeuristics)),
 136   _stop_world_start(0.0),
 137 
 138   _cur_clear_ct_time_ms(0.0),
 139   _root_region_scan_wait_time_ms(0.0),
 140 
 141   _cur_ref_proc_time_ms(0.0),
 142   _cur_ref_enq_time_ms(0.0),
 143 
 144 #ifndef PRODUCT
 145   _min_clear_cc_time_ms(-1.0),
 146   _max_clear_cc_time_ms(-1.0),
 147   _cur_clear_cc_time_ms(0.0),
 148   _cum_clear_cc_time_ms(0.0),
 149   _num_cc_clears(0L),
 150 #endif
 151 
 152   _concurrent_mark_remark_times_ms(new TruncatedSeq(NumPrevPausesForHeuristics)),
 153   _concurrent_mark_cleanup_times_ms(new TruncatedSeq(NumPrevPausesForHeuristics)),
 154 
 155   _alloc_rate_ms_seq(new TruncatedSeq(TruncatedSeqLength)),
 156   _prev_collection_pause_end_ms(0.0),
 157   _pending_card_diff_seq(new TruncatedSeq(TruncatedSeqLength)),
 158   _rs_length_diff_seq(new TruncatedSeq(TruncatedSeqLength)),
 159   _cost_per_card_ms_seq(new TruncatedSeq(TruncatedSeqLength)),
 160   _young_cards_per_entry_ratio_seq(new TruncatedSeq(TruncatedSeqLength)),
 161   _mixed_cards_per_entry_ratio_seq(new TruncatedSeq(TruncatedSeqLength)),
 162   _cost_per_entry_ms_seq(new TruncatedSeq(TruncatedSeqLength)),
 163   _mixed_cost_per_entry_ms_seq(new TruncatedSeq(TruncatedSeqLength)),
 164   _cost_per_byte_ms_seq(new TruncatedSeq(TruncatedSeqLength)),
 165   _cost_per_byte_ms_during_cm_seq(new TruncatedSeq(TruncatedSeqLength)),
 166   _constant_other_time_ms_seq(new TruncatedSeq(TruncatedSeqLength)),
 167   _young_other_cost_per_region_ms_seq(new TruncatedSeq(TruncatedSeqLength)),
 168   _non_young_other_cost_per_region_ms_seq(
 169                                          new TruncatedSeq(TruncatedSeqLength)),
 170 
 171   _pending_cards_seq(new TruncatedSeq(TruncatedSeqLength)),
 172   _rs_lengths_seq(new TruncatedSeq(TruncatedSeqLength)),
 173 
 174   _pause_time_target_ms((double) MaxGCPauseMillis),
 175 
 176   _gcs_are_young(true),
 177   _young_pause_num(0),
 178   _mixed_pause_num(0),
 179 
 180   _during_marking(false),
 181   _in_marking_window(false),
 182   _in_marking_window_im(false),
 183 
 184   _recent_prev_end_times_for_all_gcs_sec(
 185                                 new TruncatedSeq(NumPrevPausesForHeuristics)),
 186 
 187   _recent_avg_pause_time_ratio(0.0),
 188 
 189   _initiate_conc_mark_if_possible(false),
 190   _during_initial_mark_pause(false),
 191   _last_young_gc(false),
 192   _last_gc_was_young(false),
 193 
 194   _eden_bytes_before_gc(0),
 195   _survivor_bytes_before_gc(0),
 196   _capacity_before_gc(0),
 197 
 198   _eden_cset_region_length(0),
 199   _survivor_cset_region_length(0),
 200   _old_cset_region_length(0),
 201 
 202   _collection_set(NULL),
 203   _collection_set_bytes_used_before(0),
 204 
 205   // Incremental CSet attributes
 206   _inc_cset_build_state(Inactive),
 207   _inc_cset_head(NULL),
 208   _inc_cset_tail(NULL),
 209   _inc_cset_bytes_used_before(0),
 210   _inc_cset_max_finger(NULL),
 211   _inc_cset_recorded_rs_lengths(0),
 212   _inc_cset_recorded_rs_lengths_diffs(0),
 213   _inc_cset_predicted_elapsed_time_ms(0.0),
 214   _inc_cset_predicted_elapsed_time_ms_diffs(0.0),
 215 
 216 #ifdef _MSC_VER // the use of 'this' below gets a warning, make it go away
 217 #pragma warning( disable:4355 ) // 'this' : used in base member initializer list
 218 #endif // _MSC_VER
 219 
 220   _short_lived_surv_rate_group(new SurvRateGroup(this, "Short Lived",
 221                                                  G1YoungSurvRateNumRegionsSummary)),
 222   _survivor_surv_rate_group(new SurvRateGroup(this, "Survivor",
 223                                               G1YoungSurvRateNumRegionsSummary)),
 224   // add here any more surv rate groups
 225   _recorded_survivor_regions(0),
 226   _recorded_survivor_head(NULL),
 227   _recorded_survivor_tail(NULL),
 228   _survivors_age_table(true),
 229 
 230   _gc_overhead_perc(0.0) {
 231 
 232   // Set up the region size and associated fields. Given that the
 233   // policy is created before the heap, we have to set this up here,
 234   // so it's done as soon as possible.
 235   HeapRegion::setup_heap_region_size(Arguments::min_heap_size());
 236   HeapRegionRemSet::setup_remset_size();
 237 
 238   G1ErgoVerbose::initialize();
 239   if (PrintAdaptiveSizePolicy) {
 240     // Currently, we only use a single switch for all the heuristics.
 241     G1ErgoVerbose::set_enabled(true);
 242     // Given that we don't currently have a verboseness level
 243     // parameter, we'll hardcode this to high. This can be easily
 244     // changed in the future.
 245     G1ErgoVerbose::set_level(ErgoHigh);
 246   } else {
 247     G1ErgoVerbose::set_enabled(false);
 248   }
 249 
 250   // Verify PLAB sizes
 251   const size_t region_size = HeapRegion::GrainWords;
 252   if (YoungPLABSize > region_size || OldPLABSize > region_size) {
 253     char buffer[128];
 254     jio_snprintf(buffer, sizeof(buffer), "%sPLABSize should be at most "SIZE_FORMAT,
 255                  OldPLABSize > region_size ? "Old" : "Young", region_size);
 256     vm_exit_during_initialization(buffer);
 257   }
 258 
 259   _recent_prev_end_times_for_all_gcs_sec->add(os::elapsedTime());
 260   _prev_collection_pause_end_ms = os::elapsedTime() * 1000.0;
 261 
 262   _par_last_gc_worker_start_times_ms = new double[_parallel_gc_threads];
 263   _par_last_ext_root_scan_times_ms = new double[_parallel_gc_threads];
 264   _par_last_satb_filtering_times_ms = new double[_parallel_gc_threads];
 265 
 266   _par_last_update_rs_times_ms = new double[_parallel_gc_threads];
 267   _par_last_update_rs_processed_buffers = new double[_parallel_gc_threads];
 268 
 269   _par_last_scan_rs_times_ms = new double[_parallel_gc_threads];
 270 
 271   _par_last_obj_copy_times_ms = new double[_parallel_gc_threads];
 272 
 273   _par_last_termination_times_ms = new double[_parallel_gc_threads];
 274   _par_last_termination_attempts = new double[_parallel_gc_threads];
 275   _par_last_gc_worker_end_times_ms = new double[_parallel_gc_threads];
 276   _par_last_gc_worker_times_ms = new double[_parallel_gc_threads];
 277   _par_last_gc_worker_other_times_ms = new double[_parallel_gc_threads];
 278 
 279   int index;
 280   if (ParallelGCThreads == 0)
 281     index = 0;
 282   else if (ParallelGCThreads > 8)
 283     index = 7;
 284   else
 285     index = ParallelGCThreads - 1;
 286 
 287   _pending_card_diff_seq->add(0.0);
 288   _rs_length_diff_seq->add(rs_length_diff_defaults[index]);
 289   _cost_per_card_ms_seq->add(cost_per_card_ms_defaults[index]);
 290   _young_cards_per_entry_ratio_seq->add(
 291                                   young_cards_per_entry_ratio_defaults[index]);
 292   _cost_per_entry_ms_seq->add(cost_per_entry_ms_defaults[index]);
 293   _cost_per_byte_ms_seq->add(cost_per_byte_ms_defaults[index]);
 294   _constant_other_time_ms_seq->add(constant_other_time_ms_defaults[index]);
 295   _young_other_cost_per_region_ms_seq->add(
 296                                young_other_cost_per_region_ms_defaults[index]);
 297   _non_young_other_cost_per_region_ms_seq->add(
 298                            non_young_other_cost_per_region_ms_defaults[index]);
 299 
 300   // Below, we might need to calculate the pause time target based on
 301   // the pause interval. When we do so we are going to give G1 maximum
 302   // flexibility and allow it to do pauses when it needs to. So, we'll
 303   // arrange that the pause interval to be pause time target + 1 to
 304   // ensure that a) the pause time target is maximized with respect to
 305   // the pause interval and b) we maintain the invariant that pause
 306   // time target < pause interval. If the user does not want this
 307   // maximum flexibility, they will have to set the pause interval
 308   // explicitly.
 309 
 310   // First make sure that, if either parameter is set, its value is
 311   // reasonable.
 312   if (!FLAG_IS_DEFAULT(MaxGCPauseMillis)) {
 313     if (MaxGCPauseMillis < 1) {
 314       vm_exit_during_initialization("MaxGCPauseMillis should be "
 315                                     "greater than 0");
 316     }
 317   }
 318   if (!FLAG_IS_DEFAULT(GCPauseIntervalMillis)) {
 319     if (GCPauseIntervalMillis < 1) {
 320       vm_exit_during_initialization("GCPauseIntervalMillis should be "
 321                                     "greater than 0");
 322     }
 323   }
 324 
 325   // Then, if the pause time target parameter was not set, set it to
 326   // the default value.
 327   if (FLAG_IS_DEFAULT(MaxGCPauseMillis)) {
 328     if (FLAG_IS_DEFAULT(GCPauseIntervalMillis)) {
 329       // The default pause time target in G1 is 200ms
 330       FLAG_SET_DEFAULT(MaxGCPauseMillis, 200);
 331     } else {
 332       // We do not allow the pause interval to be set without the
 333       // pause time target
 334       vm_exit_during_initialization("GCPauseIntervalMillis cannot be set "
 335                                     "without setting MaxGCPauseMillis");
 336     }
 337   }
 338 
 339   // Then, if the interval parameter was not set, set it according to
 340   // the pause time target (this will also deal with the case when the
 341   // pause time target is the default value).
 342   if (FLAG_IS_DEFAULT(GCPauseIntervalMillis)) {
 343     FLAG_SET_DEFAULT(GCPauseIntervalMillis, MaxGCPauseMillis + 1);
 344   }
 345 
 346   // Finally, make sure that the two parameters are consistent.
 347   if (MaxGCPauseMillis >= GCPauseIntervalMillis) {
 348     char buffer[256];
 349     jio_snprintf(buffer, 256,
 350                  "MaxGCPauseMillis (%u) should be less than "
 351                  "GCPauseIntervalMillis (%u)",
 352                  MaxGCPauseMillis, GCPauseIntervalMillis);
 353     vm_exit_during_initialization(buffer);
 354   }
 355 
 356   double max_gc_time = (double) MaxGCPauseMillis / 1000.0;
 357   double time_slice  = (double) GCPauseIntervalMillis / 1000.0;
 358   _mmu_tracker = new G1MMUTrackerQueue(time_slice, max_gc_time);
 359   _sigma = (double) G1ConfidencePercent / 100.0;
 360 
 361   // start conservatively (around 50ms is about right)
 362   _concurrent_mark_remark_times_ms->add(0.05);
 363   _concurrent_mark_cleanup_times_ms->add(0.20);
 364   _tenuring_threshold = MaxTenuringThreshold;
 365   // _max_survivor_regions will be calculated by
 366   // update_young_list_target_length() during initialization.
 367   _max_survivor_regions = 0;
 368 
 369   assert(GCTimeRatio > 0,
 370          "we should have set it to a default value set_g1_gc_flags() "
 371          "if a user set it to 0");
 372   _gc_overhead_perc = 100.0 * (1.0 / (1.0 + GCTimeRatio));
 373 
 374   uintx reserve_perc = G1ReservePercent;
 375   // Put an artificial ceiling on this so that it's not set to a silly value.
 376   if (reserve_perc > 50) {
 377     reserve_perc = 50;
 378     warning("G1ReservePercent is set to a value that is too large, "
 379             "it's been updated to %u", reserve_perc);
 380   }
 381   _reserve_factor = (double) reserve_perc / 100.0;
 382   // This will be set when the heap is expanded
 383   // for the first time during initialization.
 384   _reserve_regions = 0;
 385 
 386   initialize_all();
 387   _collectionSetChooser = new CollectionSetChooser();
 388   _young_gen_sizer = new G1YoungGenSizer(); // Must be after call to initialize_flags
 389 }
 390 
 391 void G1CollectorPolicy::initialize_flags() {
 392   set_min_alignment(HeapRegion::GrainBytes);
 393   set_max_alignment(GenRemSet::max_alignment_constraint(rem_set_name()));
 394   if (SurvivorRatio < 1) {
 395     vm_exit_during_initialization("Invalid survivor ratio specified");
 396   }
 397   CollectorPolicy::initialize_flags();
 398 }
 399 
 400 G1YoungGenSizer::G1YoungGenSizer() : _sizer_kind(SizerDefaults), _adaptive_size(true) {
 401   assert(G1DefaultMinNewGenPercent <= G1DefaultMaxNewGenPercent, "Min larger than max");
 402   assert(G1DefaultMinNewGenPercent > 0 && G1DefaultMinNewGenPercent < 100, "Min out of bounds");
 403   assert(G1DefaultMaxNewGenPercent > 0 && G1DefaultMaxNewGenPercent < 100, "Max out of bounds");
 404 
 405   if (FLAG_IS_CMDLINE(NewRatio)) {
 406     if (FLAG_IS_CMDLINE(NewSize) || FLAG_IS_CMDLINE(MaxNewSize)) {
 407       warning("-XX:NewSize and -XX:MaxNewSize override -XX:NewRatio");
 408     } else {
 409       _sizer_kind = SizerNewRatio;
 410       _adaptive_size = false;
 411       return;
 412     }
 413   }
 414 
 415   if (FLAG_IS_CMDLINE(NewSize)) {
 416     _min_desired_young_length = MAX2((uint) (NewSize / HeapRegion::GrainBytes),
 417                                      1U);
 418     if (FLAG_IS_CMDLINE(MaxNewSize)) {
 419       _max_desired_young_length =
 420                              MAX2((uint) (MaxNewSize / HeapRegion::GrainBytes),
 421                                   1U);
 422       _sizer_kind = SizerMaxAndNewSize;
 423       _adaptive_size = _min_desired_young_length == _max_desired_young_length;
 424     } else {
 425       _sizer_kind = SizerNewSizeOnly;
 426     }
 427   } else if (FLAG_IS_CMDLINE(MaxNewSize)) {
 428     _max_desired_young_length =
 429                              MAX2((uint) (MaxNewSize / HeapRegion::GrainBytes),
 430                                   1U);
 431     _sizer_kind = SizerMaxNewSizeOnly;
 432   }
 433 }
 434 
 435 uint G1YoungGenSizer::calculate_default_min_length(uint new_number_of_heap_regions) {
 436   uint default_value = (new_number_of_heap_regions * G1DefaultMinNewGenPercent) / 100;
 437   return MAX2(1U, default_value);
 438 }
 439 
 440 uint G1YoungGenSizer::calculate_default_max_length(uint new_number_of_heap_regions) {
 441   uint default_value = (new_number_of_heap_regions * G1DefaultMaxNewGenPercent) / 100;
 442   return MAX2(1U, default_value);
 443 }
 444 
 445 void G1YoungGenSizer::heap_size_changed(uint new_number_of_heap_regions) {
 446   assert(new_number_of_heap_regions > 0, "Heap must be initialized");
 447 
 448   switch (_sizer_kind) {
 449     case SizerDefaults:
 450       _min_desired_young_length = calculate_default_min_length(new_number_of_heap_regions);
 451       _max_desired_young_length = calculate_default_max_length(new_number_of_heap_regions);
 452       break;
 453     case SizerNewSizeOnly:
 454       _max_desired_young_length = calculate_default_max_length(new_number_of_heap_regions);
 455       _max_desired_young_length = MAX2(_min_desired_young_length, _max_desired_young_length);
 456       break;
 457     case SizerMaxNewSizeOnly:
 458       _min_desired_young_length = calculate_default_min_length(new_number_of_heap_regions);
 459       _min_desired_young_length = MIN2(_min_desired_young_length, _max_desired_young_length);
 460       break;
 461     case SizerMaxAndNewSize:
 462       // Do nothing. Values set on the command line, don't update them at runtime.
 463       break;
 464     case SizerNewRatio:
 465       _min_desired_young_length = new_number_of_heap_regions / (NewRatio + 1);
 466       _max_desired_young_length = _min_desired_young_length;
 467       break;
 468     default:
 469       ShouldNotReachHere();
 470   }
 471 
 472   assert(_min_desired_young_length <= _max_desired_young_length, "Invalid min/max young gen size values");
 473 }
 474 
 475 void G1CollectorPolicy::init() {
 476   // Set aside an initial future to_space.
 477   _g1 = G1CollectedHeap::heap();
 478 
 479   assert(Heap_lock->owned_by_self(), "Locking discipline.");
 480 
 481   initialize_gc_policy_counters();
 482 
 483   if (adaptive_young_list_length()) {
 484     _young_list_fixed_length = 0;
 485   } else {
 486     _young_list_fixed_length = _young_gen_sizer->min_desired_young_length();
 487   }
 488   _free_regions_at_end_of_collection = _g1->free_regions();
 489   update_young_list_target_length();
 490   _prev_eden_capacity = _young_list_target_length * HeapRegion::GrainBytes;
 491 
 492   // We may immediately start allocating regions and placing them on the
 493   // collection set list. Initialize the per-collection set info
 494   start_incremental_cset_building();
 495 }
 496 
 497 // Create the jstat counters for the policy.
 498 void G1CollectorPolicy::initialize_gc_policy_counters() {
 499   _gc_policy_counters = new GCPolicyCounters("GarbageFirst", 1, 3);
 500 }
 501 
 502 bool G1CollectorPolicy::predict_will_fit(uint young_length,
 503                                          double base_time_ms,
 504                                          uint base_free_regions,
 505                                          double target_pause_time_ms) {
 506   if (young_length >= base_free_regions) {
 507     // end condition 1: not enough space for the young regions
 508     return false;
 509   }
 510 
 511   double accum_surv_rate = accum_yg_surv_rate_pred((int) young_length - 1);
 512   size_t bytes_to_copy =
 513                (size_t) (accum_surv_rate * (double) HeapRegion::GrainBytes);
 514   double copy_time_ms = predict_object_copy_time_ms(bytes_to_copy);
 515   double young_other_time_ms = predict_young_other_time_ms(young_length);
 516   double pause_time_ms = base_time_ms + copy_time_ms + young_other_time_ms;
 517   if (pause_time_ms > target_pause_time_ms) {
 518     // end condition 2: prediction is over the target pause time
 519     return false;
 520   }
 521 
 522   size_t free_bytes =
 523                    (base_free_regions - young_length) * HeapRegion::GrainBytes;
 524   if ((2.0 * sigma()) * (double) bytes_to_copy > (double) free_bytes) {
 525     // end condition 3: out-of-space (conservatively!)
 526     return false;
 527   }
 528 
 529   // success!
 530   return true;
 531 }
 532 
 533 void G1CollectorPolicy::record_new_heap_size(uint new_number_of_regions) {
 534   // re-calculate the necessary reserve
 535   double reserve_regions_d = (double) new_number_of_regions * _reserve_factor;
 536   // We use ceiling so that if reserve_regions_d is > 0.0 (but
 537   // smaller than 1.0) we'll get 1.
 538   _reserve_regions = (uint) ceil(reserve_regions_d);
 539 
 540   _young_gen_sizer->heap_size_changed(new_number_of_regions);
 541 }
 542 
 543 uint G1CollectorPolicy::calculate_young_list_desired_min_length(
 544                                                        uint base_min_length) {
 545   uint desired_min_length = 0;
 546   if (adaptive_young_list_length()) {
 547     if (_alloc_rate_ms_seq->num() > 3) {
 548       double now_sec = os::elapsedTime();
 549       double when_ms = _mmu_tracker->when_max_gc_sec(now_sec) * 1000.0;
 550       double alloc_rate_ms = predict_alloc_rate_ms();
 551       desired_min_length = (uint) ceil(alloc_rate_ms * when_ms);
 552     } else {
 553       // otherwise we don't have enough info to make the prediction
 554     }
 555   }
 556   desired_min_length += base_min_length;
 557   // make sure we don't go below any user-defined minimum bound
 558   return MAX2(_young_gen_sizer->min_desired_young_length(), desired_min_length);
 559 }
 560 
 561 uint G1CollectorPolicy::calculate_young_list_desired_max_length() {
 562   // Here, we might want to also take into account any additional
 563   // constraints (i.e., user-defined minimum bound). Currently, we
 564   // effectively don't set this bound.
 565   return _young_gen_sizer->max_desired_young_length();
 566 }
 567 
 568 void G1CollectorPolicy::update_young_list_target_length(size_t rs_lengths) {
 569   if (rs_lengths == (size_t) -1) {
 570     // if it's set to the default value (-1), we should predict it;
 571     // otherwise, use the given value.
 572     rs_lengths = (size_t) get_new_prediction(_rs_lengths_seq);
 573   }
 574 
 575   // Calculate the absolute and desired min bounds.
 576 
 577   // This is how many young regions we already have (currently: the survivors).
 578   uint base_min_length = recorded_survivor_regions();
 579   // This is the absolute minimum young length, which ensures that we
 580   // can allocate one eden region in the worst-case.
 581   uint absolute_min_length = base_min_length + 1;
 582   uint desired_min_length =
 583                      calculate_young_list_desired_min_length(base_min_length);
 584   if (desired_min_length < absolute_min_length) {
 585     desired_min_length = absolute_min_length;
 586   }
 587 
 588   // Calculate the absolute and desired max bounds.
 589 
 590   // We will try our best not to "eat" into the reserve.
 591   uint absolute_max_length = 0;
 592   if (_free_regions_at_end_of_collection > _reserve_regions) {
 593     absolute_max_length = _free_regions_at_end_of_collection - _reserve_regions;
 594   }
 595   uint desired_max_length = calculate_young_list_desired_max_length();
 596   if (desired_max_length > absolute_max_length) {
 597     desired_max_length = absolute_max_length;
 598   }
 599 
 600   uint young_list_target_length = 0;
 601   if (adaptive_young_list_length()) {
 602     if (gcs_are_young()) {
 603       young_list_target_length =
 604                         calculate_young_list_target_length(rs_lengths,
 605                                                            base_min_length,
 606                                                            desired_min_length,
 607                                                            desired_max_length);
 608       _rs_lengths_prediction = rs_lengths;
 609     } else {
 610       // Don't calculate anything and let the code below bound it to
 611       // the desired_min_length, i.e., do the next GC as soon as
 612       // possible to maximize how many old regions we can add to it.
 613     }
 614   } else {
 615     // The user asked for a fixed young gen so we'll fix the young gen
 616     // whether the next GC is young or mixed.
 617     young_list_target_length = _young_list_fixed_length;
 618   }
 619 
 620   // Make sure we don't go over the desired max length, nor under the
 621   // desired min length. In case they clash, desired_min_length wins
 622   // which is why that test is second.
 623   if (young_list_target_length > desired_max_length) {
 624     young_list_target_length = desired_max_length;
 625   }
 626   if (young_list_target_length < desired_min_length) {
 627     young_list_target_length = desired_min_length;
 628   }
 629 
 630   assert(young_list_target_length > recorded_survivor_regions(),
 631          "we should be able to allocate at least one eden region");
 632   assert(young_list_target_length >= absolute_min_length, "post-condition");
 633   _young_list_target_length = young_list_target_length;
 634 
 635   update_max_gc_locker_expansion();
 636 }
 637 
 638 uint
 639 G1CollectorPolicy::calculate_young_list_target_length(size_t rs_lengths,
 640                                                      uint base_min_length,
 641                                                      uint desired_min_length,
 642                                                      uint desired_max_length) {
 643   assert(adaptive_young_list_length(), "pre-condition");
 644   assert(gcs_are_young(), "only call this for young GCs");
 645 
 646   // In case some edge-condition makes the desired max length too small...
 647   if (desired_max_length <= desired_min_length) {
 648     return desired_min_length;
 649   }
 650 
 651   // We'll adjust min_young_length and max_young_length not to include
 652   // the already allocated young regions (i.e., so they reflect the
 653   // min and max eden regions we'll allocate). The base_min_length
 654   // will be reflected in the predictions by the
 655   // survivor_regions_evac_time prediction.
 656   assert(desired_min_length > base_min_length, "invariant");
 657   uint min_young_length = desired_min_length - base_min_length;
 658   assert(desired_max_length > base_min_length, "invariant");
 659   uint max_young_length = desired_max_length - base_min_length;
 660 
 661   double target_pause_time_ms = _mmu_tracker->max_gc_time() * 1000.0;
 662   double survivor_regions_evac_time = predict_survivor_regions_evac_time();
 663   size_t pending_cards = (size_t) get_new_prediction(_pending_cards_seq);
 664   size_t adj_rs_lengths = rs_lengths + predict_rs_length_diff();
 665   size_t scanned_cards = predict_young_card_num(adj_rs_lengths);
 666   double base_time_ms =
 667     predict_base_elapsed_time_ms(pending_cards, scanned_cards) +
 668     survivor_regions_evac_time;
 669   uint available_free_regions = _free_regions_at_end_of_collection;
 670   uint base_free_regions = 0;
 671   if (available_free_regions > _reserve_regions) {
 672     base_free_regions = available_free_regions - _reserve_regions;
 673   }
 674 
 675   // Here, we will make sure that the shortest young length that
 676   // makes sense fits within the target pause time.
 677 
 678   if (predict_will_fit(min_young_length, base_time_ms,
 679                        base_free_regions, target_pause_time_ms)) {
 680     // The shortest young length will fit into the target pause time;
 681     // we'll now check whether the absolute maximum number of young
 682     // regions will fit in the target pause time. If not, we'll do
 683     // a binary search between min_young_length and max_young_length.
 684     if (predict_will_fit(max_young_length, base_time_ms,
 685                          base_free_regions, target_pause_time_ms)) {
 686       // The maximum young length will fit into the target pause time.
 687       // We are done so set min young length to the maximum length (as
 688       // the result is assumed to be returned in min_young_length).
 689       min_young_length = max_young_length;
 690     } else {
 691       // The maximum possible number of young regions will not fit within
 692       // the target pause time so we'll search for the optimal
 693       // length. The loop invariants are:
 694       //
 695       // min_young_length < max_young_length
 696       // min_young_length is known to fit into the target pause time
 697       // max_young_length is known not to fit into the target pause time
 698       //
 699       // Going into the loop we know the above hold as we've just
 700       // checked them. Every time around the loop we check whether
 701       // the middle value between min_young_length and
 702       // max_young_length fits into the target pause time. If it
 703       // does, it becomes the new min. If it doesn't, it becomes
 704       // the new max. This way we maintain the loop invariants.
 705 
 706       assert(min_young_length < max_young_length, "invariant");
 707       uint diff = (max_young_length - min_young_length) / 2;
 708       while (diff > 0) {
 709         uint young_length = min_young_length + diff;
 710         if (predict_will_fit(young_length, base_time_ms,
 711                              base_free_regions, target_pause_time_ms)) {
 712           min_young_length = young_length;
 713         } else {
 714           max_young_length = young_length;
 715         }
 716         assert(min_young_length <  max_young_length, "invariant");
 717         diff = (max_young_length - min_young_length) / 2;
 718       }
 719       // The results is min_young_length which, according to the
 720       // loop invariants, should fit within the target pause time.
 721 
 722       // These are the post-conditions of the binary search above:
 723       assert(min_young_length < max_young_length,
 724              "otherwise we should have discovered that max_young_length "
 725              "fits into the pause target and not done the binary search");
 726       assert(predict_will_fit(min_young_length, base_time_ms,
 727                               base_free_regions, target_pause_time_ms),
 728              "min_young_length, the result of the binary search, should "
 729              "fit into the pause target");
 730       assert(!predict_will_fit(min_young_length + 1, base_time_ms,
 731                                base_free_regions, target_pause_time_ms),
 732              "min_young_length, the result of the binary search, should be "
 733              "optimal, so no larger length should fit into the pause target");
 734     }
 735   } else {
 736     // Even the minimum length doesn't fit into the pause time
 737     // target, return it as the result nevertheless.
 738   }
 739   return base_min_length + min_young_length;
 740 }
 741 
 742 double G1CollectorPolicy::predict_survivor_regions_evac_time() {
 743   double survivor_regions_evac_time = 0.0;
 744   for (HeapRegion * r = _recorded_survivor_head;
 745        r != NULL && r != _recorded_survivor_tail->get_next_young_region();
 746        r = r->get_next_young_region()) {
 747     survivor_regions_evac_time += predict_region_elapsed_time_ms(r, true);
 748   }
 749   return survivor_regions_evac_time;
 750 }
 751 
 752 void G1CollectorPolicy::revise_young_list_target_length_if_necessary() {
 753   guarantee( adaptive_young_list_length(), "should not call this otherwise" );
 754 
 755   size_t rs_lengths = _g1->young_list()->sampled_rs_lengths();
 756   if (rs_lengths > _rs_lengths_prediction) {
 757     // add 10% to avoid having to recalculate often
 758     size_t rs_lengths_prediction = rs_lengths * 1100 / 1000;
 759     update_young_list_target_length(rs_lengths_prediction);
 760   }
 761 }
 762 
 763 
 764 
 765 HeapWord* G1CollectorPolicy::mem_allocate_work(size_t size,
 766                                                bool is_tlab,
 767                                                bool* gc_overhead_limit_was_exceeded) {
 768   guarantee(false, "Not using this policy feature yet.");
 769   return NULL;
 770 }
 771 
 772 // This method controls how a collector handles one or more
 773 // of its generations being fully allocated.
 774 HeapWord* G1CollectorPolicy::satisfy_failed_allocation(size_t size,
 775                                                        bool is_tlab) {
 776   guarantee(false, "Not using this policy feature yet.");
 777   return NULL;
 778 }
 779 
 780 
 781 #ifndef PRODUCT
 782 bool G1CollectorPolicy::verify_young_ages() {
 783   HeapRegion* head = _g1->young_list()->first_region();
 784   return
 785     verify_young_ages(head, _short_lived_surv_rate_group);
 786   // also call verify_young_ages on any additional surv rate groups
 787 }
 788 
 789 bool
 790 G1CollectorPolicy::verify_young_ages(HeapRegion* head,
 791                                      SurvRateGroup *surv_rate_group) {
 792   guarantee( surv_rate_group != NULL, "pre-condition" );
 793 
 794   const char* name = surv_rate_group->name();
 795   bool ret = true;
 796   int prev_age = -1;
 797 
 798   for (HeapRegion* curr = head;
 799        curr != NULL;
 800        curr = curr->get_next_young_region()) {
 801     SurvRateGroup* group = curr->surv_rate_group();
 802     if (group == NULL && !curr->is_survivor()) {
 803       gclog_or_tty->print_cr("## %s: encountered NULL surv_rate_group", name);
 804       ret = false;
 805     }
 806 
 807     if (surv_rate_group == group) {
 808       int age = curr->age_in_surv_rate_group();
 809 
 810       if (age < 0) {
 811         gclog_or_tty->print_cr("## %s: encountered negative age", name);
 812         ret = false;
 813       }
 814 
 815       if (age <= prev_age) {
 816         gclog_or_tty->print_cr("## %s: region ages are not strictly increasing "
 817                                "(%d, %d)", name, age, prev_age);
 818         ret = false;
 819       }
 820       prev_age = age;
 821     }
 822   }
 823 
 824   return ret;
 825 }
 826 #endif // PRODUCT
 827 
 828 void G1CollectorPolicy::record_full_collection_start() {
 829   _cur_collection_start_sec = os::elapsedTime();
 830   // Release the future to-space so that it is available for compaction into.
 831   _g1->set_full_collection();
 832 }
 833 
 834 void G1CollectorPolicy::record_full_collection_end() {
 835   // Consider this like a collection pause for the purposes of allocation
 836   // since last pause.
 837   double end_sec = os::elapsedTime();
 838   double full_gc_time_sec = end_sec - _cur_collection_start_sec;
 839   double full_gc_time_ms = full_gc_time_sec * 1000.0;
 840 
 841   _trace_gen1_time_data.record_full_collection(full_gc_time_ms);
 842 
 843   update_recent_gc_times(end_sec, full_gc_time_ms);
 844 
 845   _g1->clear_full_collection();
 846 
 847   // "Nuke" the heuristics that control the young/mixed GC
 848   // transitions and make sure we start with young GCs after the Full GC.
 849   set_gcs_are_young(true);
 850   _last_young_gc = false;
 851   clear_initiate_conc_mark_if_possible();
 852   clear_during_initial_mark_pause();
 853   _in_marking_window = false;
 854   _in_marking_window_im = false;
 855 
 856   _short_lived_surv_rate_group->start_adding_regions();
 857   // also call this on any additional surv rate groups
 858 
 859   record_survivor_regions(0, NULL, NULL);
 860 
 861   _free_regions_at_end_of_collection = _g1->free_regions();
 862   // Reset survivors SurvRateGroup.
 863   _survivor_surv_rate_group->reset();
 864   update_young_list_target_length();
 865   _collectionSetChooser->clear();
 866 }
 867 
 868 void G1CollectorPolicy::record_stop_world_start() {
 869   _stop_world_start = os::elapsedTime();
 870 }
 871 
 872 void G1CollectorPolicy::record_collection_pause_start(double start_time_sec,
 873                                                       size_t start_used) {
 874   if (G1Log::finer()) {
 875     gclog_or_tty->stamp(PrintGCTimeStamps);
 876     gclog_or_tty->print("[%s", (const char*)GCCauseString("GC pause", _g1->gc_cause())
 877       .append(gcs_are_young() ? " (young)" : " (mixed)"));
 878   }
 879 
 880   // We only need to do this here as the policy will only be applied
 881   // to the GC we're about to start. so, no point is calculating this
 882   // every time we calculate / recalculate the target young length.
 883   update_survivors_policy();
 884 
 885   assert(_g1->used() == _g1->recalculate_used(),
 886          err_msg("sanity, used: "SIZE_FORMAT" recalculate_used: "SIZE_FORMAT,
 887                  _g1->used(), _g1->recalculate_used()));
 888 
 889   double s_w_t_ms = (start_time_sec - _stop_world_start) * 1000.0;
 890   _trace_gen0_time_data.record_start_collection(s_w_t_ms);
 891   _stop_world_start = 0.0;
 892 
 893   _cur_collection_start_sec = start_time_sec;
 894   _cur_collection_pause_used_at_start_bytes = start_used;
 895   _cur_collection_pause_used_regions_at_start = _g1->used_regions();
 896   _pending_cards = _g1->pending_card_num();
 897   _max_pending_cards = _g1->max_pending_card_num();
 898 
 899   _bytes_in_collection_set_before_gc = 0;
 900   _bytes_copied_during_gc = 0;
 901 
 902   YoungList* young_list = _g1->young_list();
 903   _eden_bytes_before_gc = young_list->eden_used_bytes();
 904   _survivor_bytes_before_gc = young_list->survivor_used_bytes();
 905   _capacity_before_gc = _g1->capacity();
 906 
 907 #ifdef DEBUG
 908   // initialise these to something well known so that we can spot
 909   // if they are not set properly
 910 
 911   for (int i = 0; i < _parallel_gc_threads; ++i) {
 912     _par_last_gc_worker_start_times_ms[i] = -1234.0;
 913     _par_last_ext_root_scan_times_ms[i] = -1234.0;
 914     _par_last_satb_filtering_times_ms[i] = -1234.0;
 915     _par_last_update_rs_times_ms[i] = -1234.0;
 916     _par_last_update_rs_processed_buffers[i] = -1234.0;
 917     _par_last_scan_rs_times_ms[i] = -1234.0;
 918     _par_last_obj_copy_times_ms[i] = -1234.0;
 919     _par_last_termination_times_ms[i] = -1234.0;
 920     _par_last_termination_attempts[i] = -1234.0;
 921     _par_last_gc_worker_end_times_ms[i] = -1234.0;
 922     _par_last_gc_worker_times_ms[i] = -1234.0;
 923     _par_last_gc_worker_other_times_ms[i] = -1234.0;
 924   }
 925 #endif
 926 
 927   // This is initialized to zero here and is set during the evacuation
 928   // pause if we actually waited for the root region scanning to finish.
 929   _root_region_scan_wait_time_ms = 0.0;
 930 
 931   _last_gc_was_young = false;
 932 
 933   // do that for any other surv rate groups
 934   _short_lived_surv_rate_group->stop_adding_regions();
 935   _survivors_age_table.clear();
 936 
 937   assert( verify_young_ages(), "region age verification" );
 938 }
 939 
 940 void G1CollectorPolicy::record_concurrent_mark_init_end(double
 941                                                    mark_init_elapsed_time_ms) {
 942   _during_marking = true;
 943   assert(!initiate_conc_mark_if_possible(), "we should have cleared it by now");
 944   clear_during_initial_mark_pause();
 945   _cur_mark_stop_world_time_ms = mark_init_elapsed_time_ms;
 946 }
 947 
 948 void G1CollectorPolicy::record_concurrent_mark_remark_start() {
 949   _mark_remark_start_sec = os::elapsedTime();
 950   _during_marking = false;
 951 }
 952 
 953 void G1CollectorPolicy::record_concurrent_mark_remark_end() {
 954   double end_time_sec = os::elapsedTime();
 955   double elapsed_time_ms = (end_time_sec - _mark_remark_start_sec)*1000.0;
 956   _concurrent_mark_remark_times_ms->add(elapsed_time_ms);
 957   _cur_mark_stop_world_time_ms += elapsed_time_ms;
 958   _prev_collection_pause_end_ms += elapsed_time_ms;
 959 
 960   _mmu_tracker->add_pause(_mark_remark_start_sec, end_time_sec, true);
 961 }
 962 
 963 void G1CollectorPolicy::record_concurrent_mark_cleanup_start() {
 964   _mark_cleanup_start_sec = os::elapsedTime();
 965 }
 966 
 967 void G1CollectorPolicy::record_concurrent_mark_cleanup_completed() {
 968   _last_young_gc = true;
 969   _in_marking_window = false;
 970 }
 971 
 972 void G1CollectorPolicy::record_concurrent_pause() {
 973   if (_stop_world_start > 0.0) {
 974     double yield_ms = (os::elapsedTime() - _stop_world_start) * 1000.0;
 975     _trace_gen0_time_data.record_yield_time(yield_ms);
 976   }
 977 }
 978 
 979 void G1CollectorPolicy::record_concurrent_pause_end() {
 980 }
 981 
 982 template<class T>
 983 T sum_of(T* sum_arr, int start, int n, int N) {
 984   T sum = (T)0;
 985   for (int i = 0; i < n; i++) {
 986     int j = (start + i) % N;
 987     sum += sum_arr[j];
 988   }
 989   return sum;
 990 }
 991 
 992 void G1CollectorPolicy::print_par_stats(int level,
 993                                         const char* str,
 994                                         double* data,
 995                                         bool showDecimals) {
 996   double min = data[0], max = data[0];
 997   double total = 0.0;
 998   LineBuffer buf(level);
 999   buf.append("[%s (ms):", str);
1000   for (uint i = 0; i < no_of_gc_threads(); ++i) {
1001     double val = data[i];
1002     if (val < min)
1003       min = val;
1004     if (val > max)
1005       max = val;
1006     total += val;
1007     if (G1Log::finest()) {
1008       if (showDecimals) {
1009         buf.append("  %.1lf", val);
1010       } else {
1011         buf.append("  %d", (int)val);
1012       }
1013     }
1014   }
1015 
1016   if (G1Log::finest()) {
1017     buf.append_and_print_cr("");
1018   }
1019   double avg = total / (double) no_of_gc_threads();
1020   if (showDecimals) {
1021     buf.append_and_print_cr(" Min: %.1lf, Avg: %.1lf, Max: %.1lf, Diff: %.1lf, Sum: %.1lf]",
1022       min, avg, max, max - min, total);
1023   } else {
1024     buf.append_and_print_cr(" Min: %d, Avg: %d, Max: %d, Diff: %d, Sum: %d]",
1025       (int)min, (int)avg, (int)max, (int)max - (int)min, (int)total);
1026   }
1027 }
1028 
1029 void G1CollectorPolicy::print_stats(int level,
1030                                     const char* str,
1031                                     double value) {
1032   LineBuffer(level).append_and_print_cr("[%s: %.1lf ms]", str, value);
1033 }
1034 
1035 void G1CollectorPolicy::print_stats(int level,
1036                                     const char* str,
1037                                     double value,
1038                                     int workers) {
1039   LineBuffer(level).append_and_print_cr("[%s: %.1lf ms, GC Workers: %d]", str, value, workers);
1040 }
1041 
1042 void G1CollectorPolicy::print_stats(int level,
1043                                     const char* str,
1044                                     int value) {
1045   LineBuffer(level).append_and_print_cr("[%s: %d]", str, value);
1046 }
1047 
1048 double G1CollectorPolicy::avg_value(double* data) {
1049   if (G1CollectedHeap::use_parallel_gc_threads()) {
1050     double ret = 0.0;
1051     for (uint i = 0; i < no_of_gc_threads(); ++i) {
1052       ret += data[i];
1053     }
1054     return ret / (double) no_of_gc_threads();
1055   } else {
1056     return data[0];
1057   }
1058 }
1059 
1060 double G1CollectorPolicy::max_value(double* data) {
1061   if (G1CollectedHeap::use_parallel_gc_threads()) {
1062     double ret = data[0];
1063     for (uint i = 1; i < no_of_gc_threads(); ++i) {
1064       if (data[i] > ret) {
1065         ret = data[i];
1066       }
1067     }
1068     return ret;
1069   } else {
1070     return data[0];
1071   }
1072 }
1073 
1074 double G1CollectorPolicy::sum_of_values(double* data) {
1075   if (G1CollectedHeap::use_parallel_gc_threads()) {
1076     double sum = 0.0;
1077     for (uint i = 0; i < no_of_gc_threads(); i++) {
1078       sum += data[i];
1079     }
1080     return sum;
1081   } else {
1082     return data[0];
1083   }
1084 }
1085 
1086 double G1CollectorPolicy::max_sum(double* data1, double* data2) {
1087   double ret = data1[0] + data2[0];
1088 
1089   if (G1CollectedHeap::use_parallel_gc_threads()) {
1090     for (uint i = 1; i < no_of_gc_threads(); ++i) {
1091       double data = data1[i] + data2[i];
1092       if (data > ret) {
1093         ret = data;
1094       }
1095     }
1096   }
1097   return ret;
1098 }
1099 
1100 bool G1CollectorPolicy::need_to_start_conc_mark(const char* source, size_t alloc_word_size) {
1101   if (_g1->concurrent_mark()->cmThread()->during_cycle()) {
1102     return false;
1103   }
1104 
1105   size_t marking_initiating_used_threshold =
1106     (_g1->capacity() / 100) * InitiatingHeapOccupancyPercent;
1107   size_t cur_used_bytes = _g1->non_young_capacity_bytes();
1108   size_t alloc_byte_size = alloc_word_size * HeapWordSize;
1109 
1110   if ((cur_used_bytes + alloc_byte_size) > marking_initiating_used_threshold) {
1111     if (gcs_are_young()) {
1112       ergo_verbose5(ErgoConcCycles,
1113         "request concurrent cycle initiation",
1114         ergo_format_reason("occupancy higher than threshold")
1115         ergo_format_byte("occupancy")
1116         ergo_format_byte("allocation request")
1117         ergo_format_byte_perc("threshold")
1118         ergo_format_str("source"),
1119         cur_used_bytes,
1120         alloc_byte_size,
1121         marking_initiating_used_threshold,
1122         (double) InitiatingHeapOccupancyPercent,
1123         source);
1124       return true;
1125     } else {
1126       ergo_verbose5(ErgoConcCycles,
1127         "do not request concurrent cycle initiation",
1128         ergo_format_reason("still doing mixed collections")
1129         ergo_format_byte("occupancy")
1130         ergo_format_byte("allocation request")
1131         ergo_format_byte_perc("threshold")
1132         ergo_format_str("source"),
1133         cur_used_bytes,
1134         alloc_byte_size,
1135         marking_initiating_used_threshold,
1136         (double) InitiatingHeapOccupancyPercent,
1137         source);
1138     }
1139   }
1140 
1141   return false;
1142 }
1143 
1144 // Anything below that is considered to be zero
1145 #define MIN_TIMER_GRANULARITY 0.0000001
1146 
1147 void G1CollectorPolicy::record_collection_pause_end(int no_of_gc_threads) {
1148   double end_time_sec = os::elapsedTime();
1149   double elapsed_ms = _last_pause_time_ms;
1150   bool parallel = G1CollectedHeap::use_parallel_gc_threads();
1151   assert(_cur_collection_pause_used_regions_at_start >= cset_region_length(),
1152          "otherwise, the subtraction below does not make sense");
1153   size_t rs_size =
1154             _cur_collection_pause_used_regions_at_start - cset_region_length();
1155   size_t cur_used_bytes = _g1->used();
1156   assert(cur_used_bytes == _g1->recalculate_used(), "It should!");
1157   bool last_pause_included_initial_mark = false;
1158   bool update_stats = !_g1->evacuation_failed();
1159   set_no_of_gc_threads(no_of_gc_threads);
1160 
1161 #ifndef PRODUCT
1162   if (G1YoungSurvRateVerbose) {
1163     gclog_or_tty->print_cr("");
1164     _short_lived_surv_rate_group->print();
1165     // do that for any other surv rate groups too
1166   }
1167 #endif // PRODUCT
1168 
1169   last_pause_included_initial_mark = during_initial_mark_pause();
1170   if (last_pause_included_initial_mark) {
1171     record_concurrent_mark_init_end(0.0);
1172   } else if (!_last_young_gc && need_to_start_conc_mark("end of GC")) {
1173     // Note: this might have already been set, if during the last
1174     // pause we decided to start a cycle but at the beginning of
1175     // this pause we decided to postpone it. That's OK.
1176     set_initiate_conc_mark_if_possible();
1177   }
1178 
1179   _mmu_tracker->add_pause(end_time_sec - elapsed_ms/1000.0,
1180                           end_time_sec, false);
1181 
1182   size_t freed_bytes =
1183     _cur_collection_pause_used_at_start_bytes - cur_used_bytes;
1184   size_t surviving_bytes = _collection_set_bytes_used_before - freed_bytes;
1185 
1186   double survival_fraction =
1187     (double)surviving_bytes/
1188     (double)_collection_set_bytes_used_before;
1189 
1190   // These values are used to update the summary information that is
1191   // displayed when TraceGen0Time is enabled, and are output as part
1192   // of the "finer" output, in the non-parallel case.
1193 
1194   double ext_root_scan_time = avg_value(_par_last_ext_root_scan_times_ms);
1195   double satb_filtering_time = avg_value(_par_last_satb_filtering_times_ms);
1196   double update_rs_time = avg_value(_par_last_update_rs_times_ms);
1197   double update_rs_processed_buffers =
1198     sum_of_values(_par_last_update_rs_processed_buffers);
1199   double scan_rs_time = avg_value(_par_last_scan_rs_times_ms);
1200   double obj_copy_time = avg_value(_par_last_obj_copy_times_ms);
1201   double termination_time = avg_value(_par_last_termination_times_ms);
1202 
1203   double known_time = ext_root_scan_time +
1204                       satb_filtering_time +
1205                       update_rs_time +
1206                       scan_rs_time +
1207                       obj_copy_time;
1208 
1209   double other_time_ms = elapsed_ms;
1210 
1211   // Subtract the root region scanning wait time. It's initialized to
1212   // zero at the start of the pause.
1213   other_time_ms -= _root_region_scan_wait_time_ms;
1214 
1215   if (parallel) {
1216     other_time_ms -= _cur_collection_par_time_ms;
1217   } else {
1218     other_time_ms -= known_time;
1219   }
1220 
1221   // Now subtract the time taken to fix up roots in generated code
1222   other_time_ms -= _cur_collection_code_root_fixup_time_ms;
1223 
1224   // Subtract the time taken to clean the card table from the
1225   // current value of "other time"
1226   other_time_ms -= _cur_clear_ct_time_ms;
1227 
1228   // TraceGen0Time and TraceGen1Time summary info updating.
1229 
1230   if (update_stats) {
1231     double parallel_known_time = known_time + termination_time;
1232     double parallel_other_time = _cur_collection_par_time_ms - parallel_known_time;
1233 
1234     _trace_gen0_time_data.record_end_collection(
1235       elapsed_ms, other_time_ms, _root_region_scan_wait_time_ms, _cur_collection_par_time_ms,
1236       ext_root_scan_time, satb_filtering_time, update_rs_time, scan_rs_time, obj_copy_time,
1237       termination_time, parallel_other_time, _cur_clear_ct_time_ms);
1238 
1239     // this is where we update the allocation rate of the application
1240     double app_time_ms =
1241       (_cur_collection_start_sec * 1000.0 - _prev_collection_pause_end_ms);
1242     if (app_time_ms < MIN_TIMER_GRANULARITY) {
1243       // This usually happens due to the timer not having the required
1244       // granularity. Some Linuxes are the usual culprits.
1245       // We'll just set it to something (arbitrarily) small.
1246       app_time_ms = 1.0;
1247     }
1248     // We maintain the invariant that all objects allocated by mutator
1249     // threads will be allocated out of eden regions. So, we can use
1250     // the eden region number allocated since the previous GC to
1251     // calculate the application's allocate rate. The only exception
1252     // to that is humongous objects that are allocated separately. But
1253     // given that humongous object allocations do not really affect
1254     // either the pause's duration nor when the next pause will take
1255     // place we can safely ignore them here.
1256     uint regions_allocated = eden_cset_region_length();
1257     double alloc_rate_ms = (double) regions_allocated / app_time_ms;
1258     _alloc_rate_ms_seq->add(alloc_rate_ms);
1259 
1260     double interval_ms =
1261       (end_time_sec - _recent_prev_end_times_for_all_gcs_sec->oldest()) * 1000.0;
1262     update_recent_gc_times(end_time_sec, elapsed_ms);
1263     _recent_avg_pause_time_ratio = _recent_gc_times_ms->sum()/interval_ms;
1264     if (recent_avg_pause_time_ratio() < 0.0 ||
1265         (recent_avg_pause_time_ratio() - 1.0 > 0.0)) {
1266 #ifndef PRODUCT
1267       // Dump info to allow post-facto debugging
1268       gclog_or_tty->print_cr("recent_avg_pause_time_ratio() out of bounds");
1269       gclog_or_tty->print_cr("-------------------------------------------");
1270       gclog_or_tty->print_cr("Recent GC Times (ms):");
1271       _recent_gc_times_ms->dump();
1272       gclog_or_tty->print_cr("(End Time=%3.3f) Recent GC End Times (s):", end_time_sec);
1273       _recent_prev_end_times_for_all_gcs_sec->dump();
1274       gclog_or_tty->print_cr("GC = %3.3f, Interval = %3.3f, Ratio = %3.3f",
1275                              _recent_gc_times_ms->sum(), interval_ms, recent_avg_pause_time_ratio());
1276       // In debug mode, terminate the JVM if the user wants to debug at this point.
1277       assert(!G1FailOnFPError, "Debugging data for CR 6898948 has been dumped above");
1278 #endif  // !PRODUCT
1279       // Clip ratio between 0.0 and 1.0, and continue. This will be fixed in
1280       // CR 6902692 by redoing the manner in which the ratio is incrementally computed.
1281       if (_recent_avg_pause_time_ratio < 0.0) {
1282         _recent_avg_pause_time_ratio = 0.0;
1283       } else {
1284         assert(_recent_avg_pause_time_ratio - 1.0 > 0.0, "Ctl-point invariant");
1285         _recent_avg_pause_time_ratio = 1.0;
1286       }
1287     }
1288   }
1289 
1290   if (G1Log::finer()) {
1291     bool print_marking_info =
1292       _g1->mark_in_progress() && !last_pause_included_initial_mark;
1293 
1294     gclog_or_tty->print_cr("%s, %1.8lf secs]",
1295                            (last_pause_included_initial_mark) ? " (initial-mark)" : "",
1296                            elapsed_ms / 1000.0);
1297 
1298     if (_root_region_scan_wait_time_ms > 0.0) {
1299       print_stats(1, "Root Region Scan Waiting", _root_region_scan_wait_time_ms);
1300     }
1301     if (parallel) {
1302       print_stats(1, "Parallel Time", _cur_collection_par_time_ms, no_of_gc_threads);
1303       print_par_stats(2, "GC Worker Start", _par_last_gc_worker_start_times_ms);
1304       print_par_stats(2, "Ext Root Scanning", _par_last_ext_root_scan_times_ms);
1305       if (print_marking_info) {
1306         print_par_stats(2, "SATB Filtering", _par_last_satb_filtering_times_ms);
1307       }
1308       print_par_stats(2, "Update RS", _par_last_update_rs_times_ms);
1309       if (G1Log::finest()) {
1310         print_par_stats(3, "Processed Buffers", _par_last_update_rs_processed_buffers,
1311           false /* showDecimals */);
1312       }
1313       print_par_stats(2, "Scan RS", _par_last_scan_rs_times_ms);
1314       print_par_stats(2, "Object Copy", _par_last_obj_copy_times_ms);
1315       print_par_stats(2, "Termination", _par_last_termination_times_ms);
1316       if (G1Log::finest()) {
1317         print_par_stats(3, "Termination Attempts", _par_last_termination_attempts,
1318           false /* showDecimals */);
1319       }
1320 
1321       for (int i = 0; i < _parallel_gc_threads; i++) {
1322         _par_last_gc_worker_times_ms[i] = _par_last_gc_worker_end_times_ms[i] -
1323                                           _par_last_gc_worker_start_times_ms[i];
1324 
1325         double worker_known_time = _par_last_ext_root_scan_times_ms[i] +
1326                                    _par_last_satb_filtering_times_ms[i] +
1327                                    _par_last_update_rs_times_ms[i] +
1328                                    _par_last_scan_rs_times_ms[i] +
1329                                    _par_last_obj_copy_times_ms[i] +
1330                                    _par_last_termination_times_ms[i];
1331 
1332         _par_last_gc_worker_other_times_ms[i] = _par_last_gc_worker_times_ms[i] -
1333                                                 worker_known_time;
1334       }
1335 
1336       print_par_stats(2, "GC Worker Other", _par_last_gc_worker_other_times_ms);
1337       print_par_stats(2, "GC Worker Total", _par_last_gc_worker_times_ms);
1338       print_par_stats(2, "GC Worker End", _par_last_gc_worker_end_times_ms);
1339     } else {
1340       print_stats(1, "Ext Root Scanning", ext_root_scan_time);
1341       if (print_marking_info) {
1342         print_stats(1, "SATB Filtering", satb_filtering_time);
1343       }
1344       print_stats(1, "Update RS", update_rs_time);
1345       if (G1Log::finest()) {
1346         print_stats(2, "Processed Buffers", (int)update_rs_processed_buffers);
1347       }
1348       print_stats(1, "Scan RS", scan_rs_time);
1349       print_stats(1, "Object Copying", obj_copy_time);
1350     }
1351     print_stats(1, "Code Root Fixup", _cur_collection_code_root_fixup_time_ms);
1352     print_stats(1, "Clear CT", _cur_clear_ct_time_ms);
1353 #ifndef PRODUCT
1354     print_stats(1, "Cur Clear CC", _cur_clear_cc_time_ms);
1355     print_stats(1, "Cum Clear CC", _cum_clear_cc_time_ms);
1356     print_stats(1, "Min Clear CC", _min_clear_cc_time_ms);
1357     print_stats(1, "Max Clear CC", _max_clear_cc_time_ms);
1358     if (_num_cc_clears > 0) {
1359       print_stats(1, "Avg Clear CC", _cum_clear_cc_time_ms / ((double)_num_cc_clears));
1360     }
1361 #endif
1362     print_stats(1, "Other", other_time_ms);
1363     print_stats(2, "Choose CSet",
1364                    (_recorded_young_cset_choice_time_ms +
1365                     _recorded_non_young_cset_choice_time_ms));
1366     print_stats(2, "Ref Proc", _cur_ref_proc_time_ms);
1367     print_stats(2, "Ref Enq", _cur_ref_enq_time_ms);
1368     print_stats(2, "Free CSet",
1369                    (_recorded_young_free_cset_time_ms +
1370                     _recorded_non_young_free_cset_time_ms));
1371   }
1372 
1373   bool new_in_marking_window = _in_marking_window;
1374   bool new_in_marking_window_im = false;
1375   if (during_initial_mark_pause()) {
1376     new_in_marking_window = true;
1377     new_in_marking_window_im = true;
1378   }
1379 
1380   if (_last_young_gc) {
1381     // This is supposed to to be the "last young GC" before we start
1382     // doing mixed GCs. Here we decide whether to start mixed GCs or not.
1383 
1384     if (!last_pause_included_initial_mark) {
1385       if (next_gc_should_be_mixed("start mixed GCs",
1386                                   "do not start mixed GCs")) {
1387         set_gcs_are_young(false);
1388       }
1389     } else {
1390       ergo_verbose0(ErgoMixedGCs,
1391                     "do not start mixed GCs",
1392                     ergo_format_reason("concurrent cycle is about to start"));
1393     }
1394     _last_young_gc = false;
1395   }
1396 
1397   if (!_last_gc_was_young) {
1398     // This is a mixed GC. Here we decide whether to continue doing
1399     // mixed GCs or not.
1400 
1401     if (!next_gc_should_be_mixed("continue mixed GCs",
1402                                  "do not continue mixed GCs")) {
1403       set_gcs_are_young(true);
1404     }
1405   }
1406 
1407   _short_lived_surv_rate_group->start_adding_regions();
1408   // do that for any other surv rate groupsx
1409 
1410   if (update_stats) {
1411     double pause_time_ms = elapsed_ms;
1412 
1413     size_t diff = 0;
1414     if (_max_pending_cards >= _pending_cards) {
1415       diff = _max_pending_cards - _pending_cards;
1416     }
1417     _pending_card_diff_seq->add((double) diff);
1418 
1419     double cost_per_card_ms = 0.0;
1420     if (_pending_cards > 0) {
1421       cost_per_card_ms = update_rs_time / (double) _pending_cards;
1422       _cost_per_card_ms_seq->add(cost_per_card_ms);
1423     }
1424 
1425     size_t cards_scanned = _g1->cards_scanned();
1426 
1427     double cost_per_entry_ms = 0.0;
1428     if (cards_scanned > 10) {
1429       cost_per_entry_ms = scan_rs_time / (double) cards_scanned;
1430       if (_last_gc_was_young) {
1431         _cost_per_entry_ms_seq->add(cost_per_entry_ms);
1432       } else {
1433         _mixed_cost_per_entry_ms_seq->add(cost_per_entry_ms);
1434       }
1435     }
1436 
1437     if (_max_rs_lengths > 0) {
1438       double cards_per_entry_ratio =
1439         (double) cards_scanned / (double) _max_rs_lengths;
1440       if (_last_gc_was_young) {
1441         _young_cards_per_entry_ratio_seq->add(cards_per_entry_ratio);
1442       } else {
1443         _mixed_cards_per_entry_ratio_seq->add(cards_per_entry_ratio);
1444       }
1445     }
1446 
1447     // This is defensive. For a while _max_rs_lengths could get
1448     // smaller than _recorded_rs_lengths which was causing
1449     // rs_length_diff to get very large and mess up the RSet length
1450     // predictions. The reason was unsafe concurrent updates to the
1451     // _inc_cset_recorded_rs_lengths field which the code below guards
1452     // against (see CR 7118202). This bug has now been fixed (see CR
1453     // 7119027). However, I'm still worried that
1454     // _inc_cset_recorded_rs_lengths might still end up somewhat
1455     // inaccurate. The concurrent refinement thread calculates an
1456     // RSet's length concurrently with other CR threads updating it
1457     // which might cause it to calculate the length incorrectly (if,
1458     // say, it's in mid-coarsening). So I'll leave in the defensive
1459     // conditional below just in case.
1460     size_t rs_length_diff = 0;
1461     if (_max_rs_lengths > _recorded_rs_lengths) {
1462       rs_length_diff = _max_rs_lengths - _recorded_rs_lengths;
1463     }
1464     _rs_length_diff_seq->add((double) rs_length_diff);
1465 
1466     size_t copied_bytes = surviving_bytes;
1467     double cost_per_byte_ms = 0.0;
1468     if (copied_bytes > 0) {
1469       cost_per_byte_ms = obj_copy_time / (double) copied_bytes;
1470       if (_in_marking_window) {
1471         _cost_per_byte_ms_during_cm_seq->add(cost_per_byte_ms);
1472       } else {
1473         _cost_per_byte_ms_seq->add(cost_per_byte_ms);
1474       }
1475     }
1476 
1477     double all_other_time_ms = pause_time_ms -
1478       (update_rs_time + scan_rs_time + obj_copy_time + termination_time);
1479 
1480     double young_other_time_ms = 0.0;
1481     if (young_cset_region_length() > 0) {
1482       young_other_time_ms =
1483         _recorded_young_cset_choice_time_ms +
1484         _recorded_young_free_cset_time_ms;
1485       _young_other_cost_per_region_ms_seq->add(young_other_time_ms /
1486                                           (double) young_cset_region_length());
1487     }
1488     double non_young_other_time_ms = 0.0;
1489     if (old_cset_region_length() > 0) {
1490       non_young_other_time_ms =
1491         _recorded_non_young_cset_choice_time_ms +
1492         _recorded_non_young_free_cset_time_ms;
1493 
1494       _non_young_other_cost_per_region_ms_seq->add(non_young_other_time_ms /
1495                                             (double) old_cset_region_length());
1496     }
1497 
1498     double constant_other_time_ms = all_other_time_ms -
1499       (young_other_time_ms + non_young_other_time_ms);
1500     _constant_other_time_ms_seq->add(constant_other_time_ms);
1501 
1502     double survival_ratio = 0.0;
1503     if (_bytes_in_collection_set_before_gc > 0) {
1504       survival_ratio = (double) _bytes_copied_during_gc /
1505                                    (double) _bytes_in_collection_set_before_gc;
1506     }
1507 
1508     _pending_cards_seq->add((double) _pending_cards);
1509     _rs_lengths_seq->add((double) _max_rs_lengths);
1510   }
1511 
1512   _in_marking_window = new_in_marking_window;
1513   _in_marking_window_im = new_in_marking_window_im;
1514   _free_regions_at_end_of_collection = _g1->free_regions();
1515   update_young_list_target_length();
1516 
1517   // Note that _mmu_tracker->max_gc_time() returns the time in seconds.
1518   double update_rs_time_goal_ms = _mmu_tracker->max_gc_time() * MILLIUNITS * G1RSetUpdatingPauseTimePercent / 100.0;
1519   adjust_concurrent_refinement(update_rs_time, update_rs_processed_buffers, update_rs_time_goal_ms);
1520 
1521   _collectionSetChooser->verify();
1522 }
1523 
1524 #define EXT_SIZE_FORMAT "%.1f%s"
1525 #define EXT_SIZE_PARAMS(bytes)                                  \
1526   byte_size_in_proper_unit((double)(bytes)),                    \
1527   proper_unit_for_byte_size((bytes))
1528 
1529 void G1CollectorPolicy::print_heap_transition() {
1530   if (G1Log::finer()) {
1531     YoungList* young_list = _g1->young_list();
1532     size_t eden_bytes = young_list->eden_used_bytes();
1533     size_t survivor_bytes = young_list->survivor_used_bytes();
1534     size_t used_before_gc = _cur_collection_pause_used_at_start_bytes;
1535     size_t used = _g1->used();
1536     size_t capacity = _g1->capacity();
1537     size_t eden_capacity =
1538       (_young_list_target_length * HeapRegion::GrainBytes) - survivor_bytes;
1539 
1540     gclog_or_tty->print_cr(
1541       "   [Eden: "EXT_SIZE_FORMAT"("EXT_SIZE_FORMAT")->"EXT_SIZE_FORMAT"("EXT_SIZE_FORMAT") "
1542       "Survivors: "EXT_SIZE_FORMAT"->"EXT_SIZE_FORMAT" "
1543       "Heap: "EXT_SIZE_FORMAT"("EXT_SIZE_FORMAT")->"
1544       EXT_SIZE_FORMAT"("EXT_SIZE_FORMAT")]",
1545       EXT_SIZE_PARAMS(_eden_bytes_before_gc),
1546       EXT_SIZE_PARAMS(_prev_eden_capacity),
1547       EXT_SIZE_PARAMS(eden_bytes),
1548       EXT_SIZE_PARAMS(eden_capacity),
1549       EXT_SIZE_PARAMS(_survivor_bytes_before_gc),
1550       EXT_SIZE_PARAMS(survivor_bytes),
1551       EXT_SIZE_PARAMS(used_before_gc),
1552       EXT_SIZE_PARAMS(_capacity_before_gc),
1553       EXT_SIZE_PARAMS(used),
1554       EXT_SIZE_PARAMS(capacity));
1555 
1556     _prev_eden_capacity = eden_capacity;
1557   } else if (G1Log::fine()) {
1558     _g1->print_size_transition(gclog_or_tty,
1559                                _cur_collection_pause_used_at_start_bytes,
1560                                _g1->used(), _g1->capacity());
1561   }
1562 }
1563 
1564 void G1CollectorPolicy::adjust_concurrent_refinement(double update_rs_time,
1565                                                      double update_rs_processed_buffers,
1566                                                      double goal_ms) {
1567   DirtyCardQueueSet& dcqs = JavaThread::dirty_card_queue_set();
1568   ConcurrentG1Refine *cg1r = G1CollectedHeap::heap()->concurrent_g1_refine();
1569 
1570   if (G1UseAdaptiveConcRefinement) {
1571     const int k_gy = 3, k_gr = 6;
1572     const double inc_k = 1.1, dec_k = 0.9;
1573 
1574     int g = cg1r->green_zone();
1575     if (update_rs_time > goal_ms) {
1576       g = (int)(g * dec_k);  // Can become 0, that's OK. That would mean a mutator-only processing.
1577     } else {
1578       if (update_rs_time < goal_ms && update_rs_processed_buffers > g) {
1579         g = (int)MAX2(g * inc_k, g + 1.0);
1580       }
1581     }
1582     // Change the refinement threads params
1583     cg1r->set_green_zone(g);
1584     cg1r->set_yellow_zone(g * k_gy);
1585     cg1r->set_red_zone(g * k_gr);
1586     cg1r->reinitialize_threads();
1587 
1588     int processing_threshold_delta = MAX2((int)(cg1r->green_zone() * sigma()), 1);
1589     int processing_threshold = MIN2(cg1r->green_zone() + processing_threshold_delta,
1590                                     cg1r->yellow_zone());
1591     // Change the barrier params
1592     dcqs.set_process_completed_threshold(processing_threshold);
1593     dcqs.set_max_completed_queue(cg1r->red_zone());
1594   }
1595 
1596   int curr_queue_size = dcqs.completed_buffers_num();
1597   if (curr_queue_size >= cg1r->yellow_zone()) {
1598     dcqs.set_completed_queue_padding(curr_queue_size);
1599   } else {
1600     dcqs.set_completed_queue_padding(0);
1601   }
1602   dcqs.notify_if_necessary();
1603 }
1604 
1605 double
1606 G1CollectorPolicy::predict_base_elapsed_time_ms(size_t pending_cards) {
1607   size_t rs_length = predict_rs_length_diff();
1608   size_t card_num;
1609   if (gcs_are_young()) {
1610     card_num = predict_young_card_num(rs_length);
1611   } else {
1612     card_num = predict_non_young_card_num(rs_length);
1613   }
1614   return predict_base_elapsed_time_ms(pending_cards, card_num);
1615 }
1616 
1617 double
1618 G1CollectorPolicy::predict_base_elapsed_time_ms(size_t pending_cards,
1619                                                 size_t scanned_cards) {
1620   return
1621     predict_rs_update_time_ms(pending_cards) +
1622     predict_rs_scan_time_ms(scanned_cards) +
1623     predict_constant_other_time_ms();
1624 }
1625 
1626 double
1627 G1CollectorPolicy::predict_region_elapsed_time_ms(HeapRegion* hr,
1628                                                   bool young) {
1629   size_t rs_length = hr->rem_set()->occupied();
1630   size_t card_num;
1631   if (gcs_are_young()) {
1632     card_num = predict_young_card_num(rs_length);
1633   } else {
1634     card_num = predict_non_young_card_num(rs_length);
1635   }
1636   size_t bytes_to_copy = predict_bytes_to_copy(hr);
1637 
1638   double region_elapsed_time_ms =
1639     predict_rs_scan_time_ms(card_num) +
1640     predict_object_copy_time_ms(bytes_to_copy);
1641 
1642   if (young)
1643     region_elapsed_time_ms += predict_young_other_time_ms(1);
1644   else
1645     region_elapsed_time_ms += predict_non_young_other_time_ms(1);
1646 
1647   return region_elapsed_time_ms;
1648 }
1649 
1650 size_t G1CollectorPolicy::predict_bytes_to_copy(HeapRegion* hr) {
1651   size_t bytes_to_copy;
1652   if (hr->is_marked())
1653     bytes_to_copy = hr->max_live_bytes();
1654   else {
1655     assert(hr->is_young() && hr->age_in_surv_rate_group() != -1, "invariant");
1656     int age = hr->age_in_surv_rate_group();
1657     double yg_surv_rate = predict_yg_surv_rate(age, hr->surv_rate_group());
1658     bytes_to_copy = (size_t) ((double) hr->used() * yg_surv_rate);
1659   }
1660   return bytes_to_copy;
1661 }
1662 
1663 void
1664 G1CollectorPolicy::init_cset_region_lengths(uint eden_cset_region_length,
1665                                             uint survivor_cset_region_length) {
1666   _eden_cset_region_length     = eden_cset_region_length;
1667   _survivor_cset_region_length = survivor_cset_region_length;
1668   _old_cset_region_length      = 0;
1669 }
1670 
1671 void G1CollectorPolicy::set_recorded_rs_lengths(size_t rs_lengths) {
1672   _recorded_rs_lengths = rs_lengths;
1673 }
1674 
1675 void G1CollectorPolicy::update_recent_gc_times(double end_time_sec,
1676                                                double elapsed_ms) {
1677   _recent_gc_times_ms->add(elapsed_ms);
1678   _recent_prev_end_times_for_all_gcs_sec->add(end_time_sec);
1679   _prev_collection_pause_end_ms = end_time_sec * 1000.0;
1680 }
1681 
1682 size_t G1CollectorPolicy::expansion_amount() {
1683   double recent_gc_overhead = recent_avg_pause_time_ratio() * 100.0;
1684   double threshold = _gc_overhead_perc;
1685   if (recent_gc_overhead > threshold) {
1686     // We will double the existing space, or take
1687     // G1ExpandByPercentOfAvailable % of the available expansion
1688     // space, whichever is smaller, bounded below by a minimum
1689     // expansion (unless that's all that's left.)
1690     const size_t min_expand_bytes = 1*M;
1691     size_t reserved_bytes = _g1->max_capacity();
1692     size_t committed_bytes = _g1->capacity();
1693     size_t uncommitted_bytes = reserved_bytes - committed_bytes;
1694     size_t expand_bytes;
1695     size_t expand_bytes_via_pct =
1696       uncommitted_bytes * G1ExpandByPercentOfAvailable / 100;
1697     expand_bytes = MIN2(expand_bytes_via_pct, committed_bytes);
1698     expand_bytes = MAX2(expand_bytes, min_expand_bytes);
1699     expand_bytes = MIN2(expand_bytes, uncommitted_bytes);
1700 
1701     ergo_verbose5(ErgoHeapSizing,
1702                   "attempt heap expansion",
1703                   ergo_format_reason("recent GC overhead higher than "
1704                                      "threshold after GC")
1705                   ergo_format_perc("recent GC overhead")
1706                   ergo_format_perc("threshold")
1707                   ergo_format_byte("uncommitted")
1708                   ergo_format_byte_perc("calculated expansion amount"),
1709                   recent_gc_overhead, threshold,
1710                   uncommitted_bytes,
1711                   expand_bytes_via_pct, (double) G1ExpandByPercentOfAvailable);
1712 
1713     return expand_bytes;
1714   } else {
1715     return 0;
1716   }
1717 }
1718 
1719 class CountCSClosure: public HeapRegionClosure {
1720   G1CollectorPolicy* _g1_policy;
1721 public:
1722   CountCSClosure(G1CollectorPolicy* g1_policy) :
1723     _g1_policy(g1_policy) {}
1724   bool doHeapRegion(HeapRegion* r) {
1725     _g1_policy->_bytes_in_collection_set_before_gc += r->used();
1726     return false;
1727   }
1728 };
1729 
1730 void G1CollectorPolicy::count_CS_bytes_used() {
1731   CountCSClosure cs_closure(this);
1732   _g1->collection_set_iterate(&cs_closure);
1733 }
1734 
1735 void G1CollectorPolicy::print_tracing_info() const {
1736   _trace_gen0_time_data.print();
1737   _trace_gen1_time_data.print();
1738 }
1739 
1740 void G1CollectorPolicy::print_yg_surv_rate_info() const {
1741 #ifndef PRODUCT
1742   _short_lived_surv_rate_group->print_surv_rate_summary();
1743   // add this call for any other surv rate groups
1744 #endif // PRODUCT
1745 }
1746 
1747 #ifndef PRODUCT
1748 // for debugging, bit of a hack...
1749 static char*
1750 region_num_to_mbs(int length) {
1751   static char buffer[64];
1752   double bytes = (double) (length * HeapRegion::GrainBytes);
1753   double mbs = bytes / (double) (1024 * 1024);
1754   sprintf(buffer, "%7.2lfMB", mbs);
1755   return buffer;
1756 }
1757 #endif // PRODUCT
1758 
1759 uint G1CollectorPolicy::max_regions(int purpose) {
1760   switch (purpose) {
1761     case GCAllocForSurvived:
1762       return _max_survivor_regions;
1763     case GCAllocForTenured:
1764       return REGIONS_UNLIMITED;
1765     default:
1766       ShouldNotReachHere();
1767       return REGIONS_UNLIMITED;
1768   };
1769 }
1770 
1771 void G1CollectorPolicy::update_max_gc_locker_expansion() {
1772   uint expansion_region_num = 0;
1773   if (GCLockerEdenExpansionPercent > 0) {
1774     double perc = (double) GCLockerEdenExpansionPercent / 100.0;
1775     double expansion_region_num_d = perc * (double) _young_list_target_length;
1776     // We use ceiling so that if expansion_region_num_d is > 0.0 (but
1777     // less than 1.0) we'll get 1.
1778     expansion_region_num = (uint) ceil(expansion_region_num_d);
1779   } else {
1780     assert(expansion_region_num == 0, "sanity");
1781   }
1782   _young_list_max_length = _young_list_target_length + expansion_region_num;
1783   assert(_young_list_target_length <= _young_list_max_length, "post-condition");
1784 }
1785 
1786 // Calculates survivor space parameters.
1787 void G1CollectorPolicy::update_survivors_policy() {
1788   double max_survivor_regions_d =
1789                  (double) _young_list_target_length / (double) SurvivorRatio;
1790   // We use ceiling so that if max_survivor_regions_d is > 0.0 (but
1791   // smaller than 1.0) we'll get 1.
1792   _max_survivor_regions = (uint) ceil(max_survivor_regions_d);
1793 
1794   _tenuring_threshold = _survivors_age_table.compute_tenuring_threshold(
1795         HeapRegion::GrainWords * _max_survivor_regions);
1796 }
1797 
1798 bool G1CollectorPolicy::force_initial_mark_if_outside_cycle(
1799                                                      GCCause::Cause gc_cause) {
1800   bool during_cycle = _g1->concurrent_mark()->cmThread()->during_cycle();
1801   if (!during_cycle) {
1802     ergo_verbose1(ErgoConcCycles,
1803                   "request concurrent cycle initiation",
1804                   ergo_format_reason("requested by GC cause")
1805                   ergo_format_str("GC cause"),
1806                   GCCause::to_string(gc_cause));
1807     set_initiate_conc_mark_if_possible();
1808     return true;
1809   } else {
1810     ergo_verbose1(ErgoConcCycles,
1811                   "do not request concurrent cycle initiation",
1812                   ergo_format_reason("concurrent cycle already in progress")
1813                   ergo_format_str("GC cause"),
1814                   GCCause::to_string(gc_cause));
1815     return false;
1816   }
1817 }
1818 
1819 void
1820 G1CollectorPolicy::decide_on_conc_mark_initiation() {
1821   // We are about to decide on whether this pause will be an
1822   // initial-mark pause.
1823 
1824   // First, during_initial_mark_pause() should not be already set. We
1825   // will set it here if we have to. However, it should be cleared by
1826   // the end of the pause (it's only set for the duration of an
1827   // initial-mark pause).
1828   assert(!during_initial_mark_pause(), "pre-condition");
1829 
1830   if (initiate_conc_mark_if_possible()) {
1831     // We had noticed on a previous pause that the heap occupancy has
1832     // gone over the initiating threshold and we should start a
1833     // concurrent marking cycle. So we might initiate one.
1834 
1835     bool during_cycle = _g1->concurrent_mark()->cmThread()->during_cycle();
1836     if (!during_cycle) {
1837       // The concurrent marking thread is not "during a cycle", i.e.,
1838       // it has completed the last one. So we can go ahead and
1839       // initiate a new cycle.
1840 
1841       set_during_initial_mark_pause();
1842       // We do not allow mixed GCs during marking.
1843       if (!gcs_are_young()) {
1844         set_gcs_are_young(true);
1845         ergo_verbose0(ErgoMixedGCs,
1846                       "end mixed GCs",
1847                       ergo_format_reason("concurrent cycle is about to start"));
1848       }
1849 
1850       // And we can now clear initiate_conc_mark_if_possible() as
1851       // we've already acted on it.
1852       clear_initiate_conc_mark_if_possible();
1853 
1854       ergo_verbose0(ErgoConcCycles,
1855                   "initiate concurrent cycle",
1856                   ergo_format_reason("concurrent cycle initiation requested"));
1857     } else {
1858       // The concurrent marking thread is still finishing up the
1859       // previous cycle. If we start one right now the two cycles
1860       // overlap. In particular, the concurrent marking thread might
1861       // be in the process of clearing the next marking bitmap (which
1862       // we will use for the next cycle if we start one). Starting a
1863       // cycle now will be bad given that parts of the marking
1864       // information might get cleared by the marking thread. And we
1865       // cannot wait for the marking thread to finish the cycle as it
1866       // periodically yields while clearing the next marking bitmap
1867       // and, if it's in a yield point, it's waiting for us to
1868       // finish. So, at this point we will not start a cycle and we'll
1869       // let the concurrent marking thread complete the last one.
1870       ergo_verbose0(ErgoConcCycles,
1871                     "do not initiate concurrent cycle",
1872                     ergo_format_reason("concurrent cycle already in progress"));
1873     }
1874   }
1875 }
1876 
1877 class KnownGarbageClosure: public HeapRegionClosure {
1878   G1CollectedHeap* _g1h;
1879   CollectionSetChooser* _hrSorted;
1880 
1881 public:
1882   KnownGarbageClosure(CollectionSetChooser* hrSorted) :
1883     _g1h(G1CollectedHeap::heap()), _hrSorted(hrSorted) { }
1884 
1885   bool doHeapRegion(HeapRegion* r) {
1886     // We only include humongous regions in collection
1887     // sets when concurrent mark shows that their contained object is
1888     // unreachable.
1889 
1890     // Do we have any marking information for this region?
1891     if (r->is_marked()) {
1892       // We will skip any region that's currently used as an old GC
1893       // alloc region (we should not consider those for collection
1894       // before we fill them up).
1895       if (_hrSorted->should_add(r) && !_g1h->is_old_gc_alloc_region(r)) {
1896         _hrSorted->add_region(r);
1897       }
1898     }
1899     return false;
1900   }
1901 };
1902 
1903 class ParKnownGarbageHRClosure: public HeapRegionClosure {
1904   G1CollectedHeap* _g1h;
1905   CollectionSetChooser* _hrSorted;
1906   uint _marked_regions_added;
1907   size_t _reclaimable_bytes_added;
1908   uint _chunk_size;
1909   uint _cur_chunk_idx;
1910   uint _cur_chunk_end; // Cur chunk [_cur_chunk_idx, _cur_chunk_end)
1911 
1912   void get_new_chunk() {
1913     _cur_chunk_idx = _hrSorted->claim_array_chunk(_chunk_size);
1914     _cur_chunk_end = _cur_chunk_idx + _chunk_size;
1915   }
1916   void add_region(HeapRegion* r) {
1917     if (_cur_chunk_idx == _cur_chunk_end) {
1918       get_new_chunk();
1919     }
1920     assert(_cur_chunk_idx < _cur_chunk_end, "postcondition");
1921     _hrSorted->set_region(_cur_chunk_idx, r);
1922     _marked_regions_added++;
1923     _reclaimable_bytes_added += r->reclaimable_bytes();
1924     _cur_chunk_idx++;
1925   }
1926 
1927 public:
1928   ParKnownGarbageHRClosure(CollectionSetChooser* hrSorted,
1929                            uint chunk_size) :
1930       _g1h(G1CollectedHeap::heap()),
1931       _hrSorted(hrSorted), _chunk_size(chunk_size),
1932       _marked_regions_added(0), _reclaimable_bytes_added(0),
1933       _cur_chunk_idx(0), _cur_chunk_end(0) { }
1934 
1935   bool doHeapRegion(HeapRegion* r) {
1936     // Do we have any marking information for this region?
1937     if (r->is_marked()) {
1938       // We will skip any region that's currently used as an old GC
1939       // alloc region (we should not consider those for collection
1940       // before we fill them up).
1941       if (_hrSorted->should_add(r) && !_g1h->is_old_gc_alloc_region(r)) {
1942         add_region(r);
1943       }
1944     }
1945     return false;
1946   }
1947   uint marked_regions_added() { return _marked_regions_added; }
1948   size_t reclaimable_bytes_added() { return _reclaimable_bytes_added; }
1949 };
1950 
1951 class ParKnownGarbageTask: public AbstractGangTask {
1952   CollectionSetChooser* _hrSorted;
1953   uint _chunk_size;
1954   G1CollectedHeap* _g1;
1955 public:
1956   ParKnownGarbageTask(CollectionSetChooser* hrSorted, uint chunk_size) :
1957     AbstractGangTask("ParKnownGarbageTask"),
1958     _hrSorted(hrSorted), _chunk_size(chunk_size),
1959     _g1(G1CollectedHeap::heap()) { }
1960 
1961   void work(uint worker_id) {
1962     ParKnownGarbageHRClosure parKnownGarbageCl(_hrSorted, _chunk_size);
1963 
1964     // Back to zero for the claim value.
1965     _g1->heap_region_par_iterate_chunked(&parKnownGarbageCl, worker_id,
1966                                          _g1->workers()->active_workers(),
1967                                          HeapRegion::InitialClaimValue);
1968     uint regions_added = parKnownGarbageCl.marked_regions_added();
1969     size_t reclaimable_bytes_added =
1970                                    parKnownGarbageCl.reclaimable_bytes_added();
1971     _hrSorted->update_totals(regions_added, reclaimable_bytes_added);
1972   }
1973 };
1974 
1975 void
1976 G1CollectorPolicy::record_concurrent_mark_cleanup_end(int no_of_gc_threads) {
1977   _collectionSetChooser->clear();
1978 
1979   uint region_num = _g1->n_regions();
1980   if (G1CollectedHeap::use_parallel_gc_threads()) {
1981     const uint OverpartitionFactor = 4;
1982     uint WorkUnit;
1983     // The use of MinChunkSize = 8 in the original code
1984     // causes some assertion failures when the total number of
1985     // region is less than 8.  The code here tries to fix that.
1986     // Should the original code also be fixed?
1987     if (no_of_gc_threads > 0) {
1988       const uint MinWorkUnit = MAX2(region_num / no_of_gc_threads, 1U);
1989       WorkUnit = MAX2(region_num / (no_of_gc_threads * OverpartitionFactor),
1990                       MinWorkUnit);
1991     } else {
1992       assert(no_of_gc_threads > 0,
1993         "The active gc workers should be greater than 0");
1994       // In a product build do something reasonable to avoid a crash.
1995       const uint MinWorkUnit = MAX2(region_num / (uint) ParallelGCThreads, 1U);
1996       WorkUnit =
1997         MAX2(region_num / (uint) (ParallelGCThreads * OverpartitionFactor),
1998              MinWorkUnit);
1999     }
2000     _collectionSetChooser->prepare_for_par_region_addition(_g1->n_regions(),
2001                                                            WorkUnit);
2002     ParKnownGarbageTask parKnownGarbageTask(_collectionSetChooser,
2003                                             (int) WorkUnit);
2004     _g1->workers()->run_task(&parKnownGarbageTask);
2005 
2006     assert(_g1->check_heap_region_claim_values(HeapRegion::InitialClaimValue),
2007            "sanity check");
2008   } else {
2009     KnownGarbageClosure knownGarbagecl(_collectionSetChooser);
2010     _g1->heap_region_iterate(&knownGarbagecl);
2011   }
2012 
2013   _collectionSetChooser->sort_regions();
2014 
2015   double end_sec = os::elapsedTime();
2016   double elapsed_time_ms = (end_sec - _mark_cleanup_start_sec) * 1000.0;
2017   _concurrent_mark_cleanup_times_ms->add(elapsed_time_ms);
2018   _cur_mark_stop_world_time_ms += elapsed_time_ms;
2019   _prev_collection_pause_end_ms += elapsed_time_ms;
2020   _mmu_tracker->add_pause(_mark_cleanup_start_sec, end_sec, true);
2021 }
2022 
2023 // Add the heap region at the head of the non-incremental collection set
2024 void G1CollectorPolicy::add_old_region_to_cset(HeapRegion* hr) {
2025   assert(_inc_cset_build_state == Active, "Precondition");
2026   assert(!hr->is_young(), "non-incremental add of young region");
2027 
2028   assert(!hr->in_collection_set(), "should not already be in the CSet");
2029   hr->set_in_collection_set(true);
2030   hr->set_next_in_collection_set(_collection_set);
2031   _collection_set = hr;
2032   _collection_set_bytes_used_before += hr->used();
2033   _g1->register_region_with_in_cset_fast_test(hr);
2034   size_t rs_length = hr->rem_set()->occupied();
2035   _recorded_rs_lengths += rs_length;
2036   _old_cset_region_length += 1;
2037 }
2038 
2039 // Initialize the per-collection-set information
2040 void G1CollectorPolicy::start_incremental_cset_building() {
2041   assert(_inc_cset_build_state == Inactive, "Precondition");
2042 
2043   _inc_cset_head = NULL;
2044   _inc_cset_tail = NULL;
2045   _inc_cset_bytes_used_before = 0;
2046 
2047   _inc_cset_max_finger = 0;
2048   _inc_cset_recorded_rs_lengths = 0;
2049   _inc_cset_recorded_rs_lengths_diffs = 0;
2050   _inc_cset_predicted_elapsed_time_ms = 0.0;
2051   _inc_cset_predicted_elapsed_time_ms_diffs = 0.0;
2052   _inc_cset_build_state = Active;
2053 }
2054 
2055 void G1CollectorPolicy::finalize_incremental_cset_building() {
2056   assert(_inc_cset_build_state == Active, "Precondition");
2057   assert(SafepointSynchronize::is_at_safepoint(), "should be at a safepoint");
2058 
2059   // The two "main" fields, _inc_cset_recorded_rs_lengths and
2060   // _inc_cset_predicted_elapsed_time_ms, are updated by the thread
2061   // that adds a new region to the CSet. Further updates by the
2062   // concurrent refinement thread that samples the young RSet lengths
2063   // are accumulated in the *_diffs fields. Here we add the diffs to
2064   // the "main" fields.
2065 
2066   if (_inc_cset_recorded_rs_lengths_diffs >= 0) {
2067     _inc_cset_recorded_rs_lengths += _inc_cset_recorded_rs_lengths_diffs;
2068   } else {
2069     // This is defensive. The diff should in theory be always positive
2070     // as RSets can only grow between GCs. However, given that we
2071     // sample their size concurrently with other threads updating them
2072     // it's possible that we might get the wrong size back, which
2073     // could make the calculations somewhat inaccurate.
2074     size_t diffs = (size_t) (-_inc_cset_recorded_rs_lengths_diffs);
2075     if (_inc_cset_recorded_rs_lengths >= diffs) {
2076       _inc_cset_recorded_rs_lengths -= diffs;
2077     } else {
2078       _inc_cset_recorded_rs_lengths = 0;
2079     }
2080   }
2081   _inc_cset_predicted_elapsed_time_ms +=
2082                                      _inc_cset_predicted_elapsed_time_ms_diffs;
2083 
2084   _inc_cset_recorded_rs_lengths_diffs = 0;
2085   _inc_cset_predicted_elapsed_time_ms_diffs = 0.0;
2086 }
2087 
2088 void G1CollectorPolicy::add_to_incremental_cset_info(HeapRegion* hr, size_t rs_length) {
2089   // This routine is used when:
2090   // * adding survivor regions to the incremental cset at the end of an
2091   //   evacuation pause,
2092   // * adding the current allocation region to the incremental cset
2093   //   when it is retired, and
2094   // * updating existing policy information for a region in the
2095   //   incremental cset via young list RSet sampling.
2096   // Therefore this routine may be called at a safepoint by the
2097   // VM thread, or in-between safepoints by mutator threads (when
2098   // retiring the current allocation region) or a concurrent
2099   // refine thread (RSet sampling).
2100 
2101   double region_elapsed_time_ms = predict_region_elapsed_time_ms(hr, true);
2102   size_t used_bytes = hr->used();
2103   _inc_cset_recorded_rs_lengths += rs_length;
2104   _inc_cset_predicted_elapsed_time_ms += region_elapsed_time_ms;
2105   _inc_cset_bytes_used_before += used_bytes;
2106 
2107   // Cache the values we have added to the aggregated informtion
2108   // in the heap region in case we have to remove this region from
2109   // the incremental collection set, or it is updated by the
2110   // rset sampling code
2111   hr->set_recorded_rs_length(rs_length);
2112   hr->set_predicted_elapsed_time_ms(region_elapsed_time_ms);
2113 }
2114 
2115 void G1CollectorPolicy::update_incremental_cset_info(HeapRegion* hr,
2116                                                      size_t new_rs_length) {
2117   // Update the CSet information that is dependent on the new RS length
2118   assert(hr->is_young(), "Precondition");
2119   assert(!SafepointSynchronize::is_at_safepoint(),
2120                                                "should not be at a safepoint");
2121 
2122   // We could have updated _inc_cset_recorded_rs_lengths and
2123   // _inc_cset_predicted_elapsed_time_ms directly but we'd need to do
2124   // that atomically, as this code is executed by a concurrent
2125   // refinement thread, potentially concurrently with a mutator thread
2126   // allocating a new region and also updating the same fields. To
2127   // avoid the atomic operations we accumulate these updates on two
2128   // separate fields (*_diffs) and we'll just add them to the "main"
2129   // fields at the start of a GC.
2130 
2131   ssize_t old_rs_length = (ssize_t) hr->recorded_rs_length();
2132   ssize_t rs_lengths_diff = (ssize_t) new_rs_length - old_rs_length;
2133   _inc_cset_recorded_rs_lengths_diffs += rs_lengths_diff;
2134 
2135   double old_elapsed_time_ms = hr->predicted_elapsed_time_ms();
2136   double new_region_elapsed_time_ms = predict_region_elapsed_time_ms(hr, true);
2137   double elapsed_ms_diff = new_region_elapsed_time_ms - old_elapsed_time_ms;
2138   _inc_cset_predicted_elapsed_time_ms_diffs += elapsed_ms_diff;
2139 
2140   hr->set_recorded_rs_length(new_rs_length);
2141   hr->set_predicted_elapsed_time_ms(new_region_elapsed_time_ms);
2142 }
2143 
2144 void G1CollectorPolicy::add_region_to_incremental_cset_common(HeapRegion* hr) {
2145   assert(hr->is_young(), "invariant");
2146   assert(hr->young_index_in_cset() > -1, "should have already been set");
2147   assert(_inc_cset_build_state == Active, "Precondition");
2148 
2149   // We need to clear and set the cached recorded/cached collection set
2150   // information in the heap region here (before the region gets added
2151   // to the collection set). An individual heap region's cached values
2152   // are calculated, aggregated with the policy collection set info,
2153   // and cached in the heap region here (initially) and (subsequently)
2154   // by the Young List sampling code.
2155 
2156   size_t rs_length = hr->rem_set()->occupied();
2157   add_to_incremental_cset_info(hr, rs_length);
2158 
2159   HeapWord* hr_end = hr->end();
2160   _inc_cset_max_finger = MAX2(_inc_cset_max_finger, hr_end);
2161 
2162   assert(!hr->in_collection_set(), "invariant");
2163   hr->set_in_collection_set(true);
2164   assert( hr->next_in_collection_set() == NULL, "invariant");
2165 
2166   _g1->register_region_with_in_cset_fast_test(hr);
2167 }
2168 
2169 // Add the region at the RHS of the incremental cset
2170 void G1CollectorPolicy::add_region_to_incremental_cset_rhs(HeapRegion* hr) {
2171   // We should only ever be appending survivors at the end of a pause
2172   assert( hr->is_survivor(), "Logic");
2173 
2174   // Do the 'common' stuff
2175   add_region_to_incremental_cset_common(hr);
2176 
2177   // Now add the region at the right hand side
2178   if (_inc_cset_tail == NULL) {
2179     assert(_inc_cset_head == NULL, "invariant");
2180     _inc_cset_head = hr;
2181   } else {
2182     _inc_cset_tail->set_next_in_collection_set(hr);
2183   }
2184   _inc_cset_tail = hr;
2185 }
2186 
2187 // Add the region to the LHS of the incremental cset
2188 void G1CollectorPolicy::add_region_to_incremental_cset_lhs(HeapRegion* hr) {
2189   // Survivors should be added to the RHS at the end of a pause
2190   assert(!hr->is_survivor(), "Logic");
2191 
2192   // Do the 'common' stuff
2193   add_region_to_incremental_cset_common(hr);
2194 
2195   // Add the region at the left hand side
2196   hr->set_next_in_collection_set(_inc_cset_head);
2197   if (_inc_cset_head == NULL) {
2198     assert(_inc_cset_tail == NULL, "Invariant");
2199     _inc_cset_tail = hr;
2200   }
2201   _inc_cset_head = hr;
2202 }
2203 
2204 #ifndef PRODUCT
2205 void G1CollectorPolicy::print_collection_set(HeapRegion* list_head, outputStream* st) {
2206   assert(list_head == inc_cset_head() || list_head == collection_set(), "must be");
2207 
2208   st->print_cr("\nCollection_set:");
2209   HeapRegion* csr = list_head;
2210   while (csr != NULL) {
2211     HeapRegion* next = csr->next_in_collection_set();
2212     assert(csr->in_collection_set(), "bad CS");
2213     st->print_cr("  "HR_FORMAT", P: "PTR_FORMAT "N: "PTR_FORMAT", age: %4d",
2214                  HR_FORMAT_PARAMS(csr),
2215                  csr->prev_top_at_mark_start(), csr->next_top_at_mark_start(),
2216                  csr->age_in_surv_rate_group_cond());
2217     csr = next;
2218   }
2219 }
2220 #endif // !PRODUCT
2221 
2222 bool G1CollectorPolicy::next_gc_should_be_mixed(const char* true_action_str,
2223                                                 const char* false_action_str) {
2224   CollectionSetChooser* cset_chooser = _collectionSetChooser;
2225   if (cset_chooser->is_empty()) {
2226     ergo_verbose0(ErgoMixedGCs,
2227                   false_action_str,
2228                   ergo_format_reason("candidate old regions not available"));
2229     return false;
2230   }
2231   size_t reclaimable_bytes = cset_chooser->remaining_reclaimable_bytes();
2232   size_t capacity_bytes = _g1->capacity();
2233   double perc = (double) reclaimable_bytes * 100.0 / (double) capacity_bytes;
2234   double threshold = (double) G1HeapWastePercent;
2235   if (perc < threshold) {
2236     ergo_verbose4(ErgoMixedGCs,
2237               false_action_str,
2238               ergo_format_reason("reclaimable percentage lower than threshold")
2239               ergo_format_region("candidate old regions")
2240               ergo_format_byte_perc("reclaimable")
2241               ergo_format_perc("threshold"),
2242               cset_chooser->remaining_regions(),
2243               reclaimable_bytes, perc, threshold);
2244     return false;
2245   }
2246 
2247   ergo_verbose4(ErgoMixedGCs,
2248                 true_action_str,
2249                 ergo_format_reason("candidate old regions available")
2250                 ergo_format_region("candidate old regions")
2251                 ergo_format_byte_perc("reclaimable")
2252                 ergo_format_perc("threshold"),
2253                 cset_chooser->remaining_regions(),
2254                 reclaimable_bytes, perc, threshold);
2255   return true;
2256 }
2257 
2258 void G1CollectorPolicy::finalize_cset(double target_pause_time_ms) {
2259   // Set this here - in case we're not doing young collections.
2260   double non_young_start_time_sec = os::elapsedTime();
2261 
2262   YoungList* young_list = _g1->young_list();
2263   finalize_incremental_cset_building();
2264 
2265   guarantee(target_pause_time_ms > 0.0,
2266             err_msg("target_pause_time_ms = %1.6lf should be positive",
2267                     target_pause_time_ms));
2268   guarantee(_collection_set == NULL, "Precondition");
2269 
2270   double base_time_ms = predict_base_elapsed_time_ms(_pending_cards);
2271   double predicted_pause_time_ms = base_time_ms;
2272   double time_remaining_ms = target_pause_time_ms - base_time_ms;
2273 
2274   ergo_verbose3(ErgoCSetConstruction | ErgoHigh,
2275                 "start choosing CSet",
2276                 ergo_format_ms("predicted base time")
2277                 ergo_format_ms("remaining time")
2278                 ergo_format_ms("target pause time"),
2279                 base_time_ms, time_remaining_ms, target_pause_time_ms);
2280 
2281   HeapRegion* hr;
2282   double young_start_time_sec = os::elapsedTime();
2283 
2284   _collection_set_bytes_used_before = 0;
2285   _last_gc_was_young = gcs_are_young() ? true : false;
2286 
2287   _trace_gen0_time_data.increment_collection_count(!_last_gc_was_young);
2288   if (_last_gc_was_young) {
2289     ++_young_pause_num;
2290   } else {
2291     ++_mixed_pause_num;
2292   }
2293 
2294   // The young list is laid with the survivor regions from the previous
2295   // pause are appended to the RHS of the young list, i.e.
2296   //   [Newly Young Regions ++ Survivors from last pause].
2297 
2298   uint survivor_region_length = young_list->survivor_length();
2299   uint eden_region_length = young_list->length() - survivor_region_length;
2300   init_cset_region_lengths(eden_region_length, survivor_region_length);
2301   hr = young_list->first_survivor_region();
2302   while (hr != NULL) {
2303     assert(hr->is_survivor(), "badly formed young list");
2304     hr->set_young();
2305     hr = hr->get_next_young_region();
2306   }
2307 
2308   // Clear the fields that point to the survivor list - they are all young now.
2309   young_list->clear_survivors();
2310 
2311   _collection_set = _inc_cset_head;
2312   _collection_set_bytes_used_before = _inc_cset_bytes_used_before;
2313   time_remaining_ms -= _inc_cset_predicted_elapsed_time_ms;
2314   predicted_pause_time_ms += _inc_cset_predicted_elapsed_time_ms;
2315 
2316   ergo_verbose3(ErgoCSetConstruction | ErgoHigh,
2317                 "add young regions to CSet",
2318                 ergo_format_region("eden")
2319                 ergo_format_region("survivors")
2320                 ergo_format_ms("predicted young region time"),
2321                 eden_region_length, survivor_region_length,
2322                 _inc_cset_predicted_elapsed_time_ms);
2323 
2324   // The number of recorded young regions is the incremental
2325   // collection set's current size
2326   set_recorded_rs_lengths(_inc_cset_recorded_rs_lengths);
2327 
2328   double young_end_time_sec = os::elapsedTime();
2329   _recorded_young_cset_choice_time_ms =
2330     (young_end_time_sec - young_start_time_sec) * 1000.0;
2331 
2332   // We are doing young collections so reset this.
2333   non_young_start_time_sec = young_end_time_sec;
2334 
2335   if (!gcs_are_young()) {
2336     CollectionSetChooser* cset_chooser = _collectionSetChooser;
2337     cset_chooser->verify();
2338     const uint min_old_cset_length = cset_chooser->calc_min_old_cset_length();
2339     const uint max_old_cset_length = cset_chooser->calc_max_old_cset_length();
2340 
2341     uint expensive_region_num = 0;
2342     bool check_time_remaining = adaptive_young_list_length();
2343     HeapRegion* hr = cset_chooser->peek();
2344     while (hr != NULL) {
2345       if (old_cset_region_length() >= max_old_cset_length) {
2346         // Added maximum number of old regions to the CSet.
2347         ergo_verbose2(ErgoCSetConstruction,
2348                       "finish adding old regions to CSet",
2349                       ergo_format_reason("old CSet region num reached max")
2350                       ergo_format_region("old")
2351                       ergo_format_region("max"),
2352                       old_cset_region_length(), max_old_cset_length);
2353         break;
2354       }
2355 
2356       double predicted_time_ms = predict_region_elapsed_time_ms(hr, false);
2357       if (check_time_remaining) {
2358         if (predicted_time_ms > time_remaining_ms) {
2359           // Too expensive for the current CSet.
2360 
2361           if (old_cset_region_length() >= min_old_cset_length) {
2362             // We have added the minimum number of old regions to the CSet,
2363             // we are done with this CSet.
2364             ergo_verbose4(ErgoCSetConstruction,
2365                           "finish adding old regions to CSet",
2366                           ergo_format_reason("predicted time is too high")
2367                           ergo_format_ms("predicted time")
2368                           ergo_format_ms("remaining time")
2369                           ergo_format_region("old")
2370                           ergo_format_region("min"),
2371                           predicted_time_ms, time_remaining_ms,
2372                           old_cset_region_length(), min_old_cset_length);
2373             break;
2374           }
2375 
2376           // We'll add it anyway given that we haven't reached the
2377           // minimum number of old regions.
2378           expensive_region_num += 1;
2379         }
2380       } else {
2381         if (old_cset_region_length() >= min_old_cset_length) {
2382           // In the non-auto-tuning case, we'll finish adding regions
2383           // to the CSet if we reach the minimum.
2384           ergo_verbose2(ErgoCSetConstruction,
2385                         "finish adding old regions to CSet",
2386                         ergo_format_reason("old CSet region num reached min")
2387                         ergo_format_region("old")
2388                         ergo_format_region("min"),
2389                         old_cset_region_length(), min_old_cset_length);
2390           break;
2391         }
2392       }
2393 
2394       // We will add this region to the CSet.
2395       time_remaining_ms -= predicted_time_ms;
2396       predicted_pause_time_ms += predicted_time_ms;
2397       cset_chooser->remove_and_move_to_next(hr);
2398       _g1->old_set_remove(hr);
2399       add_old_region_to_cset(hr);
2400 
2401       hr = cset_chooser->peek();
2402     }
2403     if (hr == NULL) {
2404       ergo_verbose0(ErgoCSetConstruction,
2405                     "finish adding old regions to CSet",
2406                     ergo_format_reason("candidate old regions not available"));
2407     }
2408 
2409     if (expensive_region_num > 0) {
2410       // We print the information once here at the end, predicated on
2411       // whether we added any apparently expensive regions or not, to
2412       // avoid generating output per region.
2413       ergo_verbose4(ErgoCSetConstruction,
2414                     "added expensive regions to CSet",
2415                     ergo_format_reason("old CSet region num not reached min")
2416                     ergo_format_region("old")
2417                     ergo_format_region("expensive")
2418                     ergo_format_region("min")
2419                     ergo_format_ms("remaining time"),
2420                     old_cset_region_length(),
2421                     expensive_region_num,
2422                     min_old_cset_length,
2423                     time_remaining_ms);
2424     }
2425 
2426     cset_chooser->verify();
2427   }
2428 
2429   stop_incremental_cset_building();
2430 
2431   count_CS_bytes_used();
2432 
2433   ergo_verbose5(ErgoCSetConstruction,
2434                 "finish choosing CSet",
2435                 ergo_format_region("eden")
2436                 ergo_format_region("survivors")
2437                 ergo_format_region("old")
2438                 ergo_format_ms("predicted pause time")
2439                 ergo_format_ms("target pause time"),
2440                 eden_region_length, survivor_region_length,
2441                 old_cset_region_length(),
2442                 predicted_pause_time_ms, target_pause_time_ms);
2443 
2444   double non_young_end_time_sec = os::elapsedTime();
2445   _recorded_non_young_cset_choice_time_ms =
2446     (non_young_end_time_sec - non_young_start_time_sec) * 1000.0;
2447 }
2448 
2449 void TraceGen0TimeData::record_start_collection(double time_to_stop_the_world_ms) {
2450   if(TraceGen0Time) {
2451     _all_stop_world_times_ms.add(time_to_stop_the_world_ms);
2452   }
2453 }
2454 
2455 void TraceGen0TimeData::record_yield_time(double yield_time_ms) {
2456   if(TraceGen0Time) {
2457     _all_yield_times_ms.add(yield_time_ms);
2458   }
2459 }
2460 
2461 void TraceGen0TimeData::record_end_collection(
2462      double total_ms,
2463      double other_ms,
2464      double root_region_scan_wait_ms,
2465      double parallel_ms,
2466      double ext_root_scan_ms,
2467      double satb_filtering_ms,
2468      double update_rs_ms,
2469      double scan_rs_ms,
2470      double obj_copy_ms,
2471      double termination_ms,
2472      double parallel_other_ms,
2473      double clear_ct_ms)
2474 {
2475   if(TraceGen0Time) {
2476     _total.add(total_ms);
2477     _other.add(other_ms);
2478     _root_region_scan_wait.add(root_region_scan_wait_ms);
2479     _parallel.add(parallel_ms);
2480     _ext_root_scan.add(ext_root_scan_ms);
2481     _satb_filtering.add(satb_filtering_ms);
2482     _update_rs.add(update_rs_ms);
2483     _scan_rs.add(scan_rs_ms);
2484     _obj_copy.add(obj_copy_ms);
2485     _termination.add(termination_ms);
2486     _parallel_other.add(parallel_other_ms);
2487     _clear_ct.add(clear_ct_ms);
2488   }
2489 }
2490 
2491 void TraceGen0TimeData::increment_collection_count(bool mixed) {
2492   if(TraceGen0Time) {
2493     if (mixed) {
2494       ++_mixed_pause_num;
2495     } else {
2496       ++_young_pause_num;
2497     }
2498   }
2499 }
2500 
2501 void TraceGen0TimeData::print_summary(int level,
2502                                       const char* str,
2503                                       const NumberSeq* seq) const {
2504   double sum = seq->sum();
2505   LineBuffer(level + 1).append_and_print_cr("%-24s = %8.2lf s (avg = %8.2lf ms)",
2506                 str, sum / 1000.0, seq->avg());
2507 }
2508 
2509 void TraceGen0TimeData::print_summary_sd(int level,
2510                                          const char* str,
2511                                          const NumberSeq* seq) const {
2512   print_summary(level, str, seq);
2513   LineBuffer(level + 6).append_and_print_cr("(num = %5d, std dev = %8.2lf ms, max = %8.2lf ms)",
2514                 seq->num(), seq->sd(), seq->maximum());
2515 }
2516 
2517 void TraceGen0TimeData::print() const {
2518   if (!TraceGen0Time) {
2519     return;
2520   }
2521 
2522   gclog_or_tty->print_cr("ALL PAUSES");
2523   print_summary_sd(0, "Total", &_total);
2524   gclog_or_tty->print_cr("");
2525   gclog_or_tty->print_cr("");
2526   gclog_or_tty->print_cr("   Young GC Pauses: %8d", _young_pause_num);
2527   gclog_or_tty->print_cr("   Mixed GC Pauses: %8d", _mixed_pause_num);
2528   gclog_or_tty->print_cr("");
2529 
2530   gclog_or_tty->print_cr("EVACUATION PAUSES");
2531 
2532   if (_young_pause_num == 0 && _mixed_pause_num == 0) {
2533     gclog_or_tty->print_cr("none");
2534   } else {
2535     print_summary_sd(0, "Evacuation Pauses", &_total);
2536     print_summary(1, "Root Region Scan Wait", &_root_region_scan_wait);
2537     print_summary(1, "Parallel Time", &_parallel);
2538     print_summary(2, "Ext Root Scanning", &_ext_root_scan);
2539     print_summary(2, "SATB Filtering", &_satb_filtering);
2540     print_summary(2, "Update RS", &_update_rs);
2541     print_summary(2, "Scan RS", &_scan_rs);
2542     print_summary(2, "Object Copy", &_obj_copy);
2543     print_summary(2, "Termination", &_termination);
2544     print_summary(2, "Parallel Other", &_parallel_other);
2545     print_summary(1, "Clear CT", &_clear_ct);
2546     print_summary(1, "Other", &_other);
2547   }
2548   gclog_or_tty->print_cr("");
2549 
2550   gclog_or_tty->print_cr("MISC");
2551   print_summary_sd(0, "Stop World", &_all_stop_world_times_ms);
2552   print_summary_sd(0, "Yields", &_all_yield_times_ms);
2553 }
2554 
2555 void TraceGen1TimeData::record_full_collection(double full_gc_time_ms) {
2556   if (TraceGen1Time) {
2557     _all_full_gc_times.add(full_gc_time_ms);
2558   }
2559 }
2560 
2561 void TraceGen1TimeData::print() const {
2562   if (!TraceGen1Time) {
2563     return;
2564   }
2565 
2566   if (_all_full_gc_times.num() > 0) {
2567     gclog_or_tty->print("\n%4d full_gcs: total time = %8.2f s",
2568       _all_full_gc_times.num(),
2569       _all_full_gc_times.sum() / 1000.0);
2570     gclog_or_tty->print_cr(" (avg = %8.2fms).", _all_full_gc_times.avg());
2571     gclog_or_tty->print_cr("                     [std. dev = %8.2f ms, max = %8.2f ms]",
2572       _all_full_gc_times.sd(),
2573       _all_full_gc_times.maximum());
2574   }
2575 }