rev 9305 : imported patch 8140597-forcing-initial-mark-causes-abort-mixed-collections
rev 9306 : imported patch 8139874-after-full-gc-next-gc-is-always-young-only
rev 9307 : imported patch 8138740-start-initial-mark-right-after-mixed-gc-if-needed
rev 9309 : imported patch 8140689-skip-last-young-if-nothing-to-do-in-mixed
rev 9310 : dihop-changes
rev 9312 : imported patch 8136678-implement-adaptive-sizing-algorithm-for-IHOP
rev 9314 : imported patch 8136679-jfr-event-for-dynamic-ihop
rev 9315 : imported patch sangheon-review
1 /* 2 * Copyright (c) 2001, 2015, Oracle and/or its affiliates. All rights reserved. 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 4 * 5 * This code is free software; you can redistribute it and/or modify it 6 * under the terms of the GNU General Public License version 2 only, as 7 * published by the Free Software Foundation. 8 * 9 * This code is distributed in the hope that it will be useful, but WITHOUT 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License 12 * version 2 for more details (a copy is included in the LICENSE file that 13 * accompanied this code). 14 * 15 * You should have received a copy of the GNU General Public License version 16 * 2 along with this work; if not, write to the Free Software Foundation, 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 18 * 19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA 20 * or visit www.oracle.com if you need additional information or have any 21 * questions. 22 * 23 */ 24 25 #include "precompiled.hpp" 26 #include "gc/g1/concurrentG1Refine.hpp" 27 #include "gc/g1/concurrentMark.hpp" 28 #include "gc/g1/concurrentMarkThread.inline.hpp" 29 #include "gc/g1/g1CollectedHeap.inline.hpp" 30 #include "gc/g1/g1CollectorPolicy.hpp" 31 #include "gc/g1/g1IHOPControl.hpp" 32 #include "gc/g1/g1ErgoVerbose.hpp" 33 #include "gc/g1/g1GCPhaseTimes.hpp" 34 #include "gc/g1/g1Log.hpp" 35 #include "gc/g1/heapRegion.inline.hpp" 36 #include "gc/g1/heapRegionRemSet.hpp" 37 #include "gc/shared/gcPolicyCounters.hpp" 38 #include "runtime/arguments.hpp" 39 #include "runtime/java.hpp" 40 #include "runtime/mutexLocker.hpp" 41 #include "utilities/debug.hpp" 42 43 // Different defaults for different number of GC threads 44 // They were chosen by running GCOld and SPECjbb on debris with different 45 // numbers of GC threads and choosing them based on the results 46 47 // all the same 48 static double rs_length_diff_defaults[] = { 49 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0 50 }; 51 52 static double cost_per_card_ms_defaults[] = { 53 0.01, 0.005, 0.005, 0.003, 0.003, 0.002, 0.002, 0.0015 54 }; 55 56 // all the same 57 static double young_cards_per_entry_ratio_defaults[] = { 58 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0 59 }; 60 61 static double cost_per_entry_ms_defaults[] = { 62 0.015, 0.01, 0.01, 0.008, 0.008, 0.0055, 0.0055, 0.005 63 }; 64 65 static double cost_per_byte_ms_defaults[] = { 66 0.00006, 0.00003, 0.00003, 0.000015, 0.000015, 0.00001, 0.00001, 0.000009 67 }; 68 69 // these should be pretty consistent 70 static double constant_other_time_ms_defaults[] = { 71 5.0, 5.0, 5.0, 5.0, 5.0, 5.0, 5.0, 5.0 72 }; 73 74 75 static double young_other_cost_per_region_ms_defaults[] = { 76 0.3, 0.2, 0.2, 0.15, 0.15, 0.12, 0.12, 0.1 77 }; 78 79 static double non_young_other_cost_per_region_ms_defaults[] = { 80 1.0, 0.7, 0.7, 0.5, 0.5, 0.42, 0.42, 0.30 81 }; 82 83 G1CollectorPolicy::G1CollectorPolicy() : 84 _predictor(G1ConfidencePercent / 100.0), 85 _parallel_gc_threads(ParallelGCThreads), 86 87 _recent_gc_times_ms(new TruncatedSeq(NumPrevPausesForHeuristics)), 88 _stop_world_start(0.0), 89 90 _concurrent_mark_remark_times_ms(new TruncatedSeq(NumPrevPausesForHeuristics)), 91 _concurrent_mark_cleanup_times_ms(new TruncatedSeq(NumPrevPausesForHeuristics)), 92 93 _alloc_rate_ms_seq(new TruncatedSeq(TruncatedSeqLength)), 94 _prev_collection_pause_end_ms(0.0), 95 _rs_length_diff_seq(new TruncatedSeq(TruncatedSeqLength)), 96 
_cost_per_card_ms_seq(new TruncatedSeq(TruncatedSeqLength)), 97 _cost_scan_hcc_seq(new TruncatedSeq(TruncatedSeqLength)), 98 _young_cards_per_entry_ratio_seq(new TruncatedSeq(TruncatedSeqLength)), 99 _mixed_cards_per_entry_ratio_seq(new TruncatedSeq(TruncatedSeqLength)), 100 _cost_per_entry_ms_seq(new TruncatedSeq(TruncatedSeqLength)), 101 _mixed_cost_per_entry_ms_seq(new TruncatedSeq(TruncatedSeqLength)), 102 _cost_per_byte_ms_seq(new TruncatedSeq(TruncatedSeqLength)), 103 _cost_per_byte_ms_during_cm_seq(new TruncatedSeq(TruncatedSeqLength)), 104 _constant_other_time_ms_seq(new TruncatedSeq(TruncatedSeqLength)), 105 _young_other_cost_per_region_ms_seq(new TruncatedSeq(TruncatedSeqLength)), 106 _non_young_other_cost_per_region_ms_seq( 107 new TruncatedSeq(TruncatedSeqLength)), 108 109 _pending_cards_seq(new TruncatedSeq(TruncatedSeqLength)), 110 _rs_lengths_seq(new TruncatedSeq(TruncatedSeqLength)), 111 112 _pause_time_target_ms((double) MaxGCPauseMillis), 113 114 _recent_prev_end_times_for_all_gcs_sec( 115 new TruncatedSeq(NumPrevPausesForHeuristics)), 116 117 _recent_avg_pause_time_ratio(0.0), 118 _rs_lengths_prediction(0), 119 _max_survivor_regions(0), 120 121 _eden_used_bytes_before_gc(0), 122 _survivor_used_bytes_before_gc(0), 123 _heap_used_bytes_before_gc(0), 124 _metaspace_used_bytes_before_gc(0), 125 _eden_capacity_bytes_before_gc(0), 126 _heap_capacity_bytes_before_gc(0), 127 128 _eden_cset_region_length(0), 129 _survivor_cset_region_length(0), 130 _old_cset_region_length(0), 131 132 _collection_set(NULL), 133 _collection_set_bytes_used_before(0), 134 135 // Incremental CSet attributes 136 _inc_cset_build_state(Inactive), 137 _inc_cset_head(NULL), 138 _inc_cset_tail(NULL), 139 _inc_cset_bytes_used_before(0), 140 _inc_cset_max_finger(NULL), 141 _inc_cset_recorded_rs_lengths(0), 142 _inc_cset_recorded_rs_lengths_diffs(0), 143 _inc_cset_predicted_elapsed_time_ms(0.0), 144 _inc_cset_predicted_elapsed_time_ms_diffs(0.0), 145 146 // add here any more surv rate groups 147 _recorded_survivor_regions(0), 148 _recorded_survivor_head(NULL), 149 _recorded_survivor_tail(NULL), 150 _survivors_age_table(true), 151 152 _gc_overhead_perc(0.0), 153 154 _last_old_allocated_bytes(0), 155 _ihop_control(NULL), 156 _initial_mark_to_mixed() { 157 158 // SurvRateGroups below must be initialized after the predictor because they 159 // indirectly use it through this object passed to their constructor. 160 _short_lived_surv_rate_group = 161 new SurvRateGroup(&_predictor, "Short Lived", G1YoungSurvRateNumRegionsSummary); 162 _survivor_surv_rate_group = 163 new SurvRateGroup(&_predictor, "Survivor", G1YoungSurvRateNumRegionsSummary); 164 165 // Set up the region size and associated fields. Given that the 166 // policy is created before the heap, we have to set this up here, 167 // so it's done as soon as possible. 168 169 // It would have been natural to pass initial_heap_byte_size() and 170 // max_heap_byte_size() to setup_heap_region_size() but those have 171 // not been set up at this point since they should be aligned with 172 // the region size. So, there is a circular dependency here. We base 173 // the region size on the heap size, but the heap size should be 174 // aligned with the region size. To get around this we use the 175 // unaligned values for the heap. 
176 HeapRegion::setup_heap_region_size(InitialHeapSize, MaxHeapSize); 177 HeapRegionRemSet::setup_remset_size(); 178 179 G1ErgoVerbose::initialize(); 180 if (PrintAdaptiveSizePolicy) { 181 // Currently, we only use a single switch for all the heuristics. 182 G1ErgoVerbose::set_enabled(true); 183 // Given that we don't currently have a verboseness level 184 // parameter, we'll hardcode this to high. This can be easily 185 // changed in the future. 186 G1ErgoVerbose::set_level(ErgoHigh); 187 } else { 188 G1ErgoVerbose::set_enabled(false); 189 } 190 191 _recent_prev_end_times_for_all_gcs_sec->add(os::elapsedTime()); 192 _prev_collection_pause_end_ms = os::elapsedTime() * 1000.0; 193 194 _phase_times = new G1GCPhaseTimes(_parallel_gc_threads); 195 196 int index = MIN2(_parallel_gc_threads - 1, 7); 197 198 _rs_length_diff_seq->add(rs_length_diff_defaults[index]); 199 _cost_per_card_ms_seq->add(cost_per_card_ms_defaults[index]); 200 _cost_scan_hcc_seq->add(0.0); 201 _young_cards_per_entry_ratio_seq->add( 202 young_cards_per_entry_ratio_defaults[index]); 203 _cost_per_entry_ms_seq->add(cost_per_entry_ms_defaults[index]); 204 _cost_per_byte_ms_seq->add(cost_per_byte_ms_defaults[index]); 205 _constant_other_time_ms_seq->add(constant_other_time_ms_defaults[index]); 206 _young_other_cost_per_region_ms_seq->add( 207 young_other_cost_per_region_ms_defaults[index]); 208 _non_young_other_cost_per_region_ms_seq->add( 209 non_young_other_cost_per_region_ms_defaults[index]); 210 211 // Below, we might need to calculate the pause time target based on 212 // the pause interval. When we do so we are going to give G1 maximum 213 // flexibility and allow it to do pauses when it needs to. So, we'll 214 // arrange that the pause interval to be pause time target + 1 to 215 // ensure that a) the pause time target is maximized with respect to 216 // the pause interval and b) we maintain the invariant that pause 217 // time target < pause interval. If the user does not want this 218 // maximum flexibility, they will have to set the pause interval 219 // explicitly. 220 221 // First make sure that, if either parameter is set, its value is 222 // reasonable. 223 if (!FLAG_IS_DEFAULT(MaxGCPauseMillis)) { 224 if (MaxGCPauseMillis < 1) { 225 vm_exit_during_initialization("MaxGCPauseMillis should be " 226 "greater than 0"); 227 } 228 } 229 if (!FLAG_IS_DEFAULT(GCPauseIntervalMillis)) { 230 if (GCPauseIntervalMillis < 1) { 231 vm_exit_during_initialization("GCPauseIntervalMillis should be " 232 "greater than 0"); 233 } 234 } 235 236 // Then, if the pause time target parameter was not set, set it to 237 // the default value. 238 if (FLAG_IS_DEFAULT(MaxGCPauseMillis)) { 239 if (FLAG_IS_DEFAULT(GCPauseIntervalMillis)) { 240 // The default pause time target in G1 is 200ms 241 FLAG_SET_DEFAULT(MaxGCPauseMillis, 200); 242 } else { 243 // We do not allow the pause interval to be set without the 244 // pause time target 245 vm_exit_during_initialization("GCPauseIntervalMillis cannot be set " 246 "without setting MaxGCPauseMillis"); 247 } 248 } 249 250 // Then, if the interval parameter was not set, set it according to 251 // the pause time target (this will also deal with the case when the 252 // pause time target is the default value). 253 if (FLAG_IS_DEFAULT(GCPauseIntervalMillis)) { 254 FLAG_SET_DEFAULT(GCPauseIntervalMillis, MaxGCPauseMillis + 1); 255 } 256 257 // Finally, make sure that the two parameters are consistent. 
258 if (MaxGCPauseMillis >= GCPauseIntervalMillis) { 259 char buffer[256]; 260 jio_snprintf(buffer, 256, 261 "MaxGCPauseMillis (%u) should be less than " 262 "GCPauseIntervalMillis (%u)", 263 MaxGCPauseMillis, GCPauseIntervalMillis); 264 vm_exit_during_initialization(buffer); 265 } 266 267 double max_gc_time = (double) MaxGCPauseMillis / 1000.0; 268 double time_slice = (double) GCPauseIntervalMillis / 1000.0; 269 _mmu_tracker = new G1MMUTrackerQueue(time_slice, max_gc_time); 270 271 // start conservatively (around 50ms is about right) 272 _concurrent_mark_remark_times_ms->add(0.05); 273 _concurrent_mark_cleanup_times_ms->add(0.20); 274 _tenuring_threshold = MaxTenuringThreshold; 275 276 assert(GCTimeRatio > 0, 277 "we should have set it to a default value set_g1_gc_flags() " 278 "if a user set it to 0"); 279 _gc_overhead_perc = 100.0 * (1.0 / (1.0 + GCTimeRatio)); 280 281 uintx reserve_perc = G1ReservePercent; 282 // Put an artificial ceiling on this so that it's not set to a silly value. 283 if (reserve_perc > 50) { 284 reserve_perc = 50; 285 warning("G1ReservePercent is set to a value that is too large, " 286 "it's been updated to " UINTX_FORMAT, reserve_perc); 287 } 288 _reserve_factor = (double) reserve_perc / 100.0; 289 // This will be set when the heap is expanded 290 // for the first time during initialization. 291 _reserve_regions = 0; 292 293 _collectionSetChooser = new CollectionSetChooser(); 294 } 295 296 G1CollectorPolicy::~G1CollectorPolicy() { 297 if (_ihop_control != NULL) { 298 delete _ihop_control; 299 } 300 } 301 302 double G1CollectorPolicy::get_new_prediction(TruncatedSeq const* seq) const { 303 return _predictor.get_new_prediction(seq); 304 } 305 306 void G1CollectorPolicy::initialize_alignments() { 307 _space_alignment = HeapRegion::GrainBytes; 308 size_t card_table_alignment = CardTableRS::ct_max_alignment_constraint(); 309 size_t page_size = UseLargePages ? 
os::large_page_size() : os::vm_page_size(); 310 _heap_alignment = MAX3(card_table_alignment, _space_alignment, page_size); 311 } 312 313 void G1CollectorPolicy::initialize_flags() { 314 if (G1HeapRegionSize != HeapRegion::GrainBytes) { 315 FLAG_SET_ERGO(size_t, G1HeapRegionSize, HeapRegion::GrainBytes); 316 } 317 318 if (SurvivorRatio < 1) { 319 vm_exit_during_initialization("Invalid survivor ratio specified"); 320 } 321 CollectorPolicy::initialize_flags(); 322 _young_gen_sizer = new G1YoungGenSizer(); // Must be after call to initialize_flags 323 } 324 325 void G1CollectorPolicy::post_heap_initialize() { 326 uintx max_regions = G1CollectedHeap::heap()->max_regions(); 327 size_t max_young_size = (size_t)_young_gen_sizer->max_young_length(max_regions) * HeapRegion::GrainBytes; 328 if (max_young_size != MaxNewSize) { 329 FLAG_SET_ERGO(size_t, MaxNewSize, max_young_size); 330 } 331 332 _ihop_control = create_ihop_control(); 333 } 334 335 G1CollectorState* G1CollectorPolicy::collector_state() const { return _g1->collector_state(); } 336 337 G1YoungGenSizer::G1YoungGenSizer() : _sizer_kind(SizerDefaults), _adaptive_size(true), 338 _min_desired_young_length(0), _max_desired_young_length(0) { 339 if (FLAG_IS_CMDLINE(NewRatio)) { 340 if (FLAG_IS_CMDLINE(NewSize) || FLAG_IS_CMDLINE(MaxNewSize)) { 341 warning("-XX:NewSize and -XX:MaxNewSize override -XX:NewRatio"); 342 } else { 343 _sizer_kind = SizerNewRatio; 344 _adaptive_size = false; 345 return; 346 } 347 } 348 349 if (NewSize > MaxNewSize) { 350 if (FLAG_IS_CMDLINE(MaxNewSize)) { 351 warning("NewSize (" SIZE_FORMAT "k) is greater than the MaxNewSize (" SIZE_FORMAT "k). " 352 "A new max generation size of " SIZE_FORMAT "k will be used.", 353 NewSize/K, MaxNewSize/K, NewSize/K); 354 } 355 MaxNewSize = NewSize; 356 } 357 358 if (FLAG_IS_CMDLINE(NewSize)) { 359 _min_desired_young_length = MAX2((uint) (NewSize / HeapRegion::GrainBytes), 360 1U); 361 if (FLAG_IS_CMDLINE(MaxNewSize)) { 362 _max_desired_young_length = 363 MAX2((uint) (MaxNewSize / HeapRegion::GrainBytes), 364 1U); 365 _sizer_kind = SizerMaxAndNewSize; 366 _adaptive_size = _min_desired_young_length == _max_desired_young_length; 367 } else { 368 _sizer_kind = SizerNewSizeOnly; 369 } 370 } else if (FLAG_IS_CMDLINE(MaxNewSize)) { 371 _max_desired_young_length = 372 MAX2((uint) (MaxNewSize / HeapRegion::GrainBytes), 373 1U); 374 _sizer_kind = SizerMaxNewSizeOnly; 375 } 376 } 377 378 uint G1YoungGenSizer::calculate_default_min_length(uint new_number_of_heap_regions) { 379 uint default_value = (new_number_of_heap_regions * G1NewSizePercent) / 100; 380 return MAX2(1U, default_value); 381 } 382 383 uint G1YoungGenSizer::calculate_default_max_length(uint new_number_of_heap_regions) { 384 uint default_value = (new_number_of_heap_regions * G1MaxNewSizePercent) / 100; 385 return MAX2(1U, default_value); 386 } 387 388 void G1YoungGenSizer::recalculate_min_max_young_length(uint number_of_heap_regions, uint* min_young_length, uint* max_young_length) { 389 assert(number_of_heap_regions > 0, "Heap must be initialized"); 390 391 switch (_sizer_kind) { 392 case SizerDefaults: 393 *min_young_length = calculate_default_min_length(number_of_heap_regions); 394 *max_young_length = calculate_default_max_length(number_of_heap_regions); 395 break; 396 case SizerNewSizeOnly: 397 *max_young_length = calculate_default_max_length(number_of_heap_regions); 398 *max_young_length = MAX2(*min_young_length, *max_young_length); 399 break; 400 case SizerMaxNewSizeOnly: 401 *min_young_length = 
calculate_default_min_length(number_of_heap_regions); 402 *min_young_length = MIN2(*min_young_length, *max_young_length); 403 break; 404 case SizerMaxAndNewSize: 405 // Do nothing. Values set on the command line, don't update them at runtime. 406 break; 407 case SizerNewRatio: 408 *min_young_length = number_of_heap_regions / (NewRatio + 1); 409 *max_young_length = *min_young_length; 410 break; 411 default: 412 ShouldNotReachHere(); 413 } 414 415 assert(*min_young_length <= *max_young_length, "Invalid min/max young gen size values"); 416 } 417 418 uint G1YoungGenSizer::max_young_length(uint number_of_heap_regions) { 419 // We need to pass the desired values because recalculation may not update these 420 // values in some cases. 421 uint temp = _min_desired_young_length; 422 uint result = _max_desired_young_length; 423 recalculate_min_max_young_length(number_of_heap_regions, &temp, &result); 424 return result; 425 } 426 427 void G1YoungGenSizer::heap_size_changed(uint new_number_of_heap_regions) { 428 recalculate_min_max_young_length(new_number_of_heap_regions, &_min_desired_young_length, 429 &_max_desired_young_length); 430 } 431 432 void G1CollectorPolicy::init() { 433 // Set aside an initial future to_space. 434 _g1 = G1CollectedHeap::heap(); 435 436 assert(Heap_lock->owned_by_self(), "Locking discipline."); 437 438 initialize_gc_policy_counters(); 439 440 if (adaptive_young_list_length()) { 441 _young_list_fixed_length = 0; 442 } else { 443 _young_list_fixed_length = _young_gen_sizer->min_desired_young_length(); 444 } 445 _free_regions_at_end_of_collection = _g1->num_free_regions(); 446 447 update_young_list_max_and_target_length(); 448 // We may immediately start allocating regions and placing them on the 449 // collection set list. Initialize the per-collection set info 450 start_incremental_cset_building(); 451 } 452 453 void G1CollectorPolicy::note_gc_start(uint num_active_workers) { 454 phase_times()->note_gc_start(num_active_workers); 455 } 456 457 // Create the jstat counters for the policy. 458 void G1CollectorPolicy::initialize_gc_policy_counters() { 459 _gc_policy_counters = new GCPolicyCounters("GarbageFirst", 1, 3); 460 } 461 462 bool G1CollectorPolicy::predict_will_fit(uint young_length, 463 double base_time_ms, 464 uint base_free_regions, 465 double target_pause_time_ms) const { 466 if (young_length >= base_free_regions) { 467 // end condition 1: not enough space for the young regions 468 return false; 469 } 470 471 double accum_surv_rate = accum_yg_surv_rate_pred((int) young_length - 1); 472 size_t bytes_to_copy = 473 (size_t) (accum_surv_rate * (double) HeapRegion::GrainBytes); 474 double copy_time_ms = predict_object_copy_time_ms(bytes_to_copy); 475 double young_other_time_ms = predict_young_other_time_ms(young_length); 476 double pause_time_ms = base_time_ms + copy_time_ms + young_other_time_ms; 477 if (pause_time_ms > target_pause_time_ms) { 478 // end condition 2: prediction is over the target pause time 479 return false; 480 } 481 482 size_t free_bytes = (base_free_regions - young_length) * HeapRegion::GrainBytes; 483 if ((2.0 /* magic */ * _predictor.sigma()) * bytes_to_copy > free_bytes) { 484 // end condition 3: out-of-space (conservatively!) 485 return false; 486 } 487 488 // success! 
489 return true; 490 } 491 492 void G1CollectorPolicy::record_new_heap_size(uint new_number_of_regions) { 493 // re-calculate the necessary reserve 494 double reserve_regions_d = (double) new_number_of_regions * _reserve_factor; 495 // We use ceiling so that if reserve_regions_d is > 0.0 (but 496 // smaller than 1.0) we'll get 1. 497 _reserve_regions = (uint) ceil(reserve_regions_d); 498 499 _young_gen_sizer->heap_size_changed(new_number_of_regions); 500 } 501 502 uint G1CollectorPolicy::calculate_young_list_desired_min_length( 503 uint base_min_length) const { 504 uint desired_min_length = 0; 505 if (adaptive_young_list_length()) { 506 if (_alloc_rate_ms_seq->num() > 3) { 507 double now_sec = os::elapsedTime(); 508 double when_ms = _mmu_tracker->when_max_gc_sec(now_sec) * 1000.0; 509 double alloc_rate_ms = predict_alloc_rate_ms(); 510 desired_min_length = (uint) ceil(alloc_rate_ms * when_ms); 511 } else { 512 // otherwise we don't have enough info to make the prediction 513 } 514 } 515 desired_min_length += base_min_length; 516 // make sure we don't go below any user-defined minimum bound 517 return MAX2(_young_gen_sizer->min_desired_young_length(), desired_min_length); 518 } 519 520 uint G1CollectorPolicy::calculate_young_list_desired_max_length() const { 521 // Here, we might want to also take into account any additional 522 // constraints (i.e., user-defined minimum bound). Currently, we 523 // effectively don't set this bound. 524 return _young_gen_sizer->max_desired_young_length(); 525 } 526 527 void G1CollectorPolicy::update_young_list_max_and_target_length(size_t* unbounded_target_length) { 528 update_young_list_max_and_target_length(get_new_prediction(_rs_lengths_seq), unbounded_target_length); 529 } 530 531 void G1CollectorPolicy::update_young_list_max_and_target_length(size_t rs_lengths, size_t* unbounded_target_length) { 532 update_young_list_target_length(rs_lengths, unbounded_target_length); 533 update_max_gc_locker_expansion(); 534 } 535 536 void G1CollectorPolicy::update_young_list_target_length(size_t rs_lengths, size_t* unbounded_target_length) { 537 _young_list_target_length = bounded_young_list_target_length(rs_lengths, unbounded_target_length); 538 } 539 540 void G1CollectorPolicy::update_young_list_target_length() { 541 update_young_list_target_length(get_new_prediction(_rs_lengths_seq)); 542 } 543 544 uint G1CollectorPolicy::bounded_young_list_target_length(size_t rs_lengths, size_t* unbounded_target_length) const { 545 // Calculate the absolute and desired min bounds. 546 547 // This is how many young regions we already have (currently: the survivors). 548 uint base_min_length = recorded_survivor_regions(); 549 uint desired_min_length = calculate_young_list_desired_min_length(base_min_length); 550 // This is the absolute minimum young length. Ensure that we 551 // will at least have one eden region available for allocation. 552 uint absolute_min_length = base_min_length + MAX2(_g1->young_list()->eden_length(), (uint)1); 553 // If we shrank the young list target it should not shrink below the current size. 554 desired_min_length = MAX2(desired_min_length, absolute_min_length); 555 // Calculate the absolute and desired max bounds. 
556 557 uint desired_max_length = calculate_young_list_desired_max_length(); 558 559 uint young_list_target_length = 0; 560 if (adaptive_young_list_length()) { 561 if (collector_state()->gcs_are_young()) { 562 young_list_target_length = 563 calculate_young_list_target_length(rs_lengths, 564 base_min_length, 565 desired_min_length, 566 desired_max_length); 567 } else { 568 // Don't calculate anything and let the code below bound it to 569 // the desired_min_length, i.e., do the next GC as soon as 570 // possible to maximize how many old regions we can add to it. 571 } 572 } else { 573 // The user asked for a fixed young gen so we'll fix the young gen 574 // whether the next GC is young or mixed. 575 young_list_target_length = _young_list_fixed_length; 576 } 577 578 if (unbounded_target_length != NULL) { 579 *unbounded_target_length = young_list_target_length; 580 } 581 582 // We will try our best not to "eat" into the reserve. 583 uint absolute_max_length = 0; 584 if (_free_regions_at_end_of_collection > _reserve_regions) { 585 absolute_max_length = _free_regions_at_end_of_collection - _reserve_regions; 586 } 587 if (desired_max_length > absolute_max_length) { 588 desired_max_length = absolute_max_length; 589 } 590 591 // Make sure we don't go over the desired max length, nor under the 592 // desired min length. In case they clash, desired_min_length wins 593 // which is why that test is second. 594 if (young_list_target_length > desired_max_length) { 595 young_list_target_length = desired_max_length; 596 } 597 if (young_list_target_length < desired_min_length) { 598 young_list_target_length = desired_min_length; 599 } 600 601 assert(young_list_target_length > recorded_survivor_regions(), 602 "we should be able to allocate at least one eden region"); 603 assert(young_list_target_length >= absolute_min_length, "post-condition"); 604 605 return young_list_target_length; 606 } 607 608 uint 609 G1CollectorPolicy::calculate_young_list_target_length(size_t rs_lengths, 610 uint base_min_length, 611 uint desired_min_length, 612 uint desired_max_length) const { 613 assert(adaptive_young_list_length(), "pre-condition"); 614 assert(collector_state()->gcs_are_young(), "only call this for young GCs"); 615 616 // In case some edge-condition makes the desired max length too small... 617 if (desired_max_length <= desired_min_length) { 618 return desired_min_length; 619 } 620 621 // We'll adjust min_young_length and max_young_length not to include 622 // the already allocated young regions (i.e., so they reflect the 623 // min and max eden regions we'll allocate). The base_min_length 624 // will be reflected in the predictions by the 625 // survivor_regions_evac_time prediction. 
626 assert(desired_min_length > base_min_length, "invariant"); 627 uint min_young_length = desired_min_length - base_min_length; 628 assert(desired_max_length > base_min_length, "invariant"); 629 uint max_young_length = desired_max_length - base_min_length; 630 631 double target_pause_time_ms = _mmu_tracker->max_gc_time() * 1000.0; 632 double survivor_regions_evac_time = predict_survivor_regions_evac_time(); 633 size_t pending_cards = (size_t) get_new_prediction(_pending_cards_seq); 634 size_t adj_rs_lengths = rs_lengths + predict_rs_length_diff(); 635 size_t scanned_cards = predict_young_card_num(adj_rs_lengths); 636 double base_time_ms = 637 predict_base_elapsed_time_ms(pending_cards, scanned_cards) + 638 survivor_regions_evac_time; 639 uint available_free_regions = _free_regions_at_end_of_collection; 640 uint base_free_regions = 0; 641 if (available_free_regions > _reserve_regions) { 642 base_free_regions = available_free_regions - _reserve_regions; 643 } 644 645 // Here, we will make sure that the shortest young length that 646 // makes sense fits within the target pause time. 647 648 if (predict_will_fit(min_young_length, base_time_ms, 649 base_free_regions, target_pause_time_ms)) { 650 // The shortest young length will fit into the target pause time; 651 // we'll now check whether the absolute maximum number of young 652 // regions will fit in the target pause time. If not, we'll do 653 // a binary search between min_young_length and max_young_length. 654 if (predict_will_fit(max_young_length, base_time_ms, 655 base_free_regions, target_pause_time_ms)) { 656 // The maximum young length will fit into the target pause time. 657 // We are done so set min young length to the maximum length (as 658 // the result is assumed to be returned in min_young_length). 659 min_young_length = max_young_length; 660 } else { 661 // The maximum possible number of young regions will not fit within 662 // the target pause time so we'll search for the optimal 663 // length. The loop invariants are: 664 // 665 // min_young_length < max_young_length 666 // min_young_length is known to fit into the target pause time 667 // max_young_length is known not to fit into the target pause time 668 // 669 // Going into the loop we know the above hold as we've just 670 // checked them. Every time around the loop we check whether 671 // the middle value between min_young_length and 672 // max_young_length fits into the target pause time. If it 673 // does, it becomes the new min. If it doesn't, it becomes 674 // the new max. This way we maintain the loop invariants. 675 676 assert(min_young_length < max_young_length, "invariant"); 677 uint diff = (max_young_length - min_young_length) / 2; 678 while (diff > 0) { 679 uint young_length = min_young_length + diff; 680 if (predict_will_fit(young_length, base_time_ms, 681 base_free_regions, target_pause_time_ms)) { 682 min_young_length = young_length; 683 } else { 684 max_young_length = young_length; 685 } 686 assert(min_young_length < max_young_length, "invariant"); 687 diff = (max_young_length - min_young_length) / 2; 688 } 689 // The results is min_young_length which, according to the 690 // loop invariants, should fit within the target pause time. 
691 692 // These are the post-conditions of the binary search above: 693 assert(min_young_length < max_young_length, 694 "otherwise we should have discovered that max_young_length " 695 "fits into the pause target and not done the binary search"); 696 assert(predict_will_fit(min_young_length, base_time_ms, 697 base_free_regions, target_pause_time_ms), 698 "min_young_length, the result of the binary search, should " 699 "fit into the pause target"); 700 assert(!predict_will_fit(min_young_length + 1, base_time_ms, 701 base_free_regions, target_pause_time_ms), 702 "min_young_length, the result of the binary search, should be " 703 "optimal, so no larger length should fit into the pause target"); 704 } 705 } else { 706 // Even the minimum length doesn't fit into the pause time 707 // target, return it as the result nevertheless. 708 } 709 return base_min_length + min_young_length; 710 } 711 712 double G1CollectorPolicy::predict_survivor_regions_evac_time() const { 713 double survivor_regions_evac_time = 0.0; 714 for (HeapRegion * r = _recorded_survivor_head; 715 r != NULL && r != _recorded_survivor_tail->get_next_young_region(); 716 r = r->get_next_young_region()) { 717 survivor_regions_evac_time += predict_region_elapsed_time_ms(r, collector_state()->gcs_are_young()); 718 } 719 return survivor_regions_evac_time; 720 } 721 722 void G1CollectorPolicy::revise_young_list_target_length_if_necessary() { 723 guarantee( adaptive_young_list_length(), "should not call this otherwise" ); 724 725 size_t rs_lengths = _g1->young_list()->sampled_rs_lengths(); 726 if (rs_lengths > _rs_lengths_prediction) { 727 // add 10% to avoid having to recalculate often 728 size_t rs_lengths_prediction = rs_lengths * 1100 / 1000; 729 update_rs_lengths_prediction(rs_lengths_prediction); 730 731 update_young_list_max_and_target_length(rs_lengths_prediction); 732 } 733 } 734 735 void G1CollectorPolicy::update_rs_lengths_prediction() { 736 update_rs_lengths_prediction(get_new_prediction(_rs_lengths_seq)); 737 } 738 739 void G1CollectorPolicy::update_rs_lengths_prediction(size_t prediction) { 740 if (collector_state()->gcs_are_young() && adaptive_young_list_length()) { 741 _rs_lengths_prediction = prediction; 742 } 743 } 744 745 HeapWord* G1CollectorPolicy::mem_allocate_work(size_t size, 746 bool is_tlab, 747 bool* gc_overhead_limit_was_exceeded) { 748 guarantee(false, "Not using this policy feature yet."); 749 return NULL; 750 } 751 752 // This method controls how a collector handles one or more 753 // of its generations being fully allocated. 
754 HeapWord* G1CollectorPolicy::satisfy_failed_allocation(size_t size, 755 bool is_tlab) { 756 guarantee(false, "Not using this policy feature yet."); 757 return NULL; 758 } 759 760 761 #ifndef PRODUCT 762 bool G1CollectorPolicy::verify_young_ages() { 763 HeapRegion* head = _g1->young_list()->first_region(); 764 return 765 verify_young_ages(head, _short_lived_surv_rate_group); 766 // also call verify_young_ages on any additional surv rate groups 767 } 768 769 bool 770 G1CollectorPolicy::verify_young_ages(HeapRegion* head, 771 SurvRateGroup *surv_rate_group) { 772 guarantee( surv_rate_group != NULL, "pre-condition" ); 773 774 const char* name = surv_rate_group->name(); 775 bool ret = true; 776 int prev_age = -1; 777 778 for (HeapRegion* curr = head; 779 curr != NULL; 780 curr = curr->get_next_young_region()) { 781 SurvRateGroup* group = curr->surv_rate_group(); 782 if (group == NULL && !curr->is_survivor()) { 783 gclog_or_tty->print_cr("## %s: encountered NULL surv_rate_group", name); 784 ret = false; 785 } 786 787 if (surv_rate_group == group) { 788 int age = curr->age_in_surv_rate_group(); 789 790 if (age < 0) { 791 gclog_or_tty->print_cr("## %s: encountered negative age", name); 792 ret = false; 793 } 794 795 if (age <= prev_age) { 796 gclog_or_tty->print_cr("## %s: region ages are not strictly increasing " 797 "(%d, %d)", name, age, prev_age); 798 ret = false; 799 } 800 prev_age = age; 801 } 802 } 803 804 return ret; 805 } 806 #endif // PRODUCT 807 808 void G1CollectorPolicy::record_full_collection_start() { 809 _full_collection_start_sec = os::elapsedTime(); 810 record_heap_size_info_at_start(true /* full */); 811 // Release the future to-space so that it is available for compaction into. 812 collector_state()->set_full_collection(true); 813 } 814 815 void G1CollectorPolicy::record_full_collection_end() { 816 // Consider this like a collection pause for the purposes of allocation 817 // since last pause. 818 double end_sec = os::elapsedTime(); 819 double full_gc_time_sec = end_sec - _full_collection_start_sec; 820 double full_gc_time_ms = full_gc_time_sec * 1000.0; 821 822 _trace_old_gen_time_data.record_full_collection(full_gc_time_ms); 823 824 update_recent_gc_times(end_sec, full_gc_time_ms); 825 826 collector_state()->set_full_collection(false); 827 828 // "Nuke" the heuristics that control the young/mixed GC 829 // transitions and make sure we start with young GCs after the Full GC. 830 collector_state()->set_gcs_are_young(true); 831 collector_state()->set_last_young_gc(false); 832 collector_state()->set_initiate_conc_mark_if_possible(need_to_start_conc_mark("end of Full GC", 0)); 833 collector_state()->set_during_initial_mark_pause(false); 834 collector_state()->set_in_marking_window(false); 835 collector_state()->set_in_marking_window_im(false); 836 837 _short_lived_surv_rate_group->start_adding_regions(); 838 // also call this on any additional surv rate groups 839 840 record_survivor_regions(0, NULL, NULL); 841 842 _free_regions_at_end_of_collection = _g1->num_free_regions(); 843 // Reset survivors SurvRateGroup. 
844 _survivor_surv_rate_group->reset(); 845 update_young_list_max_and_target_length(); 846 update_rs_lengths_prediction(); 847 _collectionSetChooser->clear(); 848 849 _last_old_allocated_bytes = 0; 850 851 record_pause(FullGC, _full_collection_start_sec, end_sec); 852 } 853 854 void G1CollectorPolicy::record_stop_world_start() { 855 _stop_world_start = os::elapsedTime(); 856 } 857 858 void G1CollectorPolicy::record_collection_pause_start(double start_time_sec) { 859 // We only need to do this here as the policy will only be applied 860 // to the GC we're about to start. so, no point is calculating this 861 // every time we calculate / recalculate the target young length. 862 update_survivors_policy(); 863 864 assert(_g1->used() == _g1->recalculate_used(), 865 "sanity, used: " SIZE_FORMAT " recalculate_used: " SIZE_FORMAT, 866 _g1->used(), _g1->recalculate_used()); 867 868 double s_w_t_ms = (start_time_sec - _stop_world_start) * 1000.0; 869 _trace_young_gen_time_data.record_start_collection(s_w_t_ms); 870 _stop_world_start = 0.0; 871 872 record_heap_size_info_at_start(false /* full */); 873 874 phase_times()->record_cur_collection_start_sec(start_time_sec); 875 _pending_cards = _g1->pending_card_num(); 876 877 _collection_set_bytes_used_before = 0; 878 _bytes_copied_during_gc = 0; 879 880 collector_state()->set_last_gc_was_young(false); 881 882 // do that for any other surv rate groups 883 _short_lived_surv_rate_group->stop_adding_regions(); 884 _survivors_age_table.clear(); 885 886 assert( verify_young_ages(), "region age verification" ); 887 } 888 889 void G1CollectorPolicy::record_concurrent_mark_init_end(double 890 mark_init_elapsed_time_ms) { 891 collector_state()->set_during_marking(true); 892 assert(!collector_state()->initiate_conc_mark_if_possible(), "we should have cleared it by now"); 893 collector_state()->set_during_initial_mark_pause(false); 894 _cur_mark_stop_world_time_ms = mark_init_elapsed_time_ms; 895 } 896 897 void G1CollectorPolicy::record_concurrent_mark_remark_start() { 898 _mark_remark_start_sec = os::elapsedTime(); 899 collector_state()->set_during_marking(false); 900 } 901 902 void G1CollectorPolicy::record_concurrent_mark_remark_end() { 903 double end_time_sec = os::elapsedTime(); 904 double elapsed_time_ms = (end_time_sec - _mark_remark_start_sec)*1000.0; 905 _concurrent_mark_remark_times_ms->add(elapsed_time_ms); 906 _cur_mark_stop_world_time_ms += elapsed_time_ms; 907 _prev_collection_pause_end_ms += elapsed_time_ms; 908 909 record_pause(Remark, _mark_remark_start_sec, end_time_sec); 910 } 911 912 void G1CollectorPolicy::record_concurrent_mark_cleanup_start() { 913 _mark_cleanup_start_sec = os::elapsedTime(); 914 } 915 916 void G1CollectorPolicy::record_concurrent_mark_cleanup_completed() { 917 bool should_continue_with_reclaim = next_gc_should_be_mixed("request last young-only gc", 918 "skip last young-only gc"); 919 collector_state()->set_last_young_gc(should_continue_with_reclaim); 920 // We abort the marking phase. 
921 if (!should_continue_with_reclaim) { 922 abort_time_to_mixed_tracking(); 923 } 924 collector_state()->set_in_marking_window(false); 925 } 926 927 void G1CollectorPolicy::record_concurrent_pause() { 928 if (_stop_world_start > 0.0) { 929 double yield_ms = (os::elapsedTime() - _stop_world_start) * 1000.0; 930 _trace_young_gen_time_data.record_yield_time(yield_ms); 931 } 932 } 933 934 double G1CollectorPolicy::average_time_ms(G1GCPhaseTimes::GCParPhases phase) const { 935 return phase_times()->average_time_ms(phase); 936 } 937 938 double G1CollectorPolicy::young_other_time_ms() const { 939 return phase_times()->young_cset_choice_time_ms() + 940 phase_times()->young_free_cset_time_ms(); 941 } 942 943 double G1CollectorPolicy::non_young_other_time_ms() const { 944 return phase_times()->non_young_cset_choice_time_ms() + 945 phase_times()->non_young_free_cset_time_ms(); 946 947 } 948 949 double G1CollectorPolicy::other_time_ms(double pause_time_ms) const { 950 return pause_time_ms - 951 average_time_ms(G1GCPhaseTimes::UpdateRS) - 952 average_time_ms(G1GCPhaseTimes::ScanRS) - 953 average_time_ms(G1GCPhaseTimes::ObjCopy) - 954 average_time_ms(G1GCPhaseTimes::Termination); 955 } 956 957 double G1CollectorPolicy::constant_other_time_ms(double pause_time_ms) const { 958 return other_time_ms(pause_time_ms) - young_other_time_ms() - non_young_other_time_ms(); 959 } 960 961 bool G1CollectorPolicy::about_to_start_mixed_phase() const { 962 return _g1->concurrent_mark()->cmThread()->during_cycle() || collector_state()->last_young_gc(); 963 } 964 965 bool G1CollectorPolicy::need_to_start_conc_mark(const char* source, size_t alloc_word_size) { 966 if (about_to_start_mixed_phase()) { 967 return false; 968 } 969 970 size_t marking_initiating_used_threshold = _ihop_control->get_conc_mark_start_threshold(); 971 972 size_t cur_used_bytes = _g1->non_young_capacity_bytes(); 973 size_t alloc_byte_size = alloc_word_size * HeapWordSize; 974 size_t marking_request_bytes = cur_used_bytes + alloc_byte_size; 975 976 if (marking_request_bytes > marking_initiating_used_threshold) { 977 if (collector_state()->gcs_are_young() && !collector_state()->last_young_gc()) { 978 ergo_verbose5(ErgoConcCycles, 979 "request concurrent cycle initiation", 980 ergo_format_reason("occupancy higher than threshold") 981 ergo_format_byte("occupancy") 982 ergo_format_byte("allocation request") 983 ergo_format_byte_perc("threshold") 984 ergo_format_str("source"), 985 cur_used_bytes, 986 alloc_byte_size, 987 marking_initiating_used_threshold, 988 (double) marking_initiating_used_threshold / _g1->capacity() * 100, 989 source); 990 return true; 991 } else { 992 ergo_verbose5(ErgoConcCycles, 993 "do not request concurrent cycle initiation", 994 ergo_format_reason("still doing mixed collections") 995 ergo_format_byte("occupancy") 996 ergo_format_byte("allocation request") 997 ergo_format_byte_perc("threshold") 998 ergo_format_str("source"), 999 cur_used_bytes, 1000 alloc_byte_size, 1001 marking_initiating_used_threshold, 1002 (double) InitiatingHeapOccupancyPercent, 1003 source); 1004 } 1005 } 1006 1007 return false; 1008 } 1009 1010 // Anything below that is considered to be zero 1011 #define MIN_TIMER_GRANULARITY 0.0000001 1012 1013 void G1CollectorPolicy::record_collection_pause_end(double pause_time_ms, size_t cards_scanned) { 1014 double end_time_sec = os::elapsedTime(); 1015 assert(_cur_collection_pause_used_regions_at_start >= cset_region_length(), 1016 "otherwise, the subtraction below does not make sense"); 1017 size_t cur_used_bytes = 
_g1->used(); 1018 assert(cur_used_bytes == _g1->recalculate_used(), "It should!"); 1019 bool last_pause_included_initial_mark = false; 1020 bool update_stats = !_g1->evacuation_failed(); 1021 1022 #ifndef PRODUCT 1023 if (G1YoungSurvRateVerbose) { 1024 gclog_or_tty->cr(); 1025 _short_lived_surv_rate_group->print(); 1026 // do that for any other surv rate groups too 1027 } 1028 #endif // PRODUCT 1029 1030 record_pause(young_gc_pause_kind(), end_time_sec - pause_time_ms / 1000.0, end_time_sec); 1031 1032 last_pause_included_initial_mark = collector_state()->during_initial_mark_pause(); 1033 if (last_pause_included_initial_mark) { 1034 record_concurrent_mark_init_end(0.0); 1035 } else { 1036 maybe_start_marking(); 1037 } 1038 1039 double app_time_ms = 1.0; 1040 1041 if (update_stats) { 1042 _trace_young_gen_time_data.record_end_collection(pause_time_ms, phase_times()); 1043 // this is where we update the allocation rate of the application 1044 app_time_ms = 1045 (phase_times()->cur_collection_start_sec() * 1000.0 - _prev_collection_pause_end_ms); 1046 if (app_time_ms < MIN_TIMER_GRANULARITY) { 1047 // This usually happens due to the timer not having the required 1048 // granularity. Some Linuxes are the usual culprits. 1049 // We'll just set it to something (arbitrarily) small. 1050 app_time_ms = 1.0; 1051 } 1052 // We maintain the invariant that all objects allocated by mutator 1053 // threads will be allocated out of eden regions. So, we can use 1054 // the eden region number allocated since the previous GC to 1055 // calculate the application's allocate rate. The only exception 1056 // to that is humongous objects that are allocated separately. But 1057 // given that humongous object allocations do not really affect 1058 // either the pause's duration nor when the next pause will take 1059 // place we can safely ignore them here. 1060 uint regions_allocated = eden_cset_region_length(); 1061 double alloc_rate_ms = (double) regions_allocated / app_time_ms; 1062 _alloc_rate_ms_seq->add(alloc_rate_ms); 1063 1064 double interval_ms = 1065 (end_time_sec - _recent_prev_end_times_for_all_gcs_sec->oldest()) * 1000.0; 1066 update_recent_gc_times(end_time_sec, pause_time_ms); 1067 _recent_avg_pause_time_ratio = _recent_gc_times_ms->sum()/interval_ms; 1068 if (recent_avg_pause_time_ratio() < 0.0 || 1069 (recent_avg_pause_time_ratio() - 1.0 > 0.0)) { 1070 // Clip ratio between 0.0 and 1.0, and continue. This will be fixed in 1071 // CR 6902692 by redoing the manner in which the ratio is incrementally computed. 1072 if (_recent_avg_pause_time_ratio < 0.0) { 1073 _recent_avg_pause_time_ratio = 0.0; 1074 } else { 1075 assert(_recent_avg_pause_time_ratio - 1.0 > 0.0, "Ctl-point invariant"); 1076 _recent_avg_pause_time_ratio = 1.0; 1077 } 1078 } 1079 } 1080 1081 bool new_in_marking_window = collector_state()->in_marking_window(); 1082 bool new_in_marking_window_im = false; 1083 if (last_pause_included_initial_mark) { 1084 new_in_marking_window = true; 1085 new_in_marking_window_im = true; 1086 } 1087 1088 if (collector_state()->last_young_gc()) { 1089 // This is supposed to to be the "last young GC" before we start 1090 // doing mixed GCs. Here we decide whether to start mixed GCs or not. 1091 assert(!last_pause_included_initial_mark, "The last young GC is not allowed to be an initial mark GC"); 1092 1093 if (next_gc_should_be_mixed("start mixed GCs", 1094 "do not start mixed GCs")) { 1095 collector_state()->set_gcs_are_young(false); 1096 } else { 1097 // We aborted the mixed GC phase early. 
1098 abort_time_to_mixed_tracking(); 1099 } 1100 1101 collector_state()->set_last_young_gc(false); 1102 } 1103 1104 if (!collector_state()->last_gc_was_young()) { 1105 // This is a mixed GC. Here we decide whether to continue doing 1106 // mixed GCs or not. 1107 if (!next_gc_should_be_mixed("continue mixed GCs", 1108 "do not continue mixed GCs")) { 1109 collector_state()->set_gcs_are_young(true); 1110 1111 maybe_start_marking(); 1112 } 1113 } 1114 1115 _short_lived_surv_rate_group->start_adding_regions(); 1116 // Do that for any other surv rate groups 1117 1118 if (update_stats) { 1119 double cost_per_card_ms = 0.0; 1120 double cost_scan_hcc = average_time_ms(G1GCPhaseTimes::ScanHCC); 1121 if (_pending_cards > 0) { 1122 cost_per_card_ms = (average_time_ms(G1GCPhaseTimes::UpdateRS) - cost_scan_hcc) / (double) _pending_cards; 1123 _cost_per_card_ms_seq->add(cost_per_card_ms); 1124 } 1125 _cost_scan_hcc_seq->add(cost_scan_hcc); 1126 1127 double cost_per_entry_ms = 0.0; 1128 if (cards_scanned > 10) { 1129 cost_per_entry_ms = average_time_ms(G1GCPhaseTimes::ScanRS) / (double) cards_scanned; 1130 if (collector_state()->last_gc_was_young()) { 1131 _cost_per_entry_ms_seq->add(cost_per_entry_ms); 1132 } else { 1133 _mixed_cost_per_entry_ms_seq->add(cost_per_entry_ms); 1134 } 1135 } 1136 1137 if (_max_rs_lengths > 0) { 1138 double cards_per_entry_ratio = 1139 (double) cards_scanned / (double) _max_rs_lengths; 1140 if (collector_state()->last_gc_was_young()) { 1141 _young_cards_per_entry_ratio_seq->add(cards_per_entry_ratio); 1142 } else { 1143 _mixed_cards_per_entry_ratio_seq->add(cards_per_entry_ratio); 1144 } 1145 } 1146 1147 // This is defensive. For a while _max_rs_lengths could get 1148 // smaller than _recorded_rs_lengths which was causing 1149 // rs_length_diff to get very large and mess up the RSet length 1150 // predictions. The reason was unsafe concurrent updates to the 1151 // _inc_cset_recorded_rs_lengths field which the code below guards 1152 // against (see CR 7118202). This bug has now been fixed (see CR 1153 // 7119027). However, I'm still worried that 1154 // _inc_cset_recorded_rs_lengths might still end up somewhat 1155 // inaccurate. The concurrent refinement thread calculates an 1156 // RSet's length concurrently with other CR threads updating it 1157 // which might cause it to calculate the length incorrectly (if, 1158 // say, it's in mid-coarsening). So I'll leave in the defensive 1159 // conditional below just in case. 
1160     size_t rs_length_diff = 0;
1161     if (_max_rs_lengths > _recorded_rs_lengths) {
1162       rs_length_diff = _max_rs_lengths - _recorded_rs_lengths;
1163     }
1164     _rs_length_diff_seq->add((double) rs_length_diff);
1165 
1166     size_t freed_bytes = _heap_used_bytes_before_gc - cur_used_bytes;
1167     size_t copied_bytes = _collection_set_bytes_used_before - freed_bytes;
1168     double cost_per_byte_ms = 0.0;
1169 
1170     if (copied_bytes > 0) {
1171       cost_per_byte_ms = average_time_ms(G1GCPhaseTimes::ObjCopy) / (double) copied_bytes;
1172       if (collector_state()->in_marking_window()) {
1173         _cost_per_byte_ms_during_cm_seq->add(cost_per_byte_ms);
1174       } else {
1175         _cost_per_byte_ms_seq->add(cost_per_byte_ms);
1176       }
1177     }
1178 
1179     if (young_cset_region_length() > 0) {
1180       _young_other_cost_per_region_ms_seq->add(young_other_time_ms() /
1181                                                young_cset_region_length());
1182     }
1183 
1184     if (old_cset_region_length() > 0) {
1185       _non_young_other_cost_per_region_ms_seq->add(non_young_other_time_ms() /
1186                                                    old_cset_region_length());
1187     }
1188 
1189     _constant_other_time_ms_seq->add(constant_other_time_ms(pause_time_ms));
1190 
1191     _pending_cards_seq->add((double) _pending_cards);
1192     _rs_lengths_seq->add((double) _max_rs_lengths);
1193   }
1194 
1195   collector_state()->set_in_marking_window(new_in_marking_window);
1196   collector_state()->set_in_marking_window_im(new_in_marking_window_im);
1197   _free_regions_at_end_of_collection = _g1->num_free_regions();
1198   // IHOP control wants to know the expected young gen length if it were not
1199   // restrained by the heap reserve. Using the actual length would make the
1200   // prediction too small and thus limit the young gen every time we get to
1201   // the predicted target occupancy.
1202   size_t last_unrestrained_young_length = 0;
1203   update_young_list_max_and_target_length(&last_unrestrained_young_length);
1204   update_rs_lengths_prediction();
1205 
1206   double marking_to_mixed_time = -1.0;
1207   if (!collector_state()->last_gc_was_young() && _initial_mark_to_mixed.has_result()) {
1208     marking_to_mixed_time = _initial_mark_to_mixed.last_marking_time();
1209     assert(marking_to_mixed_time > 0.0,
1210            "Initial mark to mixed time must be larger than zero but is %.3f",
1211            marking_to_mixed_time);
1212   }
1213   // Only update IHOP information on regular GCs.
1214   if (update_stats) {
1215     update_ihop_statistics(marking_to_mixed_time,
1216                            app_time_ms / 1000.0,
1217                            _last_old_allocated_bytes,
1218                            last_unrestrained_young_length * HeapRegion::GrainBytes);
1219   }
1220   _last_old_allocated_bytes = 0;
1221 
1222   _ihop_control->send_event(_g1->gc_tracer_stw());
1223 
1224   // Note that _mmu_tracker->max_gc_time() returns the time in seconds.
1225   double update_rs_time_goal_ms = _mmu_tracker->max_gc_time() * MILLIUNITS * G1RSetUpdatingPauseTimePercent / 100.0;
1226 
1227   double scan_hcc_time_ms = average_time_ms(G1GCPhaseTimes::ScanHCC);
1228 
1229   if (update_rs_time_goal_ms < scan_hcc_time_ms) {
1230     ergo_verbose2(ErgoTiming,
1231                   "adjust concurrent refinement thresholds",
1232                   ergo_format_reason("Scanning the HCC expected to take longer than Update RS time goal")
1233                   ergo_format_ms("Update RS time goal")
1234                   ergo_format_ms("Scan HCC time"),
1235                   update_rs_time_goal_ms,
1236                   scan_hcc_time_ms);
1237 
1238     update_rs_time_goal_ms = 0;
1239   } else {
1240     update_rs_time_goal_ms -= scan_hcc_time_ms;
1241   }
1242   adjust_concurrent_refinement(average_time_ms(G1GCPhaseTimes::UpdateRS) - scan_hcc_time_ms,
1243                                phase_times()->sum_thread_work_items(G1GCPhaseTimes::UpdateRS),
1244                                update_rs_time_goal_ms);
1245 
1246   _collectionSetChooser->verify();
1247 }
1248 
1249 G1IHOPControl* G1CollectorPolicy::create_ihop_control() const {
1250   if (G1UseAdaptiveIHOP) {
1251     // The target occupancy is the total heap occupancy we want to hit. First, we
1252     // want to avoid eating into the reserve intended for young GC (to avoid unnecessary
1253     // throughput loss). Additionally, G1 is free to leave up to G1HeapWastePercent
1254     // of the heap unreclaimed; that space also cannot be used for allocation
1255     // while marking.
1256     size_t safe_heap_percentage = (size_t) (G1ReservePercent + G1HeapWastePercent);
1257     size_t target_occupancy = 0;
1258 
1259     if (safe_heap_percentage < 100) {
1260       target_occupancy = G1CollectedHeap::heap()->max_capacity() * (100.0 - safe_heap_percentage) / 100.0;
1261     }
1262     return new G1AdaptiveIHOPControl(InitiatingHeapOccupancyPercent,
1263                                      target_occupancy,
1264                                      &_predictor);
1265   } else {
1266     return new G1StaticIHOPControl(InitiatingHeapOccupancyPercent,
1267                                    G1CollectedHeap::heap()->max_capacity());
1268   }
1269 }
1270 
1271 void G1CollectorPolicy::update_ihop_statistics(double marking_time,
1272                                                double mutator_time_s,
1273                                                size_t mutator_alloc_bytes,
1274                                                size_t young_gen_size) {
1275   bool report = false;
1276 
1277   // To avoid using really small times that may be caused by e.g. back-to-back gcs
1278   // we filter them out.
1279   double const min_valid_time = 1e-6;
1280 
1281   if (marking_time > min_valid_time) {
1282     _ihop_control->update_time_to_mixed(marking_time);
1283     report = true;
1284   }
1285 
1286   // As an approximation for the young gc promotion rates during marking we use
1287   // all young gcs. In many applications there are only a few, if any, young gcs
1288   // during marking, which would make a prediction based on them alone useless.
1289   // Using all of them increases the accuracy of the prediction.
1290 if (collector_state()->last_gc_was_young() && mutator_time_s > min_valid_time) { 1291 _ihop_control->update_allocation_info(mutator_time_s, mutator_alloc_bytes, young_gen_size); 1292 report = true; 1293 } 1294 1295 if (report) { 1296 report_ihop_statistics(); 1297 } 1298 } 1299 1300 void G1CollectorPolicy::report_ihop_statistics() { 1301 _ihop_control->print(); 1302 } 1303 1304 #define EXT_SIZE_FORMAT "%.1f%s" 1305 #define EXT_SIZE_PARAMS(bytes) \ 1306 byte_size_in_proper_unit((double)(bytes)), \ 1307 proper_unit_for_byte_size((bytes)) 1308 1309 void G1CollectorPolicy::record_heap_size_info_at_start(bool full) { 1310 YoungList* young_list = _g1->young_list(); 1311 _eden_used_bytes_before_gc = young_list->eden_used_bytes(); 1312 _survivor_used_bytes_before_gc = young_list->survivor_used_bytes(); 1313 _heap_capacity_bytes_before_gc = _g1->capacity(); 1314 _heap_used_bytes_before_gc = _g1->used(); 1315 _cur_collection_pause_used_regions_at_start = _g1->num_used_regions(); 1316 1317 _eden_capacity_bytes_before_gc = 1318 (_young_list_target_length * HeapRegion::GrainBytes) - _survivor_used_bytes_before_gc; 1319 1320 if (full) { 1321 _metaspace_used_bytes_before_gc = MetaspaceAux::used_bytes(); 1322 } 1323 } 1324 1325 void G1CollectorPolicy::print_heap_transition(size_t bytes_before) const { 1326 size_t bytes_after = _g1->used(); 1327 size_t capacity = _g1->capacity(); 1328 1329 gclog_or_tty->print(" " SIZE_FORMAT "%s->" SIZE_FORMAT "%s(" SIZE_FORMAT "%s)", 1330 byte_size_in_proper_unit(bytes_before), 1331 proper_unit_for_byte_size(bytes_before), 1332 byte_size_in_proper_unit(bytes_after), 1333 proper_unit_for_byte_size(bytes_after), 1334 byte_size_in_proper_unit(capacity), 1335 proper_unit_for_byte_size(capacity)); 1336 } 1337 1338 void G1CollectorPolicy::print_heap_transition() const { 1339 print_heap_transition(_heap_used_bytes_before_gc); 1340 } 1341 1342 void G1CollectorPolicy::print_detailed_heap_transition(bool full) const { 1343 YoungList* young_list = _g1->young_list(); 1344 1345 size_t eden_used_bytes_after_gc = young_list->eden_used_bytes(); 1346 size_t survivor_used_bytes_after_gc = young_list->survivor_used_bytes(); 1347 size_t heap_used_bytes_after_gc = _g1->used(); 1348 1349 size_t heap_capacity_bytes_after_gc = _g1->capacity(); 1350 size_t eden_capacity_bytes_after_gc = 1351 (_young_list_target_length * HeapRegion::GrainBytes) - survivor_used_bytes_after_gc; 1352 1353 gclog_or_tty->print( 1354 " [Eden: " EXT_SIZE_FORMAT "(" EXT_SIZE_FORMAT ")->" EXT_SIZE_FORMAT "(" EXT_SIZE_FORMAT ") " 1355 "Survivors: " EXT_SIZE_FORMAT "->" EXT_SIZE_FORMAT " " 1356 "Heap: " EXT_SIZE_FORMAT "(" EXT_SIZE_FORMAT ")->" 1357 EXT_SIZE_FORMAT "(" EXT_SIZE_FORMAT ")]", 1358 EXT_SIZE_PARAMS(_eden_used_bytes_before_gc), 1359 EXT_SIZE_PARAMS(_eden_capacity_bytes_before_gc), 1360 EXT_SIZE_PARAMS(eden_used_bytes_after_gc), 1361 EXT_SIZE_PARAMS(eden_capacity_bytes_after_gc), 1362 EXT_SIZE_PARAMS(_survivor_used_bytes_before_gc), 1363 EXT_SIZE_PARAMS(survivor_used_bytes_after_gc), 1364 EXT_SIZE_PARAMS(_heap_used_bytes_before_gc), 1365 EXT_SIZE_PARAMS(_heap_capacity_bytes_before_gc), 1366 EXT_SIZE_PARAMS(heap_used_bytes_after_gc), 1367 EXT_SIZE_PARAMS(heap_capacity_bytes_after_gc)); 1368 1369 if (full) { 1370 MetaspaceAux::print_metaspace_change(_metaspace_used_bytes_before_gc); 1371 } 1372 1373 gclog_or_tty->cr(); 1374 } 1375 1376 void G1CollectorPolicy::print_phases(double pause_time_sec) { 1377 phase_times()->print(pause_time_sec); 1378 } 1379 1380 void 
G1CollectorPolicy::adjust_concurrent_refinement(double update_rs_time, 1381 double update_rs_processed_buffers, 1382 double goal_ms) { 1383 DirtyCardQueueSet& dcqs = JavaThread::dirty_card_queue_set(); 1384 ConcurrentG1Refine *cg1r = G1CollectedHeap::heap()->concurrent_g1_refine(); 1385 1386 if (G1UseAdaptiveConcRefinement) { 1387 const int k_gy = 3, k_gr = 6; 1388 const double inc_k = 1.1, dec_k = 0.9; 1389 1390 int g = cg1r->green_zone(); 1391 if (update_rs_time > goal_ms) { 1392 g = (int)(g * dec_k); // Can become 0, that's OK. That would mean a mutator-only processing. 1393 } else { 1394 if (update_rs_time < goal_ms && update_rs_processed_buffers > g) { 1395 g = (int)MAX2(g * inc_k, g + 1.0); 1396 } 1397 } 1398 // Change the refinement threads params 1399 cg1r->set_green_zone(g); 1400 cg1r->set_yellow_zone(g * k_gy); 1401 cg1r->set_red_zone(g * k_gr); 1402 cg1r->reinitialize_threads(); 1403 1404 int processing_threshold_delta = MAX2((int)(cg1r->green_zone() * _predictor.sigma()), 1); 1405 int processing_threshold = MIN2(cg1r->green_zone() + processing_threshold_delta, 1406 cg1r->yellow_zone()); 1407 // Change the barrier params 1408 dcqs.set_process_completed_threshold(processing_threshold); 1409 dcqs.set_max_completed_queue(cg1r->red_zone()); 1410 } 1411 1412 int curr_queue_size = dcqs.completed_buffers_num(); 1413 if (curr_queue_size >= cg1r->yellow_zone()) { 1414 dcqs.set_completed_queue_padding(curr_queue_size); 1415 } else { 1416 dcqs.set_completed_queue_padding(0); 1417 } 1418 dcqs.notify_if_necessary(); 1419 } 1420 1421 size_t G1CollectorPolicy::predict_rs_length_diff() const { 1422 return (size_t) get_new_prediction(_rs_length_diff_seq); 1423 } 1424 1425 double G1CollectorPolicy::predict_alloc_rate_ms() const { 1426 return get_new_prediction(_alloc_rate_ms_seq); 1427 } 1428 1429 double G1CollectorPolicy::predict_cost_per_card_ms() const { 1430 return get_new_prediction(_cost_per_card_ms_seq); 1431 } 1432 1433 double G1CollectorPolicy::predict_scan_hcc_ms() const { 1434 return get_new_prediction(_cost_scan_hcc_seq); 1435 } 1436 1437 double G1CollectorPolicy::predict_rs_update_time_ms(size_t pending_cards) const { 1438 return pending_cards * predict_cost_per_card_ms() + predict_scan_hcc_ms(); 1439 } 1440 1441 double G1CollectorPolicy::predict_young_cards_per_entry_ratio() const { 1442 return get_new_prediction(_young_cards_per_entry_ratio_seq); 1443 } 1444 1445 double G1CollectorPolicy::predict_mixed_cards_per_entry_ratio() const { 1446 if (_mixed_cards_per_entry_ratio_seq->num() < 2) { 1447 return predict_young_cards_per_entry_ratio(); 1448 } else { 1449 return get_new_prediction(_mixed_cards_per_entry_ratio_seq); 1450 } 1451 } 1452 1453 size_t G1CollectorPolicy::predict_young_card_num(size_t rs_length) const { 1454 return (size_t) (rs_length * predict_young_cards_per_entry_ratio()); 1455 } 1456 1457 size_t G1CollectorPolicy::predict_non_young_card_num(size_t rs_length) const { 1458 return (size_t)(rs_length * predict_mixed_cards_per_entry_ratio()); 1459 } 1460 1461 double G1CollectorPolicy::predict_rs_scan_time_ms(size_t card_num) const { 1462 if (collector_state()->gcs_are_young()) { 1463 return card_num * get_new_prediction(_cost_per_entry_ms_seq); 1464 } else { 1465 return predict_mixed_rs_scan_time_ms(card_num); 1466 } 1467 } 1468 1469 double G1CollectorPolicy::predict_mixed_rs_scan_time_ms(size_t card_num) const { 1470 if (_mixed_cost_per_entry_ms_seq->num() < 3) { 1471 return card_num * get_new_prediction(_cost_per_entry_ms_seq); 1472 } else { 1473 return card_num * 
get_new_prediction(_mixed_cost_per_entry_ms_seq); 1474 } 1475 } 1476 1477 double G1CollectorPolicy::predict_object_copy_time_ms_during_cm(size_t bytes_to_copy) const { 1478 if (_cost_per_byte_ms_during_cm_seq->num() < 3) { 1479 return (1.1 * bytes_to_copy) * get_new_prediction(_cost_per_byte_ms_seq); 1480 } else { 1481 return bytes_to_copy * get_new_prediction(_cost_per_byte_ms_during_cm_seq); 1482 } 1483 } 1484 1485 double G1CollectorPolicy::predict_object_copy_time_ms(size_t bytes_to_copy) const { 1486 if (collector_state()->during_concurrent_mark()) { 1487 return predict_object_copy_time_ms_during_cm(bytes_to_copy); 1488 } else { 1489 return bytes_to_copy * get_new_prediction(_cost_per_byte_ms_seq); 1490 } 1491 } 1492 1493 double G1CollectorPolicy::predict_constant_other_time_ms() const { 1494 return get_new_prediction(_constant_other_time_ms_seq); 1495 } 1496 1497 double G1CollectorPolicy::predict_young_other_time_ms(size_t young_num) const { 1498 return young_num * get_new_prediction(_young_other_cost_per_region_ms_seq); 1499 } 1500 1501 double G1CollectorPolicy::predict_non_young_other_time_ms(size_t non_young_num) const { 1502 return non_young_num * get_new_prediction(_non_young_other_cost_per_region_ms_seq); 1503 } 1504 1505 double G1CollectorPolicy::predict_remark_time_ms() const { 1506 return get_new_prediction(_concurrent_mark_remark_times_ms); 1507 } 1508 1509 double G1CollectorPolicy::predict_cleanup_time_ms() const { 1510 return get_new_prediction(_concurrent_mark_cleanup_times_ms); 1511 } 1512 1513 double G1CollectorPolicy::predict_yg_surv_rate(int age, SurvRateGroup* surv_rate_group) const { 1514 TruncatedSeq* seq = surv_rate_group->get_seq(age); 1515 guarantee(seq->num() > 0, "There should be some young gen survivor samples available. 
Tried to access with age %d", age); 1516 double pred = get_new_prediction(seq); 1517 if (pred > 1.0) { 1518 pred = 1.0; 1519 } 1520 return pred; 1521 } 1522 1523 double G1CollectorPolicy::predict_yg_surv_rate(int age) const { 1524 return predict_yg_surv_rate(age, _short_lived_surv_rate_group); 1525 } 1526 1527 double G1CollectorPolicy::accum_yg_surv_rate_pred(int age) const { 1528 return _short_lived_surv_rate_group->accum_surv_rate_pred(age); 1529 } 1530 1531 double G1CollectorPolicy::predict_base_elapsed_time_ms(size_t pending_cards, 1532 size_t scanned_cards) const { 1533 return 1534 predict_rs_update_time_ms(pending_cards) + 1535 predict_rs_scan_time_ms(scanned_cards) + 1536 predict_constant_other_time_ms(); 1537 } 1538 1539 double G1CollectorPolicy::predict_base_elapsed_time_ms(size_t pending_cards) const { 1540 size_t rs_length = predict_rs_length_diff(); 1541 size_t card_num; 1542 if (collector_state()->gcs_are_young()) { 1543 card_num = predict_young_card_num(rs_length); 1544 } else { 1545 card_num = predict_non_young_card_num(rs_length); 1546 } 1547 return predict_base_elapsed_time_ms(pending_cards, card_num); 1548 } 1549 1550 size_t G1CollectorPolicy::predict_bytes_to_copy(HeapRegion* hr) const { 1551 size_t bytes_to_copy; 1552 if (hr->is_marked()) 1553 bytes_to_copy = hr->max_live_bytes(); 1554 else { 1555 assert(hr->is_young() && hr->age_in_surv_rate_group() != -1, "invariant"); 1556 int age = hr->age_in_surv_rate_group(); 1557 double yg_surv_rate = predict_yg_surv_rate(age, hr->surv_rate_group()); 1558 bytes_to_copy = (size_t) (hr->used() * yg_surv_rate); 1559 } 1560 return bytes_to_copy; 1561 } 1562 1563 double G1CollectorPolicy::predict_region_elapsed_time_ms(HeapRegion* hr, 1564 bool for_young_gc) const { 1565 size_t rs_length = hr->rem_set()->occupied(); 1566 size_t card_num; 1567 1568 // Predicting the number of cards is based on which type of GC 1569 // we're predicting for. 1570 if (for_young_gc) { 1571 card_num = predict_young_card_num(rs_length); 1572 } else { 1573 card_num = predict_non_young_card_num(rs_length); 1574 } 1575 size_t bytes_to_copy = predict_bytes_to_copy(hr); 1576 1577 double region_elapsed_time_ms = 1578 predict_rs_scan_time_ms(card_num) + 1579 predict_object_copy_time_ms(bytes_to_copy); 1580 1581 // The prediction of the "other" time for this region is based 1582 // upon the region type and NOT the GC type. 
1583 if (hr->is_young()) { 1584 region_elapsed_time_ms += predict_young_other_time_ms(1); 1585 } else { 1586 region_elapsed_time_ms += predict_non_young_other_time_ms(1); 1587 } 1588 return region_elapsed_time_ms; 1589 } 1590 1591 void G1CollectorPolicy::init_cset_region_lengths(uint eden_cset_region_length, 1592 uint survivor_cset_region_length) { 1593 _eden_cset_region_length = eden_cset_region_length; 1594 _survivor_cset_region_length = survivor_cset_region_length; 1595 _old_cset_region_length = 0; 1596 } 1597 1598 void G1CollectorPolicy::set_recorded_rs_lengths(size_t rs_lengths) { 1599 _recorded_rs_lengths = rs_lengths; 1600 } 1601 1602 void G1CollectorPolicy::update_recent_gc_times(double end_time_sec, 1603 double elapsed_ms) { 1604 _recent_gc_times_ms->add(elapsed_ms); 1605 _recent_prev_end_times_for_all_gcs_sec->add(end_time_sec); 1606 _prev_collection_pause_end_ms = end_time_sec * 1000.0; 1607 } 1608 1609 size_t G1CollectorPolicy::expansion_amount() const { 1610 double recent_gc_overhead = recent_avg_pause_time_ratio() * 100.0; 1611 double threshold = _gc_overhead_perc; 1612 if (recent_gc_overhead > threshold) { 1613 // We will double the existing space, or take 1614 // G1ExpandByPercentOfAvailable % of the available expansion 1615 // space, whichever is smaller, bounded below by a minimum 1616 // expansion (unless that's all that's left.) 1617 const size_t min_expand_bytes = 1*M; 1618 size_t reserved_bytes = _g1->max_capacity(); 1619 size_t committed_bytes = _g1->capacity(); 1620 size_t uncommitted_bytes = reserved_bytes - committed_bytes; 1621 size_t expand_bytes; 1622 size_t expand_bytes_via_pct = 1623 uncommitted_bytes * G1ExpandByPercentOfAvailable / 100; 1624 expand_bytes = MIN2(expand_bytes_via_pct, committed_bytes); 1625 expand_bytes = MAX2(expand_bytes, min_expand_bytes); 1626 expand_bytes = MIN2(expand_bytes, uncommitted_bytes); 1627 1628 ergo_verbose5(ErgoHeapSizing, 1629 "attempt heap expansion", 1630 ergo_format_reason("recent GC overhead higher than " 1631 "threshold after GC") 1632 ergo_format_perc("recent GC overhead") 1633 ergo_format_perc("threshold") 1634 ergo_format_byte("uncommitted") 1635 ergo_format_byte_perc("calculated expansion amount"), 1636 recent_gc_overhead, threshold, 1637 uncommitted_bytes, 1638 expand_bytes_via_pct, (double) G1ExpandByPercentOfAvailable); 1639 1640 return expand_bytes; 1641 } else { 1642 return 0; 1643 } 1644 } 1645 1646 void G1CollectorPolicy::print_tracing_info() const { 1647 _trace_young_gen_time_data.print(); 1648 _trace_old_gen_time_data.print(); 1649 } 1650 1651 void G1CollectorPolicy::print_yg_surv_rate_info() const { 1652 #ifndef PRODUCT 1653 _short_lived_surv_rate_group->print_surv_rate_summary(); 1654 // add this call for any other surv rate groups 1655 #endif // PRODUCT 1656 } 1657 1658 bool G1CollectorPolicy::is_young_list_full() const { 1659 uint young_list_length = _g1->young_list()->length(); 1660 uint young_list_target_length = _young_list_target_length; 1661 return young_list_length >= young_list_target_length; 1662 } 1663 1664 bool G1CollectorPolicy::can_expand_young_list() const { 1665 uint young_list_length = _g1->young_list()->length(); 1666 uint young_list_max_length = _young_list_max_length; 1667 return young_list_length < young_list_max_length; 1668 } 1669 1670 void G1CollectorPolicy::update_max_gc_locker_expansion() { 1671 uint expansion_region_num = 0; 1672 if (GCLockerEdenExpansionPercent > 0) { 1673 double perc = (double) GCLockerEdenExpansionPercent / 100.0; 1674 double expansion_region_num_d = perc * 
(double) _young_list_target_length; 1675 // We use ceiling so that if expansion_region_num_d is > 0.0 (but 1676 // less than 1.0) we'll get 1. 1677 expansion_region_num = (uint) ceil(expansion_region_num_d); 1678 } else { 1679 assert(expansion_region_num == 0, "sanity"); 1680 } 1681 _young_list_max_length = _young_list_target_length + expansion_region_num; 1682 assert(_young_list_target_length <= _young_list_max_length, "post-condition"); 1683 } 1684 1685 // Calculates survivor space parameters. 1686 void G1CollectorPolicy::update_survivors_policy() { 1687 double max_survivor_regions_d = 1688 (double) _young_list_target_length / (double) SurvivorRatio; 1689 // We use ceiling so that if max_survivor_regions_d is > 0.0 (but 1690 // smaller than 1.0) we'll get 1. 1691 _max_survivor_regions = (uint) ceil(max_survivor_regions_d); 1692 1693 _tenuring_threshold = _survivors_age_table.compute_tenuring_threshold( 1694 HeapRegion::GrainWords * _max_survivor_regions, counters()); 1695 } 1696 1697 bool G1CollectorPolicy::force_initial_mark_if_outside_cycle(GCCause::Cause gc_cause) { 1698 // We actually check whether we are marking here and not if we are in a 1699 // reclamation phase. This means that we will schedule a concurrent mark 1700 // even while we are still in the process of reclaiming memory. 1701 bool during_cycle = _g1->concurrent_mark()->cmThread()->during_cycle(); 1702 if (!during_cycle) { 1703 ergo_verbose1(ErgoConcCycles, 1704 "request concurrent cycle initiation", 1705 ergo_format_reason("requested by GC cause") 1706 ergo_format_str("GC cause"), 1707 GCCause::to_string(gc_cause)); 1708 collector_state()->set_initiate_conc_mark_if_possible(true); 1709 return true; 1710 } else { 1711 ergo_verbose1(ErgoConcCycles, 1712 "do not request concurrent cycle initiation", 1713 ergo_format_reason("concurrent cycle already in progress") 1714 ergo_format_str("GC cause"), 1715 GCCause::to_string(gc_cause)); 1716 return false; 1717 } 1718 } 1719 1720 void G1CollectorPolicy::decide_on_conc_mark_initiation() { 1721 // We are about to decide on whether this pause will be an 1722 // initial-mark pause. 1723 1724 // First, collector_state()->during_initial_mark_pause() should not be already set. We 1725 // will set it here if we have to. However, it should be cleared by 1726 // the end of the pause (it's only set for the duration of an 1727 // initial-mark pause). 1728 assert(!collector_state()->during_initial_mark_pause(), "pre-condition"); 1729 1730 if (collector_state()->initiate_conc_mark_if_possible()) { 1731 // We had noticed on a previous pause that the heap occupancy has 1732 // gone over the initiating threshold and we should start a 1733 // concurrent marking cycle. So we might initiate one. 1734 1735 if (!about_to_start_mixed_phase() && collector_state()->gcs_are_young()) { 1736 // Initiate a new initial mark only if there is no marking or reclamation going 1737 // on. 1738 1739 collector_state()->set_during_initial_mark_pause(true); 1740 // And we can now clear initiate_conc_mark_if_possible() as 1741 // we've already acted on it. 1742 collector_state()->set_initiate_conc_mark_if_possible(false); 1743 1744 ergo_verbose0(ErgoConcCycles, 1745 "initiate concurrent cycle", 1746 ergo_format_reason("concurrent cycle initiation requested")); 1747 } else { 1748 // The concurrent marking thread is still finishing up the 1749 // previous cycle. If we start one right now the two cycles 1750 // overlap. 
In particular, the concurrent marking thread might 1751 // be in the process of clearing the next marking bitmap (which 1752 // we will use for the next cycle if we start one). Starting a 1753 // cycle now will be bad given that parts of the marking 1754 // information might get cleared by the marking thread. And we 1755 // cannot wait for the marking thread to finish the cycle as it 1756 // periodically yields while clearing the next marking bitmap 1757 // and, if it's in a yield point, it's waiting for us to 1758 // finish. So, at this point we will not start a cycle and we'll 1759 // let the concurrent marking thread complete the last one. 1760 ergo_verbose0(ErgoConcCycles, 1761 "do not initiate concurrent cycle", 1762 ergo_format_reason("concurrent cycle already in progress")); 1763 } 1764 } 1765 } 1766 1767 class ParKnownGarbageHRClosure: public HeapRegionClosure { 1768 G1CollectedHeap* _g1h; 1769 CSetChooserParUpdater _cset_updater; 1770 1771 public: 1772 ParKnownGarbageHRClosure(CollectionSetChooser* hrSorted, 1773 uint chunk_size) : 1774 _g1h(G1CollectedHeap::heap()), 1775 _cset_updater(hrSorted, true /* parallel */, chunk_size) { } 1776 1777 bool doHeapRegion(HeapRegion* r) { 1778 // Do we have any marking information for this region? 1779 if (r->is_marked()) { 1780 // We will skip any region that's currently used as an old GC 1781 // alloc region (we should not consider those for collection 1782 // before we fill them up). 1783 if (_cset_updater.should_add(r) && !_g1h->is_old_gc_alloc_region(r)) { 1784 _cset_updater.add_region(r); 1785 } 1786 } 1787 return false; 1788 } 1789 }; 1790 1791 class ParKnownGarbageTask: public AbstractGangTask { 1792 CollectionSetChooser* _hrSorted; 1793 uint _chunk_size; 1794 G1CollectedHeap* _g1; 1795 HeapRegionClaimer _hrclaimer; 1796 1797 public: 1798 ParKnownGarbageTask(CollectionSetChooser* hrSorted, uint chunk_size, uint n_workers) : 1799 AbstractGangTask("ParKnownGarbageTask"), 1800 _hrSorted(hrSorted), _chunk_size(chunk_size), 1801 _g1(G1CollectedHeap::heap()), _hrclaimer(n_workers) {} 1802 1803 void work(uint worker_id) { 1804 ParKnownGarbageHRClosure parKnownGarbageCl(_hrSorted, _chunk_size); 1805 _g1->heap_region_par_iterate(&parKnownGarbageCl, worker_id, &_hrclaimer); 1806 } 1807 }; 1808 1809 uint G1CollectorPolicy::calculate_parallel_work_chunk_size(uint n_workers, uint n_regions) const { 1810 assert(n_workers > 0, "Active gc workers should be greater than 0"); 1811 const uint overpartition_factor = 4; 1812 const uint min_chunk_size = MAX2(n_regions / n_workers, 1U); 1813 return MAX2(n_regions / (n_workers * overpartition_factor), min_chunk_size); 1814 } 1815 1816 void G1CollectorPolicy::record_concurrent_mark_cleanup_end() { 1817 _collectionSetChooser->clear(); 1818 1819 WorkGang* workers = _g1->workers(); 1820 uint n_workers = workers->active_workers(); 1821 1822 uint n_regions = _g1->num_regions(); 1823 uint chunk_size = calculate_parallel_work_chunk_size(n_workers, n_regions); 1824 _collectionSetChooser->prepare_for_par_region_addition(n_workers, n_regions, chunk_size); 1825 ParKnownGarbageTask par_known_garbage_task(_collectionSetChooser, chunk_size, n_workers); 1826 workers->run_task(&par_known_garbage_task); 1827 1828 _collectionSetChooser->sort_regions(); 1829 1830 double end_sec = os::elapsedTime(); 1831 double elapsed_time_ms = (end_sec - _mark_cleanup_start_sec) * 1000.0; 1832 _concurrent_mark_cleanup_times_ms->add(elapsed_time_ms); 1833 _cur_mark_stop_world_time_ms += elapsed_time_ms; 1834 _prev_collection_pause_end_ms += 
elapsed_time_ms; 1835 1836 record_pause(Cleanup, _mark_cleanup_start_sec, end_sec); 1837 } 1838 1839 // Add the heap region at the head of the non-incremental collection set 1840 void G1CollectorPolicy::add_old_region_to_cset(HeapRegion* hr) { 1841 assert(_inc_cset_build_state == Active, "Precondition"); 1842 assert(hr->is_old(), "the region should be old"); 1843 1844 assert(!hr->in_collection_set(), "should not already be in the CSet"); 1845 _g1->register_old_region_with_cset(hr); 1846 hr->set_next_in_collection_set(_collection_set); 1847 _collection_set = hr; 1848 _collection_set_bytes_used_before += hr->used(); 1849 size_t rs_length = hr->rem_set()->occupied(); 1850 _recorded_rs_lengths += rs_length; 1851 _old_cset_region_length += 1; 1852 } 1853 1854 // Initialize the per-collection-set information 1855 void G1CollectorPolicy::start_incremental_cset_building() { 1856 assert(_inc_cset_build_state == Inactive, "Precondition"); 1857 1858 _inc_cset_head = NULL; 1859 _inc_cset_tail = NULL; 1860 _inc_cset_bytes_used_before = 0; 1861 1862 _inc_cset_max_finger = 0; 1863 _inc_cset_recorded_rs_lengths = 0; 1864 _inc_cset_recorded_rs_lengths_diffs = 0; 1865 _inc_cset_predicted_elapsed_time_ms = 0.0; 1866 _inc_cset_predicted_elapsed_time_ms_diffs = 0.0; 1867 _inc_cset_build_state = Active; 1868 } 1869 1870 void G1CollectorPolicy::finalize_incremental_cset_building() { 1871 assert(_inc_cset_build_state == Active, "Precondition"); 1872 assert(SafepointSynchronize::is_at_safepoint(), "should be at a safepoint"); 1873 1874 // The two "main" fields, _inc_cset_recorded_rs_lengths and 1875 // _inc_cset_predicted_elapsed_time_ms, are updated by the thread 1876 // that adds a new region to the CSet. Further updates by the 1877 // concurrent refinement thread that samples the young RSet lengths 1878 // are accumulated in the *_diffs fields. Here we add the diffs to 1879 // the "main" fields. 1880 1881 if (_inc_cset_recorded_rs_lengths_diffs >= 0) { 1882 _inc_cset_recorded_rs_lengths += _inc_cset_recorded_rs_lengths_diffs; 1883 } else { 1884 // This is defensive. The diff should in theory be always positive 1885 // as RSets can only grow between GCs. However, given that we 1886 // sample their size concurrently with other threads updating them 1887 // it's possible that we might get the wrong size back, which 1888 // could make the calculations somewhat inaccurate. 1889 size_t diffs = (size_t) (-_inc_cset_recorded_rs_lengths_diffs); 1890 if (_inc_cset_recorded_rs_lengths >= diffs) { 1891 _inc_cset_recorded_rs_lengths -= diffs; 1892 } else { 1893 _inc_cset_recorded_rs_lengths = 0; 1894 } 1895 } 1896 _inc_cset_predicted_elapsed_time_ms += 1897 _inc_cset_predicted_elapsed_time_ms_diffs; 1898 1899 _inc_cset_recorded_rs_lengths_diffs = 0; 1900 _inc_cset_predicted_elapsed_time_ms_diffs = 0.0; 1901 } 1902 1903 void G1CollectorPolicy::add_to_incremental_cset_info(HeapRegion* hr, size_t rs_length) { 1904 // This routine is used when: 1905 // * adding survivor regions to the incremental cset at the end of an 1906 // evacuation pause, 1907 // * adding the current allocation region to the incremental cset 1908 // when it is retired, and 1909 // * updating existing policy information for a region in the 1910 // incremental cset via young list RSet sampling. 1911 // Therefore this routine may be called at a safepoint by the 1912 // VM thread, or in-between safepoints by mutator threads (when 1913 // retiring the current allocation region) or a concurrent 1914 // refine thread (RSet sampling). 
1915 1916 double region_elapsed_time_ms = predict_region_elapsed_time_ms(hr, collector_state()->gcs_are_young()); 1917 size_t used_bytes = hr->used(); 1918 _inc_cset_recorded_rs_lengths += rs_length; 1919 _inc_cset_predicted_elapsed_time_ms += region_elapsed_time_ms; 1920 _inc_cset_bytes_used_before += used_bytes; 1921 1922 // Cache the values we have added to the aggregated information 1923 // in the heap region in case we have to remove this region from 1924 // the incremental collection set, or it is updated by the 1925 // rset sampling code 1926 hr->set_recorded_rs_length(rs_length); 1927 hr->set_predicted_elapsed_time_ms(region_elapsed_time_ms); 1928 } 1929 1930 void G1CollectorPolicy::update_incremental_cset_info(HeapRegion* hr, 1931 size_t new_rs_length) { 1932 // Update the CSet information that is dependent on the new RS length 1933 assert(hr->is_young(), "Precondition"); 1934 assert(!SafepointSynchronize::is_at_safepoint(), 1935 "should not be at a safepoint"); 1936 1937 // We could have updated _inc_cset_recorded_rs_lengths and 1938 // _inc_cset_predicted_elapsed_time_ms directly but we'd need to do 1939 // that atomically, as this code is executed by a concurrent 1940 // refinement thread, potentially concurrently with a mutator thread 1941 // allocating a new region and also updating the same fields. To 1942 // avoid the atomic operations we accumulate these updates on two 1943 // separate fields (*_diffs) and we'll just add them to the "main" 1944 // fields at the start of a GC. 1945 1946 ssize_t old_rs_length = (ssize_t) hr->recorded_rs_length(); 1947 ssize_t rs_lengths_diff = (ssize_t) new_rs_length - old_rs_length; 1948 _inc_cset_recorded_rs_lengths_diffs += rs_lengths_diff; 1949 1950 double old_elapsed_time_ms = hr->predicted_elapsed_time_ms(); 1951 double new_region_elapsed_time_ms = predict_region_elapsed_time_ms(hr, collector_state()->gcs_are_young()); 1952 double elapsed_ms_diff = new_region_elapsed_time_ms - old_elapsed_time_ms; 1953 _inc_cset_predicted_elapsed_time_ms_diffs += elapsed_ms_diff; 1954 1955 hr->set_recorded_rs_length(new_rs_length); 1956 hr->set_predicted_elapsed_time_ms(new_region_elapsed_time_ms); 1957 } 1958 1959 void G1CollectorPolicy::add_region_to_incremental_cset_common(HeapRegion* hr) { 1960 assert(hr->is_young(), "invariant"); 1961 assert(hr->young_index_in_cset() > -1, "should have already been set"); 1962 assert(_inc_cset_build_state == Active, "Precondition"); 1963 1964 // We need to clear and set the cached recorded/cached collection set 1965 // information in the heap region here (before the region gets added 1966 // to the collection set). An individual heap region's cached values 1967 // are calculated, aggregated with the policy collection set info, 1968 // and cached in the heap region here (initially) and (subsequently) 1969 // by the Young List sampling code. 
1970 1971 size_t rs_length = hr->rem_set()->occupied(); 1972 add_to_incremental_cset_info(hr, rs_length); 1973 1974 HeapWord* hr_end = hr->end(); 1975 _inc_cset_max_finger = MAX2(_inc_cset_max_finger, hr_end); 1976 1977 assert(!hr->in_collection_set(), "invariant"); 1978 _g1->register_young_region_with_cset(hr); 1979 assert(hr->next_in_collection_set() == NULL, "invariant"); 1980 } 1981 1982 // Add the region at the RHS of the incremental cset 1983 void G1CollectorPolicy::add_region_to_incremental_cset_rhs(HeapRegion* hr) { 1984 // We should only ever be appending survivors at the end of a pause 1985 assert(hr->is_survivor(), "Logic"); 1986 1987 // Do the 'common' stuff 1988 add_region_to_incremental_cset_common(hr); 1989 1990 // Now add the region at the right hand side 1991 if (_inc_cset_tail == NULL) { 1992 assert(_inc_cset_head == NULL, "invariant"); 1993 _inc_cset_head = hr; 1994 } else { 1995 _inc_cset_tail->set_next_in_collection_set(hr); 1996 } 1997 _inc_cset_tail = hr; 1998 } 1999 2000 // Add the region to the LHS of the incremental cset 2001 void G1CollectorPolicy::add_region_to_incremental_cset_lhs(HeapRegion* hr) { 2002 // Survivors should be added to the RHS at the end of a pause 2003 assert(hr->is_eden(), "Logic"); 2004 2005 // Do the 'common' stuff 2006 add_region_to_incremental_cset_common(hr); 2007 2008 // Add the region at the left hand side 2009 hr->set_next_in_collection_set(_inc_cset_head); 2010 if (_inc_cset_head == NULL) { 2011 assert(_inc_cset_tail == NULL, "Invariant"); 2012 _inc_cset_tail = hr; 2013 } 2014 _inc_cset_head = hr; 2015 } 2016 2017 #ifndef PRODUCT 2018 void G1CollectorPolicy::print_collection_set(HeapRegion* list_head, outputStream* st) { 2019 assert(list_head == inc_cset_head() || list_head == collection_set(), "must be"); 2020 2021 st->print_cr("\nCollection_set:"); 2022 HeapRegion* csr = list_head; 2023 while (csr != NULL) { 2024 HeapRegion* next = csr->next_in_collection_set(); 2025 assert(csr->in_collection_set(), "bad CS"); 2026 st->print_cr(" " HR_FORMAT ", P: " PTR_FORMAT "N: " PTR_FORMAT ", age: %4d", 2027 HR_FORMAT_PARAMS(csr), 2028 p2i(csr->prev_top_at_mark_start()), p2i(csr->next_top_at_mark_start()), 2029 csr->age_in_surv_rate_group_cond()); 2030 csr = next; 2031 } 2032 } 2033 #endif // !PRODUCT 2034 2035 double G1CollectorPolicy::reclaimable_bytes_perc(size_t reclaimable_bytes) const { 2036 // Returns the given amount of reclaimable bytes (that represents 2037 // the amount of reclaimable space still to be collected) as a 2038 // percentage of the current heap capacity. 2039 size_t capacity_bytes = _g1->capacity(); 2040 return (double) reclaimable_bytes * 100.0 / (double) capacity_bytes; 2041 } 2042 2043 void G1CollectorPolicy::maybe_start_marking() { 2044 if (need_to_start_conc_mark("end of GC")) { 2045 // Note: this might have already been set, if during the last 2046 // pause we decided to start a cycle but at the beginning of 2047 // this pause we decided to postpone it. That's OK. 
2048 collector_state()->set_initiate_conc_mark_if_possible(true); 2049 } 2050 } 2051 2052 G1CollectorPolicy::PauseKind G1CollectorPolicy::young_gc_pause_kind() const { 2053 assert(!collector_state()->full_collection(), "must be"); 2054 if (collector_state()->during_initial_mark_pause()) { 2055 assert(collector_state()->last_gc_was_young(), "must be"); 2056 assert(!collector_state()->last_young_gc(), "must be"); 2057 return InitialMarkGC; 2058 } else if (collector_state()->last_young_gc()) { 2059 assert(!collector_state()->during_initial_mark_pause(), "must be"); 2060 assert(collector_state()->last_gc_was_young(), "must be"); 2061 return LastYoungGC; 2062 } else if (!collector_state()->last_gc_was_young()) { 2063 assert(!collector_state()->during_initial_mark_pause(), "must be"); 2064 assert(!collector_state()->last_young_gc(), "must be"); 2065 return MixedGC; 2066 } else { 2067 assert(collector_state()->last_gc_was_young(), "must be"); 2068 assert(!collector_state()->during_initial_mark_pause(), "must be"); 2069 assert(!collector_state()->last_young_gc(), "must be"); 2070 return YoungOnlyGC; 2071 } 2072 } 2073 2074 void G1CollectorPolicy::record_pause(PauseKind kind, double start, double end) { 2075 // Manage the MMU tracker. For some reason it ignores Full GCs. 2076 if (kind != FullGC) { 2077 _mmu_tracker->add_pause(start, end); 2078 } 2079 // Manage the mutator time tracking from initial mark to first mixed gc. 2080 switch (kind) { 2081 case FullGC: 2082 abort_time_to_mixed_tracking(); 2083 break; 2084 case Cleanup: 2085 case Remark: 2086 case YoungOnlyGC: 2087 case LastYoungGC: 2088 _initial_mark_to_mixed.add_pause(end - start); 2089 break; 2090 case InitialMarkGC: 2091 _initial_mark_to_mixed.record_initial_mark_end(end); 2092 break; 2093 case MixedGC: 2094 _initial_mark_to_mixed.record_mixed_gc_start(start); 2095 break; 2096 default: 2097 ShouldNotReachHere(); 2098 } 2099 } 2100 2101 void G1CollectorPolicy::abort_time_to_mixed_tracking() { 2102 _initial_mark_to_mixed.reset(); 2103 } 2104 2105 bool G1CollectorPolicy::next_gc_should_be_mixed(const char* true_action_str, 2106 const char* false_action_str) const { 2107 CollectionSetChooser* cset_chooser = _collectionSetChooser; 2108 if (cset_chooser->is_empty()) { 2109 ergo_verbose0(ErgoMixedGCs, 2110 false_action_str, 2111 ergo_format_reason("candidate old regions not available")); 2112 return false; 2113 } 2114 2115 // Is the amount of uncollected reclaimable space above G1HeapWastePercent? 
2116 size_t reclaimable_bytes = cset_chooser->remaining_reclaimable_bytes(); 2117 double reclaimable_perc = reclaimable_bytes_perc(reclaimable_bytes); 2118 double threshold = (double) G1HeapWastePercent; 2119 if (reclaimable_perc <= threshold) { 2120 ergo_verbose4(ErgoMixedGCs, 2121 false_action_str, 2122 ergo_format_reason("reclaimable percentage not over threshold") 2123 ergo_format_region("candidate old regions") 2124 ergo_format_byte_perc("reclaimable") 2125 ergo_format_perc("threshold"), 2126 cset_chooser->remaining_regions(), 2127 reclaimable_bytes, 2128 reclaimable_perc, threshold); 2129 return false; 2130 } 2131 2132 ergo_verbose4(ErgoMixedGCs, 2133 true_action_str, 2134 ergo_format_reason("candidate old regions available") 2135 ergo_format_region("candidate old regions") 2136 ergo_format_byte_perc("reclaimable") 2137 ergo_format_perc("threshold"), 2138 cset_chooser->remaining_regions(), 2139 reclaimable_bytes, 2140 reclaimable_perc, threshold); 2141 return true; 2142 } 2143 2144 uint G1CollectorPolicy::calc_min_old_cset_length() const { 2145 // The min old CSet region bound is based on the maximum desired 2146 // number of mixed GCs after a cycle. I.e., even if some old regions 2147 // look expensive, we should add them to the CSet anyway to make 2148 // sure we go through the available old regions in no more than the 2149 // maximum desired number of mixed GCs. 2150 // 2151 // The calculation is based on the number of marked regions we added 2152 // to the CSet chooser in the first place, not how many remain, so 2153 // that the result is the same during all mixed GCs that follow a cycle. 2154 2155 const size_t region_num = (size_t) _collectionSetChooser->length(); 2156 const size_t gc_num = (size_t) MAX2(G1MixedGCCountTarget, (uintx) 1); 2157 size_t result = region_num / gc_num; 2158 // emulate ceiling 2159 if (result * gc_num < region_num) { 2160 result += 1; 2161 } 2162 return (uint) result; 2163 } 2164 2165 uint G1CollectorPolicy::calc_max_old_cset_length() const { 2166 // The max old CSet region bound is based on the threshold expressed 2167 // as a percentage of the heap size. I.e., it should bound the 2168 // number of old regions added to the CSet irrespective of how many 2169 // of them are available. 
2170 2171 const G1CollectedHeap* g1h = G1CollectedHeap::heap(); 2172 const size_t region_num = g1h->num_regions(); 2173 const size_t perc = (size_t) G1OldCSetRegionThresholdPercent; 2174 size_t result = region_num * perc / 100; 2175 // emulate ceiling 2176 if (100 * result < region_num * perc) { 2177 result += 1; 2178 } 2179 return (uint) result; 2180 } 2181 2182 2183 double G1CollectorPolicy::finalize_young_cset_part(double target_pause_time_ms) { 2184 double young_start_time_sec = os::elapsedTime(); 2185 2186 YoungList* young_list = _g1->young_list(); 2187 finalize_incremental_cset_building(); 2188 2189 guarantee(target_pause_time_ms > 0.0, 2190 "target_pause_time_ms = %1.6lf should be positive", target_pause_time_ms); 2191 guarantee(_collection_set == NULL, "Precondition"); 2192 2193 double base_time_ms = predict_base_elapsed_time_ms(_pending_cards); 2194 double time_remaining_ms = MAX2(target_pause_time_ms - base_time_ms, 0.0); 2195 2196 ergo_verbose4(ErgoCSetConstruction | ErgoHigh, 2197 "start choosing CSet", 2198 ergo_format_size("_pending_cards") 2199 ergo_format_ms("predicted base time") 2200 ergo_format_ms("remaining time") 2201 ergo_format_ms("target pause time"), 2202 _pending_cards, base_time_ms, time_remaining_ms, target_pause_time_ms); 2203 2204 collector_state()->set_last_gc_was_young(collector_state()->gcs_are_young()); 2205 2206 if (collector_state()->last_gc_was_young()) { 2207 _trace_young_gen_time_data.increment_young_collection_count(); 2208 } else { 2209 _trace_young_gen_time_data.increment_mixed_collection_count(); 2210 } 2211 2212 // The young list is laid with the survivor regions from the previous 2213 // pause are appended to the RHS of the young list, i.e. 2214 // [Newly Young Regions ++ Survivors from last pause]. 2215 2216 uint survivor_region_length = young_list->survivor_length(); 2217 uint eden_region_length = young_list->eden_length(); 2218 init_cset_region_lengths(eden_region_length, survivor_region_length); 2219 2220 HeapRegion* hr = young_list->first_survivor_region(); 2221 while (hr != NULL) { 2222 assert(hr->is_survivor(), "badly formed young list"); 2223 // There is a convention that all the young regions in the CSet 2224 // are tagged as "eden", so we do this for the survivors here. We 2225 // use the special set_eden_pre_gc() as it doesn't check that the 2226 // region is free (which is not the case here). 2227 hr->set_eden_pre_gc(); 2228 hr = hr->get_next_young_region(); 2229 } 2230 2231 // Clear the fields that point to the survivor list - they are all young now. 
2232 young_list->clear_survivors(); 2233 2234 _collection_set = _inc_cset_head; 2235 _collection_set_bytes_used_before = _inc_cset_bytes_used_before; 2236 time_remaining_ms = MAX2(time_remaining_ms - _inc_cset_predicted_elapsed_time_ms, 0.0); 2237 2238 ergo_verbose4(ErgoCSetConstruction | ErgoHigh, 2239 "add young regions to CSet", 2240 ergo_format_region("eden") 2241 ergo_format_region("survivors") 2242 ergo_format_ms("predicted young region time") 2243 ergo_format_ms("target pause time"), 2244 eden_region_length, survivor_region_length, 2245 _inc_cset_predicted_elapsed_time_ms, 2246 target_pause_time_ms); 2247 2248 // The number of recorded young regions is the incremental 2249 // collection set's current size 2250 set_recorded_rs_lengths(_inc_cset_recorded_rs_lengths); 2251 2252 double young_end_time_sec = os::elapsedTime(); 2253 phase_times()->record_young_cset_choice_time_ms((young_end_time_sec - young_start_time_sec) * 1000.0); 2254 2255 return time_remaining_ms; 2256 } 2257 2258 void G1CollectorPolicy::finalize_old_cset_part(double time_remaining_ms) { 2259 double non_young_start_time_sec = os::elapsedTime(); 2260 double predicted_old_time_ms = 0.0; 2261 2262 2263 if (!collector_state()->gcs_are_young()) { 2264 CollectionSetChooser* cset_chooser = _collectionSetChooser; 2265 cset_chooser->verify(); 2266 const uint min_old_cset_length = calc_min_old_cset_length(); 2267 const uint max_old_cset_length = calc_max_old_cset_length(); 2268 2269 uint expensive_region_num = 0; 2270 bool check_time_remaining = adaptive_young_list_length(); 2271 2272 HeapRegion* hr = cset_chooser->peek(); 2273 while (hr != NULL) { 2274 if (old_cset_region_length() >= max_old_cset_length) { 2275 // Added maximum number of old regions to the CSet. 2276 ergo_verbose2(ErgoCSetConstruction, 2277 "finish adding old regions to CSet", 2278 ergo_format_reason("old CSet region num reached max") 2279 ergo_format_region("old") 2280 ergo_format_region("max"), 2281 old_cset_region_length(), max_old_cset_length); 2282 break; 2283 } 2284 2285 2286 // Stop adding regions if the remaining reclaimable space is 2287 // not above G1HeapWastePercent. 2288 size_t reclaimable_bytes = cset_chooser->remaining_reclaimable_bytes(); 2289 double reclaimable_perc = reclaimable_bytes_perc(reclaimable_bytes); 2290 double threshold = (double) G1HeapWastePercent; 2291 if (reclaimable_perc <= threshold) { 2292 // We've added enough old regions that the amount of uncollected 2293 // reclaimable space is at or below the waste threshold. Stop 2294 // adding old regions to the CSet. 2295 ergo_verbose5(ErgoCSetConstruction, 2296 "finish adding old regions to CSet", 2297 ergo_format_reason("reclaimable percentage not over threshold") 2298 ergo_format_region("old") 2299 ergo_format_region("max") 2300 ergo_format_byte_perc("reclaimable") 2301 ergo_format_perc("threshold"), 2302 old_cset_region_length(), 2303 max_old_cset_length, 2304 reclaimable_bytes, 2305 reclaimable_perc, threshold); 2306 break; 2307 } 2308 2309 double predicted_time_ms = predict_region_elapsed_time_ms(hr, collector_state()->gcs_are_young()); 2310 if (check_time_remaining) { 2311 if (predicted_time_ms > time_remaining_ms) { 2312 // Too expensive for the current CSet. 2313 2314 if (old_cset_region_length() >= min_old_cset_length) { 2315 // We have added the minimum number of old regions to the CSet, 2316 // we are done with this CSet. 
2317 ergo_verbose4(ErgoCSetConstruction, 2318 "finish adding old regions to CSet", 2319 ergo_format_reason("predicted time is too high") 2320 ergo_format_ms("predicted time") 2321 ergo_format_ms("remaining time") 2322 ergo_format_region("old") 2323 ergo_format_region("min"), 2324 predicted_time_ms, time_remaining_ms, 2325 old_cset_region_length(), min_old_cset_length); 2326 break; 2327 } 2328 2329 // We'll add it anyway given that we haven't reached the 2330 // minimum number of old regions. 2331 expensive_region_num += 1; 2332 } 2333 } else { 2334 if (old_cset_region_length() >= min_old_cset_length) { 2335 // In the non-auto-tuning case, we'll finish adding regions 2336 // to the CSet if we reach the minimum. 2337 ergo_verbose2(ErgoCSetConstruction, 2338 "finish adding old regions to CSet", 2339 ergo_format_reason("old CSet region num reached min") 2340 ergo_format_region("old") 2341 ergo_format_region("min"), 2342 old_cset_region_length(), min_old_cset_length); 2343 break; 2344 } 2345 } 2346 2347 // We will add this region to the CSet. 2348 time_remaining_ms = MAX2(time_remaining_ms - predicted_time_ms, 0.0); 2349 predicted_old_time_ms += predicted_time_ms; 2350 cset_chooser->pop(); // already have region via peek() 2351 _g1->old_set_remove(hr); 2352 add_old_region_to_cset(hr); 2353 2354 hr = cset_chooser->peek(); 2355 } 2356 if (hr == NULL) { 2357 ergo_verbose0(ErgoCSetConstruction, 2358 "finish adding old regions to CSet", 2359 ergo_format_reason("candidate old regions not available")); 2360 } 2361 2362 if (expensive_region_num > 0) { 2363 // We print the information once here at the end, predicated on 2364 // whether we added any apparently expensive regions or not, to 2365 // avoid generating output per region. 2366 ergo_verbose4(ErgoCSetConstruction, 2367 "added expensive regions to CSet", 2368 ergo_format_reason("old CSet region num not reached min") 2369 ergo_format_region("old") 2370 ergo_format_region("expensive") 2371 ergo_format_region("min") 2372 ergo_format_ms("remaining time"), 2373 old_cset_region_length(), 2374 expensive_region_num, 2375 min_old_cset_length, 2376 time_remaining_ms); 2377 } 2378 2379 cset_chooser->verify(); 2380 } 2381 2382 stop_incremental_cset_building(); 2383 2384 ergo_verbose3(ErgoCSetConstruction, 2385 "finish choosing CSet", 2386 ergo_format_region("old") 2387 ergo_format_ms("predicted old region time") 2388 ergo_format_ms("time remaining"), 2389 old_cset_region_length(), 2390 predicted_old_time_ms, time_remaining_ms); 2391 2392 double non_young_end_time_sec = os::elapsedTime(); 2393 phase_times()->record_non_young_cset_choice_time_ms((non_young_end_time_sec - non_young_start_time_sec) * 1000.0); 2394 } 2395 2396 void TraceYoungGenTimeData::record_start_collection(double time_to_stop_the_world_ms) { 2397 if(TraceYoungGenTime) { 2398 _all_stop_world_times_ms.add(time_to_stop_the_world_ms); 2399 } 2400 } 2401 2402 void TraceYoungGenTimeData::record_yield_time(double yield_time_ms) { 2403 if(TraceYoungGenTime) { 2404 _all_yield_times_ms.add(yield_time_ms); 2405 } 2406 } 2407 2408 void TraceYoungGenTimeData::record_end_collection(double pause_time_ms, G1GCPhaseTimes* phase_times) { 2409 if(TraceYoungGenTime) { 2410 _total.add(pause_time_ms); 2411 _other.add(pause_time_ms - phase_times->accounted_time_ms()); 2412 _root_region_scan_wait.add(phase_times->root_region_scan_wait_time_ms()); 2413 _parallel.add(phase_times->cur_collection_par_time_ms()); 2414 _ext_root_scan.add(phase_times->average_time_ms(G1GCPhaseTimes::ExtRootScan)); 2415 
_satb_filtering.add(phase_times->average_time_ms(G1GCPhaseTimes::SATBFiltering)); 2416 _update_rs.add(phase_times->average_time_ms(G1GCPhaseTimes::UpdateRS)); 2417 _scan_rs.add(phase_times->average_time_ms(G1GCPhaseTimes::ScanRS)); 2418 _obj_copy.add(phase_times->average_time_ms(G1GCPhaseTimes::ObjCopy)); 2419 _termination.add(phase_times->average_time_ms(G1GCPhaseTimes::Termination)); 2420 2421 double parallel_known_time = phase_times->average_time_ms(G1GCPhaseTimes::ExtRootScan) + 2422 phase_times->average_time_ms(G1GCPhaseTimes::SATBFiltering) + 2423 phase_times->average_time_ms(G1GCPhaseTimes::UpdateRS) + 2424 phase_times->average_time_ms(G1GCPhaseTimes::ScanRS) + 2425 phase_times->average_time_ms(G1GCPhaseTimes::ObjCopy) + 2426 phase_times->average_time_ms(G1GCPhaseTimes::Termination); 2427 2428 double parallel_other_time = phase_times->cur_collection_par_time_ms() - parallel_known_time; 2429 _parallel_other.add(parallel_other_time); 2430 _clear_ct.add(phase_times->cur_clear_ct_time_ms()); 2431 } 2432 } 2433 2434 void TraceYoungGenTimeData::increment_young_collection_count() { 2435 if(TraceYoungGenTime) { 2436 ++_young_pause_num; 2437 } 2438 } 2439 2440 void TraceYoungGenTimeData::increment_mixed_collection_count() { 2441 if(TraceYoungGenTime) { 2442 ++_mixed_pause_num; 2443 } 2444 } 2445 2446 void TraceYoungGenTimeData::print_summary(const char* str, 2447 const NumberSeq* seq) const { 2448 double sum = seq->sum(); 2449 gclog_or_tty->print_cr("%-27s = %8.2lf s (avg = %8.2lf ms)", 2450 str, sum / 1000.0, seq->avg()); 2451 } 2452 2453 void TraceYoungGenTimeData::print_summary_sd(const char* str, 2454 const NumberSeq* seq) const { 2455 print_summary(str, seq); 2456 gclog_or_tty->print_cr("%45s = %5d, std dev = %8.2lf ms, max = %8.2lf ms)", 2457 "(num", seq->num(), seq->sd(), seq->maximum()); 2458 } 2459 2460 void TraceYoungGenTimeData::print() const { 2461 if (!TraceYoungGenTime) { 2462 return; 2463 } 2464 2465 gclog_or_tty->print_cr("ALL PAUSES"); 2466 print_summary_sd(" Total", &_total); 2467 gclog_or_tty->cr(); 2468 gclog_or_tty->cr(); 2469 gclog_or_tty->print_cr(" Young GC Pauses: %8d", _young_pause_num); 2470 gclog_or_tty->print_cr(" Mixed GC Pauses: %8d", _mixed_pause_num); 2471 gclog_or_tty->cr(); 2472 2473 gclog_or_tty->print_cr("EVACUATION PAUSES"); 2474 2475 if (_young_pause_num == 0 && _mixed_pause_num == 0) { 2476 gclog_or_tty->print_cr("none"); 2477 } else { 2478 print_summary_sd(" Evacuation Pauses", &_total); 2479 print_summary(" Root Region Scan Wait", &_root_region_scan_wait); 2480 print_summary(" Parallel Time", &_parallel); 2481 print_summary(" Ext Root Scanning", &_ext_root_scan); 2482 print_summary(" SATB Filtering", &_satb_filtering); 2483 print_summary(" Update RS", &_update_rs); 2484 print_summary(" Scan RS", &_scan_rs); 2485 print_summary(" Object Copy", &_obj_copy); 2486 print_summary(" Termination", &_termination); 2487 print_summary(" Parallel Other", &_parallel_other); 2488 print_summary(" Clear CT", &_clear_ct); 2489 print_summary(" Other", &_other); 2490 } 2491 gclog_or_tty->cr(); 2492 2493 gclog_or_tty->print_cr("MISC"); 2494 print_summary_sd(" Stop World", &_all_stop_world_times_ms); 2495 print_summary_sd(" Yields", &_all_yield_times_ms); 2496 } 2497 2498 void TraceOldGenTimeData::record_full_collection(double full_gc_time_ms) { 2499 if (TraceOldGenTime) { 2500 _all_full_gc_times.add(full_gc_time_ms); 2501 } 2502 } 2503 2504 void TraceOldGenTimeData::print() const { 2505 if (!TraceOldGenTime) { 2506 return; 2507 } 2508 2509 if (_all_full_gc_times.num() > 
0) { 2510 gclog_or_tty->print("\n%4d full_gcs: total time = %8.2f s", 2511 _all_full_gc_times.num(), 2512 _all_full_gc_times.sum() / 1000.0); 2513 gclog_or_tty->print_cr(" (avg = %8.2fms).", _all_full_gc_times.avg()); 2514 gclog_or_tty->print_cr(" [std. dev = %8.2f ms, max = %8.2f ms]", 2515 _all_full_gc_times.sd(), 2516 _all_full_gc_times.maximum()); 2517 } 2518 } --- EOF ---