1 /* 2 * Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved. 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 4 * 5 * This code is free software; you can redistribute it and/or modify it 6 * under the terms of the GNU General Public License version 2 only, as 7 * published by the Free Software Foundation. 8 * 9 * This code is distributed in the hope that it will be useful, but WITHOUT 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License 12 * version 2 for more details (a copy is included in the LICENSE file that 13 * accompanied this code). 14 * 15 * You should have received a copy of the GNU General Public License version 16 * 2 along with this work; if not, write to the Free Software Foundation, 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 18 * 19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA 20 * or visit www.oracle.com if you need additional information or have any 21 * questions. 22 * 23 */ 24 25 #include "precompiled.hpp" 26 #include "gc_implementation/g1/concurrentG1Refine.hpp" 27 #include "gc_implementation/g1/concurrentMark.hpp" 28 #include "gc_implementation/g1/concurrentMarkThread.inline.hpp" 29 #include "gc_implementation/g1/g1CollectedHeap.inline.hpp" 30 #include "gc_implementation/g1/g1CollectorPolicy.hpp" 31 #include "gc_implementation/g1/g1ErgoVerbose.hpp" 32 #include "gc_implementation/g1/g1GCPhaseTimes.hpp" 33 #include "gc_implementation/g1/g1Log.hpp" 34 #include "gc_implementation/g1/heapRegionRemSet.hpp" 35 #include "gc_implementation/shared/gcPolicyCounters.hpp" 36 #include "runtime/arguments.hpp" 37 #include "runtime/java.hpp" 38 #include "runtime/mutexLocker.hpp" 39 #include "utilities/debug.hpp" 40 41 // Different defaults for different number of GC threads 42 // They were chosen by running GCOld and SPECjbb on debris with different 43 // numbers of GC threads and choosing them based on the results 44 45 // all the same 46 static double rs_length_diff_defaults[] = { 47 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0 48 }; 49 50 static double cost_per_card_ms_defaults[] = { 51 0.01, 0.005, 0.005, 0.003, 0.003, 0.002, 0.002, 0.0015 52 }; 53 54 // all the same 55 static double young_cards_per_entry_ratio_defaults[] = { 56 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0 57 }; 58 59 static double cost_per_entry_ms_defaults[] = { 60 0.015, 0.01, 0.01, 0.008, 0.008, 0.0055, 0.0055, 0.005 61 }; 62 63 static double cost_per_byte_ms_defaults[] = { 64 0.00006, 0.00003, 0.00003, 0.000015, 0.000015, 0.00001, 0.00001, 0.000009 65 }; 66 67 // these should be pretty consistent 68 static double constant_other_time_ms_defaults[] = { 69 5.0, 5.0, 5.0, 5.0, 5.0, 5.0, 5.0, 5.0 70 }; 71 72 73 static double young_other_cost_per_region_ms_defaults[] = { 74 0.3, 0.2, 0.2, 0.15, 0.15, 0.12, 0.12, 0.1 75 }; 76 77 static double non_young_other_cost_per_region_ms_defaults[] = { 78 1.0, 0.7, 0.7, 0.5, 0.5, 0.42, 0.42, 0.30 79 }; 80 81 G1CollectorPolicy::G1CollectorPolicy() : 82 _parallel_gc_threads(G1CollectedHeap::use_parallel_gc_threads() 83 ? ParallelGCThreads : 1), 84 85 _recent_gc_times_ms(new TruncatedSeq(NumPrevPausesForHeuristics)), 86 _stop_world_start(0.0), 87 88 _concurrent_mark_remark_times_ms(new TruncatedSeq(NumPrevPausesForHeuristics)), 89 _concurrent_mark_cleanup_times_ms(new TruncatedSeq(NumPrevPausesForHeuristics)), 90 91 _alloc_rate_ms_seq(new TruncatedSeq(TruncatedSeqLength)), 92 _prev_collection_pause_end_ms(0.0), 93 _rs_length_diff_seq(new TruncatedSeq(TruncatedSeqLength)), 94 _cost_per_card_ms_seq(new TruncatedSeq(TruncatedSeqLength)), 95 _young_cards_per_entry_ratio_seq(new TruncatedSeq(TruncatedSeqLength)), 96 _mixed_cards_per_entry_ratio_seq(new TruncatedSeq(TruncatedSeqLength)), 97 _cost_per_entry_ms_seq(new TruncatedSeq(TruncatedSeqLength)), 98 _mixed_cost_per_entry_ms_seq(new TruncatedSeq(TruncatedSeqLength)), 99 _cost_per_byte_ms_seq(new TruncatedSeq(TruncatedSeqLength)), 100 _cost_per_byte_ms_during_cm_seq(new TruncatedSeq(TruncatedSeqLength)), 101 _constant_other_time_ms_seq(new TruncatedSeq(TruncatedSeqLength)), 102 _young_other_cost_per_region_ms_seq(new TruncatedSeq(TruncatedSeqLength)), 103 _non_young_other_cost_per_region_ms_seq( 104 new TruncatedSeq(TruncatedSeqLength)), 105 106 _pending_cards_seq(new TruncatedSeq(TruncatedSeqLength)), 107 _rs_lengths_seq(new TruncatedSeq(TruncatedSeqLength)), 108 109 _pause_time_target_ms((double) MaxGCPauseMillis), 110 111 _gcs_are_young(true), 112 113 _during_marking(false), 114 _in_marking_window(false), 115 _in_marking_window_im(false), 116 117 _recent_prev_end_times_for_all_gcs_sec( 118 new TruncatedSeq(NumPrevPausesForHeuristics)), 119 120 _recent_avg_pause_time_ratio(0.0), 121 122 _initiate_conc_mark_if_possible(false), 123 _during_initial_mark_pause(false), 124 _last_young_gc(false), 125 _last_gc_was_young(false), 126 127 _eden_used_bytes_before_gc(0), 128 _survivor_used_bytes_before_gc(0), 129 _heap_used_bytes_before_gc(0), 130 _metaspace_used_bytes_before_gc(0), 131 _eden_capacity_bytes_before_gc(0), 132 _heap_capacity_bytes_before_gc(0), 133 134 _eden_cset_region_length(0), 135 _survivor_cset_region_length(0), 136 _old_cset_region_length(0), 137 138 _collection_set(NULL), 139 _collection_set_bytes_used_before(0), 140 141 // Incremental CSet attributes 142 _inc_cset_build_state(Inactive), 143 _inc_cset_head(NULL), 144 _inc_cset_tail(NULL), 145 _inc_cset_bytes_used_before(0), 146 _inc_cset_max_finger(NULL), 147 _inc_cset_recorded_rs_lengths(0), 148 _inc_cset_recorded_rs_lengths_diffs(0), 149 _inc_cset_predicted_elapsed_time_ms(0.0), 150 _inc_cset_predicted_elapsed_time_ms_diffs(0.0), 151 152 #ifdef _MSC_VER // the use of 'this' below gets a warning, make it go away 153 #pragma warning( disable:4355 ) // 'this' : used in base member initializer list 154 #endif // _MSC_VER 155 156 _short_lived_surv_rate_group(new SurvRateGroup(this, "Short Lived", 157 G1YoungSurvRateNumRegionsSummary)), 158 _survivor_surv_rate_group(new SurvRateGroup(this, "Survivor", 159 G1YoungSurvRateNumRegionsSummary)), 160 // add here any more surv rate groups 161 _recorded_survivor_regions(0), 162 _recorded_survivor_head(NULL), 163 _recorded_survivor_tail(NULL), 164 _survivors_age_table(true), 165 166 _gc_overhead_perc(0.0) { 167 168 G1Log::init(); 169 170 // Set up the region size and associated fields. Given that the 171 // policy is created before the heap, we have to set this up here, 172 // so it's done as soon as possible 173 174 // It would have been natural to pass initial_heap_byte_size() and 175 // max_heap_byte_size() to setup_heap_region_size() but those have 176 // not been set up at this point since they should be aligned with 177 // the region size. So, there is a circular dependency here. We base 178 // the region size on the heap size, but the heap size should be 179 // aligned with the region size. To get around this we use the 180 // unaligned values for the heap. 181 HeapRegion::setup_heap_region_size(InitialHeapSize, MaxHeapSize); 182 HeapRegionRemSet::setup_remset_size(); 183 184 G1ErgoVerbose::initialize(); 185 if (PrintAdaptiveSizePolicy) { 186 // Currently, we only use a single switch for all the heuristics. 187 G1ErgoVerbose::set_enabled(true); 188 // Given that we don't currently have a verboseness level 189 // parameter, we'll hardcode this to high. This can be easily 190 // changed in the future. 191 G1ErgoVerbose::set_level(ErgoHigh); 192 } else { 193 G1ErgoVerbose::set_enabled(false); 194 } 195 196 // Verify PLAB sizes 197 const size_t region_size = HeapRegion::GrainWords; 198 if (YoungPLABSize > region_size || OldPLABSize > region_size) { 199 char buffer[128]; 200 jio_snprintf(buffer, sizeof(buffer), "%sPLABSize should be at most "SIZE_FORMAT, 201 OldPLABSize > region_size ? "Old" : "Young", region_size); 202 vm_exit_during_initialization(buffer); 203 } 204 205 _recent_prev_end_times_for_all_gcs_sec->add(os::elapsedTime()); 206 _prev_collection_pause_end_ms = os::elapsedTime() * 1000.0; 207 208 _phase_times = new G1GCPhaseTimes(_parallel_gc_threads); 209 210 int index = MIN2(_parallel_gc_threads - 1, 7); 211 212 _rs_length_diff_seq->add(rs_length_diff_defaults[index]); 213 _cost_per_card_ms_seq->add(cost_per_card_ms_defaults[index]); 214 _young_cards_per_entry_ratio_seq->add( 215 young_cards_per_entry_ratio_defaults[index]); 216 _cost_per_entry_ms_seq->add(cost_per_entry_ms_defaults[index]); 217 _cost_per_byte_ms_seq->add(cost_per_byte_ms_defaults[index]); 218 _constant_other_time_ms_seq->add(constant_other_time_ms_defaults[index]); 219 _young_other_cost_per_region_ms_seq->add( 220 young_other_cost_per_region_ms_defaults[index]); 221 _non_young_other_cost_per_region_ms_seq->add( 222 non_young_other_cost_per_region_ms_defaults[index]); 223 224 // Below, we might need to calculate the pause time target based on 225 // the pause interval. When we do so we are going to give G1 maximum 226 // flexibility and allow it to do pauses when it needs to. So, we'll 227 // arrange that the pause interval to be pause time target + 1 to 228 // ensure that a) the pause time target is maximized with respect to 229 // the pause interval and b) we maintain the invariant that pause 230 // time target < pause interval. If the user does not want this 231 // maximum flexibility, they will have to set the pause interval 232 // explicitly. 233 234 // First make sure that, if either parameter is set, its value is 235 // reasonable. 236 if (!FLAG_IS_DEFAULT(MaxGCPauseMillis)) { 237 if (MaxGCPauseMillis < 1) { 238 vm_exit_during_initialization("MaxGCPauseMillis should be " 239 "greater than 0"); 240 } 241 } 242 if (!FLAG_IS_DEFAULT(GCPauseIntervalMillis)) { 243 if (GCPauseIntervalMillis < 1) { 244 vm_exit_during_initialization("GCPauseIntervalMillis should be " 245 "greater than 0"); 246 } 247 } 248 249 // Then, if the pause time target parameter was not set, set it to 250 // the default value. 251 if (FLAG_IS_DEFAULT(MaxGCPauseMillis)) { 252 if (FLAG_IS_DEFAULT(GCPauseIntervalMillis)) { 253 // The default pause time target in G1 is 200ms 254 FLAG_SET_DEFAULT(MaxGCPauseMillis, 200); 255 } else { 256 // We do not allow the pause interval to be set without the 257 // pause time target 258 vm_exit_during_initialization("GCPauseIntervalMillis cannot be set " 259 "without setting MaxGCPauseMillis"); 260 } 261 } 262 263 // Then, if the interval parameter was not set, set it according to 264 // the pause time target (this will also deal with the case when the 265 // pause time target is the default value). 266 if (FLAG_IS_DEFAULT(GCPauseIntervalMillis)) { 267 FLAG_SET_DEFAULT(GCPauseIntervalMillis, MaxGCPauseMillis + 1); 268 } 269 270 // Finally, make sure that the two parameters are consistent. 271 if (MaxGCPauseMillis >= GCPauseIntervalMillis) { 272 char buffer[256]; 273 jio_snprintf(buffer, 256, 274 "MaxGCPauseMillis (%u) should be less than " 275 "GCPauseIntervalMillis (%u)", 276 MaxGCPauseMillis, GCPauseIntervalMillis); 277 vm_exit_during_initialization(buffer); 278 } 279 280 double max_gc_time = (double) MaxGCPauseMillis / 1000.0; 281 double time_slice = (double) GCPauseIntervalMillis / 1000.0; 282 _mmu_tracker = new G1MMUTrackerQueue(time_slice, max_gc_time); 283 284 uintx confidence_perc = G1ConfidencePercent; 285 // Put an artificial ceiling on this so that it's not set to a silly value. 286 if (confidence_perc > 100) { 287 confidence_perc = 100; 288 warning("G1ConfidencePercent is set to a value that is too large, " 289 "it's been updated to %u", confidence_perc); 290 } 291 _sigma = (double) confidence_perc / 100.0; 292 293 // start conservatively (around 50ms is about right) 294 _concurrent_mark_remark_times_ms->add(0.05); 295 _concurrent_mark_cleanup_times_ms->add(0.20); 296 _tenuring_threshold = MaxTenuringThreshold; 297 // _max_survivor_regions will be calculated by 298 // update_young_list_target_length() during initialization. 299 _max_survivor_regions = 0; 300 301 assert(GCTimeRatio > 0, 302 "we should have set it to a default value set_g1_gc_flags() " 303 "if a user set it to 0"); 304 _gc_overhead_perc = 100.0 * (1.0 / (1.0 + GCTimeRatio)); 305 306 uintx reserve_perc = G1ReservePercent; 307 // Put an artificial ceiling on this so that it's not set to a silly value. 308 if (reserve_perc > 50) { 309 reserve_perc = 50; 310 warning("G1ReservePercent is set to a value that is too large, " 311 "it's been updated to %u", reserve_perc); 312 } 313 _reserve_factor = (double) reserve_perc / 100.0; 314 // This will be set when the heap is expanded 315 // for the first time during initialization. 316 _reserve_regions = 0; 317 318 initialize_all(); 319 _collectionSetChooser = new CollectionSetChooser(); 320 _young_gen_sizer = new G1YoungGenSizer(); // Must be after call to initialize_flags 321 } 322 323 void G1CollectorPolicy::initialize_flags() { 324 set_min_alignment(HeapRegion::GrainBytes); 325 size_t card_table_alignment = GenRemSet::max_alignment_constraint(rem_set_name()); 326 set_max_alignment(MAX2(card_table_alignment, min_alignment())); 327 if (SurvivorRatio < 1) { 328 vm_exit_during_initialization("Invalid survivor ratio specified"); 329 } 330 CollectorPolicy::initialize_flags(); 331 } 332 333 G1YoungGenSizer::G1YoungGenSizer() : _sizer_kind(SizerDefaults), _adaptive_size(true) { 334 assert(G1NewSizePercent <= G1MaxNewSizePercent, "Min larger than max"); 335 assert(G1NewSizePercent > 0 && G1NewSizePercent < 100, "Min out of bounds"); 336 assert(G1MaxNewSizePercent > 0 && G1MaxNewSizePercent < 100, "Max out of bounds"); 337 338 if (FLAG_IS_CMDLINE(NewRatio)) { 339 if (FLAG_IS_CMDLINE(NewSize) || FLAG_IS_CMDLINE(MaxNewSize)) { 340 warning("-XX:NewSize and -XX:MaxNewSize override -XX:NewRatio"); 341 } else { 342 _sizer_kind = SizerNewRatio; 343 _adaptive_size = false; 344 return; 345 } 346 } 347 348 if (FLAG_IS_CMDLINE(NewSize)) { 349 _min_desired_young_length = MAX2((uint) (NewSize / HeapRegion::GrainBytes), 350 1U); 351 if (FLAG_IS_CMDLINE(MaxNewSize)) { 352 _max_desired_young_length = 353 MAX2((uint) (MaxNewSize / HeapRegion::GrainBytes), 354 1U); 355 _sizer_kind = SizerMaxAndNewSize; 356 _adaptive_size = _min_desired_young_length == _max_desired_young_length; 357 } else { 358 _sizer_kind = SizerNewSizeOnly; 359 } 360 } else if (FLAG_IS_CMDLINE(MaxNewSize)) { 361 _max_desired_young_length = 362 MAX2((uint) (MaxNewSize / HeapRegion::GrainBytes), 363 1U); 364 _sizer_kind = SizerMaxNewSizeOnly; 365 } 366 } 367 368 uint G1YoungGenSizer::calculate_default_min_length(uint new_number_of_heap_regions) { 369 uint default_value = (new_number_of_heap_regions * G1NewSizePercent) / 100; 370 return MAX2(1U, default_value); 371 } 372 373 uint G1YoungGenSizer::calculate_default_max_length(uint new_number_of_heap_regions) { 374 uint default_value = (new_number_of_heap_regions * G1MaxNewSizePercent) / 100; 375 return MAX2(1U, default_value); 376 } 377 378 void G1YoungGenSizer::heap_size_changed(uint new_number_of_heap_regions) { 379 assert(new_number_of_heap_regions > 0, "Heap must be initialized"); 380 381 switch (_sizer_kind) { 382 case SizerDefaults: 383 _min_desired_young_length = calculate_default_min_length(new_number_of_heap_regions); 384 _max_desired_young_length = calculate_default_max_length(new_number_of_heap_regions); 385 break; 386 case SizerNewSizeOnly: 387 _max_desired_young_length = calculate_default_max_length(new_number_of_heap_regions); 388 _max_desired_young_length = MAX2(_min_desired_young_length, _max_desired_young_length); 389 break; 390 case SizerMaxNewSizeOnly: 391 _min_desired_young_length = calculate_default_min_length(new_number_of_heap_regions); 392 _min_desired_young_length = MIN2(_min_desired_young_length, _max_desired_young_length); 393 break; 394 case SizerMaxAndNewSize: 395 // Do nothing. Values set on the command line, don't update them at runtime. 396 break; 397 case SizerNewRatio: 398 _min_desired_young_length = new_number_of_heap_regions / (NewRatio + 1); 399 _max_desired_young_length = _min_desired_young_length; 400 break; 401 default: 402 ShouldNotReachHere(); 403 } 404 405 assert(_min_desired_young_length <= _max_desired_young_length, "Invalid min/max young gen size values"); 406 } 407 408 void G1CollectorPolicy::init() { 409 // Set aside an initial future to_space. 410 _g1 = G1CollectedHeap::heap(); 411 412 assert(Heap_lock->owned_by_self(), "Locking discipline."); 413 414 initialize_gc_policy_counters(); 415 416 if (adaptive_young_list_length()) { 417 _young_list_fixed_length = 0; 418 } else { 419 _young_list_fixed_length = _young_gen_sizer->min_desired_young_length(); 420 } 421 _free_regions_at_end_of_collection = _g1->free_regions(); 422 update_young_list_target_length(); 423 424 // We may immediately start allocating regions and placing them on the 425 // collection set list. Initialize the per-collection set info 426 start_incremental_cset_building(); 427 } 428 429 // Create the jstat counters for the policy. 430 void G1CollectorPolicy::initialize_gc_policy_counters() { 431 _gc_policy_counters = new GCPolicyCounters("GarbageFirst", 1, 3); 432 } 433 434 bool G1CollectorPolicy::predict_will_fit(uint young_length, 435 double base_time_ms, 436 uint base_free_regions, 437 double target_pause_time_ms) { 438 if (young_length >= base_free_regions) { 439 // end condition 1: not enough space for the young regions 440 return false; 441 } 442 443 double accum_surv_rate = accum_yg_surv_rate_pred((int) young_length - 1); 444 size_t bytes_to_copy = 445 (size_t) (accum_surv_rate * (double) HeapRegion::GrainBytes); 446 double copy_time_ms = predict_object_copy_time_ms(bytes_to_copy); 447 double young_other_time_ms = predict_young_other_time_ms(young_length); 448 double pause_time_ms = base_time_ms + copy_time_ms + young_other_time_ms; 449 if (pause_time_ms > target_pause_time_ms) { 450 // end condition 2: prediction is over the target pause time 451 return false; 452 } 453 454 size_t free_bytes = 455 (base_free_regions - young_length) * HeapRegion::GrainBytes; 456 if ((2.0 * sigma()) * (double) bytes_to_copy > (double) free_bytes) { 457 // end condition 3: out-of-space (conservatively!) 458 return false; 459 } 460 461 // success! 462 return true; 463 } 464 465 void G1CollectorPolicy::record_new_heap_size(uint new_number_of_regions) { 466 // re-calculate the necessary reserve 467 double reserve_regions_d = (double) new_number_of_regions * _reserve_factor; 468 // We use ceiling so that if reserve_regions_d is > 0.0 (but 469 // smaller than 1.0) we'll get 1. 470 _reserve_regions = (uint) ceil(reserve_regions_d); 471 472 _young_gen_sizer->heap_size_changed(new_number_of_regions); 473 } 474 475 uint G1CollectorPolicy::calculate_young_list_desired_min_length( 476 uint base_min_length) { 477 uint desired_min_length = 0; 478 if (adaptive_young_list_length()) { 479 if (_alloc_rate_ms_seq->num() > 3) { 480 double now_sec = os::elapsedTime(); 481 double when_ms = _mmu_tracker->when_max_gc_sec(now_sec) * 1000.0; 482 double alloc_rate_ms = predict_alloc_rate_ms(); 483 desired_min_length = (uint) ceil(alloc_rate_ms * when_ms); 484 } else { 485 // otherwise we don't have enough info to make the prediction 486 } 487 } 488 desired_min_length += base_min_length; 489 // make sure we don't go below any user-defined minimum bound 490 return MAX2(_young_gen_sizer->min_desired_young_length(), desired_min_length); 491 } 492 493 uint G1CollectorPolicy::calculate_young_list_desired_max_length() { 494 // Here, we might want to also take into account any additional 495 // constraints (i.e., user-defined minimum bound). Currently, we 496 // effectively don't set this bound. 497 return _young_gen_sizer->max_desired_young_length(); 498 } 499 500 void G1CollectorPolicy::update_young_list_target_length(size_t rs_lengths) { 501 if (rs_lengths == (size_t) -1) { 502 // if it's set to the default value (-1), we should predict it; 503 // otherwise, use the given value. 504 rs_lengths = (size_t) get_new_prediction(_rs_lengths_seq); 505 } 506 507 // Calculate the absolute and desired min bounds. 508 509 // This is how many young regions we already have (currently: the survivors). 510 uint base_min_length = recorded_survivor_regions(); 511 // This is the absolute minimum young length, which ensures that we 512 // can allocate one eden region in the worst-case. 513 uint absolute_min_length = base_min_length + 1; 514 uint desired_min_length = 515 calculate_young_list_desired_min_length(base_min_length); 516 if (desired_min_length < absolute_min_length) { 517 desired_min_length = absolute_min_length; 518 } 519 520 // Calculate the absolute and desired max bounds. 521 522 // We will try our best not to "eat" into the reserve. 523 uint absolute_max_length = 0; 524 if (_free_regions_at_end_of_collection > _reserve_regions) { 525 absolute_max_length = _free_regions_at_end_of_collection - _reserve_regions; 526 } 527 uint desired_max_length = calculate_young_list_desired_max_length(); 528 if (desired_max_length > absolute_max_length) { 529 desired_max_length = absolute_max_length; 530 } 531 532 uint young_list_target_length = 0; 533 if (adaptive_young_list_length()) { 534 if (gcs_are_young()) { 535 young_list_target_length = 536 calculate_young_list_target_length(rs_lengths, 537 base_min_length, 538 desired_min_length, 539 desired_max_length); 540 _rs_lengths_prediction = rs_lengths; 541 } else { 542 // Don't calculate anything and let the code below bound it to 543 // the desired_min_length, i.e., do the next GC as soon as 544 // possible to maximize how many old regions we can add to it. 545 } 546 } else { 547 // The user asked for a fixed young gen so we'll fix the young gen 548 // whether the next GC is young or mixed. 549 young_list_target_length = _young_list_fixed_length; 550 } 551 552 // Make sure we don't go over the desired max length, nor under the 553 // desired min length. In case they clash, desired_min_length wins 554 // which is why that test is second. 555 if (young_list_target_length > desired_max_length) { 556 young_list_target_length = desired_max_length; 557 } 558 if (young_list_target_length < desired_min_length) { 559 young_list_target_length = desired_min_length; 560 } 561 562 assert(young_list_target_length > recorded_survivor_regions(), 563 "we should be able to allocate at least one eden region"); 564 assert(young_list_target_length >= absolute_min_length, "post-condition"); 565 _young_list_target_length = young_list_target_length; 566 567 update_max_gc_locker_expansion(); 568 } 569 570 uint 571 G1CollectorPolicy::calculate_young_list_target_length(size_t rs_lengths, 572 uint base_min_length, 573 uint desired_min_length, 574 uint desired_max_length) { 575 assert(adaptive_young_list_length(), "pre-condition"); 576 assert(gcs_are_young(), "only call this for young GCs"); 577 578 // In case some edge-condition makes the desired max length too small... 579 if (desired_max_length <= desired_min_length) { 580 return desired_min_length; 581 } 582 583 // We'll adjust min_young_length and max_young_length not to include 584 // the already allocated young regions (i.e., so they reflect the 585 // min and max eden regions we'll allocate). The base_min_length 586 // will be reflected in the predictions by the 587 // survivor_regions_evac_time prediction. 588 assert(desired_min_length > base_min_length, "invariant"); 589 uint min_young_length = desired_min_length - base_min_length; 590 assert(desired_max_length > base_min_length, "invariant"); 591 uint max_young_length = desired_max_length - base_min_length; 592 593 double target_pause_time_ms = _mmu_tracker->max_gc_time() * 1000.0; 594 double survivor_regions_evac_time = predict_survivor_regions_evac_time(); 595 size_t pending_cards = (size_t) get_new_prediction(_pending_cards_seq); 596 size_t adj_rs_lengths = rs_lengths + predict_rs_length_diff(); 597 size_t scanned_cards = predict_young_card_num(adj_rs_lengths); 598 double base_time_ms = 599 predict_base_elapsed_time_ms(pending_cards, scanned_cards) + 600 survivor_regions_evac_time; 601 uint available_free_regions = _free_regions_at_end_of_collection; 602 uint base_free_regions = 0; 603 if (available_free_regions > _reserve_regions) { 604 base_free_regions = available_free_regions - _reserve_regions; 605 } 606 607 // Here, we will make sure that the shortest young length that 608 // makes sense fits within the target pause time. 609 610 if (predict_will_fit(min_young_length, base_time_ms, 611 base_free_regions, target_pause_time_ms)) { 612 // The shortest young length will fit into the target pause time; 613 // we'll now check whether the absolute maximum number of young 614 // regions will fit in the target pause time. If not, we'll do 615 // a binary search between min_young_length and max_young_length. 616 if (predict_will_fit(max_young_length, base_time_ms, 617 base_free_regions, target_pause_time_ms)) { 618 // The maximum young length will fit into the target pause time. 619 // We are done so set min young length to the maximum length (as 620 // the result is assumed to be returned in min_young_length). 621 min_young_length = max_young_length; 622 } else { 623 // The maximum possible number of young regions will not fit within 624 // the target pause time so we'll search for the optimal 625 // length. The loop invariants are: 626 // 627 // min_young_length < max_young_length 628 // min_young_length is known to fit into the target pause time 629 // max_young_length is known not to fit into the target pause time 630 // 631 // Going into the loop we know the above hold as we've just 632 // checked them. Every time around the loop we check whether 633 // the middle value between min_young_length and 634 // max_young_length fits into the target pause time. If it 635 // does, it becomes the new min. If it doesn't, it becomes 636 // the new max. This way we maintain the loop invariants. 637 638 assert(min_young_length < max_young_length, "invariant"); 639 uint diff = (max_young_length - min_young_length) / 2; 640 while (diff > 0) { 641 uint young_length = min_young_length + diff; 642 if (predict_will_fit(young_length, base_time_ms, 643 base_free_regions, target_pause_time_ms)) { 644 min_young_length = young_length; 645 } else { 646 max_young_length = young_length; 647 } 648 assert(min_young_length < max_young_length, "invariant"); 649 diff = (max_young_length - min_young_length) / 2; 650 } 651 // The results is min_young_length which, according to the 652 // loop invariants, should fit within the target pause time. 653 654 // These are the post-conditions of the binary search above: 655 assert(min_young_length < max_young_length, 656 "otherwise we should have discovered that max_young_length " 657 "fits into the pause target and not done the binary search"); 658 assert(predict_will_fit(min_young_length, base_time_ms, 659 base_free_regions, target_pause_time_ms), 660 "min_young_length, the result of the binary search, should " 661 "fit into the pause target"); 662 assert(!predict_will_fit(min_young_length + 1, base_time_ms, 663 base_free_regions, target_pause_time_ms), 664 "min_young_length, the result of the binary search, should be " 665 "optimal, so no larger length should fit into the pause target"); 666 } 667 } else { 668 // Even the minimum length doesn't fit into the pause time 669 // target, return it as the result nevertheless. 670 } 671 return base_min_length + min_young_length; 672 } 673 674 double G1CollectorPolicy::predict_survivor_regions_evac_time() { 675 double survivor_regions_evac_time = 0.0; 676 for (HeapRegion * r = _recorded_survivor_head; 677 r != NULL && r != _recorded_survivor_tail->get_next_young_region(); 678 r = r->get_next_young_region()) { 679 survivor_regions_evac_time += predict_region_elapsed_time_ms(r, gcs_are_young()); 680 } 681 return survivor_regions_evac_time; 682 } 683 684 void G1CollectorPolicy::revise_young_list_target_length_if_necessary() { 685 guarantee( adaptive_young_list_length(), "should not call this otherwise" ); 686 687 size_t rs_lengths = _g1->young_list()->sampled_rs_lengths(); 688 if (rs_lengths > _rs_lengths_prediction) { 689 // add 10% to avoid having to recalculate often 690 size_t rs_lengths_prediction = rs_lengths * 1100 / 1000; 691 update_young_list_target_length(rs_lengths_prediction); 692 } 693 } 694 695 696 697 HeapWord* G1CollectorPolicy::mem_allocate_work(size_t size, 698 bool is_tlab, 699 bool* gc_overhead_limit_was_exceeded) { 700 guarantee(false, "Not using this policy feature yet."); 701 return NULL; 702 } 703 704 // This method controls how a collector handles one or more 705 // of its generations being fully allocated. 706 HeapWord* G1CollectorPolicy::satisfy_failed_allocation(size_t size, 707 bool is_tlab) { 708 guarantee(false, "Not using this policy feature yet."); 709 return NULL; 710 } 711 712 713 #ifndef PRODUCT 714 bool G1CollectorPolicy::verify_young_ages() { 715 HeapRegion* head = _g1->young_list()->first_region(); 716 return 717 verify_young_ages(head, _short_lived_surv_rate_group); 718 // also call verify_young_ages on any additional surv rate groups 719 } 720 721 bool 722 G1CollectorPolicy::verify_young_ages(HeapRegion* head, 723 SurvRateGroup *surv_rate_group) { 724 guarantee( surv_rate_group != NULL, "pre-condition" ); 725 726 const char* name = surv_rate_group->name(); 727 bool ret = true; 728 int prev_age = -1; 729 730 for (HeapRegion* curr = head; 731 curr != NULL; 732 curr = curr->get_next_young_region()) { 733 SurvRateGroup* group = curr->surv_rate_group(); 734 if (group == NULL && !curr->is_survivor()) { 735 gclog_or_tty->print_cr("## %s: encountered NULL surv_rate_group", name); 736 ret = false; 737 } 738 739 if (surv_rate_group == group) { 740 int age = curr->age_in_surv_rate_group(); 741 742 if (age < 0) { 743 gclog_or_tty->print_cr("## %s: encountered negative age", name); 744 ret = false; 745 } 746 747 if (age <= prev_age) { 748 gclog_or_tty->print_cr("## %s: region ages are not strictly increasing " 749 "(%d, %d)", name, age, prev_age); 750 ret = false; 751 } 752 prev_age = age; 753 } 754 } 755 756 return ret; 757 } 758 #endif // PRODUCT 759 760 void G1CollectorPolicy::record_full_collection_start() { 761 _full_collection_start_sec = os::elapsedTime(); 762 record_heap_size_info_at_start(true /* full */); 763 // Release the future to-space so that it is available for compaction into. 764 _g1->set_full_collection(); 765 } 766 767 void G1CollectorPolicy::record_full_collection_end() { 768 // Consider this like a collection pause for the purposes of allocation 769 // since last pause. 770 double end_sec = os::elapsedTime(); 771 double full_gc_time_sec = end_sec - _full_collection_start_sec; 772 double full_gc_time_ms = full_gc_time_sec * 1000.0; 773 774 _trace_gen1_time_data.record_full_collection(full_gc_time_ms); 775 776 update_recent_gc_times(end_sec, full_gc_time_ms); 777 778 _g1->clear_full_collection(); 779 780 // "Nuke" the heuristics that control the young/mixed GC 781 // transitions and make sure we start with young GCs after the Full GC. 782 set_gcs_are_young(true); 783 _last_young_gc = false; 784 clear_initiate_conc_mark_if_possible(); 785 clear_during_initial_mark_pause(); 786 _in_marking_window = false; 787 _in_marking_window_im = false; 788 789 _short_lived_surv_rate_group->start_adding_regions(); 790 // also call this on any additional surv rate groups 791 792 record_survivor_regions(0, NULL, NULL); 793 794 _free_regions_at_end_of_collection = _g1->free_regions(); 795 // Reset survivors SurvRateGroup. 796 _survivor_surv_rate_group->reset(); 797 update_young_list_target_length(); 798 _collectionSetChooser->clear(); 799 } 800 801 void G1CollectorPolicy::record_stop_world_start() { 802 _stop_world_start = os::elapsedTime(); 803 } 804 805 void G1CollectorPolicy::record_collection_pause_start(double start_time_sec) { 806 // We only need to do this here as the policy will only be applied 807 // to the GC we're about to start. so, no point is calculating this 808 // every time we calculate / recalculate the target young length. 809 update_survivors_policy(); 810 811 assert(_g1->used() == _g1->recalculate_used(), 812 err_msg("sanity, used: "SIZE_FORMAT" recalculate_used: "SIZE_FORMAT, 813 _g1->used(), _g1->recalculate_used())); 814 815 double s_w_t_ms = (start_time_sec - _stop_world_start) * 1000.0; 816 _trace_gen0_time_data.record_start_collection(s_w_t_ms); 817 _stop_world_start = 0.0; 818 819 record_heap_size_info_at_start(false /* full */); 820 821 phase_times()->record_cur_collection_start_sec(start_time_sec); 822 _pending_cards = _g1->pending_card_num(); 823 824 _collection_set_bytes_used_before = 0; 825 _bytes_copied_during_gc = 0; 826 827 _last_gc_was_young = false; 828 829 // do that for any other surv rate groups 830 _short_lived_surv_rate_group->stop_adding_regions(); 831 _survivors_age_table.clear(); 832 833 assert( verify_young_ages(), "region age verification" ); 834 } 835 836 void G1CollectorPolicy::record_concurrent_mark_init_end(double 837 mark_init_elapsed_time_ms) { 838 _during_marking = true; 839 assert(!initiate_conc_mark_if_possible(), "we should have cleared it by now"); 840 clear_during_initial_mark_pause(); 841 _cur_mark_stop_world_time_ms = mark_init_elapsed_time_ms; 842 } 843 844 void G1CollectorPolicy::record_concurrent_mark_remark_start() { 845 _mark_remark_start_sec = os::elapsedTime(); 846 _during_marking = false; 847 } 848 849 void G1CollectorPolicy::record_concurrent_mark_remark_end() { 850 double end_time_sec = os::elapsedTime(); 851 double elapsed_time_ms = (end_time_sec - _mark_remark_start_sec)*1000.0; 852 _concurrent_mark_remark_times_ms->add(elapsed_time_ms); 853 _cur_mark_stop_world_time_ms += elapsed_time_ms; 854 _prev_collection_pause_end_ms += elapsed_time_ms; 855 856 _mmu_tracker->add_pause(_mark_remark_start_sec, end_time_sec, true); 857 } 858 859 void G1CollectorPolicy::record_concurrent_mark_cleanup_start() { 860 _mark_cleanup_start_sec = os::elapsedTime(); 861 } 862 863 void G1CollectorPolicy::record_concurrent_mark_cleanup_completed() { 864 _last_young_gc = true; 865 _in_marking_window = false; 866 } 867 868 void G1CollectorPolicy::record_concurrent_pause() { 869 if (_stop_world_start > 0.0) { 870 double yield_ms = (os::elapsedTime() - _stop_world_start) * 1000.0; 871 _trace_gen0_time_data.record_yield_time(yield_ms); 872 } 873 } 874 875 bool G1CollectorPolicy::need_to_start_conc_mark(const char* source, size_t alloc_word_size) { 876 if (_g1->concurrent_mark()->cmThread()->during_cycle()) { 877 return false; 878 } 879 880 size_t marking_initiating_used_threshold = 881 (_g1->capacity() / 100) * InitiatingHeapOccupancyPercent; 882 size_t cur_used_bytes = _g1->non_young_capacity_bytes(); 883 size_t alloc_byte_size = alloc_word_size * HeapWordSize; 884 885 if ((cur_used_bytes + alloc_byte_size) > marking_initiating_used_threshold) { 886 if (gcs_are_young() && !_last_young_gc) { 887 ergo_verbose5(ErgoConcCycles, 888 "request concurrent cycle initiation", 889 ergo_format_reason("occupancy higher than threshold") 890 ergo_format_byte("occupancy") 891 ergo_format_byte("allocation request") 892 ergo_format_byte_perc("threshold") 893 ergo_format_str("source"), 894 cur_used_bytes, 895 alloc_byte_size, 896 marking_initiating_used_threshold, 897 (double) InitiatingHeapOccupancyPercent, 898 source); 899 return true; 900 } else { 901 ergo_verbose5(ErgoConcCycles, 902 "do not request concurrent cycle initiation", 903 ergo_format_reason("still doing mixed collections") 904 ergo_format_byte("occupancy") 905 ergo_format_byte("allocation request") 906 ergo_format_byte_perc("threshold") 907 ergo_format_str("source"), 908 cur_used_bytes, 909 alloc_byte_size, 910 marking_initiating_used_threshold, 911 (double) InitiatingHeapOccupancyPercent, 912 source); 913 } 914 } 915 916 return false; 917 } 918 919 // Anything below that is considered to be zero 920 #define MIN_TIMER_GRANULARITY 0.0000001 921 922 void G1CollectorPolicy::record_collection_pause_end(double pause_time_ms, EvacuationInfo& evacuation_info) { 923 double end_time_sec = os::elapsedTime(); 924 assert(_cur_collection_pause_used_regions_at_start >= cset_region_length(), 925 "otherwise, the subtraction below does not make sense"); 926 size_t rs_size = 927 _cur_collection_pause_used_regions_at_start - cset_region_length(); 928 size_t cur_used_bytes = _g1->used(); 929 assert(cur_used_bytes == _g1->recalculate_used(), "It should!"); 930 bool last_pause_included_initial_mark = false; 931 bool update_stats = !_g1->evacuation_failed(); 932 933 #ifndef PRODUCT 934 if (G1YoungSurvRateVerbose) { 935 gclog_or_tty->print_cr(""); 936 _short_lived_surv_rate_group->print(); 937 // do that for any other surv rate groups too 938 } 939 #endif // PRODUCT 940 941 last_pause_included_initial_mark = during_initial_mark_pause(); 942 if (last_pause_included_initial_mark) { 943 record_concurrent_mark_init_end(0.0); 944 } else if (need_to_start_conc_mark("end of GC")) { 945 // Note: this might have already been set, if during the last 946 // pause we decided to start a cycle but at the beginning of 947 // this pause we decided to postpone it. That's OK. 948 set_initiate_conc_mark_if_possible(); 949 } 950 951 _mmu_tracker->add_pause(end_time_sec - pause_time_ms/1000.0, 952 end_time_sec, false); 953 954 evacuation_info.set_collectionset_used_before(_collection_set_bytes_used_before); 955 evacuation_info.set_bytes_copied(_bytes_copied_during_gc); 956 957 if (update_stats) { 958 _trace_gen0_time_data.record_end_collection(pause_time_ms, phase_times()); 959 // this is where we update the allocation rate of the application 960 double app_time_ms = 961 (phase_times()->cur_collection_start_sec() * 1000.0 - _prev_collection_pause_end_ms); 962 if (app_time_ms < MIN_TIMER_GRANULARITY) { 963 // This usually happens due to the timer not having the required 964 // granularity. Some Linuxes are the usual culprits. 965 // We'll just set it to something (arbitrarily) small. 966 app_time_ms = 1.0; 967 } 968 // We maintain the invariant that all objects allocated by mutator 969 // threads will be allocated out of eden regions. So, we can use 970 // the eden region number allocated since the previous GC to 971 // calculate the application's allocate rate. The only exception 972 // to that is humongous objects that are allocated separately. But 973 // given that humongous object allocations do not really affect 974 // either the pause's duration nor when the next pause will take 975 // place we can safely ignore them here. 976 uint regions_allocated = eden_cset_region_length(); 977 double alloc_rate_ms = (double) regions_allocated / app_time_ms; 978 _alloc_rate_ms_seq->add(alloc_rate_ms); 979 980 double interval_ms = 981 (end_time_sec - _recent_prev_end_times_for_all_gcs_sec->oldest()) * 1000.0; 982 update_recent_gc_times(end_time_sec, pause_time_ms); 983 _recent_avg_pause_time_ratio = _recent_gc_times_ms->sum()/interval_ms; 984 if (recent_avg_pause_time_ratio() < 0.0 || 985 (recent_avg_pause_time_ratio() - 1.0 > 0.0)) { 986 #ifndef PRODUCT 987 // Dump info to allow post-facto debugging 988 gclog_or_tty->print_cr("recent_avg_pause_time_ratio() out of bounds"); 989 gclog_or_tty->print_cr("-------------------------------------------"); 990 gclog_or_tty->print_cr("Recent GC Times (ms):"); 991 _recent_gc_times_ms->dump(); 992 gclog_or_tty->print_cr("(End Time=%3.3f) Recent GC End Times (s):", end_time_sec); 993 _recent_prev_end_times_for_all_gcs_sec->dump(); 994 gclog_or_tty->print_cr("GC = %3.3f, Interval = %3.3f, Ratio = %3.3f", 995 _recent_gc_times_ms->sum(), interval_ms, recent_avg_pause_time_ratio()); 996 // In debug mode, terminate the JVM if the user wants to debug at this point. 997 assert(!G1FailOnFPError, "Debugging data for CR 6898948 has been dumped above"); 998 #endif // !PRODUCT 999 // Clip ratio between 0.0 and 1.0, and continue. This will be fixed in 1000 // CR 6902692 by redoing the manner in which the ratio is incrementally computed. 1001 if (_recent_avg_pause_time_ratio < 0.0) { 1002 _recent_avg_pause_time_ratio = 0.0; 1003 } else { 1004 assert(_recent_avg_pause_time_ratio - 1.0 > 0.0, "Ctl-point invariant"); 1005 _recent_avg_pause_time_ratio = 1.0; 1006 } 1007 } 1008 } 1009 1010 bool new_in_marking_window = _in_marking_window; 1011 bool new_in_marking_window_im = false; 1012 if (during_initial_mark_pause()) { 1013 new_in_marking_window = true; 1014 new_in_marking_window_im = true; 1015 } 1016 1017 if (_last_young_gc) { 1018 // This is supposed to to be the "last young GC" before we start 1019 // doing mixed GCs. Here we decide whether to start mixed GCs or not. 1020 1021 if (!last_pause_included_initial_mark) { 1022 if (next_gc_should_be_mixed("start mixed GCs", 1023 "do not start mixed GCs")) { 1024 set_gcs_are_young(false); 1025 } 1026 } else { 1027 ergo_verbose0(ErgoMixedGCs, 1028 "do not start mixed GCs", 1029 ergo_format_reason("concurrent cycle is about to start")); 1030 } 1031 _last_young_gc = false; 1032 } 1033 1034 if (!_last_gc_was_young) { 1035 // This is a mixed GC. Here we decide whether to continue doing 1036 // mixed GCs or not. 1037 1038 if (!next_gc_should_be_mixed("continue mixed GCs", 1039 "do not continue mixed GCs")) { 1040 set_gcs_are_young(true); 1041 } 1042 } 1043 1044 _short_lived_surv_rate_group->start_adding_regions(); 1045 // do that for any other surv rate groupsx 1046 1047 if (update_stats) { 1048 double cost_per_card_ms = 0.0; 1049 if (_pending_cards > 0) { 1050 cost_per_card_ms = phase_times()->average_last_update_rs_time() / (double) _pending_cards; 1051 _cost_per_card_ms_seq->add(cost_per_card_ms); 1052 } 1053 1054 size_t cards_scanned = _g1->cards_scanned(); 1055 1056 double cost_per_entry_ms = 0.0; 1057 if (cards_scanned > 10) { 1058 cost_per_entry_ms = phase_times()->average_last_scan_rs_time() / (double) cards_scanned; 1059 if (_last_gc_was_young) { 1060 _cost_per_entry_ms_seq->add(cost_per_entry_ms); 1061 } else { 1062 _mixed_cost_per_entry_ms_seq->add(cost_per_entry_ms); 1063 } 1064 } 1065 1066 if (_max_rs_lengths > 0) { 1067 double cards_per_entry_ratio = 1068 (double) cards_scanned / (double) _max_rs_lengths; 1069 if (_last_gc_was_young) { 1070 _young_cards_per_entry_ratio_seq->add(cards_per_entry_ratio); 1071 } else { 1072 _mixed_cards_per_entry_ratio_seq->add(cards_per_entry_ratio); 1073 } 1074 } 1075 1076 // This is defensive. For a while _max_rs_lengths could get 1077 // smaller than _recorded_rs_lengths which was causing 1078 // rs_length_diff to get very large and mess up the RSet length 1079 // predictions. The reason was unsafe concurrent updates to the 1080 // _inc_cset_recorded_rs_lengths field which the code below guards 1081 // against (see CR 7118202). This bug has now been fixed (see CR 1082 // 7119027). However, I'm still worried that 1083 // _inc_cset_recorded_rs_lengths might still end up somewhat 1084 // inaccurate. The concurrent refinement thread calculates an 1085 // RSet's length concurrently with other CR threads updating it 1086 // which might cause it to calculate the length incorrectly (if, 1087 // say, it's in mid-coarsening). So I'll leave in the defensive 1088 // conditional below just in case. 1089 size_t rs_length_diff = 0; 1090 if (_max_rs_lengths > _recorded_rs_lengths) { 1091 rs_length_diff = _max_rs_lengths - _recorded_rs_lengths; 1092 } 1093 _rs_length_diff_seq->add((double) rs_length_diff); 1094 1095 size_t freed_bytes = _heap_used_bytes_before_gc - cur_used_bytes; 1096 size_t copied_bytes = _collection_set_bytes_used_before - freed_bytes; 1097 double cost_per_byte_ms = 0.0; 1098 1099 if (copied_bytes > 0) { 1100 cost_per_byte_ms = phase_times()->average_last_obj_copy_time() / (double) copied_bytes; 1101 if (_in_marking_window) { 1102 _cost_per_byte_ms_during_cm_seq->add(cost_per_byte_ms); 1103 } else { 1104 _cost_per_byte_ms_seq->add(cost_per_byte_ms); 1105 } 1106 } 1107 1108 double all_other_time_ms = pause_time_ms - 1109 (phase_times()->average_last_update_rs_time() + phase_times()->average_last_scan_rs_time() 1110 + phase_times()->average_last_obj_copy_time() + phase_times()->average_last_termination_time()); 1111 1112 double young_other_time_ms = 0.0; 1113 if (young_cset_region_length() > 0) { 1114 young_other_time_ms = 1115 phase_times()->young_cset_choice_time_ms() + 1116 phase_times()->young_free_cset_time_ms(); 1117 _young_other_cost_per_region_ms_seq->add(young_other_time_ms / 1118 (double) young_cset_region_length()); 1119 } 1120 double non_young_other_time_ms = 0.0; 1121 if (old_cset_region_length() > 0) { 1122 non_young_other_time_ms = 1123 phase_times()->non_young_cset_choice_time_ms() + 1124 phase_times()->non_young_free_cset_time_ms(); 1125 1126 _non_young_other_cost_per_region_ms_seq->add(non_young_other_time_ms / 1127 (double) old_cset_region_length()); 1128 } 1129 1130 double constant_other_time_ms = all_other_time_ms - 1131 (young_other_time_ms + non_young_other_time_ms); 1132 _constant_other_time_ms_seq->add(constant_other_time_ms); 1133 1134 double survival_ratio = 0.0; 1135 if (_collection_set_bytes_used_before > 0) { 1136 survival_ratio = (double) _bytes_copied_during_gc / 1137 (double) _collection_set_bytes_used_before; 1138 } 1139 1140 _pending_cards_seq->add((double) _pending_cards); 1141 _rs_lengths_seq->add((double) _max_rs_lengths); 1142 } 1143 1144 _in_marking_window = new_in_marking_window; 1145 _in_marking_window_im = new_in_marking_window_im; 1146 _free_regions_at_end_of_collection = _g1->free_regions(); 1147 update_young_list_target_length(); 1148 1149 // Note that _mmu_tracker->max_gc_time() returns the time in seconds. 1150 double update_rs_time_goal_ms = _mmu_tracker->max_gc_time() * MILLIUNITS * G1RSetUpdatingPauseTimePercent / 100.0; 1151 adjust_concurrent_refinement(phase_times()->average_last_update_rs_time(), 1152 phase_times()->sum_last_update_rs_processed_buffers(), update_rs_time_goal_ms); 1153 1154 _collectionSetChooser->verify(); 1155 } 1156 1157 #define EXT_SIZE_FORMAT "%.1f%s" 1158 #define EXT_SIZE_PARAMS(bytes) \ 1159 byte_size_in_proper_unit((double)(bytes)), \ 1160 proper_unit_for_byte_size((bytes)) 1161 1162 void G1CollectorPolicy::record_heap_size_info_at_start(bool full) { 1163 YoungList* young_list = _g1->young_list(); 1164 _eden_used_bytes_before_gc = young_list->eden_used_bytes(); 1165 _survivor_used_bytes_before_gc = young_list->survivor_used_bytes(); 1166 _heap_capacity_bytes_before_gc = _g1->capacity(); 1167 _heap_used_bytes_before_gc = _g1->used(); 1168 _cur_collection_pause_used_regions_at_start = _g1->used_regions(); 1169 1170 _eden_capacity_bytes_before_gc = 1171 (_young_list_target_length * HeapRegion::GrainBytes) - _survivor_used_bytes_before_gc; 1172 1173 if (full) { 1174 _metaspace_used_bytes_before_gc = MetaspaceAux::allocated_used_bytes(); 1175 } 1176 } 1177 1178 void G1CollectorPolicy::print_heap_transition() { 1179 _g1->print_size_transition(gclog_or_tty, 1180 _heap_used_bytes_before_gc, 1181 _g1->used(), 1182 _g1->capacity()); 1183 } 1184 1185 void G1CollectorPolicy::print_detailed_heap_transition(bool full) { 1186 YoungList* young_list = _g1->young_list(); 1187 1188 size_t eden_used_bytes_after_gc = young_list->eden_used_bytes(); 1189 size_t survivor_used_bytes_after_gc = young_list->survivor_used_bytes(); 1190 size_t heap_used_bytes_after_gc = _g1->used(); 1191 1192 size_t heap_capacity_bytes_after_gc = _g1->capacity(); 1193 size_t eden_capacity_bytes_after_gc = 1194 (_young_list_target_length * HeapRegion::GrainBytes) - survivor_used_bytes_after_gc; 1195 1196 gclog_or_tty->print( 1197 " [Eden: "EXT_SIZE_FORMAT"("EXT_SIZE_FORMAT")->"EXT_SIZE_FORMAT"("EXT_SIZE_FORMAT") " 1198 "Survivors: "EXT_SIZE_FORMAT"->"EXT_SIZE_FORMAT" " 1199 "Heap: "EXT_SIZE_FORMAT"("EXT_SIZE_FORMAT")->" 1200 EXT_SIZE_FORMAT"("EXT_SIZE_FORMAT")]", 1201 EXT_SIZE_PARAMS(_eden_used_bytes_before_gc), 1202 EXT_SIZE_PARAMS(_eden_capacity_bytes_before_gc), 1203 EXT_SIZE_PARAMS(eden_used_bytes_after_gc), 1204 EXT_SIZE_PARAMS(eden_capacity_bytes_after_gc), 1205 EXT_SIZE_PARAMS(_survivor_used_bytes_before_gc), 1206 EXT_SIZE_PARAMS(survivor_used_bytes_after_gc), 1207 EXT_SIZE_PARAMS(_heap_used_bytes_before_gc), 1208 EXT_SIZE_PARAMS(_heap_capacity_bytes_before_gc), 1209 EXT_SIZE_PARAMS(heap_used_bytes_after_gc), 1210 EXT_SIZE_PARAMS(heap_capacity_bytes_after_gc)); 1211 1212 if (full) { 1213 MetaspaceAux::print_metaspace_change(_metaspace_used_bytes_before_gc); 1214 } 1215 1216 gclog_or_tty->cr(); 1217 } 1218 1219 void G1CollectorPolicy::adjust_concurrent_refinement(double update_rs_time, 1220 double update_rs_processed_buffers, 1221 double goal_ms) { 1222 DirtyCardQueueSet& dcqs = JavaThread::dirty_card_queue_set(); 1223 ConcurrentG1Refine *cg1r = G1CollectedHeap::heap()->concurrent_g1_refine(); 1224 1225 if (G1UseAdaptiveConcRefinement) { 1226 const int k_gy = 3, k_gr = 6; 1227 const double inc_k = 1.1, dec_k = 0.9; 1228 1229 int g = cg1r->green_zone(); 1230 if (update_rs_time > goal_ms) { 1231 g = (int)(g * dec_k); // Can become 0, that's OK. That would mean a mutator-only processing. 1232 } else { 1233 if (update_rs_time < goal_ms && update_rs_processed_buffers > g) { 1234 g = (int)MAX2(g * inc_k, g + 1.0); 1235 } 1236 } 1237 // Change the refinement threads params 1238 cg1r->set_green_zone(g); 1239 cg1r->set_yellow_zone(g * k_gy); 1240 cg1r->set_red_zone(g * k_gr); 1241 cg1r->reinitialize_threads(); 1242 1243 int processing_threshold_delta = MAX2((int)(cg1r->green_zone() * sigma()), 1); 1244 int processing_threshold = MIN2(cg1r->green_zone() + processing_threshold_delta, 1245 cg1r->yellow_zone()); 1246 // Change the barrier params 1247 dcqs.set_process_completed_threshold(processing_threshold); 1248 dcqs.set_max_completed_queue(cg1r->red_zone()); 1249 } 1250 1251 int curr_queue_size = dcqs.completed_buffers_num(); 1252 if (curr_queue_size >= cg1r->yellow_zone()) { 1253 dcqs.set_completed_queue_padding(curr_queue_size); 1254 } else { 1255 dcqs.set_completed_queue_padding(0); 1256 } 1257 dcqs.notify_if_necessary(); 1258 } 1259 1260 double 1261 G1CollectorPolicy::predict_base_elapsed_time_ms(size_t pending_cards, 1262 size_t scanned_cards) { 1263 return 1264 predict_rs_update_time_ms(pending_cards) + 1265 predict_rs_scan_time_ms(scanned_cards) + 1266 predict_constant_other_time_ms(); 1267 } 1268 1269 double 1270 G1CollectorPolicy::predict_base_elapsed_time_ms(size_t pending_cards) { 1271 size_t rs_length = predict_rs_length_diff(); 1272 size_t card_num; 1273 if (gcs_are_young()) { 1274 card_num = predict_young_card_num(rs_length); 1275 } else { 1276 card_num = predict_non_young_card_num(rs_length); 1277 } 1278 return predict_base_elapsed_time_ms(pending_cards, card_num); 1279 } 1280 1281 size_t G1CollectorPolicy::predict_bytes_to_copy(HeapRegion* hr) { 1282 size_t bytes_to_copy; 1283 if (hr->is_marked()) 1284 bytes_to_copy = hr->max_live_bytes(); 1285 else { 1286 assert(hr->is_young() && hr->age_in_surv_rate_group() != -1, "invariant"); 1287 int age = hr->age_in_surv_rate_group(); 1288 double yg_surv_rate = predict_yg_surv_rate(age, hr->surv_rate_group()); 1289 bytes_to_copy = (size_t) ((double) hr->used() * yg_surv_rate); 1290 } 1291 return bytes_to_copy; 1292 } 1293 1294 double 1295 G1CollectorPolicy::predict_region_elapsed_time_ms(HeapRegion* hr, 1296 bool for_young_gc) { 1297 size_t rs_length = hr->rem_set()->occupied(); 1298 size_t card_num; 1299 1300 // Predicting the number of cards is based on which type of GC 1301 // we're predicting for. 1302 if (for_young_gc) { 1303 card_num = predict_young_card_num(rs_length); 1304 } else { 1305 card_num = predict_non_young_card_num(rs_length); 1306 } 1307 size_t bytes_to_copy = predict_bytes_to_copy(hr); 1308 1309 double region_elapsed_time_ms = 1310 predict_rs_scan_time_ms(card_num) + 1311 predict_object_copy_time_ms(bytes_to_copy); 1312 1313 // The prediction of the "other" time for this region is based 1314 // upon the region type and NOT the GC type. 1315 if (hr->is_young()) { 1316 region_elapsed_time_ms += predict_young_other_time_ms(1); 1317 } else { 1318 region_elapsed_time_ms += predict_non_young_other_time_ms(1); 1319 } 1320 return region_elapsed_time_ms; 1321 } 1322 1323 void 1324 G1CollectorPolicy::init_cset_region_lengths(uint eden_cset_region_length, 1325 uint survivor_cset_region_length) { 1326 _eden_cset_region_length = eden_cset_region_length; 1327 _survivor_cset_region_length = survivor_cset_region_length; 1328 _old_cset_region_length = 0; 1329 } 1330 1331 void G1CollectorPolicy::set_recorded_rs_lengths(size_t rs_lengths) { 1332 _recorded_rs_lengths = rs_lengths; 1333 } 1334 1335 void G1CollectorPolicy::update_recent_gc_times(double end_time_sec, 1336 double elapsed_ms) { 1337 _recent_gc_times_ms->add(elapsed_ms); 1338 _recent_prev_end_times_for_all_gcs_sec->add(end_time_sec); 1339 _prev_collection_pause_end_ms = end_time_sec * 1000.0; 1340 } 1341 1342 size_t G1CollectorPolicy::expansion_amount() { 1343 double recent_gc_overhead = recent_avg_pause_time_ratio() * 100.0; 1344 double threshold = _gc_overhead_perc; 1345 if (recent_gc_overhead > threshold) { 1346 // We will double the existing space, or take 1347 // G1ExpandByPercentOfAvailable % of the available expansion 1348 // space, whichever is smaller, bounded below by a minimum 1349 // expansion (unless that's all that's left.) 1350 const size_t min_expand_bytes = 1*M; 1351 size_t reserved_bytes = _g1->max_capacity(); 1352 size_t committed_bytes = _g1->capacity(); 1353 size_t uncommitted_bytes = reserved_bytes - committed_bytes; 1354 size_t expand_bytes; 1355 size_t expand_bytes_via_pct = 1356 uncommitted_bytes * G1ExpandByPercentOfAvailable / 100; 1357 expand_bytes = MIN2(expand_bytes_via_pct, committed_bytes); 1358 expand_bytes = MAX2(expand_bytes, min_expand_bytes); 1359 expand_bytes = MIN2(expand_bytes, uncommitted_bytes); 1360 1361 ergo_verbose5(ErgoHeapSizing, 1362 "attempt heap expansion", 1363 ergo_format_reason("recent GC overhead higher than " 1364 "threshold after GC") 1365 ergo_format_perc("recent GC overhead") 1366 ergo_format_perc("threshold") 1367 ergo_format_byte("uncommitted") 1368 ergo_format_byte_perc("calculated expansion amount"), 1369 recent_gc_overhead, threshold, 1370 uncommitted_bytes, 1371 expand_bytes_via_pct, (double) G1ExpandByPercentOfAvailable); 1372 1373 return expand_bytes; 1374 } else { 1375 return 0; 1376 } 1377 } 1378 1379 void G1CollectorPolicy::print_tracing_info() const { 1380 _trace_gen0_time_data.print(); 1381 _trace_gen1_time_data.print(); 1382 } 1383 1384 void G1CollectorPolicy::print_yg_surv_rate_info() const { 1385 #ifndef PRODUCT 1386 _short_lived_surv_rate_group->print_surv_rate_summary(); 1387 // add this call for any other surv rate groups 1388 #endif // PRODUCT 1389 } 1390 1391 uint G1CollectorPolicy::max_regions(int purpose) { 1392 switch (purpose) { 1393 case GCAllocForSurvived: 1394 return _max_survivor_regions; 1395 case GCAllocForTenured: 1396 return REGIONS_UNLIMITED; 1397 default: 1398 ShouldNotReachHere(); 1399 return REGIONS_UNLIMITED; 1400 }; 1401 } 1402 1403 void G1CollectorPolicy::update_max_gc_locker_expansion() { 1404 uint expansion_region_num = 0; 1405 if (GCLockerEdenExpansionPercent > 0) { 1406 double perc = (double) GCLockerEdenExpansionPercent / 100.0; 1407 double expansion_region_num_d = perc * (double) _young_list_target_length; 1408 // We use ceiling so that if expansion_region_num_d is > 0.0 (but 1409 // less than 1.0) we'll get 1. 1410 expansion_region_num = (uint) ceil(expansion_region_num_d); 1411 } else { 1412 assert(expansion_region_num == 0, "sanity"); 1413 } 1414 _young_list_max_length = _young_list_target_length + expansion_region_num; 1415 assert(_young_list_target_length <= _young_list_max_length, "post-condition"); 1416 } 1417 1418 // Calculates survivor space parameters. 1419 void G1CollectorPolicy::update_survivors_policy() { 1420 double max_survivor_regions_d = 1421 (double) _young_list_target_length / (double) SurvivorRatio; 1422 // We use ceiling so that if max_survivor_regions_d is > 0.0 (but 1423 // smaller than 1.0) we'll get 1. 1424 _max_survivor_regions = (uint) ceil(max_survivor_regions_d); 1425 1426 _tenuring_threshold = _survivors_age_table.compute_tenuring_threshold( 1427 HeapRegion::GrainWords * _max_survivor_regions); 1428 } 1429 1430 bool G1CollectorPolicy::force_initial_mark_if_outside_cycle( 1431 GCCause::Cause gc_cause) { 1432 bool during_cycle = _g1->concurrent_mark()->cmThread()->during_cycle(); 1433 if (!during_cycle) { 1434 ergo_verbose1(ErgoConcCycles, 1435 "request concurrent cycle initiation", 1436 ergo_format_reason("requested by GC cause") 1437 ergo_format_str("GC cause"), 1438 GCCause::to_string(gc_cause)); 1439 set_initiate_conc_mark_if_possible(); 1440 return true; 1441 } else { 1442 ergo_verbose1(ErgoConcCycles, 1443 "do not request concurrent cycle initiation", 1444 ergo_format_reason("concurrent cycle already in progress") 1445 ergo_format_str("GC cause"), 1446 GCCause::to_string(gc_cause)); 1447 return false; 1448 } 1449 } 1450 1451 void 1452 G1CollectorPolicy::decide_on_conc_mark_initiation() { 1453 // We are about to decide on whether this pause will be an 1454 // initial-mark pause. 1455 1456 // First, during_initial_mark_pause() should not be already set. We 1457 // will set it here if we have to. However, it should be cleared by 1458 // the end of the pause (it's only set for the duration of an 1459 // initial-mark pause). 1460 assert(!during_initial_mark_pause(), "pre-condition"); 1461 1462 if (initiate_conc_mark_if_possible()) { 1463 // We had noticed on a previous pause that the heap occupancy has 1464 // gone over the initiating threshold and we should start a 1465 // concurrent marking cycle. So we might initiate one. 1466 1467 bool during_cycle = _g1->concurrent_mark()->cmThread()->during_cycle(); 1468 if (!during_cycle) { 1469 // The concurrent marking thread is not "during a cycle", i.e., 1470 // it has completed the last one. So we can go ahead and 1471 // initiate a new cycle. 1472 1473 set_during_initial_mark_pause(); 1474 // We do not allow mixed GCs during marking. 1475 if (!gcs_are_young()) { 1476 set_gcs_are_young(true); 1477 ergo_verbose0(ErgoMixedGCs, 1478 "end mixed GCs", 1479 ergo_format_reason("concurrent cycle is about to start")); 1480 } 1481 1482 // And we can now clear initiate_conc_mark_if_possible() as 1483 // we've already acted on it. 1484 clear_initiate_conc_mark_if_possible(); 1485 1486 ergo_verbose0(ErgoConcCycles, 1487 "initiate concurrent cycle", 1488 ergo_format_reason("concurrent cycle initiation requested")); 1489 } else { 1490 // The concurrent marking thread is still finishing up the 1491 // previous cycle. If we start one right now the two cycles 1492 // overlap. In particular, the concurrent marking thread might 1493 // be in the process of clearing the next marking bitmap (which 1494 // we will use for the next cycle if we start one). Starting a 1495 // cycle now will be bad given that parts of the marking 1496 // information might get cleared by the marking thread. And we 1497 // cannot wait for the marking thread to finish the cycle as it 1498 // periodically yields while clearing the next marking bitmap 1499 // and, if it's in a yield point, it's waiting for us to 1500 // finish. So, at this point we will not start a cycle and we'll 1501 // let the concurrent marking thread complete the last one. 1502 ergo_verbose0(ErgoConcCycles, 1503 "do not initiate concurrent cycle", 1504 ergo_format_reason("concurrent cycle already in progress")); 1505 } 1506 } 1507 } 1508 1509 class KnownGarbageClosure: public HeapRegionClosure { 1510 G1CollectedHeap* _g1h; 1511 CollectionSetChooser* _hrSorted; 1512 1513 public: 1514 KnownGarbageClosure(CollectionSetChooser* hrSorted) : 1515 _g1h(G1CollectedHeap::heap()), _hrSorted(hrSorted) { } 1516 1517 bool doHeapRegion(HeapRegion* r) { 1518 // We only include humongous regions in collection 1519 // sets when concurrent mark shows that their contained object is 1520 // unreachable. 1521 1522 // Do we have any marking information for this region? 1523 if (r->is_marked()) { 1524 // We will skip any region that's currently used as an old GC 1525 // alloc region (we should not consider those for collection 1526 // before we fill them up). 1527 if (_hrSorted->should_add(r) && !_g1h->is_old_gc_alloc_region(r)) { 1528 _hrSorted->add_region(r); 1529 } 1530 } 1531 return false; 1532 } 1533 }; 1534 1535 class ParKnownGarbageHRClosure: public HeapRegionClosure { 1536 G1CollectedHeap* _g1h; 1537 CSetChooserParUpdater _cset_updater; 1538 1539 public: 1540 ParKnownGarbageHRClosure(CollectionSetChooser* hrSorted, 1541 uint chunk_size) : 1542 _g1h(G1CollectedHeap::heap()), 1543 _cset_updater(hrSorted, true /* parallel */, chunk_size) { } 1544 1545 bool doHeapRegion(HeapRegion* r) { 1546 // Do we have any marking information for this region? 1547 if (r->is_marked()) { 1548 // We will skip any region that's currently used as an old GC 1549 // alloc region (we should not consider those for collection 1550 // before we fill them up). 1551 if (_cset_updater.should_add(r) && !_g1h->is_old_gc_alloc_region(r)) { 1552 _cset_updater.add_region(r); 1553 } 1554 } 1555 return false; 1556 } 1557 }; 1558 1559 class ParKnownGarbageTask: public AbstractGangTask { 1560 CollectionSetChooser* _hrSorted; 1561 uint _chunk_size; 1562 G1CollectedHeap* _g1; 1563 public: 1564 ParKnownGarbageTask(CollectionSetChooser* hrSorted, uint chunk_size) : 1565 AbstractGangTask("ParKnownGarbageTask"), 1566 _hrSorted(hrSorted), _chunk_size(chunk_size), 1567 _g1(G1CollectedHeap::heap()) { } 1568 1569 void work(uint worker_id) { 1570 ParKnownGarbageHRClosure parKnownGarbageCl(_hrSorted, _chunk_size); 1571 1572 // Back to zero for the claim value. 1573 _g1->heap_region_par_iterate_chunked(&parKnownGarbageCl, worker_id, 1574 _g1->workers()->active_workers(), 1575 HeapRegion::InitialClaimValue); 1576 } 1577 }; 1578 1579 void 1580 G1CollectorPolicy::record_concurrent_mark_cleanup_end(int no_of_gc_threads) { 1581 _collectionSetChooser->clear(); 1582 1583 uint region_num = _g1->n_regions(); 1584 if (G1CollectedHeap::use_parallel_gc_threads()) { 1585 const uint OverpartitionFactor = 4; 1586 uint WorkUnit; 1587 // The use of MinChunkSize = 8 in the original code 1588 // causes some assertion failures when the total number of 1589 // region is less than 8. The code here tries to fix that. 1590 // Should the original code also be fixed? 1591 if (no_of_gc_threads > 0) { 1592 const uint MinWorkUnit = MAX2(region_num / no_of_gc_threads, 1U); 1593 WorkUnit = MAX2(region_num / (no_of_gc_threads * OverpartitionFactor), 1594 MinWorkUnit); 1595 } else { 1596 assert(no_of_gc_threads > 0, 1597 "The active gc workers should be greater than 0"); 1598 // In a product build do something reasonable to avoid a crash. 1599 const uint MinWorkUnit = MAX2(region_num / (uint) ParallelGCThreads, 1U); 1600 WorkUnit = 1601 MAX2(region_num / (uint) (ParallelGCThreads * OverpartitionFactor), 1602 MinWorkUnit); 1603 } 1604 _collectionSetChooser->prepare_for_par_region_addition(_g1->n_regions(), 1605 WorkUnit); 1606 ParKnownGarbageTask parKnownGarbageTask(_collectionSetChooser, 1607 (int) WorkUnit); 1608 _g1->workers()->run_task(&parKnownGarbageTask); 1609 1610 assert(_g1->check_heap_region_claim_values(HeapRegion::InitialClaimValue), 1611 "sanity check"); 1612 } else { 1613 KnownGarbageClosure knownGarbagecl(_collectionSetChooser); 1614 _g1->heap_region_iterate(&knownGarbagecl); 1615 } 1616 1617 _collectionSetChooser->sort_regions(); 1618 1619 double end_sec = os::elapsedTime(); 1620 double elapsed_time_ms = (end_sec - _mark_cleanup_start_sec) * 1000.0; 1621 _concurrent_mark_cleanup_times_ms->add(elapsed_time_ms); 1622 _cur_mark_stop_world_time_ms += elapsed_time_ms; 1623 _prev_collection_pause_end_ms += elapsed_time_ms; 1624 _mmu_tracker->add_pause(_mark_cleanup_start_sec, end_sec, true); 1625 } 1626 1627 // Add the heap region at the head of the non-incremental collection set 1628 void G1CollectorPolicy::add_old_region_to_cset(HeapRegion* hr) { 1629 assert(_inc_cset_build_state == Active, "Precondition"); 1630 assert(!hr->is_young(), "non-incremental add of young region"); 1631 1632 assert(!hr->in_collection_set(), "should not already be in the CSet"); 1633 hr->set_in_collection_set(true); 1634 hr->set_next_in_collection_set(_collection_set); 1635 _collection_set = hr; 1636 _collection_set_bytes_used_before += hr->used(); 1637 _g1->register_region_with_in_cset_fast_test(hr); 1638 size_t rs_length = hr->rem_set()->occupied(); 1639 _recorded_rs_lengths += rs_length; 1640 _old_cset_region_length += 1; 1641 } 1642 1643 // Initialize the per-collection-set information 1644 void G1CollectorPolicy::start_incremental_cset_building() { 1645 assert(_inc_cset_build_state == Inactive, "Precondition"); 1646 1647 _inc_cset_head = NULL; 1648 _inc_cset_tail = NULL; 1649 _inc_cset_bytes_used_before = 0; 1650 1651 _inc_cset_max_finger = 0; 1652 _inc_cset_recorded_rs_lengths = 0; 1653 _inc_cset_recorded_rs_lengths_diffs = 0; 1654 _inc_cset_predicted_elapsed_time_ms = 0.0; 1655 _inc_cset_predicted_elapsed_time_ms_diffs = 0.0; 1656 _inc_cset_build_state = Active; 1657 } 1658 1659 void G1CollectorPolicy::finalize_incremental_cset_building() { 1660 assert(_inc_cset_build_state == Active, "Precondition"); 1661 assert(SafepointSynchronize::is_at_safepoint(), "should be at a safepoint"); 1662 1663 // The two "main" fields, _inc_cset_recorded_rs_lengths and 1664 // _inc_cset_predicted_elapsed_time_ms, are updated by the thread 1665 // that adds a new region to the CSet. Further updates by the 1666 // concurrent refinement thread that samples the young RSet lengths 1667 // are accumulated in the *_diffs fields. Here we add the diffs to 1668 // the "main" fields. 1669 1670 if (_inc_cset_recorded_rs_lengths_diffs >= 0) { 1671 _inc_cset_recorded_rs_lengths += _inc_cset_recorded_rs_lengths_diffs; 1672 } else { 1673 // This is defensive. The diff should in theory be always positive 1674 // as RSets can only grow between GCs. However, given that we 1675 // sample their size concurrently with other threads updating them 1676 // it's possible that we might get the wrong size back, which 1677 // could make the calculations somewhat inaccurate. 1678 size_t diffs = (size_t) (-_inc_cset_recorded_rs_lengths_diffs); 1679 if (_inc_cset_recorded_rs_lengths >= diffs) { 1680 _inc_cset_recorded_rs_lengths -= diffs; 1681 } else { 1682 _inc_cset_recorded_rs_lengths = 0; 1683 } 1684 } 1685 _inc_cset_predicted_elapsed_time_ms += 1686 _inc_cset_predicted_elapsed_time_ms_diffs; 1687 1688 _inc_cset_recorded_rs_lengths_diffs = 0; 1689 _inc_cset_predicted_elapsed_time_ms_diffs = 0.0; 1690 } 1691 1692 void G1CollectorPolicy::add_to_incremental_cset_info(HeapRegion* hr, size_t rs_length) { 1693 // This routine is used when: 1694 // * adding survivor regions to the incremental cset at the end of an 1695 // evacuation pause, 1696 // * adding the current allocation region to the incremental cset 1697 // when it is retired, and 1698 // * updating existing policy information for a region in the 1699 // incremental cset via young list RSet sampling. 1700 // Therefore this routine may be called at a safepoint by the 1701 // VM thread, or in-between safepoints by mutator threads (when 1702 // retiring the current allocation region) or a concurrent 1703 // refine thread (RSet sampling). 1704 1705 double region_elapsed_time_ms = predict_region_elapsed_time_ms(hr, gcs_are_young()); 1706 size_t used_bytes = hr->used(); 1707 _inc_cset_recorded_rs_lengths += rs_length; 1708 _inc_cset_predicted_elapsed_time_ms += region_elapsed_time_ms; 1709 _inc_cset_bytes_used_before += used_bytes; 1710 1711 // Cache the values we have added to the aggregated informtion 1712 // in the heap region in case we have to remove this region from 1713 // the incremental collection set, or it is updated by the 1714 // rset sampling code 1715 hr->set_recorded_rs_length(rs_length); 1716 hr->set_predicted_elapsed_time_ms(region_elapsed_time_ms); 1717 } 1718 1719 void G1CollectorPolicy::update_incremental_cset_info(HeapRegion* hr, 1720 size_t new_rs_length) { 1721 // Update the CSet information that is dependent on the new RS length 1722 assert(hr->is_young(), "Precondition"); 1723 assert(!SafepointSynchronize::is_at_safepoint(), 1724 "should not be at a safepoint"); 1725 1726 // We could have updated _inc_cset_recorded_rs_lengths and 1727 // _inc_cset_predicted_elapsed_time_ms directly but we'd need to do 1728 // that atomically, as this code is executed by a concurrent 1729 // refinement thread, potentially concurrently with a mutator thread 1730 // allocating a new region and also updating the same fields. To 1731 // avoid the atomic operations we accumulate these updates on two 1732 // separate fields (*_diffs) and we'll just add them to the "main" 1733 // fields at the start of a GC. 1734 1735 ssize_t old_rs_length = (ssize_t) hr->recorded_rs_length(); 1736 ssize_t rs_lengths_diff = (ssize_t) new_rs_length - old_rs_length; 1737 _inc_cset_recorded_rs_lengths_diffs += rs_lengths_diff; 1738 1739 double old_elapsed_time_ms = hr->predicted_elapsed_time_ms(); 1740 double new_region_elapsed_time_ms = predict_region_elapsed_time_ms(hr, gcs_are_young()); 1741 double elapsed_ms_diff = new_region_elapsed_time_ms - old_elapsed_time_ms; 1742 _inc_cset_predicted_elapsed_time_ms_diffs += elapsed_ms_diff; 1743 1744 hr->set_recorded_rs_length(new_rs_length); 1745 hr->set_predicted_elapsed_time_ms(new_region_elapsed_time_ms); 1746 } 1747 1748 void G1CollectorPolicy::add_region_to_incremental_cset_common(HeapRegion* hr) { 1749 assert(hr->is_young(), "invariant"); 1750 assert(hr->young_index_in_cset() > -1, "should have already been set"); 1751 assert(_inc_cset_build_state == Active, "Precondition"); 1752 1753 // We need to clear and set the cached recorded/cached collection set 1754 // information in the heap region here (before the region gets added 1755 // to the collection set). An individual heap region's cached values 1756 // are calculated, aggregated with the policy collection set info, 1757 // and cached in the heap region here (initially) and (subsequently) 1758 // by the Young List sampling code. 1759 1760 size_t rs_length = hr->rem_set()->occupied(); 1761 add_to_incremental_cset_info(hr, rs_length); 1762 1763 HeapWord* hr_end = hr->end(); 1764 _inc_cset_max_finger = MAX2(_inc_cset_max_finger, hr_end); 1765 1766 assert(!hr->in_collection_set(), "invariant"); 1767 hr->set_in_collection_set(true); 1768 assert( hr->next_in_collection_set() == NULL, "invariant"); 1769 1770 _g1->register_region_with_in_cset_fast_test(hr); 1771 } 1772 1773 // Add the region at the RHS of the incremental cset 1774 void G1CollectorPolicy::add_region_to_incremental_cset_rhs(HeapRegion* hr) { 1775 // We should only ever be appending survivors at the end of a pause 1776 assert( hr->is_survivor(), "Logic"); 1777 1778 // Do the 'common' stuff 1779 add_region_to_incremental_cset_common(hr); 1780 1781 // Now add the region at the right hand side 1782 if (_inc_cset_tail == NULL) { 1783 assert(_inc_cset_head == NULL, "invariant"); 1784 _inc_cset_head = hr; 1785 } else { 1786 _inc_cset_tail->set_next_in_collection_set(hr); 1787 } 1788 _inc_cset_tail = hr; 1789 } 1790 1791 // Add the region to the LHS of the incremental cset 1792 void G1CollectorPolicy::add_region_to_incremental_cset_lhs(HeapRegion* hr) { 1793 // Survivors should be added to the RHS at the end of a pause 1794 assert(!hr->is_survivor(), "Logic"); 1795 1796 // Do the 'common' stuff 1797 add_region_to_incremental_cset_common(hr); 1798 1799 // Add the region at the left hand side 1800 hr->set_next_in_collection_set(_inc_cset_head); 1801 if (_inc_cset_head == NULL) { 1802 assert(_inc_cset_tail == NULL, "Invariant"); 1803 _inc_cset_tail = hr; 1804 } 1805 _inc_cset_head = hr; 1806 } 1807 1808 #ifndef PRODUCT 1809 void G1CollectorPolicy::print_collection_set(HeapRegion* list_head, outputStream* st) { 1810 assert(list_head == inc_cset_head() || list_head == collection_set(), "must be"); 1811 1812 st->print_cr("\nCollection_set:"); 1813 HeapRegion* csr = list_head; 1814 while (csr != NULL) { 1815 HeapRegion* next = csr->next_in_collection_set(); 1816 assert(csr->in_collection_set(), "bad CS"); 1817 st->print_cr(" "HR_FORMAT", P: "PTR_FORMAT "N: "PTR_FORMAT", age: %4d", 1818 HR_FORMAT_PARAMS(csr), 1819 csr->prev_top_at_mark_start(), csr->next_top_at_mark_start(), 1820 csr->age_in_surv_rate_group_cond()); 1821 csr = next; 1822 } 1823 } 1824 #endif // !PRODUCT 1825 1826 double G1CollectorPolicy::reclaimable_bytes_perc(size_t reclaimable_bytes) { 1827 // Returns the given amount of reclaimable bytes (that represents 1828 // the amount of reclaimable space still to be collected) as a 1829 // percentage of the current heap capacity. 1830 size_t capacity_bytes = _g1->capacity(); 1831 return (double) reclaimable_bytes * 100.0 / (double) capacity_bytes; 1832 } 1833 1834 bool G1CollectorPolicy::next_gc_should_be_mixed(const char* true_action_str, 1835 const char* false_action_str) { 1836 CollectionSetChooser* cset_chooser = _collectionSetChooser; 1837 if (cset_chooser->is_empty()) { 1838 ergo_verbose0(ErgoMixedGCs, 1839 false_action_str, 1840 ergo_format_reason("candidate old regions not available")); 1841 return false; 1842 } 1843 1844 // Is the amount of uncollected reclaimable space above G1HeapWastePercent? 1845 size_t reclaimable_bytes = cset_chooser->remaining_reclaimable_bytes(); 1846 double reclaimable_perc = reclaimable_bytes_perc(reclaimable_bytes); 1847 double threshold = (double) G1HeapWastePercent; 1848 if (reclaimable_perc <= threshold) { 1849 ergo_verbose4(ErgoMixedGCs, 1850 false_action_str, 1851 ergo_format_reason("reclaimable percentage not over threshold") 1852 ergo_format_region("candidate old regions") 1853 ergo_format_byte_perc("reclaimable") 1854 ergo_format_perc("threshold"), 1855 cset_chooser->remaining_regions(), 1856 reclaimable_bytes, 1857 reclaimable_perc, threshold); 1858 return false; 1859 } 1860 1861 ergo_verbose4(ErgoMixedGCs, 1862 true_action_str, 1863 ergo_format_reason("candidate old regions available") 1864 ergo_format_region("candidate old regions") 1865 ergo_format_byte_perc("reclaimable") 1866 ergo_format_perc("threshold"), 1867 cset_chooser->remaining_regions(), 1868 reclaimable_bytes, 1869 reclaimable_perc, threshold); 1870 return true; 1871 } 1872 1873 uint G1CollectorPolicy::calc_min_old_cset_length() { 1874 // The min old CSet region bound is based on the maximum desired 1875 // number of mixed GCs after a cycle. I.e., even if some old regions 1876 // look expensive, we should add them to the CSet anyway to make 1877 // sure we go through the available old regions in no more than the 1878 // maximum desired number of mixed GCs. 1879 // 1880 // The calculation is based on the number of marked regions we added 1881 // to the CSet chooser in the first place, not how many remain, so 1882 // that the result is the same during all mixed GCs that follow a cycle. 1883 1884 const size_t region_num = (size_t) _collectionSetChooser->length(); 1885 const size_t gc_num = (size_t) MAX2(G1MixedGCCountTarget, (uintx) 1); 1886 size_t result = region_num / gc_num; 1887 // emulate ceiling 1888 if (result * gc_num < region_num) { 1889 result += 1; 1890 } 1891 return (uint) result; 1892 } 1893 1894 uint G1CollectorPolicy::calc_max_old_cset_length() { 1895 // The max old CSet region bound is based on the threshold expressed 1896 // as a percentage of the heap size. I.e., it should bound the 1897 // number of old regions added to the CSet irrespective of how many 1898 // of them are available. 1899 1900 G1CollectedHeap* g1h = G1CollectedHeap::heap(); 1901 const size_t region_num = g1h->n_regions(); 1902 const size_t perc = (size_t) G1OldCSetRegionThresholdPercent; 1903 size_t result = region_num * perc / 100; 1904 // emulate ceiling 1905 if (100 * result < region_num * perc) { 1906 result += 1; 1907 } 1908 return (uint) result; 1909 } 1910 1911 1912 void G1CollectorPolicy::finalize_cset(double target_pause_time_ms, EvacuationInfo& evacuation_info) { 1913 double young_start_time_sec = os::elapsedTime(); 1914 1915 YoungList* young_list = _g1->young_list(); 1916 finalize_incremental_cset_building(); 1917 1918 guarantee(target_pause_time_ms > 0.0, 1919 err_msg("target_pause_time_ms = %1.6lf should be positive", 1920 target_pause_time_ms)); 1921 guarantee(_collection_set == NULL, "Precondition"); 1922 1923 double base_time_ms = predict_base_elapsed_time_ms(_pending_cards); 1924 double predicted_pause_time_ms = base_time_ms; 1925 double time_remaining_ms = MAX2(target_pause_time_ms - base_time_ms, 0.0); 1926 1927 ergo_verbose4(ErgoCSetConstruction | ErgoHigh, 1928 "start choosing CSet", 1929 ergo_format_size("_pending_cards") 1930 ergo_format_ms("predicted base time") 1931 ergo_format_ms("remaining time") 1932 ergo_format_ms("target pause time"), 1933 _pending_cards, base_time_ms, time_remaining_ms, target_pause_time_ms); 1934 1935 _last_gc_was_young = gcs_are_young() ? true : false; 1936 1937 if (_last_gc_was_young) { 1938 _trace_gen0_time_data.increment_young_collection_count(); 1939 } else { 1940 _trace_gen0_time_data.increment_mixed_collection_count(); 1941 } 1942 1943 // The young list is laid with the survivor regions from the previous 1944 // pause are appended to the RHS of the young list, i.e. 1945 // [Newly Young Regions ++ Survivors from last pause]. 1946 1947 uint survivor_region_length = young_list->survivor_length(); 1948 uint eden_region_length = young_list->length() - survivor_region_length; 1949 init_cset_region_lengths(eden_region_length, survivor_region_length); 1950 1951 HeapRegion* hr = young_list->first_survivor_region(); 1952 while (hr != NULL) { 1953 assert(hr->is_survivor(), "badly formed young list"); 1954 hr->set_young(); 1955 hr = hr->get_next_young_region(); 1956 } 1957 1958 // Clear the fields that point to the survivor list - they are all young now. 1959 young_list->clear_survivors(); 1960 1961 _collection_set = _inc_cset_head; 1962 _collection_set_bytes_used_before = _inc_cset_bytes_used_before; 1963 time_remaining_ms = MAX2(time_remaining_ms - _inc_cset_predicted_elapsed_time_ms, 0.0); 1964 predicted_pause_time_ms += _inc_cset_predicted_elapsed_time_ms; 1965 1966 ergo_verbose3(ErgoCSetConstruction | ErgoHigh, 1967 "add young regions to CSet", 1968 ergo_format_region("eden") 1969 ergo_format_region("survivors") 1970 ergo_format_ms("predicted young region time"), 1971 eden_region_length, survivor_region_length, 1972 _inc_cset_predicted_elapsed_time_ms); 1973 1974 // The number of recorded young regions is the incremental 1975 // collection set's current size 1976 set_recorded_rs_lengths(_inc_cset_recorded_rs_lengths); 1977 1978 double young_end_time_sec = os::elapsedTime(); 1979 phase_times()->record_young_cset_choice_time_ms((young_end_time_sec - young_start_time_sec) * 1000.0); 1980 1981 // Set the start of the non-young choice time. 1982 double non_young_start_time_sec = young_end_time_sec; 1983 1984 if (!gcs_are_young()) { 1985 CollectionSetChooser* cset_chooser = _collectionSetChooser; 1986 cset_chooser->verify(); 1987 const uint min_old_cset_length = calc_min_old_cset_length(); 1988 const uint max_old_cset_length = calc_max_old_cset_length(); 1989 1990 uint expensive_region_num = 0; 1991 bool check_time_remaining = adaptive_young_list_length(); 1992 1993 HeapRegion* hr = cset_chooser->peek(); 1994 while (hr != NULL) { 1995 if (old_cset_region_length() >= max_old_cset_length) { 1996 // Added maximum number of old regions to the CSet. 1997 ergo_verbose2(ErgoCSetConstruction, 1998 "finish adding old regions to CSet", 1999 ergo_format_reason("old CSet region num reached max") 2000 ergo_format_region("old") 2001 ergo_format_region("max"), 2002 old_cset_region_length(), max_old_cset_length); 2003 break; 2004 } 2005 2006 2007 // Stop adding regions if the remaining reclaimable space is 2008 // not above G1HeapWastePercent. 2009 size_t reclaimable_bytes = cset_chooser->remaining_reclaimable_bytes(); 2010 double reclaimable_perc = reclaimable_bytes_perc(reclaimable_bytes); 2011 double threshold = (double) G1HeapWastePercent; 2012 if (reclaimable_perc <= threshold) { 2013 // We've added enough old regions that the amount of uncollected 2014 // reclaimable space is at or below the waste threshold. Stop 2015 // adding old regions to the CSet. 2016 ergo_verbose5(ErgoCSetConstruction, 2017 "finish adding old regions to CSet", 2018 ergo_format_reason("reclaimable percentage not over threshold") 2019 ergo_format_region("old") 2020 ergo_format_region("max") 2021 ergo_format_byte_perc("reclaimable") 2022 ergo_format_perc("threshold"), 2023 old_cset_region_length(), 2024 max_old_cset_length, 2025 reclaimable_bytes, 2026 reclaimable_perc, threshold); 2027 break; 2028 } 2029 2030 double predicted_time_ms = predict_region_elapsed_time_ms(hr, gcs_are_young()); 2031 if (check_time_remaining) { 2032 if (predicted_time_ms > time_remaining_ms) { 2033 // Too expensive for the current CSet. 2034 2035 if (old_cset_region_length() >= min_old_cset_length) { 2036 // We have added the minimum number of old regions to the CSet, 2037 // we are done with this CSet. 2038 ergo_verbose4(ErgoCSetConstruction, 2039 "finish adding old regions to CSet", 2040 ergo_format_reason("predicted time is too high") 2041 ergo_format_ms("predicted time") 2042 ergo_format_ms("remaining time") 2043 ergo_format_region("old") 2044 ergo_format_region("min"), 2045 predicted_time_ms, time_remaining_ms, 2046 old_cset_region_length(), min_old_cset_length); 2047 break; 2048 } 2049 2050 // We'll add it anyway given that we haven't reached the 2051 // minimum number of old regions. 2052 expensive_region_num += 1; 2053 } 2054 } else { 2055 if (old_cset_region_length() >= min_old_cset_length) { 2056 // In the non-auto-tuning case, we'll finish adding regions 2057 // to the CSet if we reach the minimum. 2058 ergo_verbose2(ErgoCSetConstruction, 2059 "finish adding old regions to CSet", 2060 ergo_format_reason("old CSet region num reached min") 2061 ergo_format_region("old") 2062 ergo_format_region("min"), 2063 old_cset_region_length(), min_old_cset_length); 2064 break; 2065 } 2066 } 2067 2068 // We will add this region to the CSet. 2069 time_remaining_ms = MAX2(time_remaining_ms - predicted_time_ms, 0.0); 2070 predicted_pause_time_ms += predicted_time_ms; 2071 cset_chooser->remove_and_move_to_next(hr); 2072 _g1->old_set_remove(hr); 2073 add_old_region_to_cset(hr); 2074 2075 hr = cset_chooser->peek(); 2076 } 2077 if (hr == NULL) { 2078 ergo_verbose0(ErgoCSetConstruction, 2079 "finish adding old regions to CSet", 2080 ergo_format_reason("candidate old regions not available")); 2081 } 2082 2083 if (expensive_region_num > 0) { 2084 // We print the information once here at the end, predicated on 2085 // whether we added any apparently expensive regions or not, to 2086 // avoid generating output per region. 2087 ergo_verbose4(ErgoCSetConstruction, 2088 "added expensive regions to CSet", 2089 ergo_format_reason("old CSet region num not reached min") 2090 ergo_format_region("old") 2091 ergo_format_region("expensive") 2092 ergo_format_region("min") 2093 ergo_format_ms("remaining time"), 2094 old_cset_region_length(), 2095 expensive_region_num, 2096 min_old_cset_length, 2097 time_remaining_ms); 2098 } 2099 2100 cset_chooser->verify(); 2101 } 2102 2103 stop_incremental_cset_building(); 2104 2105 ergo_verbose5(ErgoCSetConstruction, 2106 "finish choosing CSet", 2107 ergo_format_region("eden") 2108 ergo_format_region("survivors") 2109 ergo_format_region("old") 2110 ergo_format_ms("predicted pause time") 2111 ergo_format_ms("target pause time"), 2112 eden_region_length, survivor_region_length, 2113 old_cset_region_length(), 2114 predicted_pause_time_ms, target_pause_time_ms); 2115 2116 double non_young_end_time_sec = os::elapsedTime(); 2117 phase_times()->record_non_young_cset_choice_time_ms((non_young_end_time_sec - non_young_start_time_sec) * 1000.0); 2118 evacuation_info.set_collectionset_regions(cset_region_length()); 2119 } 2120 2121 void TraceGen0TimeData::record_start_collection(double time_to_stop_the_world_ms) { 2122 if(TraceGen0Time) { 2123 _all_stop_world_times_ms.add(time_to_stop_the_world_ms); 2124 } 2125 } 2126 2127 void TraceGen0TimeData::record_yield_time(double yield_time_ms) { 2128 if(TraceGen0Time) { 2129 _all_yield_times_ms.add(yield_time_ms); 2130 } 2131 } 2132 2133 void TraceGen0TimeData::record_end_collection(double pause_time_ms, G1GCPhaseTimes* phase_times) { 2134 if(TraceGen0Time) { 2135 _total.add(pause_time_ms); 2136 _other.add(pause_time_ms - phase_times->accounted_time_ms()); 2137 _root_region_scan_wait.add(phase_times->root_region_scan_wait_time_ms()); 2138 _parallel.add(phase_times->cur_collection_par_time_ms()); 2139 _ext_root_scan.add(phase_times->average_last_ext_root_scan_time()); 2140 _satb_filtering.add(phase_times->average_last_satb_filtering_times_ms()); 2141 _update_rs.add(phase_times->average_last_update_rs_time()); 2142 _scan_rs.add(phase_times->average_last_scan_rs_time()); 2143 _obj_copy.add(phase_times->average_last_obj_copy_time()); 2144 _termination.add(phase_times->average_last_termination_time()); 2145 2146 double parallel_known_time = phase_times->average_last_ext_root_scan_time() + 2147 phase_times->average_last_satb_filtering_times_ms() + 2148 phase_times->average_last_update_rs_time() + 2149 phase_times->average_last_scan_rs_time() + 2150 phase_times->average_last_obj_copy_time() + 2151 + phase_times->average_last_termination_time(); 2152 2153 double parallel_other_time = phase_times->cur_collection_par_time_ms() - parallel_known_time; 2154 _parallel_other.add(parallel_other_time); 2155 _clear_ct.add(phase_times->cur_clear_ct_time_ms()); 2156 } 2157 } 2158 2159 void TraceGen0TimeData::increment_young_collection_count() { 2160 if(TraceGen0Time) { 2161 ++_young_pause_num; 2162 } 2163 } 2164 2165 void TraceGen0TimeData::increment_mixed_collection_count() { 2166 if(TraceGen0Time) { 2167 ++_mixed_pause_num; 2168 } 2169 } 2170 2171 void TraceGen0TimeData::print_summary(const char* str, 2172 const NumberSeq* seq) const { 2173 double sum = seq->sum(); 2174 gclog_or_tty->print_cr("%-27s = %8.2lf s (avg = %8.2lf ms)", 2175 str, sum / 1000.0, seq->avg()); 2176 } 2177 2178 void TraceGen0TimeData::print_summary_sd(const char* str, 2179 const NumberSeq* seq) const { 2180 print_summary(str, seq); 2181 gclog_or_tty->print_cr("%+45s = %5d, std dev = %8.2lf ms, max = %8.2lf ms)", 2182 "(num", seq->num(), seq->sd(), seq->maximum()); 2183 } 2184 2185 void TraceGen0TimeData::print() const { 2186 if (!TraceGen0Time) { 2187 return; 2188 } 2189 2190 gclog_or_tty->print_cr("ALL PAUSES"); 2191 print_summary_sd(" Total", &_total); 2192 gclog_or_tty->print_cr(""); 2193 gclog_or_tty->print_cr(""); 2194 gclog_or_tty->print_cr(" Young GC Pauses: %8d", _young_pause_num); 2195 gclog_or_tty->print_cr(" Mixed GC Pauses: %8d", _mixed_pause_num); 2196 gclog_or_tty->print_cr(""); 2197 2198 gclog_or_tty->print_cr("EVACUATION PAUSES"); 2199 2200 if (_young_pause_num == 0 && _mixed_pause_num == 0) { 2201 gclog_or_tty->print_cr("none"); 2202 } else { 2203 print_summary_sd(" Evacuation Pauses", &_total); 2204 print_summary(" Root Region Scan Wait", &_root_region_scan_wait); 2205 print_summary(" Parallel Time", &_parallel); 2206 print_summary(" Ext Root Scanning", &_ext_root_scan); 2207 print_summary(" SATB Filtering", &_satb_filtering); 2208 print_summary(" Update RS", &_update_rs); 2209 print_summary(" Scan RS", &_scan_rs); 2210 print_summary(" Object Copy", &_obj_copy); 2211 print_summary(" Termination", &_termination); 2212 print_summary(" Parallel Other", &_parallel_other); 2213 print_summary(" Clear CT", &_clear_ct); 2214 print_summary(" Other", &_other); 2215 } 2216 gclog_or_tty->print_cr(""); 2217 2218 gclog_or_tty->print_cr("MISC"); 2219 print_summary_sd(" Stop World", &_all_stop_world_times_ms); 2220 print_summary_sd(" Yields", &_all_yield_times_ms); 2221 } 2222 2223 void TraceGen1TimeData::record_full_collection(double full_gc_time_ms) { 2224 if (TraceGen1Time) { 2225 _all_full_gc_times.add(full_gc_time_ms); 2226 } 2227 } 2228 2229 void TraceGen1TimeData::print() const { 2230 if (!TraceGen1Time) { 2231 return; 2232 } 2233 2234 if (_all_full_gc_times.num() > 0) { 2235 gclog_or_tty->print("\n%4d full_gcs: total time = %8.2f s", 2236 _all_full_gc_times.num(), 2237 _all_full_gc_times.sum() / 1000.0); 2238 gclog_or_tty->print_cr(" (avg = %8.2fms).", _all_full_gc_times.avg()); 2239 gclog_or_tty->print_cr(" [std. dev = %8.2f ms, max = %8.2f ms]", 2240 _all_full_gc_times.sd(), 2241 _all_full_gc_times.maximum()); 2242 } 2243 }