/*
 * Copyright (c) 2001, 2016, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_GC_G1_G1COLLECTORPOLICY_HPP
#define SHARE_VM_GC_G1_G1COLLECTORPOLICY_HPP

#include "gc/g1/collectionSetChooser.hpp"
#include "gc/g1/g1CollectorState.hpp"
#include "gc/g1/g1GCPhaseTimes.hpp"
#include "gc/g1/g1InCSetState.hpp"
#include "gc/g1/g1InitialMarkToMixedTimeTracker.hpp"
#include "gc/g1/g1MMUTracker.hpp"
#include "gc/g1/g1Predictions.hpp"
#include "gc/shared/collectorPolicy.hpp"
#include "utilities/pair.hpp"

// A G1CollectorPolicy makes policy decisions that determine the
// characteristics of the collector.  Examples include:
//   * choice of collection set.
//   * when to collect.

class HeapRegion;
class CollectionSetChooser;
class G1IHOPControl;
class G1YoungGenSizer;

class G1CollectorPolicy: public CollectorPolicy {
private:
  G1IHOPControl* _ihop_control;

  G1IHOPControl* create_ihop_control() const;
  // Update the IHOP control with necessary statistics.
  void update_ihop_prediction(double mutator_time_s,
                              size_t mutator_alloc_bytes,
                              size_t young_gen_size);
  void report_ihop_statistics();

  G1Predictions _predictor;

  double get_new_prediction(TruncatedSeq const* seq) const;
  size_t get_new_size_prediction(TruncatedSeq const* seq) const;

  G1MMUTracker* _mmu_tracker;

  void initialize_alignments();
  void initialize_flags();

  CollectionSetChooser* _cset_chooser;

  double _full_collection_start_sec;

  // These exclude marking times.
  TruncatedSeq* _recent_gc_times_ms;

  TruncatedSeq* _concurrent_mark_remark_times_ms;
  TruncatedSeq* _concurrent_mark_cleanup_times_ms;

  // Ratio check data for determining if heap growth is necessary.
  uint _ratio_over_threshold_count;
  double _ratio_over_threshold_sum;
  uint _pauses_since_start;

  uint _young_list_target_length;
  uint _young_list_fixed_length;
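
  // Note (illustrative summary, not authoritative; see expansion_amount() in
  // the implementation file): the ratio check data above is used roughly as
  // follows. Each pause's GC-time-to-elapsed-time ratio is compared against
  // the threshold derived from GCTimeRatio; pauses over the threshold bump
  // _ratio_over_threshold_count and accumulate their ratios into
  // _ratio_over_threshold_sum. Once at least MinOverThresholdForGrowth of the
  // last NumPrevPausesForHeuristics pauses (see the enum below) are over the
  // threshold, a heap expansion is attempted.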

  // The max number of regions we can extend the eden by while the GC
  // locker is active. This should be >= _young_list_target_length.
  uint _young_list_max_length;

  SurvRateGroup* _short_lived_surv_rate_group;
  SurvRateGroup* _survivor_surv_rate_group;
  // add here any more surv rate groups

  double _gc_overhead_perc;

  double _reserve_factor;
  uint _reserve_regions;

  enum PredictionConstants {
    TruncatedSeqLength = 10,
    NumPrevPausesForHeuristics = 10,
    // MinOverThresholdForGrowth must be less than NumPrevPausesForHeuristics,
    // representing the minimum number of pause time ratios that exceed
    // GCTimeRatio before a heap expansion will be triggered.
    MinOverThresholdForGrowth = 4
  };

  TruncatedSeq* _alloc_rate_ms_seq;
  double        _prev_collection_pause_end_ms;

  TruncatedSeq* _rs_length_diff_seq;
  TruncatedSeq* _cost_per_card_ms_seq;
  TruncatedSeq* _cost_scan_hcc_seq;
  TruncatedSeq* _young_cards_per_entry_ratio_seq;
  TruncatedSeq* _mixed_cards_per_entry_ratio_seq;
  TruncatedSeq* _cost_per_entry_ms_seq;
  TruncatedSeq* _mixed_cost_per_entry_ms_seq;
  TruncatedSeq* _cost_per_byte_ms_seq;
  TruncatedSeq* _constant_other_time_ms_seq;
  TruncatedSeq* _young_other_cost_per_region_ms_seq;
  TruncatedSeq* _non_young_other_cost_per_region_ms_seq;

  TruncatedSeq* _pending_cards_seq;
  TruncatedSeq* _rs_lengths_seq;

  TruncatedSeq* _cost_per_byte_ms_during_cm_seq;

  G1YoungGenSizer* _young_gen_sizer;

  uint _eden_cset_region_length;
  uint _survivor_cset_region_length;
  uint _old_cset_region_length;

  void init_cset_region_lengths(uint eden_cset_region_length,
                                uint survivor_cset_region_length);

  uint eden_cset_region_length() const     { return _eden_cset_region_length;     }
  uint survivor_cset_region_length() const { return _survivor_cset_region_length; }
  uint old_cset_region_length() const      { return _old_cset_region_length;      }

  uint _free_regions_at_end_of_collection;

  size_t _recorded_rs_lengths;
  size_t _max_rs_lengths;

  size_t _rs_lengths_prediction;

#ifndef PRODUCT
  bool verify_young_ages(HeapRegion* head, SurvRateGroup *surv_rate_group);
#endif // PRODUCT

  void adjust_concurrent_refinement(double update_rs_time,
                                    double update_rs_processed_buffers,
                                    double goal_ms);

  double _pause_time_target_ms;

  size_t _pending_cards;

  // The number of bytes allocated in the old gen during the last mutator
  // period and the following young GC phase.
  size_t _bytes_allocated_in_old_since_last_gc;

  G1InitialMarkToMixedTimeTracker _initial_mark_to_mixed;
public:
  const G1Predictions& predictor() const { return _predictor; }
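
  // Illustrative sketch of how a prediction is derived from one of the
  // TruncatedSeq statistics above (assumed shape; G1Predictions holds the
  // authoritative code):
  //
  //   double get_new_prediction(TruncatedSeq const* seq) const {
  //     // roughly: the recent decaying average padded by a confidence
  //     // multiple of the recent standard deviation, so that predictions
  //     // err on the conservative side
  //     // (confidence_sigma is a stand-in name for the predictor's
  //     // confidence factor)
  //     return seq->davg() + confidence_sigma * seq->dsd();
  //   }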

  // Add the given number of bytes to the total number of allocated bytes in the old gen.
  void add_bytes_allocated_in_old_since_last_gc(size_t bytes) { _bytes_allocated_in_old_since_last_gc += bytes; }

  // Accessors

  void set_region_eden(HeapRegion* hr, int young_index_in_cset) {
    hr->set_eden();
    hr->install_surv_rate_group(_short_lived_surv_rate_group);
    hr->set_young_index_in_cset(young_index_in_cset);
  }

  void set_region_survivor(HeapRegion* hr, int young_index_in_cset) {
    assert(hr->is_survivor(), "pre-condition");
    hr->install_surv_rate_group(_survivor_surv_rate_group);
    hr->set_young_index_in_cset(young_index_in_cset);
  }

#ifndef PRODUCT
  bool verify_young_ages();
#endif // PRODUCT

  void record_max_rs_lengths(size_t rs_lengths) {
    _max_rs_lengths = rs_lengths;
  }

  size_t predict_rs_length_diff() const;

  double predict_alloc_rate_ms() const;

  double predict_cost_per_card_ms() const;

  double predict_scan_hcc_ms() const;

  double predict_rs_update_time_ms(size_t pending_cards) const;

  double predict_young_cards_per_entry_ratio() const;

  double predict_mixed_cards_per_entry_ratio() const;

  size_t predict_young_card_num(size_t rs_length) const;

  size_t predict_non_young_card_num(size_t rs_length) const;

  double predict_rs_scan_time_ms(size_t card_num) const;

  double predict_mixed_rs_scan_time_ms(size_t card_num) const;

  double predict_object_copy_time_ms_during_cm(size_t bytes_to_copy) const;

  double predict_object_copy_time_ms(size_t bytes_to_copy) const;

  double predict_constant_other_time_ms() const;

  double predict_young_other_time_ms(size_t young_num) const;

  double predict_non_young_other_time_ms(size_t non_young_num) const;

  double predict_base_elapsed_time_ms(size_t pending_cards) const;
  double predict_base_elapsed_time_ms(size_t pending_cards,
                                      size_t scanned_cards) const;
  size_t predict_bytes_to_copy(HeapRegion* hr) const;
  double predict_region_elapsed_time_ms(HeapRegion* hr, bool for_young_gc) const;

  void set_recorded_rs_lengths(size_t rs_lengths);

  uint cset_region_length() const       { return young_cset_region_length() +
                                                 old_cset_region_length(); }
  uint young_cset_region_length() const { return eden_cset_region_length() +
                                                 survivor_cset_region_length(); }

  double predict_survivor_regions_evac_time() const;

  bool should_update_surv_rate_group_predictors() {
    return collector_state()->last_gc_was_young() && !collector_state()->in_marking_window();
  }

  void cset_regions_freed() {
    bool update = should_update_surv_rate_group_predictors();

    _short_lived_surv_rate_group->all_surviving_words_recorded(update);
    _survivor_surv_rate_group->all_surviving_words_recorded(update);
  }

  G1MMUTracker* mmu_tracker() {
    return _mmu_tracker;
  }

  const G1MMUTracker* mmu_tracker() const {
    return _mmu_tracker;
  }

  double max_pause_time_ms() const {
    return _mmu_tracker->max_gc_time() * 1000.0;
  }

  double predict_remark_time_ms() const;

  double predict_cleanup_time_ms() const;
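
  // Rough sketch of how the predictors above combine into a pause time
  // estimate (illustration only; the authoritative code lives in the
  // implementation file):
  //
  //   predicted_pause_ms = predict_base_elapsed_time_ms(pending_cards)
  //                      + sum over cset regions hr of
  //                          predict_region_elapsed_time_ms(hr, for_young_gc)
  //                      + predict_survivor_regions_evac_time();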

  // Returns an estimate of the survival rate of the region at young-gen
  // age "age", using the given survivor rate group.
  double predict_yg_surv_rate(int age, SurvRateGroup* surv_rate_group) const;

  double predict_yg_surv_rate(int age) const;

  double accum_yg_surv_rate_pred(int age) const;

protected:
  virtual double average_time_ms(G1GCPhaseTimes::GCParPhases phase) const;
  virtual double other_time_ms(double pause_time_ms) const;

  double young_other_time_ms() const;
  double non_young_other_time_ms() const;
  double constant_other_time_ms(double pause_time_ms) const;

  CollectionSetChooser* cset_chooser() const {
    return _cset_chooser;
  }

private:
  // Statistics kept per GC stoppage, whether it is a pause or a full collection.
  TruncatedSeq* _recent_prev_end_times_for_all_gcs_sec;

  // Add a new GC of the given duration and end time to the record.
  void update_recent_gc_times(double end_time_sec, double elapsed_ms);

  // The head of the list (via "next_in_collection_set()") representing the
  // current collection set. Set from the incrementally built collection
  // set at the start of the pause.
  HeapRegion* _collection_set;

  // The number of bytes in the collection set before the pause. Set from
  // the incrementally built collection set at the start of an evacuation
  // pause, and incremented in finalize_old_cset_part() when adding old regions
  // (if any) to the collection set.
  size_t _collection_set_bytes_used_before;

  // The number of bytes copied during the GC.
  size_t _bytes_copied_during_gc;

  // The associated information that is maintained while the incremental
  // collection set is being built with young regions. Used to populate
  // the recorded info for the evacuation pause.

  enum CSetBuildType {
    Active,             // We are actively building the collection set
    Inactive            // We are not actively building the collection set
  };

  CSetBuildType _inc_cset_build_state;

  // The head of the incrementally built collection set.
  HeapRegion* _inc_cset_head;

  // The tail of the incrementally built collection set.
  HeapRegion* _inc_cset_tail;

  // The number of bytes in the incrementally built collection set.
  // Used to set _collection_set_bytes_used_before at the start of
  // an evacuation pause.
  size_t _inc_cset_bytes_used_before;

  // The RSet lengths recorded for regions in the CSet. It is updated
  // by the thread that adds a new region to the CSet. We assume that
  // only one thread can be allocating a new CSet region (currently,
  // it does so after taking the Heap_lock) hence no need to
  // synchronize updates to this field.
  size_t _inc_cset_recorded_rs_lengths;

  // A concurrent refinement thread periodically samples the young
  // region RSets and needs to update _inc_cset_recorded_rs_lengths as
  // the RSets grow. Instead of having to synchronize updates to that
  // field we accumulate them in this field and add them to
  // _inc_cset_recorded_rs_lengths at the start of a GC.
  ssize_t _inc_cset_recorded_rs_lengths_diffs;

  // The predicted elapsed time it will take to collect the regions in
  // the CSet. This is updated by the thread that adds a new region to
  // the CSet. See the comment for _inc_cset_recorded_rs_lengths about
  // MT-safety assumptions.
  double _inc_cset_predicted_elapsed_time_ms;

  // See the comment for _inc_cset_recorded_rs_lengths_diffs.
  double _inc_cset_predicted_elapsed_time_ms_diffs;
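
  // Sketch of how the *_diffs fields above are expected to be folded into
  // their base counterparts at the start of a GC (illustration only; see
  // finalize_incremental_cset_building() for the real code):
  //
  //   _inc_cset_recorded_rs_lengths       += _inc_cset_recorded_rs_lengths_diffs;
  //   _inc_cset_predicted_elapsed_time_ms += _inc_cset_predicted_elapsed_time_ms_diffs;
  //   _inc_cset_recorded_rs_lengths_diffs       = 0;
  //   _inc_cset_predicted_elapsed_time_ms_diffs = 0.0;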

  // Stash a pointer to the g1 heap.
  G1CollectedHeap* _g1;

  G1GCPhaseTimes* _phase_times;

  // The ratio of gc time to elapsed time, computed over recent pauses,
  // and the ratio for just the last pause.
  double _recent_avg_pause_time_ratio;
  double _last_pause_time_ratio;

  double recent_avg_pause_time_ratio() const {
    return _recent_avg_pause_time_ratio;
  }

  // This set of variables tracks the collector efficiency, in order to
  // determine whether we should initiate a new marking.
  double _mark_remark_start_sec;
  double _mark_cleanup_start_sec;

  // Updates the internal young list maximum and target lengths. Returns the
  // unbounded young list target length.
  uint update_young_list_max_and_target_length();
  uint update_young_list_max_and_target_length(size_t rs_lengths);

  // Update the young list target length either by setting it to the
  // desired fixed value or by calculating it using G1's pause
  // prediction model, using the given rs_lengths as the prediction of
  // the RS lengths. Returns the unbounded young list target length.
  uint update_young_list_target_length(size_t rs_lengths);

  // Calculate and return the minimum desired young list target
  // length. This is the minimum desired young list length according
  // to the user's inputs.
  uint calculate_young_list_desired_min_length(uint base_min_length) const;

  // Calculate and return the maximum desired young list target
  // length. This is the maximum desired young list length according
  // to the user's inputs.
  uint calculate_young_list_desired_max_length() const;

  // Calculate and return the maximum young list target length that
  // can fit into the pause time goal. The parameters are: rs_lengths
  // represents the prediction of how large the young RSet lengths will
  // be, base_min_length is the already existing number of regions in
  // the young list, and desired_min_length and desired_max_length are
  // the desired min and max young list lengths according to the user's
  // inputs.
  uint calculate_young_list_target_length(size_t rs_lengths,
                                          uint base_min_length,
                                          uint desired_min_length,
                                          uint desired_max_length) const;

  // Result of the young_list_target_lengths() method below, containing both
  // the bounded and the unbounded young list target lengths, in this order.
  typedef Pair<uint, uint, StackObj> YoungTargetLengths;
  YoungTargetLengths young_list_target_lengths(size_t rs_lengths) const;

  void update_rs_lengths_prediction();
  void update_rs_lengths_prediction(size_t prediction);

  // Calculate and return chunk size (in number of regions) for parallel
  // concurrent mark cleanup.
  uint calculate_parallel_work_chunk_size(uint n_workers, uint n_regions) const;

  // Check whether a given young length (young_length) fits into the
  // given target pause time and whether the prediction for the amount
  // of objects to be copied for the given length will fit into the
  // given free space (expressed by base_free_regions). It is used by
  // calculate_young_list_target_length().
  bool predict_will_fit(uint young_length, double base_time_ms,
                        uint base_free_regions, double target_pause_time_ms) const;
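
  // calculate_young_list_target_length() is expected to use predict_will_fit()
  // as its oracle (assumed: in a binary search between the desired min and max
  // lengths) to find the largest young length whose predicted pause time still
  // fits within target_pause_time_ms.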

  // Calculate the minimum number of old regions we'll add to the CSet
  // during a mixed GC.
  uint calc_min_old_cset_length() const;

  // Calculate the maximum number of old regions we'll add to the CSet
  // during a mixed GC.
  uint calc_max_old_cset_length() const;

  // Returns the given amount of uncollected reclaimable space
  // as a percentage of the current heap capacity.
  double reclaimable_bytes_perc(size_t reclaimable_bytes) const;

  // Sets up marking if proper conditions are met.
  void maybe_start_marking();

  // The kind of STW pause.
  enum PauseKind {
    FullGC,
    YoungOnlyGC,
    MixedGC,
    LastYoungGC,
    InitialMarkGC,
    Cleanup,
    Remark
  };

  // Calculate PauseKind from internal state.
  PauseKind young_gc_pause_kind() const;
  // Record the given STW pause with the given start and end times (in s).
  void record_pause(PauseKind kind, double start, double end);
  // Indicate that we aborted marking before doing any mixed GCs.
  void abort_time_to_mixed_tracking();
public:

  G1CollectorPolicy();

  virtual ~G1CollectorPolicy();

  virtual G1CollectorPolicy* as_g1_policy() { return this; }

  G1CollectorState* collector_state() const;

  G1GCPhaseTimes* phase_times() const { return _phase_times; }

  // Check the current value of the young list RSet lengths and
  // compare it against the last prediction. If the current value is
  // higher, recalculate the young list target length prediction.
  void revise_young_list_target_length_if_necessary(size_t rs_lengths);

  // This should be called after the heap is resized.
  void record_new_heap_size(uint new_number_of_regions);

  void init();

  virtual void note_gc_start(uint num_active_workers);

  // Create jstat counters for the policy.
  virtual void initialize_gc_policy_counters();

  bool need_to_start_conc_mark(const char* source, size_t alloc_word_size = 0);

  bool about_to_start_mixed_phase() const;

  // Record the start and end of an evacuation pause.
  void record_collection_pause_start(double start_time_sec);
  void record_collection_pause_end(double pause_time_ms, size_t cards_scanned, size_t heap_used_bytes_before_gc);

  // Record the start and end of a full collection.
  void record_full_collection_start();
  void record_full_collection_end();

  // Must currently be called while the world is stopped.
  void record_concurrent_mark_init_end(double mark_init_elapsed_time_ms);

  // Record start and end of remark.
  void record_concurrent_mark_remark_start();
  void record_concurrent_mark_remark_end();

  // Record start, end, and completion of cleanup.
  void record_concurrent_mark_cleanup_start();
  void record_concurrent_mark_cleanup_end();
  void record_concurrent_mark_cleanup_completed();

  virtual void print_phases();

  // Record how much space we copied during a GC. This is typically
  // called when a GC alloc region is being retired.
  void record_bytes_copied_during_gc(size_t bytes) {
    _bytes_copied_during_gc += bytes;
  }

  // The amount of space we copied during a GC.
  size_t bytes_copied_during_gc() const {
    return _bytes_copied_during_gc;
  }

  size_t collection_set_bytes_used_before() const {
    return _collection_set_bytes_used_before;
  }
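
  // Note (assumed behavior, consistent with the comment below): the mixed-GC
  // decision is typically based on whether the CollectionSetChooser still has
  // candidate regions and whether reclaimable_bytes_perc() is above the waste
  // threshold controlled by G1HeapWastePercent.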

  // Determine whether there are candidate regions so that the
  // next GC should be mixed. The two action strings are used
  // in the ergo output when the method returns true or false.
  bool next_gc_should_be_mixed(const char* true_action_str,
                               const char* false_action_str) const;

  // Choose a new collection set. Marks the chosen regions as being
  // "in_collection_set", and links them together. The head and number of
  // regions in the collection set are available via access methods.
  double finalize_young_cset_part(double target_pause_time_ms);
  virtual void finalize_old_cset_part(double time_remaining_ms);

  // The head of the list (via "next_in_collection_set()") representing the
  // current collection set.
  HeapRegion* collection_set() { return _collection_set; }

  void clear_collection_set() { _collection_set = NULL; }

  // Add old region "hr" to the CSet.
  void add_old_region_to_cset(HeapRegion* hr);

  // Incremental CSet Support

  // The head of the incrementally built collection set.
  HeapRegion* inc_cset_head() { return _inc_cset_head; }

  // The tail of the incrementally built collection set.
  HeapRegion* inc_set_tail() { return _inc_cset_tail; }

  // Initialize incremental collection set info.
  void start_incremental_cset_building();

  // Perform any final calculations on the incremental CSet fields
  // before we can use them.
  void finalize_incremental_cset_building();

  void clear_incremental_cset() {
    _inc_cset_head = NULL;
    _inc_cset_tail = NULL;
  }

  // Stop adding regions to the incremental collection set.
  void stop_incremental_cset_building() { _inc_cset_build_state = Inactive; }

  // Add information about hr to the aggregated information for the
  // incrementally built collection set.
  void add_to_incremental_cset_info(HeapRegion* hr, size_t rs_length);

  // Update information about hr in the aggregated information for
  // the incrementally built collection set.
  void update_incremental_cset_info(HeapRegion* hr, size_t new_rs_length);

private:
  // Update the incremental cset information when adding a region
  // (should not be called directly).
  void add_region_to_incremental_cset_common(HeapRegion* hr);

  // Set the state to start a concurrent marking cycle and clear
  // _initiate_conc_mark_if_possible because it has now been
  // acted on.
  void initiate_conc_mark();

public:
  // Add hr to the LHS of the incremental collection set.
  void add_region_to_incremental_cset_lhs(HeapRegion* hr);

  // Add hr to the RHS of the incremental collection set.
  void add_region_to_incremental_cset_rhs(HeapRegion* hr);

#ifndef PRODUCT
  void print_collection_set(HeapRegion* list_head, outputStream* st);
#endif // !PRODUCT

  // This sets the initiate_conc_mark_if_possible() flag to start a
  // new cycle, as long as we are not already in one. It is best
  // called during a safepoint, when the test of whether a cycle is
  // in progress or not is stable.
  bool force_initial_mark_if_outside_cycle(GCCause::Cause gc_cause);
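
  // Typical callers (for context; not an exhaustive list): explicit GC
  // requests when ExplicitGCInvokesConcurrent is enabled, and humongous
  // allocations that may warrant starting a concurrent cycle.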

  // This is called at the very beginning of an evacuation pause (it
  // has to be the first thing that the pause does). If
  // initiate_conc_mark_if_possible() is true, and the concurrent
  // marking thread has completed its work during the previous cycle,
  // it will set during_initial_mark_pause() so that the pause does
  // the initial-mark work and starts a marking cycle.
  void decide_on_conc_mark_initiation();

  // If an expansion would be appropriate, because recent GC overhead has
  // exceeded the desired limit, return an amount to expand by.
  virtual size_t expansion_amount();

  // Clear ratio tracking data used by expansion_amount().
  void clear_ratio_check_data();

  // Print stats on young survival ratio.
  void print_yg_surv_rate_info() const;

  void finished_recalculating_age_indexes(bool is_survivors) {
    if (is_survivors) {
      _survivor_surv_rate_group->finished_recalculating_age_indexes();
    } else {
      _short_lived_surv_rate_group->finished_recalculating_age_indexes();
    }
    // do that for any other surv rate groups
  }

  size_t young_list_target_length() const { return _young_list_target_length; }

  bool is_young_list_full() const;

  bool can_expand_young_list() const;

  uint young_list_max_length() const {
    return _young_list_max_length;
  }

  bool adaptive_young_list_length() const;

  virtual bool should_process_references() const {
    return true;
  }

private:
  //
  // Survivor regions policy.
  //

  // Current tenuring threshold, set to 0 if the collector reaches the
  // maximum number of survivor regions.
  uint _tenuring_threshold;

  // The limit on the number of regions allocated for survivors.
  uint _max_survivor_regions;

  // For reporting purposes.
  // The heap usage recorded before a GC is also used to calculate
  // the cost of copying.

  // The number of survivor regions after a collection.
  uint _recorded_survivor_regions;
  // List of survivor regions.
  HeapRegion* _recorded_survivor_head;
  HeapRegion* _recorded_survivor_tail;

  AgeTable _survivors_age_table;

public:
  uint tenuring_threshold() const { return _tenuring_threshold; }

  uint max_survivor_regions() {
    return _max_survivor_regions;
  }

  static const uint REGIONS_UNLIMITED = (uint) -1;

  uint max_regions(InCSetState dest) const {
    switch (dest.value()) {
      case InCSetState::Young:
        return _max_survivor_regions;
      case InCSetState::Old:
        return REGIONS_UNLIMITED;
      default:
        assert(false, "Unknown dest state: " CSETSTATE_FORMAT, dest.value());
        break;
    }
    // keep some compilers happy
    return 0;
  }

  void note_start_adding_survivor_regions() {
    _survivor_surv_rate_group->start_adding_regions();
  }

  void note_stop_adding_survivor_regions() {
    _survivor_surv_rate_group->stop_adding_regions();
  }

  void record_survivor_regions(uint regions,
                               HeapRegion* head,
                               HeapRegion* tail) {
    _recorded_survivor_regions = regions;
    _recorded_survivor_head    = head;
    _recorded_survivor_tail    = tail;
  }

  uint recorded_survivor_regions() const {
    return _recorded_survivor_regions;
  }

  void record_age_table(AgeTable* age_table) {
    _survivors_age_table.merge(age_table);
  }

  void update_max_gc_locker_expansion();

  // Calculates survivor space parameters.
  void update_survivors_policy();

  virtual void post_heap_initialize();
};

#endif // SHARE_VM_GC_G1_G1COLLECTORPOLICY_HPP