rev 58105 : [mq]: 8236073-softmaxheapsize
/*
 * Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc/g1/g1CollectedHeap.hpp"
#include "gc/g1/g1HeapSizingPolicy.hpp"
#include "gc/g1/g1Analytics.hpp"
#include "gc/g1/g1Policy.hpp"
#include "logging/log.hpp"
#include "runtime/globals.hpp"
#include "utilities/debug.hpp"
#include "utilities/globalDefinitions.hpp"

G1HeapSizingPolicy* G1HeapSizingPolicy::create(const G1CollectedHeap* g1h, const G1Analytics* analytics) {
  return new G1HeapSizingPolicy(g1h, analytics);
}

G1HeapSizingPolicy::G1HeapSizingPolicy(const G1CollectedHeap* g1h, const G1Analytics* analytics) :
  _g1h(g1h),
  _analytics(analytics),
  _num_prev_pauses_for_heuristics(analytics->number_of_recorded_pause_times()) {

  assert(MinOverThresholdForGrowth < _num_prev_pauses_for_heuristics, "Threshold must be less than %u", _num_prev_pauses_for_heuristics);
  clear_ratio_check_data();
}

void G1HeapSizingPolicy::clear_ratio_check_data() {
  _ratio_over_threshold_count = 0;
  _ratio_over_threshold_sum = 0.0;
  _pauses_since_start = 0;
}

size_t G1HeapSizingPolicy::expansion_amount_after_young_collection() {
  double recent_gc_overhead = _analytics->recent_avg_pause_time_ratio() * 100.0;
  double last_gc_overhead = _analytics->last_pause_time_ratio() * 100.0;
  assert(GCTimeRatio > 0,
         "we should have set it to a default value in set_g1_gc_flags() "
         "if a user set it to 0");
  const double gc_overhead_percent = 100.0 * (1.0 / (1.0 + GCTimeRatio));

  double threshold = gc_overhead_percent;
  size_t expand_bytes = 0;

  // If the heap is at less than half its maximum size, scale the threshold down,
  // but not below 1. Thus the smaller the heap is, the more likely it is to expand,
  // though the scaling code will likely keep the increase small.
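  // For example, if GCTimeRatio were 12, the unscaled threshold would be
  // 100 / (1 + 12), about 7.7%; a heap committed at a quarter of its maximum
  // capacity would scale that by 0.5 to about 3.8%, still above the lower
  // bound of 1.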
  if (_g1h->capacity() <= _g1h->max_capacity() / 2) {
    threshold *= (double)_g1h->capacity() / (double)(_g1h->max_capacity() / 2);
    threshold = MAX2(threshold, 1.0);
  }

  // If the last GC time ratio is over the threshold, increment the count of
  // times it has been exceeded, and add this ratio to the sum of exceeded
  // ratios.
  if (last_gc_overhead > threshold) {
    _ratio_over_threshold_count++;
    _ratio_over_threshold_sum += last_gc_overhead;
  }

  // Check if we've had enough GC time ratio checks that were over the
  // threshold to trigger an expansion. We'll also expand if we've
  // reached the end of the history buffer and the average of all entries
  // is still over the threshold. This indicates a smaller number of GCs were
  // long enough to make the average exceed the threshold.
  bool filled_history_buffer = _pauses_since_start == _num_prev_pauses_for_heuristics;
  if ((_ratio_over_threshold_count == MinOverThresholdForGrowth) ||
      (filled_history_buffer && (recent_gc_overhead > threshold))) {
    size_t min_expand_bytes = HeapRegion::GrainBytes;
    size_t reserved_bytes = _g1h->max_capacity();
    size_t committed_bytes = _g1h->capacity();
    size_t uncommitted_bytes = reserved_bytes - committed_bytes;
    size_t expand_bytes_via_pct =
      uncommitted_bytes * G1ExpandByPercentOfAvailable / 100;
    double scale_factor = 1.0;

    // If the current size is less than 1/4 of the Initial heap size, expand
    // by half of the delta between the current and Initial sizes. IE, grow
    // back quickly.
    //
    // Otherwise, take the current size, or G1ExpandByPercentOfAvailable % of
    // the available expansion space, whichever is smaller, as the base
    // expansion size. Then possibly scale this size according to how much the
    // threshold has (on average) been exceeded by. If the delta is small
    // (less than the StartScaleDownAt value), scale the size down linearly, but
    // not by less than MinScaleDownFactor. If the delta is large (greater than
    // the StartScaleUpAt value), scale up, but adding no more than MaxScaleUpFactor
    // times the base size. The scaling will be linear in the range from
    // StartScaleUpAt to (StartScaleUpAt + ScaleUpRange). In other words,
    // ScaleUpRange sets the rate of scaling up.
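    // As a rough illustration (example numbers only): with gc_overhead_percent
    // around 7.7, scaling starts below a delta of about 7.7 (down, floored at
    // 0.2) and above about 11.5 (up, capped at 2.0); a delta of 3.8 would give
    // a scale factor of roughly 0.49, a delta of 19.2 roughly 1.5, and deltas
    // in between leave it at 1.0.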
    if (committed_bytes < InitialHeapSize / 4) {
      expand_bytes = (InitialHeapSize - committed_bytes) / 2;
    } else {
      double const MinScaleDownFactor = 0.2;
      double const MaxScaleUpFactor = 2;
      double const StartScaleDownAt = gc_overhead_percent;
      double const StartScaleUpAt = gc_overhead_percent * 1.5;
      double const ScaleUpRange = gc_overhead_percent * 2.0;

      double ratio_delta;
      if (filled_history_buffer) {
        ratio_delta = recent_gc_overhead - threshold;
      } else {
        ratio_delta = (_ratio_over_threshold_sum / _ratio_over_threshold_count) - threshold;
      }

      expand_bytes = MIN2(expand_bytes_via_pct, committed_bytes);
      if (ratio_delta < StartScaleDownAt) {
        scale_factor = ratio_delta / StartScaleDownAt;
        scale_factor = MAX2(scale_factor, MinScaleDownFactor);
      } else if (ratio_delta > StartScaleUpAt) {
        scale_factor = 1 + ((ratio_delta - StartScaleUpAt) / ScaleUpRange);
        scale_factor = MIN2(scale_factor, MaxScaleUpFactor);
      }
    }

    log_debug(gc, ergo, heap)("Attempt heap expansion (recent GC overhead higher than threshold after GC) "
                              "recent GC overhead: %1.2f %% threshold: %1.2f %% uncommitted: " SIZE_FORMAT "B base expansion amount and scale: " SIZE_FORMAT "B (%1.2f%%)",
                              recent_gc_overhead, threshold, uncommitted_bytes, expand_bytes, scale_factor * 100);

    expand_bytes = static_cast<size_t>(expand_bytes * scale_factor);

    // Ensure the expansion size is at least the minimum growth amount
    // and at most the remaining uncommitted byte size.
    expand_bytes = MAX2(expand_bytes, min_expand_bytes);
    expand_bytes = MIN2(expand_bytes, uncommitted_bytes);

    clear_ratio_check_data();
  } else {
    // An expansion was not triggered. If we've started counting, increment
    // the number of checks we've made in the current window. If we've
    // reached the end of the window without resizing, clear the counters to
    // start again the next time we see a ratio above the threshold.
    if (_ratio_over_threshold_count > 0) {
      _pauses_since_start++;
      if (_pauses_since_start > _num_prev_pauses_for_heuristics) {
        clear_ratio_check_data();
      }
    }
  }

  return expand_bytes;
}

size_t G1HeapSizingPolicy::target_heap_capacity(size_t used_bytes, uintx free_ratio) const {
  const double free_percentage = (double) free_ratio / 100.0;
  const double used_percentage = 1.0 - free_percentage;

  // We have to be careful here as this calculation can overflow
  // 32-bit size_t's.
  double used_bytes_d = (double) used_bytes;
  double desired_capacity_d = used_bytes_d / used_percentage;
  // Let's make sure that it is under the max heap size, which
  // by default will make it fit into a size_t.
  double desired_capacity_upper_bound = (double) MaxHeapSize;
  desired_capacity_d = MIN2(desired_capacity_d, desired_capacity_upper_bound);
  // We can now safely turn it into a size_t.
  return (size_t) desired_capacity_d;
}

size_t G1HeapSizingPolicy::shrink_amount_at_last_mixed_gc(size_t desired_bytes_after_concurrent_mark) {
  size_t shrink_bytes = 0;
  const size_t capacity_after_gc = _g1h->capacity();
  const size_t used_after_gc = capacity_after_gc - _g1h->unused_committed_regions_in_bytes();
  size_t maximum_desired_capacity = target_heap_capacity(used_after_gc, MaxHeapFreeRatio);
  // soft_max_capacity can be smaller.
  maximum_desired_capacity = MIN2(maximum_desired_capacity, _g1h->soft_max_capacity());
  // Make sure it is not less than desired_bytes_after_concurrent_mark.
  maximum_desired_capacity = MAX2(maximum_desired_capacity, desired_bytes_after_concurrent_mark);

  if (capacity_after_gc > maximum_desired_capacity) {
    shrink_bytes = capacity_after_gc - maximum_desired_capacity;
  }

  return shrink_bytes;
}