/*
 * Copyright (c) 2001, 2018, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc/shared/collectedHeap.hpp"
#include "gc/shared/plab.inline.hpp"
#include "gc/shared/threadLocalAllocBuffer.hpp"
#include "logging/log.hpp"
#include "memory/universe.hpp"
#include "oops/arrayOop.hpp"
#include "oops/oop.inline.hpp"

size_t PLAB::min_size() {
  // Make sure that we return something that is larger than AlignmentReserve.
  return align_object_size(MAX2(MinTLABSize / HeapWordSize, (size_t)oopDesc::header_size())) + AlignmentReserve;
}

size_t PLAB::max_size() {
  return ThreadLocalAllocBuffer::max_size();
}

PLAB::PLAB(size_t desired_plab_sz_) :
  _word_sz(desired_plab_sz_), _bottom(NULL), _top(NULL),
  _end(NULL), _hard_end(NULL), _allocated(0), _wasted(0), _undo_wasted(0)
{
  // The array header size depends on command line initialization,
  // so the reserve must be computed at runtime.
  CollectedHeap* heap = Universe::heap();
  int rsv_regular = heap->obj_header_size();
  int rsv_array = align_object_size(heap->array_header_size(T_INT));
  AlignmentReserve = rsv_regular > MinObjAlignment ? rsv_array : 0;
  assert(min_size() > AlignmentReserve,
         "Minimum PLAB size " SIZE_FORMAT " must be larger than alignment reserve " SIZE_FORMAT " "
         "to be able to contain objects", min_size(), AlignmentReserve);
}

// If the minimum object size is greater than MinObjAlignment, we can
// end up with a shard at the end of the buffer that's smaller than
// the smallest object. We can't allow that because the buffer must
// look like it's full of objects when we retire it, so we make
// sure we have enough space for a filler int array object.
size_t PLAB::AlignmentReserve;
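
// A worked example of the reserve (illustrative numbers; actual header
// sizes depend on the platform and on flags such as
// UseCompressedClassPointers): on a 64-bit VM with 8-byte heap words, a
// regular object header of 2 words and MinObjAlignment of 1 word, a
// 1-word shard could remain at the end of a retired buffer, too small to
// hold any object. If the int-array header is also 2 words, then
// AlignmentReserve = align_object_size(2) = 2, and since the usable _end
// of a PLAB is held AlignmentReserve words below _hard_end (see
// PLAB::set_buf in plab.hpp), retire_internal() can always fit a filler
// int[] into whatever is left.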
void PLAB::flush_and_retire_stats(PLABStats* stats) {
  // Retire the last allocation buffer.
  size_t unused = retire_internal();

  // Now flush the statistics.
  stats->add_allocated(_allocated);
  stats->add_wasted(_wasted);
  stats->add_undo_wasted(_undo_wasted);
  stats->add_unused(unused);

  // Since we have flushed the stats we need to clear the _allocated and _wasted
  // fields in case somebody retains an instance of this over GCs. Not doing so
  // will artificially inflate the values in the statistics.
  _allocated = 0;
  _wasted = 0;
  _undo_wasted = 0;
}

void PLAB::retire() {
  _wasted += retire_internal();
}

size_t PLAB::retire_internal() {
  size_t result = 0;
  if (_top < _hard_end) {
    assert(pointer_delta(_hard_end, _top) >= (size_t)(Universe::heap()->obj_header_size()),
           "better have enough space left to fill with dummy");
    Universe::heap()->fill_with_dummy_object(_top, _hard_end, true);
    result += invalidate();
  }
  return result;
}

void PLAB::add_undo_waste(HeapWord* obj, size_t word_sz) {
  Universe::heap()->fill_with_dummy_object(obj, obj + word_sz, true);
  _undo_wasted += word_sz;
}

void PLAB::undo_last_allocation(HeapWord* obj, size_t word_sz) {
  assert(pointer_delta(_top, _bottom) >= word_sz, "Bad undo");
  assert(pointer_delta(_top, obj) == word_sz, "Bad undo");
  _top = obj;
}

void PLAB::undo_allocation(HeapWord* obj, size_t word_sz) {
  // Is the alloc in the current alloc buffer?
  if (contains(obj)) {
    assert(contains(obj + word_sz - 1),
           "should contain whole object");
    undo_last_allocation(obj, word_sz);
  } else {
    add_undo_waste(obj, word_sz);
  }
}

void PLABStats::log_plab_allocation() {
  log_debug(gc, plab)("%s PLAB allocation: "
                      "allocated: " SIZE_FORMAT "B, "
                      "wasted: " SIZE_FORMAT "B, "
                      "unused: " SIZE_FORMAT "B, "
                      "used: " SIZE_FORMAT "B, "
                      "undo waste: " SIZE_FORMAT "B",
                      _description,
                      _allocated * HeapWordSize,
                      _wasted * HeapWordSize,
                      _unused * HeapWordSize,
                      used() * HeapWordSize,
                      _undo_wasted * HeapWordSize);
}

void PLABStats::log_sizing(size_t calculated_words, size_t net_desired_words) {
  log_debug(gc, plab)("%s sizing: "
                      "calculated: " SIZE_FORMAT "B, "
                      "actual: " SIZE_FORMAT "B",
                      _description,
                      calculated_words * HeapWordSize,
                      net_desired_words * HeapWordSize);
}

// Calculates the desired PLAB size for the current number of GC worker threads:
// the net budget is divided among workers, clipped to [min_size(), max_size()]
// and object-aligned.
size_t PLABStats::desired_plab_sz(uint no_of_gc_workers) {
  return align_object_size(MIN2(MAX2(min_size(), _desired_net_plab_sz / no_of_gc_workers), max_size()));
}
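
// Example of the computation above (illustrative numbers, not defaults):
// with _desired_net_plab_sz = 32768 words and no_of_gc_workers = 8, each
// worker is offered 32768 / 8 = 4096 words, which is then clamped to
// [min_size(), max_size()] and rounded with align_object_size(). The net
// budget is divided rather than duplicated, so doubling the worker count
// halves the per-worker PLAB before clipping.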
// Compute the desired PLAB size for one GC worker thread and latch the result
// for later use. This should be called once at the end of parallel
// scavenge; it clears the sensor accumulators.
void PLABStats::adjust_desired_plab_sz() {
  log_plab_allocation();

  if (!ResizePLAB) {
    // Clear accumulators for next round.
    reset();
    return;
  }

  assert(is_object_aligned(max_size()) && min_size() <= max_size(),
         "PLAB clipping computation may be incorrect");

  assert(_allocated != 0 || _unused == 0,
         "Inconsistency in PLAB stats: "
         "_allocated: " SIZE_FORMAT ", "
         "_wasted: " SIZE_FORMAT ", "
         "_unused: " SIZE_FORMAT ", "
         "_undo_wasted: " SIZE_FORMAT,
         _allocated, _wasted, _unused, _undo_wasted);

  size_t plab_sz = compute_desired_plab_sz();
  // Take the historical weighted average.
  _filter.sample(plab_sz);
  _desired_net_plab_sz = MAX2(min_size(), (size_t)_filter.average());

  log_sizing(plab_sz, _desired_net_plab_sz);
  // Clear accumulators for next round.
  reset();
}

size_t PLABStats::compute_desired_plab_sz() {
  size_t allocated      = MAX2(_allocated, size_t(1));
  double wasted_frac    = (double)_unused / (double)allocated;
  // The larger the observed unused fraction, the more refills (and hence
  // smaller PLABs) we target, to keep waste near TargetPLABWastePct.
  size_t target_refills = (size_t)((wasted_frac * TargetSurvivorRatio) / TargetPLABWastePct);
  if (target_refills == 0) {
    target_refills = 1;
  }
  size_t used = allocated - _wasted - _unused;
  // Assumed to have 1 GC worker thread.
  size_t recent_plab_sz = used / target_refills;
  return recent_plab_sz;
}
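
// A worked example of the heuristic above (illustrative numbers, assuming
// the default TargetSurvivorRatio = 50 and TargetPLABWastePct = 10):
// suppose a scavenge ends with _allocated = 100000 words, _wasted = 500
// and _unused = 4000. Then
//   wasted_frac    = 4000 / 100000       = 0.04
//   target_refills = (0.04 * 50) / 10    = 0.2, clamped up to 1
//   used           = 100000 - 500 - 4000 = 95500 words
//   recent_plab_sz = 95500 / 1           = 95500 words
// The caller smooths this through _filter and clips it in
// desired_plab_sz(); a larger unused fraction raises target_refills and
// shrinks the computed size, trading refill overhead against waste.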