/*
 * Copyright (c) 1999, 2018, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_GC_SHARED_THREADLOCALALLOCBUFFER_HPP
#define SHARE_VM_GC_SHARED_THREADLOCALALLOCBUFFER_HPP

#include "gc/shared/gcUtil.hpp"
#include "oops/typeArrayOop.hpp"
#include "runtime/perfData.hpp"
#include "runtime/vm_version.hpp"

class GlobalTLABStats;

// ThreadLocalAllocBuffer: a descriptor for thread-local storage used by
// the threads for allocation.
// It is thread-private at any time, but may be multiplexed over
// time across multiple threads. The park()/unpark() pair is
// used to make it available for such multiplexing.
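//
// A minimal usage sketch (illustrative only; assumes a Thread* named "thread"
// whose embedded TLAB has already been initialized):
//
//   HeapWord* obj = thread->tlab().allocate(word_size);
//   if (obj == NULL) {
//     // TLAB exhausted: the caller refills the TLAB or allocates the
//     // object directly from the shared heap.
//   }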
class ThreadLocalAllocBuffer: public CHeapObj<mtThread> {
  friend class VMStructs;
  friend class JVMCIVMStructs;
private:
  HeapWord* _start;                              // address of TLAB
  HeapWord* _top;                                // address after last allocation
  HeapWord* _pf_top;                             // allocation prefetch watermark
  HeapWord* _end;                                // allocation end (excluding alignment_reserve)
  size_t    _desired_size;                       // desired size (including alignment_reserve)
  size_t    _refill_waste_limit;                 // hold onto tlab if free() is larger than this
  size_t    _allocated_before_last_gc;           // total bytes allocated up until the last gc

  static size_t   _max_size;                          // maximum size of any TLAB
  static int      _reserve_for_allocation_prefetch;   // Reserve at the end of the TLAB
  static unsigned _target_refills;                    // expected number of refills between GCs

  unsigned  _number_of_refills;
  unsigned  _fast_refill_waste;
  unsigned  _slow_refill_waste;
  unsigned  _gc_waste;
  unsigned  _slow_allocations;
  size_t    _allocated_size;

  AdaptiveWeightedAverage _allocation_fraction;  // fraction of eden allocated in tlabs

  void accumulate_statistics();
  void initialize_statistics();

  void set_start(HeapWord* start)                { _start = start; }
  void set_end(HeapWord* end)                    { _end = end; }
  void set_top(HeapWord* top)                    { _top = top; }
  void set_pf_top(HeapWord* pf_top)              { _pf_top = pf_top; }
  void set_desired_size(size_t desired_size)     { _desired_size = desired_size; }
  void set_refill_waste_limit(size_t waste)      { _refill_waste_limit = waste; }

  size_t initial_refill_waste_limit()            { return desired_size() / TLABRefillWasteFraction; }
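  // For example, assuming the default TLABRefillWasteFraction of 64, a TLAB
  // with a desired size of 8192 words starts with a refill waste limit of
  // 8192 / 64 = 128 words.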

  static unsigned target_refills()               { return _target_refills; }
  size_t initial_desired_size();

  size_t remaining() const { return end() == NULL ? 0 : pointer_delta(hard_end(), top()); }

  // Make parsable and release it.
  void reset();

  // Resize based on amount of allocation, etc.
  void resize();

  void invariants() const { assert(top() >= start() && top() <= end(), "invalid tlab"); }

  void initialize(HeapWord* start, HeapWord* top, HeapWord* end);

  void print_stats(const char* tag);

  Thread* myThread();

  // statistics

  unsigned number_of_refills() const { return _number_of_refills; }
  unsigned fast_refill_waste() const { return _fast_refill_waste; }
  unsigned slow_refill_waste() const { return _slow_refill_waste; }
  unsigned gc_waste() const          { return _gc_waste; }
  unsigned slow_allocations() const  { return _slow_allocations; }

  static GlobalTLABStats* _global_stats;
  static GlobalTLABStats* global_stats() { return _global_stats; }

public:
  ThreadLocalAllocBuffer() : _allocated_before_last_gc(0), _allocation_fraction(TLABAllocationWeight) {
    // do nothing. TLABs must be set up via an initialize() call before use.
  }

  static size_t min_size()                       { return align_object_size(MinTLABSize / HeapWordSize) + alignment_reserve(); }
  static size_t max_size()                       { assert(_max_size != 0, "max_size not set up"); return _max_size; }
  static size_t max_size_in_bytes()              { return max_size() * BytesPerWord; }
  static void set_max_size(size_t max_size)      { _max_size = max_size; }
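  // Worked example (illustrative, assuming the default MinTLABSize of 2K bytes
  // and an 8-byte HeapWord): min_size() is align_object_size(2048 / 8) = 256
  // words plus alignment_reserve().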

  HeapWord* start() const                        { return _start; }
  HeapWord* end() const                          { return _end; }
  HeapWord* hard_end() const                     { return _end + alignment_reserve(); }
  HeapWord* top() const                          { return _top; }
  HeapWord* pf_top() const                       { return _pf_top; }
  size_t desired_size() const                    { return _desired_size; }
  size_t used() const                            { return pointer_delta(top(), start()); }
  size_t used_bytes() const                      { return pointer_delta(top(), start(), 1); }
  size_t free() const                            { return pointer_delta(end(), top()); }
  // Don't discard tlab if remaining space is larger than this.
  size_t refill_waste_limit() const              { return _refill_waste_limit; }

  // Allocate size HeapWords. The memory is NOT initialized to zero.
  inline HeapWord* allocate(size_t size);
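  // A sketch of the bump-pointer fast path (the real body is defined in the
  // corresponding .inline.hpp file; shown here only as an illustrative comment):
  //
  //   HeapWord* obj = top();
  //   if (pointer_delta(end(), obj) >= size) {
  //     set_top(obj + size);
  //     return obj;
  //   }
  //   return NULL;   // caller must refill the TLAB or allocate elsewhere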

  // Reserve space at the end of the TLAB (large enough for a filler array
  // header and for allocation prefetching).
  static size_t end_reserve() {
    int reserve_size = typeArrayOopDesc::header_size(T_INT);
    return MAX2(reserve_size, _reserve_for_allocation_prefetch);
  }
  static size_t alignment_reserve()              { return align_object_size(end_reserve()); }
  static size_t alignment_reserve_in_bytes()     { return alignment_reserve() * HeapWordSize; }

  // Return tlab size or remaining space in eden such that the
  // space is large enough to hold obj_size and necessary fill space.
  // Otherwise return 0.
  inline size_t compute_size(size_t obj_size);

  // Compute the minimal needed tlab size for the given object size.
  static inline size_t compute_min_size(size_t obj_size);

  // Record slow allocation
  inline void record_slow_allocation(size_t obj_size);

  // Initialization at startup
  static void startup_initialization();

  // Make an in-use tlab parsable, optionally retiring and/or zapping it.
  void make_parsable(bool retire, bool zap = true);

  // Retire in-use tlab before allocation of a new tlab
  void clear_before_allocation();

  // Accumulate statistics across all tlabs before gc
  static void accumulate_statistics_before_gc();

  // Resize tlabs for all threads
  static void resize_all_tlabs();

  void fill(HeapWord* start, HeapWord* top, size_t new_size);
  void initialize();

  static size_t refill_waste_limit_increment()   { return TLABWasteIncrement; }

  template <typename T> void addresses_do(T f) {
    f(&_start);
    f(&_top);
    f(&_pf_top);
    f(&_end);
  }

  // Code generation support
  static ByteSize start_offset()                 { return byte_offset_of(ThreadLocalAllocBuffer, _start); }
  static ByteSize end_offset()                   { return byte_offset_of(ThreadLocalAllocBuffer, _end); }
  static ByteSize top_offset()                   { return byte_offset_of(ThreadLocalAllocBuffer, _top); }
  static ByteSize pf_top_offset()                { return byte_offset_of(ThreadLocalAllocBuffer, _pf_top); }
  static ByteSize size_offset()                  { return byte_offset_of(ThreadLocalAllocBuffer, _desired_size); }
  static ByteSize refill_waste_limit_offset()    { return byte_offset_of(ThreadLocalAllocBuffer, _refill_waste_limit); }

  static ByteSize number_of_refills_offset()     { return byte_offset_of(ThreadLocalAllocBuffer, _number_of_refills); }
  static ByteSize fast_refill_waste_offset()     { return byte_offset_of(ThreadLocalAllocBuffer, _fast_refill_waste); }
  static ByteSize slow_allocations_offset()      { return byte_offset_of(ThreadLocalAllocBuffer, _slow_allocations); }

  void verify();
};

class GlobalTLABStats: public CHeapObj<mtThread> {
private:

  // Accumulate perfdata in private variables because
  // PerfData should be write-only for security reasons
  // (see perfData.hpp)
  unsigned _allocating_threads;
  unsigned _total_refills;
  unsigned _max_refills;
  size_t   _total_allocation;
  size_t   _total_gc_waste;
  size_t   _max_gc_waste;
  size_t   _total_slow_refill_waste;
  size_t   _max_slow_refill_waste;
  size_t   _total_fast_refill_waste;
  size_t   _max_fast_refill_waste;
  unsigned _total_slow_allocations;
  unsigned _max_slow_allocations;

  PerfVariable* _perf_allocating_threads;
  PerfVariable* _perf_total_refills;
  PerfVariable* _perf_max_refills;
  PerfVariable* _perf_allocation;
  PerfVariable* _perf_gc_waste;
  PerfVariable* _perf_max_gc_waste;
  PerfVariable* _perf_slow_refill_waste;
  PerfVariable* _perf_max_slow_refill_waste;
  PerfVariable* _perf_fast_refill_waste;
  PerfVariable* _perf_max_fast_refill_waste;
  PerfVariable* _perf_slow_allocations;
  PerfVariable* _perf_max_slow_allocations;

  AdaptiveWeightedAverage _allocating_threads_avg;

public:
  GlobalTLABStats();

  // Initialize all counters
  void initialize();

  // Write the accumulated values to the perf counters
  void publish();

  void print();

  // Accessors
  unsigned allocating_threads_avg() {
    return MAX2((unsigned)(_allocating_threads_avg.average() + 0.5), 1U);
  }

  size_t allocation() {
    return _total_allocation;
  }

  // Update methods

  void update_allocating_threads() {
    _allocating_threads++;
  }
  void update_number_of_refills(unsigned value) {
    _total_refills += value;
    _max_refills = MAX2(_max_refills, value);
  }
  void update_allocation(size_t value) {
    _total_allocation += value;
  }
  void update_gc_waste(size_t value) {
    _total_gc_waste += value;
    _max_gc_waste = MAX2(_max_gc_waste, value);
  }
  void update_fast_refill_waste(size_t value) {
    _total_fast_refill_waste += value;
    _max_fast_refill_waste = MAX2(_max_fast_refill_waste, value);
  }
  void update_slow_refill_waste(size_t value) {
    _total_slow_refill_waste += value;
    _max_slow_refill_waste = MAX2(_max_slow_refill_waste, value);
  }
  void update_slow_allocations(unsigned value) {
    _total_slow_allocations += value;
    _max_slow_allocations = MAX2(_max_slow_allocations, value);
  }
};

#endif // SHARE_VM_GC_SHARED_THREADLOCALALLOCBUFFER_HPP
--- EOF ---