/*
 * Copyright (c) 2011, 2015, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc/g1/g1AllocRegion.inline.hpp"
#include "gc/g1/g1EvacStats.inline.hpp"
#include "gc/g1/g1CollectedHeap.inline.hpp"
#include "runtime/orderAccess.inline.hpp"

G1CollectedHeap* G1AllocRegion::_g1h = NULL;
HeapRegion* G1AllocRegion::_dummy_region = NULL;

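// One-time setup of the class-wide state: the G1CollectedHeap and the shared,
// completely full "dummy" region that an inactive G1AllocRegion points at.
// Because the dummy region has no free space, allocation attempts against an
// inactive alloc region simply fail instead of requiring a separate NULL check.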
void G1AllocRegion::setup(G1CollectedHeap* g1h, HeapRegion* dummy_region) {
  assert(_dummy_region == NULL, "should be set once");
  assert(dummy_region != NULL, "pre-condition");
  assert(dummy_region->free() == 0, "pre-condition");

  // Make sure that any allocation attempt on this region will fail
  // and will not trigger any asserts.
  assert(allocate(dummy_region, 1, false) == NULL, "should fail");
  assert(par_allocate(dummy_region, 1, false) == NULL, "should fail");
  assert(allocate(dummy_region, 1, true) == NULL, "should fail");
  assert(par_allocate(dummy_region, 1, true) == NULL, "should fail");

  _g1h = g1h;
  _dummy_region = dummy_region;
}

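// Pads the remaining free space of alloc_region with a dummy object so that no
// concurrent CAS allocation can succeed in it, and returns the total number of
// bytes wasted at the end of the region: the dummy fill plus any residual
// space too small to hold even a minimal filler object.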
size_t G1AllocRegion::fill_up_remaining_space(HeapRegion* alloc_region,
                                              bool bot_updates) {
  assert(alloc_region != NULL && alloc_region != _dummy_region,
         "pre-condition");
  size_t result = 0;

  // Other threads might still be trying to allocate using a CAS out
  // of the region we are trying to retire, as they can do so without
  // holding the lock. So, we first have to make sure that no one else
  // can allocate out of it by doing a maximal allocation. Even if our
  // CAS attempt fails a few times, we'll succeed sooner or later
  // given that failed CAS attempts mean that the region is getting
  // closer to being full.
  size_t free_word_size = alloc_region->free() / HeapWordSize;

  // This is the minimum free chunk we can turn into a dummy
  // object. If the free space falls below this, then no one can
  // allocate in this region anyway (all allocation requests will be
  // of a size larger than this) so we won't have to perform the dummy
  // allocation.
  size_t min_word_size_to_fill = CollectedHeap::min_fill_size();

  while (free_word_size >= min_word_size_to_fill) {
    HeapWord* dummy = par_allocate(alloc_region, free_word_size, bot_updates);
    if (dummy != NULL) {
      // If the allocation was successful we should fill in the space.
      CollectedHeap::fill_with_object(dummy, free_word_size);
      alloc_region->set_pre_dummy_top(dummy);
      result += free_word_size * HeapWordSize;
      break;
    }

    free_word_size = alloc_region->free() / HeapWordSize;
    // It's also possible that someone else beats us to the
    // allocation and they fill up the region. In that case, we can
    // just get out of the loop.
  }
  result += alloc_region->free();

  assert(alloc_region->free() / HeapWordSize < min_word_size_to_fill,
         "post-condition");
  return result;
}

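// Retires the currently active region: optionally pads its remaining free
// space, reports the bytes allocated in it since it became active via the
// subclass-specific retire_region() hook, and makes the dummy region active
// again. Returns the number of bytes left unallocated at the end of the region
// if it was filled up, 0 otherwise.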
size_t G1AllocRegion::retire(bool fill_up) {
  assert_alloc_region(_alloc_region != NULL, "not initialized properly");

  size_t result = 0;

  trace("retiring");
  HeapRegion* alloc_region = _alloc_region;
  if (alloc_region != _dummy_region) {
    // We never have to check whether the active region is empty
    // (and potentially free it if it is), given that it is
    // guaranteed never to be empty.
    assert_alloc_region(!alloc_region->is_empty(),
                        "the alloc region should never be empty");

    if (fill_up) {
      result = fill_up_remaining_space(alloc_region, _bot_updates);
    }

    assert_alloc_region(alloc_region->used() >= _used_bytes_before, "invariant");
    size_t allocated_bytes = alloc_region->used() - _used_bytes_before;
    retire_region(alloc_region, allocated_bytes);
    _used_bytes_before = 0;
    _alloc_region = _dummy_region;
  }
  trace("retired");

  return result;
}

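// Asks the subclass for a fresh region via allocate_new_region() and, if one
// is available, immediately satisfies the pending allocation request from it
// before publishing the region as the active one. Returns NULL if no region
// could be obtained.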
HeapWord* G1AllocRegion::new_alloc_region_and_allocate(size_t word_size,
                                                       bool force) {
  assert_alloc_region(_alloc_region == _dummy_region, "pre-condition");
  assert_alloc_region(_used_bytes_before == 0, "pre-condition");

  trace("attempting region allocation");
  HeapRegion* new_alloc_region = allocate_new_region(word_size, force);
  if (new_alloc_region != NULL) {
    new_alloc_region->reset_pre_dummy_top();
    // Need to do this before the allocation
    _used_bytes_before = new_alloc_region->used();
    HeapWord* result = allocate(new_alloc_region, word_size, _bot_updates);
    assert_alloc_region(result != NULL, "the allocation should have succeeded");

    OrderAccess::storestore();
    // Note that we first perform the allocation and then we store the
    // region in _alloc_region. This is the reason why an active region
    // can never be empty.
    update_alloc_region(new_alloc_region);
    trace("region allocation successful");
    return result;
  } else {
    trace("region allocation failed");
    return NULL;
  }
  ShouldNotReachHere();
}

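// Lifecycle of a G1AllocRegion: init() activates the dummy region,
// set()/update_alloc_region() install a real region, retire() hands a used-up
// region back to the subclass, and release() deactivates the alloc region
// entirely by setting it back to NULL.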
void G1AllocRegion::init() {
  trace("initializing");
  assert_alloc_region(_alloc_region == NULL && _used_bytes_before == 0, "pre-condition");
  assert_alloc_region(_dummy_region != NULL, "should have been set");
  _alloc_region = _dummy_region;
  _count = 0;
  trace("initialized");
}

void G1AllocRegion::set(HeapRegion* alloc_region) {
  trace("setting");
  // We explicitly check that the region is not empty to make sure we
  // maintain the "the alloc region cannot be empty" invariant.
  assert_alloc_region(alloc_region != NULL && !alloc_region->is_empty(), "pre-condition");
  assert_alloc_region(_alloc_region == _dummy_region &&
                      _used_bytes_before == 0 && _count == 0,
                      "pre-condition");

  _used_bytes_before = alloc_region->used();
  _alloc_region = alloc_region;
  _count += 1;
  trace("set");
}

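// Installs a freshly allocated region as the active one. The caller
// (new_alloc_region_and_allocate()) issues a storestore barrier first, so by
// the time another thread observes the new _alloc_region the initial
// allocation in it is already visible.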
void G1AllocRegion::update_alloc_region(HeapRegion* alloc_region) {
  trace("update");
  // We explicitly check that the region is not empty to make sure we
  // maintain the "the alloc region cannot be empty" invariant.
  assert_alloc_region(alloc_region != NULL && !alloc_region->is_empty(), "pre-condition");

  _alloc_region = alloc_region;
  _alloc_region->set_allocation_context(allocation_context());
  _count += 1;
  trace("updated");
}

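// Retires the active region without filling it up and deactivates this
// G1AllocRegion. Returns the region that was active, or NULL if only the
// dummy region was installed.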
HeapRegion* G1AllocRegion::release() {
  trace("releasing");
  HeapRegion* alloc_region = _alloc_region;
  retire(false /* fill_up */);
  assert_alloc_region(_alloc_region == _dummy_region, "post-condition of retire()");
  _alloc_region = NULL;
  trace("released");
  return (alloc_region == _dummy_region) ? NULL : alloc_region;
}

#if G1_ALLOC_REGION_TRACING
void G1AllocRegion::trace(const char* str, size_t min_word_size, size_t desired_word_size, size_t actual_word_size, HeapWord* result) {
  // All the calls to trace that set either just the size or the size
  // and the result are considered part of level 2 tracing and are
  // skipped during level 1 tracing.
  if ((actual_word_size == 0 && result == NULL) || (G1_ALLOC_REGION_TRACING > 1)) {
    const size_t buffer_length = 128;
    char hr_buffer[buffer_length];
    char rest_buffer[buffer_length];

    HeapRegion* alloc_region = _alloc_region;
    if (alloc_region == NULL) {
      jio_snprintf(hr_buffer, buffer_length, "NULL");
    } else if (alloc_region == _dummy_region) {
      jio_snprintf(hr_buffer, buffer_length, "DUMMY");
    } else {
      jio_snprintf(hr_buffer, buffer_length,
                   HR_FORMAT, HR_FORMAT_PARAMS(alloc_region));
    }

    if (G1_ALLOC_REGION_TRACING > 1) {
      if (result != NULL) {
        jio_snprintf(rest_buffer, buffer_length, "min " SIZE_FORMAT " desired " SIZE_FORMAT " actual " SIZE_FORMAT " " PTR_FORMAT,
                     min_word_size, desired_word_size, actual_word_size, p2i(result));
      } else if (min_word_size != 0) {
        jio_snprintf(rest_buffer, buffer_length, "min " SIZE_FORMAT " desired " SIZE_FORMAT, min_word_size, desired_word_size);
      } else {
        jio_snprintf(rest_buffer, buffer_length, "");
      }
    } else {
      jio_snprintf(rest_buffer, buffer_length, "");
    }

    tty->print_cr("[%s] %u %s : %s %s",
                  _name, _count, hr_buffer, str, rest_buffer);
  }
}
#endif // G1_ALLOC_REGION_TRACING

G1AllocRegion::G1AllocRegion(const char* name,
                             bool bot_updates)
  : _name(name), _bot_updates(bot_updates),
    _alloc_region(NULL), _count(0), _used_bytes_before(0),
    _allocation_context(AllocationContext::system()) { }


HeapRegion* MutatorAllocRegion::allocate_new_region(size_t word_size,
                                                    bool force) {
  return _g1h->new_mutator_alloc_region(word_size, force);
}

void MutatorAllocRegion::retire_region(HeapRegion* alloc_region,
                                       size_t allocated_bytes) {
  _g1h->retire_mutator_alloc_region(alloc_region, allocated_bytes);
}

HeapRegion* G1GCAllocRegion::allocate_new_region(size_t word_size,
                                                 bool force) {
  assert(!force, "not supported for GC alloc regions");
  return _g1h->new_gc_alloc_region(word_size, count(), _purpose);
}

void G1GCAllocRegion::retire_region(HeapRegion* alloc_region,
                                    size_t allocated_bytes) {
  _g1h->retire_gc_alloc_region(alloc_region, allocated_bytes, _purpose);
}

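// In addition to the base class behavior, record the space wasted at the end
// of the retired region (converted from bytes to words) in the evacuation
// statistics kept in _stats.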
size_t G1GCAllocRegion::retire(bool fill_up) {
  HeapRegion* retired = get();
  size_t end_waste = G1AllocRegion::retire(fill_up);
  // Do not count retirement of the dummy allocation region.
  if (retired != NULL) {
    _stats->add_region_end_waste(end_waste / HeapWordSize);
  }
  return end_waste;
}

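// Old GC alloc regions can be retained and reused across collections. Before
// releasing the region, pad the space up to the next card (BOT granule)
// boundary when possible, so that the last card allocated into is completely
// covered by a filler object. The intent is to avoid later allocations into a
// retained region racing on the block offset table entry of that card. If even
// the smallest filler object does not fit, the region will not be retained,
// so no padding is needed.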
HeapRegion* OldGCAllocRegion::release() {
  HeapRegion* cur = get();
  if (cur != NULL) {
    // Determine how far we are from the next card boundary. If it is smaller than
    // the minimum object size we can allocate into, expand into the next card.
    HeapWord* top = cur->top();
    HeapWord* aligned_top = (HeapWord*)align_ptr_up(top, BOTConstants::N_bytes);

    size_t to_allocate_words = pointer_delta(aligned_top, top, HeapWordSize);

    if (to_allocate_words != 0) {
      // We are not at a card boundary. Fill up to the next boundary, possibly
      // crossing into the next card, taking the end of the region and the
      // minimum object size into account.
      to_allocate_words = MIN2(pointer_delta(cur->end(), cur->top(), HeapWordSize),
                               MAX2(to_allocate_words, G1CollectedHeap::min_fill_size()));

      // Skip allocation if there is not enough space to allocate even the smallest
      // possible object. In this case this region will not be retained, so the
      // original problem cannot occur.
      if (to_allocate_words >= G1CollectedHeap::min_fill_size()) {
        HeapWord* dummy = attempt_allocation(to_allocate_words, true /* bot_updates */);
        CollectedHeap::fill_with_object(dummy, to_allocate_words);
      }
    }
  }
  return G1AllocRegion::release();
}
--- EOF ---