#include "memory/memRegion.hpp"
#include "memory/virtualspace.hpp"
#include "runtime/globals.hpp"
#include "utilities/globalDefinitions.hpp"
#include "utilities/macros.hpp"

// The CollectedHeap type requires subtypes to implement a method
// "block_start". For some subtypes, notably generational systems using
// card-table-based write barriers, the efficiency of this operation may
// be important. The "BlockOffsetArray" classes below can provide such an
// efficient implementation.
//
// BlockOffsetTable (abstract)
//   - BlockOffsetArray (abstract)
//     - BlockOffsetArrayNonContigSpace
//     - BlockOffsetArrayContigSpace
//

class ContiguousSpace;

class BOTConstants : public AllStatic {
 public:
  static const uint LogN = 9;
  static const uint LogN_words = LogN - LogHeapWordSize;
  static const uint N_bytes = 1 << LogN;
  static const uint N_words = 1 << LogN_words;
  // Entries "e" of at least N_words mean "go back by Base^(e-N_words)."
  // All entries are less than "N_words + N_powers".
  static const uint LogBase = 4;
  static const uint Base = (1 << LogBase);
  static const uint N_powers = 14;

  static size_t power_to_cards_back(uint i) {
    return (size_t)1 << (LogBase * i);
  }
  static size_t power_to_words_back(uint i) {
    return power_to_cards_back(i) * N_words;
  }
  static size_t entry_to_cards_back(u_char entry) {
    assert(entry >= N_words, "Precondition");
    return power_to_cards_back(entry - N_words);
  }
  static size_t entry_to_words_back(u_char entry) {
    assert(entry >= N_words, "Precondition");
    return power_to_words_back(entry - N_words);
  }
};
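
// A worked example of the encoding (illustrative only; assumes 64-bit
// HeapWords, i.e. LogHeapWordSize == 3): LogN == 9 makes each card cover
// N_bytes == 512 bytes == N_words == 64 words. Entries below N_words are
// plain backward offsets in words, while an entry of, say, 70 (>= N_words)
// encodes a backskip of
//   BOTConstants::power_to_cards_back(70 - 64) == 16^6 cards,
// since Base == (1 << LogBase) == 16.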

//////////////////////////////////////////////////////////////////////////
// The BlockOffsetTable "interface"
//////////////////////////////////////////////////////////////////////////
class BlockOffsetTable VALUE_OBJ_CLASS_SPEC {
  friend class VMStructs;
 protected:
  // These members describe the region covered by the table.

  // The space this table is covering.
  HeapWord* _bottom;    // == reserved.start
  HeapWord* _end;       // End of currently allocated region.

 public:
  // Initialize the table to cover the given space.
  // The contents of the initial table are undefined.
  BlockOffsetTable(HeapWord* bottom, HeapWord* end):
    _bottom(bottom), _end(end) {
    assert(_bottom <= _end, "arguments out of order");
  }

  // Note that the committed size of the covered space may have changed,
  // so the table size might also wish to change.
  virtual void resize(size_t new_word_size) = 0;

  virtual void set_bottom(HeapWord* new_bottom) {
    assert(new_bottom <= _end, "new_bottom > _end");
    _bottom = new_bottom;
    resize(pointer_delta(_end, _bottom));
  }

  // Requires "addr" to be contained by a block, and returns the address of
  // the start of that block.
  virtual HeapWord* block_start_unsafe(const void* addr) const = 0;

  // Returns the address of the start of the block containing "addr", or
  // else NULL if it is covered by no block.
  HeapWord* block_start(const void* addr) const;
};

//////////////////////////////////////////////////////////////////////////
// One implementation of "BlockOffsetTable," the BlockOffsetArray, divides
// the covered region into "N"-word subregions (where "N" = 2^"LogN"). An
// array with an entry for each such subregion indicates how far back one
// must go to find the start of the chunk that includes the first word of
// the subregion.
//
// Each BlockOffsetArray is owned by a Space. However, the actual array
// may be shared by several BlockOffsetArrays; this is useful
// when a single resizable area (such as a generation) is divided up into
// several spaces in which contiguous allocation takes place. (Consider,
// for example, the garbage-first generation.)
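
// A sketch of how a lookup walks backwards through the entries (this is
// illustrative only; the real implementation lives in the corresponding
// .cpp file). Entries below N_words are direct backward offsets in words;
// larger entries encode exponential backskips:
//
//   size_t index  = _array->index_for(addr);
//   HeapWord* q   = _array->address_for_index(index);
//   uint offset   = _array->offset_array(index);
//   while (offset >= BOTConstants::N_words) {
//     // Skip back Base^(offset - N_words) cards and look again.
//     q     -= BOTConstants::entry_to_words_back(offset);
//     index -= BOTConstants::entry_to_cards_back(offset);
//     offset = _array->offset_array(index);
//   }
//   q -= offset;  // q now points at the start of the covering block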

// Here is the shared array type.
//////////////////////////////////////////////////////////////////////////
// BlockOffsetSharedArray
//////////////////////////////////////////////////////////////////////////
class BlockOffsetSharedArray: public CHeapObj<mtGC> {
  friend class BlockOffsetArray;
  friend class BlockOffsetArrayNonContigSpace;
  friend class BlockOffsetArrayContigSpace;
  friend class VMStructs;

 private:
  bool _init_to_zero;

  // The reserved region covered by the shared array.
  MemRegion _reserved;

  // End of the current committed region.
  HeapWord* _end;

  // Array for keeping offsets for retrieving object start fast given an
  // address.
  VirtualSpace _vs;
  u_char* _offset_array;          // byte array keeping backwards offsets

  void fill_range(size_t start, size_t num_cards, u_char offset) {
    void* start_ptr = &_offset_array[start];
#if INCLUDE_ALL_GCS
    // If collector is concurrent, special handling may be needed.
    assert(!UseG1GC, "Shouldn't be here when using G1");
    if (UseConcMarkSweepGC) {
      memset_with_concurrent_readers(start_ptr, offset, num_cards);
      return;
    }
#endif // INCLUDE_ALL_GCS
    memset(start_ptr, offset, num_cards);
  }

 protected:
  // Bounds checking accessors:
  // For performance these have to devolve to array accesses in product builds.
  u_char offset_array(size_t index) const {
    assert(index < _vs.committed_size(), "index out of range");
    return _offset_array[index];
  }
  // An assertion-checking helper method for the set_offset_array() methods below.
  void check_reducing_assertion(bool reducing);

  void set_offset_array(size_t index, u_char offset, bool reducing = false) {
    check_reducing_assertion(reducing);
    assert(index < _vs.committed_size(), "index out of range");
    assert(!reducing || _offset_array[index] >= offset, "Not reducing");
    _offset_array[index] = offset;
  }

  void set_offset_array(size_t index, HeapWord* high, HeapWord* low, bool reducing = false) {
    check_reducing_assertion(reducing);
    assert(index < _vs.committed_size(), "index out of range");
    assert(high >= low, "addresses out of order");
    assert(pointer_delta(high, low) <= BOTConstants::N_words, "offset too large");
    assert(!reducing || _offset_array[index] >= (u_char)pointer_delta(high, low),
           "Not reducing");
    _offset_array[index] = (u_char)pointer_delta(high, low);
  }

  void set_offset_array(HeapWord* left, HeapWord* right, u_char offset, bool reducing = false) {
    check_reducing_assertion(reducing);
    assert(index_for(right - 1) < _vs.committed_size(),
           "right address out of range");
    assert(left < right, "Heap addresses out of order");
    size_t num_cards = pointer_delta(right, left) >> BOTConstants::LogN_words;

    fill_range(index_for(left), num_cards, offset);
  }

  void set_offset_array(size_t left, size_t right, u_char offset, bool reducing = false) {
    check_reducing_assertion(reducing);
    assert(right < _vs.committed_size(), "right index out of range");
    assert(left <= right, "indexes out of order");
    size_t num_cards = right - left + 1;

    fill_range(left, num_cards, offset);
  }

  void check_offset_array(size_t index, HeapWord* high, HeapWord* low) const {
    assert(index < _vs.committed_size(), "index out of range");
    assert(high >= low, "addresses out of order");
    assert(pointer_delta(high, low) <= BOTConstants::N_words, "offset too large");
    assert(_offset_array[index] == pointer_delta(high, low),
           "Wrong offset");
  }

  bool is_card_boundary(HeapWord* p) const;

  // Return the number of slots needed for an offset array
  // that covers mem_region_words words.
  // We always add an extra slot because if an object
  // ends on a card boundary we put a 0 in the next
  // offset array slot, so we want that slot always
  // to be reserved.
  size_t compute_size(size_t mem_region_words) {
    size_t number_of_slots = (mem_region_words / BOTConstants::N_words) + 1;
    return ReservedSpace::allocation_align_size_up(number_of_slots);
  }
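
  // For example (assuming 64-bit HeapWords, so N_words == 64): covering
  // 1 MiB of heap means mem_region_words == 131072, giving
  // 131072 / 64 + 1 == 2049 slots before the allocation-alignment round-up.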

 public:
  // Initialize the table to cover from "base" to (at least)
  // "base + init_word_size". In the future, the table may be expanded
  // (see "resize" below) up to the size of "_reserved" (which must be at
  // least "init_word_size".) The contents of the initial table are
  // undefined; it is the responsibility of the constituent
  // BlockOffsetTable(s) to initialize cards.
  BlockOffsetSharedArray(MemRegion reserved, size_t init_word_size);
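
  // A construction sketch (the names here are hypothetical, purely for
  // illustration):
  //   MemRegion heap_mr(heap_start, heap_reserved_end);
  //   BlockOffsetSharedArray* bosa =
  //     new BlockOffsetSharedArray(heap_mr, initial_committed_word_size);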

  // Notes a change in the committed size of the region covered by the
  // table. The "new_word_size" may not be larger than the size of the
  // reserved region this table covers.
  void resize(size_t new_word_size);

  void set_bottom(HeapWord* new_bottom);

  // Whether entries should be initialized to zero. Used currently only for
  // error checking.
  void set_init_to_zero(bool val) { _init_to_zero = val; }
  bool init_to_zero() { return _init_to_zero; }

  // Updates all the BlockOffsetArrays sharing this shared array to
  // reflect the current "top" of their spaces.
  void update_offset_arrays();  // Not yet implemented!

  // Return the appropriate index into "_offset_array" for "p".
  size_t index_for(const void* p) const;

  // Return the address indicating the start of the region corresponding to
  // "index" in "_offset_array".
  HeapWord* address_for_index(size_t index) const;
};
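
// Assuming the usual card arithmetic (an assumption; the definitions live
// in the corresponding .cpp file), the two mappings above are inverses of
// each other up to card granularity, roughly:
//   index_for(p)         == pointer_delta(p, _reserved.start()) >> BOTConstants::LogN_words
//   address_for_index(i) == _reserved.start() + (i << BOTConstants::LogN_words)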

//////////////////////////////////////////////////////////////////////////
// The BlockOffsetArray whose subtypes use the BlockOffsetSharedArray.
//////////////////////////////////////////////////////////////////////////
class BlockOffsetArray: public BlockOffsetTable {
  friend class VMStructs;
 protected:
  // The following enum is used by do_block_internal() below.
  enum Action {
    Action_single,      // BOT records a single block (see single_block())
    Action_mark,        // BOT marks the start of a block (see mark_block())
    Action_check        // Check that BOT records block correctly
                        // (see verify_single_block()).
  };

  // The shared array, which is shared with other BlockOffsetArrays
  // corresponding to different spaces within a generation or span of
  // memory.
  BlockOffsetSharedArray* _array;

  // The space that owns this subregion.
  Space* _sp;

  // If true, array entries are initialized to 0; otherwise, they are
  // initialized to point backwards to the beginning of the covered region.
  bool _init_to_zero;

  // An assertion-checking helper method for the set_remainder*() methods below.
  void check_reducing_assertion(bool reducing) { _array->check_reducing_assertion(reducing); }

  // Sets the entries corresponding to the cards starting at "start" and
  // ending at "end" to point back to the card before "start": the
  // interval [start, end) is right-open. The last parameter, reducing,
  // indicates whether the updates to individual entries always reduce the
  // entry from a higher to a lower value. (For example, this holds during
  // phases in which only block splits update the BOT.)
  void set_remainder_to_point_to_start(HeapWord* start, HeapWord* end, bool reducing = false);

  // Same as above, except that the args here are a card _index_ interval
  // that is closed: [start_index, end_index]
  void set_remainder_to_point_to_start_incl(size_t start, size_t end, bool reducing = false);

  // A helper function for BOT adjustment/verification work.
  void do_block_internal(HeapWord* blk_start, HeapWord* blk_end, Action action, bool reducing = false);

 public:
  // The space may not have its bottom and top set yet, which is why the
  // region is passed as a parameter. If "init_to_zero" is true, the
  // elements of the array are initialized to zero. Otherwise, they are
  // initialized to point backwards to the beginning.
  BlockOffsetArray(BlockOffsetSharedArray* array, MemRegion mr,
                   bool init_to_zero_);

  // Note: this ought to be part of the constructor, but that would require
  // "this" to be passed as a parameter to a member constructor for
  // the containing concrete subtype of Space.
  // This would be legal C++, but MS VC++ doesn't allow it.
  void set_space(Space* sp) { _sp = sp; }

  // Resets the covered region to the given "mr".
  void set_region(MemRegion mr) {
    _bottom = mr.start();
    _end = mr.end();
  }

  // Note that the committed size of the covered space may have changed,
  // so the table size might also wish to change.
  virtual void resize(size_t new_word_size) {
    HeapWord* new_end = _bottom + new_word_size;
    if (_end < new_end && !init_to_zero()) {
      // Verify that the old and new boundaries are also card boundaries.
      assert(_array->is_card_boundary(_end),
             "_end not a card boundary");
      assert(_array->is_card_boundary(new_end),
             "new _end would not be a card boundary");
      // Set all the newly added cards.
      _array->set_offset_array(_end, new_end, BOTConstants::N_words);
    }
    _end = new_end;  // update _end
  }

  // Adjust the BOT to show that it has a single block in the
  // range [blk_start, blk_start + size). All necessary BOT
  // cards are adjusted, but _unallocated_block isn't.
  void single_block(HeapWord* blk_start, HeapWord* blk_end);
  void single_block(HeapWord* blk, size_t size) {
    single_block(blk, blk + size);
  }

  // When the alloc_block() call returns, the block offset table should
  // have enough information such that any subsequent block_start() call
  // with an argument equal to an address that is within the range
  // [blk_start, blk_end) would return the value blk_start, provided
  // there have been no calls in between that reset this information
  // (e.g. see BlockOffsetArrayNonContigSpace::single_block() call
  // for an appropriate range covering the said interval).
  // These methods expect to be called with [blk_start, blk_end)