 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_GC_IMPLEMENTATION_G1_G1BLOCKOFFSETTABLE_INLINE_HPP
#define SHARE_VM_GC_IMPLEMENTATION_G1_G1BLOCKOFFSETTABLE_INLINE_HPP

#include "gc_implementation/g1/g1BlockOffsetTable.hpp"
#include "memory/space.hpp"

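// Return the start of the block containing "addr", or NULL if "addr" lies
// outside the covered range [_bottom, _end). The _const variant below is
// identical apart from constness.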
inline HeapWord* G1BlockOffsetTable::block_start(const void* addr) {
  if (addr >= _bottom && addr < _end) {
    return block_start_unsafe(addr);
  } else {
    return NULL;
  }
}

inline HeapWord*
G1BlockOffsetTable::block_start_const(const void* addr) const {
  if (addr >= _bottom && addr < _end) {
    return block_start_unsafe_const(addr);
  } else {
    return NULL;
  }
}

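// Map an address in the reserved region to the index of the offset-table
// entry that covers it (one entry per 2^LogN bytes).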
inline size_t G1BlockOffsetSharedArray::index_for(const void* p) const {
  char* pc = (char*)p;
  assert(pc >= (char*)_reserved.start() &&
         pc <  (char*)_reserved.end(),
         err_msg("p (" PTR_FORMAT ") not in reserved [" PTR_FORMAT ", " PTR_FORMAT ")",
                 p2i(p), p2i(_reserved.start()), p2i(_reserved.end())));
  size_t delta = pointer_delta(pc, _reserved.start(), sizeof(char));
  size_t result = delta >> LogN;
  check_index(result, "bad index from address");
  return result;
}

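// Inverse of index_for(): return the heap address of the first word covered
// by the entry at "index".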
inline HeapWord*
G1BlockOffsetSharedArray::address_for_index(size_t index) const {
  check_index(index, "index out of range");
  HeapWord* result = _reserved.start() + (index << LogN_words);
  assert(result >= _reserved.start() && result < _reserved.end(),
         err_msg("bad address from index result " PTR_FORMAT
                 " _reserved.start() " PTR_FORMAT " _reserved.end() "
                 PTR_FORMAT,
                 p2i(result), p2i(_reserved.start()), p2i(_reserved.end())));
  return result;
}

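// Return the start of the block that begins at or before "addr". Entries of
// N_words or more encode a backward skip over whole cards; smaller entries
// give the word offset from the card boundary back to the block start.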
inline HeapWord*
G1BlockOffsetArray::block_at_or_preceding(const void* addr,
                                          bool has_max_index,
                                          size_t max_index) const {
  assert(_array->offset_array(0) == 0, "objects can't cross covered areas");
  size_t index = _array->index_for(addr);
  // We must make sure that the offset table entry we use is valid. If
  // "addr" is past the end, start at the last known one and go forward.
  if (has_max_index) {
    index = MIN2(index, max_index);
  }
  HeapWord* q = _array->address_for_index(index);

  uint offset = _array->offset_array(index);  // Extend u_char to uint.
  while (offset >= N_words) {
    // The excess of the offset from N_words indicates a power of Base
    // to go back by.
    size_t n_cards_back = BlockOffsetArray::entry_to_cards_back(offset);
    q -= (N_words * n_cards_back);
    assert(q >= _sp->bottom(), "Went below bottom!");
    index -= n_cards_back;
    offset = _array->offset_array(index);
  }
  assert(offset < N_words, "offset too large");
  q -= offset;
  return q;
}

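// Walk forward from block "q" (whose successor is "n") until the block
// containing "addr" is found. If the covering space is contiguous
// (csp() != NULL) object sizes are read directly; otherwise the space's
// block_size() is used. Returns early if an object header has not been
// installed yet (klass_or_null() == NULL).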
inline HeapWord*
G1BlockOffsetArray::
forward_to_block_containing_addr_const(HeapWord* q, HeapWord* n,
                                       const void* addr) const {
  if (csp() != NULL) {
    if (addr >= csp()->top()) return csp()->top();
    while (n <= addr) {
      q = n;
      oop obj = oop(q);
      if (obj->klass_or_null() == NULL) return q;
      n += obj->size();
    }
  } else {
    while (n <= addr) {
      q = n;
      oop obj = oop(q);
      if (obj->klass_or_null() == NULL) return q;
      n += _sp->block_size(q);
    }
  }
  assert(q <= n, "wrong order for q and addr");
  assert(addr < n, "wrong order for addr and n");
  return q;
}

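// Fast-path forward walk: if the block starting at "q" already covers "addr"
// we are done; otherwise defer to forward_to_block_containing_addr_slow().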
inline HeapWord*
G1BlockOffsetArray::forward_to_block_containing_addr(HeapWord* q,
                                                     const void* addr) {
  if (oop(q)->klass_or_null() == NULL) return q;
  HeapWord* n = q + _sp->block_size(q);
  // In the normal case, where the query "addr" is a card boundary, and the
  // offset table chunks are the same size as cards, the block starting at
  // "q" will contain addr, so the test below will fail, and we'll fall
  // through quickly.
  if (n <= addr) {
    q = forward_to_block_containing_addr_slow(q, n, addr);
  }
  assert(q <= addr, "wrong order for current and arg");
  return q;
}

//////////////////////////////////////////////////////////////////////////
// G1BlockOffsetArray inlines
//////////////////////////////////////////////////////////////////////////
inline void G1BlockOffsetArray::freed(HeapWord* blk_start, HeapWord* blk_end) {
  // Verify that the BOT shows [blk_start, blk_end) to be one block.
  verify_single_block(blk_start, blk_end);
  // Adjust _unallocated_block upward or downward as appropriate.
  if (BlockOffsetArrayUseUnallocatedBlock) {
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_GC_IMPLEMENTATION_G1_G1BLOCKOFFSETTABLE_INLINE_HPP
#define SHARE_VM_GC_IMPLEMENTATION_G1_G1BLOCKOFFSETTABLE_INLINE_HPP

#include "gc_implementation/g1/g1BlockOffsetTable.hpp"
#include "gc_implementation/g1/heapRegion.hpp"
#include "memory/space.hpp"

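// Return the start of the block containing "addr", or NULL if "addr" lies
// outside the covered range [_bottom, _end). The _const variant below is
// identical apart from constness.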
inline HeapWord* G1BlockOffsetTable::block_start(const void* addr) {
  if (addr >= _bottom && addr < _end) {
    return block_start_unsafe(addr);
  } else {
    return NULL;
  }
}

inline HeapWord*
G1BlockOffsetTable::block_start_const(const void* addr) const {
  if (addr >= _bottom && addr < _end) {
    return block_start_unsafe_const(addr);
  } else {
    return NULL;
  }
}

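// Map an address in the reserved region to the index of the offset-table
// entry that covers it (one entry per 2^LogN bytes).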
inline size_t G1BlockOffsetSharedArray::index_for(const void* p) const {
  char* pc = (char*)p;
  assert(pc >= (char*)_reserved.start() &&
         pc <  (char*)_reserved.end(),
         err_msg("p (" PTR_FORMAT ") not in reserved [" PTR_FORMAT ", " PTR_FORMAT ")",
                 p2i(p), p2i(_reserved.start()), p2i(_reserved.end())));
  size_t delta = pointer_delta(pc, _reserved.start(), sizeof(char));
  size_t result = delta >> LogN;
  check_index(result, "bad index from address");
  return result;
}

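// Inverse of index_for(): return the heap address of the first word covered
// by the entry at "index".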
inline HeapWord*
G1BlockOffsetSharedArray::address_for_index(size_t index) const {
  check_index(index, "index out of range");
  HeapWord* result = _reserved.start() + (index << LogN_words);
  assert(result >= _reserved.start() && result < _reserved.end(),
         err_msg("bad address from index result " PTR_FORMAT
                 " _reserved.start() " PTR_FORMAT " _reserved.end() "
                 PTR_FORMAT,
                 p2i(result), p2i(_reserved.start()), p2i(_reserved.end())));
  return result;
}

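// Delegate block-size queries to the covering G1 space, gsp().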
inline size_t
G1BlockOffsetArray::block_size(const HeapWord* p) const {
  return gsp()->block_size(p);
}

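// Return the start of the block that begins at or before "addr". Entries of
// N_words or more encode a backward skip over whole cards; smaller entries
// give the word offset from the card boundary back to the block start.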
inline HeapWord*
G1BlockOffsetArray::block_at_or_preceding(const void* addr,
                                          bool has_max_index,
                                          size_t max_index) const {
  assert(_array->offset_array(0) == 0, "objects can't cross covered areas");
  size_t index = _array->index_for(addr);
  // We must make sure that the offset table entry we use is valid. If
  // "addr" is past the end, start at the last known one and go forward.
  if (has_max_index) {
    index = MIN2(index, max_index);
  }
  HeapWord* q = _array->address_for_index(index);

  uint offset = _array->offset_array(index);  // Extend u_char to uint.
  while (offset >= N_words) {
    // The excess of the offset from N_words indicates a power of Base
    // to go back by.
    size_t n_cards_back = BlockOffsetArray::entry_to_cards_back(offset);
    q -= (N_words * n_cards_back);
    assert(q >= gsp()->bottom(), "Went below bottom!");
    index -= n_cards_back;
    offset = _array->offset_array(index);
  }
  assert(offset < N_words, "offset too large");
  q -= offset;
  return q;
}

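// Walk forward from block "q" (whose successor is "n") until the block
// containing "addr" is found, capping the walk at gsp()->top(). Returns
// early if an object header has not been installed yet
// (klass_or_null() == NULL).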
inline HeapWord*
G1BlockOffsetArray::
forward_to_block_containing_addr_const(HeapWord* q, HeapWord* n,
                                       const void* addr) const {
  if (addr >= gsp()->top()) return gsp()->top();
  while (n <= addr) {
    q = n;
    oop obj = oop(q);
    if (obj->klass_or_null() == NULL) return q;
    n += block_size(q);
  }
  assert(q <= n, "wrong order for q and addr");
  assert(addr < n, "wrong order for addr and n");
  return q;
}

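// Fast-path forward walk: if the block starting at "q" already covers "addr"
// we are done; otherwise defer to forward_to_block_containing_addr_slow().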
inline HeapWord*
G1BlockOffsetArray::forward_to_block_containing_addr(HeapWord* q,
                                                     const void* addr) {
  if (oop(q)->klass_or_null() == NULL) return q;
  HeapWord* n = q + block_size(q);
  // In the normal case, where the query "addr" is a card boundary, and the
  // offset table chunks are the same size as cards, the block starting at
  // "q" will contain addr, so the test below will fail, and we'll fall
  // through quickly.
  if (n <= addr) {
    q = forward_to_block_containing_addr_slow(q, n, addr);
  }
  assert(q <= addr, "wrong order for current and arg");
  return q;
}

//////////////////////////////////////////////////////////////////////////
// G1BlockOffsetArray inlines
//////////////////////////////////////////////////////////////////////////
inline void G1BlockOffsetArray::freed(HeapWord* blk_start, HeapWord* blk_end) {
  // Verify that the BOT shows [blk_start, blk_end) to be one block.
  verify_single_block(blk_start, blk_end);
  // Adjust _unallocated_block upward or downward as appropriate.
  if (BlockOffsetArrayUseUnallocatedBlock) {