// Because "_offsets" must be kept up to date with the allocations, we
// serialize them with a lock. Hence, this is best used only for larger
// LAB allocations.
inline HeapWord* G1OffsetTableContigSpace::par_allocate(size_t size) {
  MutexLocker x(&_par_alloc_lock);
  return allocate(size);
}

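// A minimal caller sketch (hypothetical, not part of this file): callers
// amortize the lock above by requesting a whole LAB at once, e.g.:
//
//   HeapWord* buf = space->par_allocate(lab_word_size);
//   if (buf == NULL) {
//     // Space is full; the caller must retire this space and pick another.
//   }
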
inline HeapWord* G1OffsetTableContigSpace::block_start(const void* p) {
  return _offsets.block_start(p);
}

inline HeapWord*
G1OffsetTableContigSpace::block_start_const(const void* p) const {
  return _offsets.block_start_const(p);
}

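// A block is an object unless the previous marking has found it dead. That
// liveness filter is needed only when class unloading during concurrent
// mark is enabled; otherwise every block below top() is a valid object.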
inline bool
HeapRegion::block_is_obj(const HeapWord* p) const {
  G1CollectedHeap* g1h = G1CollectedHeap::heap();
  if (ClassUnloading && ClassUnloadingWithConcurrentMark) {
    return !g1h->is_obj_dead(oop(p), this);
  }
  return p < top();
}

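// Returns the size of the block of heap words starting at "addr": the
// distance from top() to end() for the unallocated tail, the object size
// for a parseable object, or the distance to the next live object when a
// dead object's class may already have been unloaded.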
inline size_t
HeapRegion::block_size(const HeapWord *addr) const {
  if (addr == top()) {
    return pointer_delta(end(), addr);
  }

  if (block_is_obj(addr)) {
    return oop(addr)->size();
  }

  assert(ClassUnloading && ClassUnloadingWithConcurrentMark,
         err_msg("All blocks should be objects if G1 Class Unloading isn't used. "
                 "HR: [" PTR_FORMAT ", " PTR_FORMAT ", " PTR_FORMAT ") "
                 "addr: " PTR_FORMAT,
                 p2i(bottom()), p2i(top()), p2i(end()), p2i(addr)));

  // A dead object in an old region may have a dead class: its size cannot
  // be read through the oop. Find the next live object by scanning the
  // previous marking bitmap instead.
  G1CollectedHeap* g1h = G1CollectedHeap::heap();
  HeapWord* next = g1h->concurrent_mark()->prevMarkBitMap()->
    getNextMarkedWordAddress(addr, prev_top_at_mark_start());

  assert(next > addr, "must get the next live object");
  return pointer_delta(next, addr);
}

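// Sketch (hypothetical, not in this file): the two functions above are
// what a walk over a region's used part would use, e.g.:
//
//   HeapWord* cur = hr->bottom();
//   while (cur < hr->top()) {
//     if (hr->block_is_obj(cur)) {
//       // process oop(cur)
//     }
//     cur += hr->block_size(cur);
//   }

// Young regions are always evacuated in their entirety and are never
// walked via the block offset table, so allocations into them can skip
// the BOT updates that par_allocate must perform.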
inline HeapWord* HeapRegion::par_allocate_no_bot_updates(size_t word_size) {
  assert(is_young(), "we can only skip BOT updates on young regions");
  return par_allocate_impl(word_size, end());
}