  if (p != NULL && good_addr != 0) {
    // The slow path returns a good/marked address, but we never mark oops
    // in a weak load barrier, so we always self-heal with the remapped address.
    const uintptr_t weak_good_addr = ZAddress::remapped(good_addr);
    const uintptr_t prev_addr = Atomic::cmpxchg(weak_good_addr, (volatile uintptr_t*)p, addr);
    if (prev_addr != addr) {
      // Some other thread overwrote the oop. The new
      // oop is guaranteed to be weak good or null.
      assert(ZAddress::is_weak_good_or_null(prev_addr), "Bad weak overwrite");

      // Return the good address instead of the weak good address
      // to ensure that the currently active heap view is used.
      good_addr = ZAddress::good_or_null(prev_addr);
    }
  }

  return ZOop::to_oop(good_addr);
}

template <ZBarrierFastPath fast_path, ZBarrierSlowPath slow_path>
inline void ZBarrier::root_barrier(oop* p, oop o) {
  const uintptr_t addr = ZOop::to_address(o);

  // Fast path
  if (fast_path(addr)) {
    return;
  }

  // Slow path
  const uintptr_t good_addr = slow_path(addr);

  // Non-atomic healing helps speed up root scanning. This is safe to do
  // since we are always healing roots in a safepoint, which means we are
  // never racing with mutators modifying roots while we are healing them.
  // It's also safe in case multiple GC threads try to heal the same root,
  // since they would always heal the root in the same way and it does not
  // matter in which order it happens.
  *p = ZOop::to_oop(good_addr);
}
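
// Note the contrast with the weak barrier above: heap oop fields that
// mutators may access concurrently are healed atomically with
// Atomic::cmpxchg(), while roots are healed with a plain store, relying
// on the enclosing safepoint for mutual exclusion.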

inline bool ZBarrier::is_null_fast_path(uintptr_t addr) {
  return ZAddress::is_null(addr);
}

//
// Mark barrier
//
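
// Marks through the oop field at p. When finalizable is true, the object
// is marked as finalizably reachable (kept alive only for finalization)
// rather than strongly reachable.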
inline void ZBarrier::mark_barrier_on_oop_field(volatile oop* p, bool finalizable) {
  // The fast path only checks for null since the GC worker
  // threads doing marking want to mark through good oops.
  const oop o = *p;

  if (finalizable) {
    barrier<is_null_fast_path, mark_barrier_on_finalizable_oop_slow_path>(p, o);
  } else {
    barrier<is_null_fast_path, mark_barrier_on_oop_slow_path>(p, o);
  }
}

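// Applies the mark barrier to each element of the given oop array slice,
// e.g. when marking follows the elements of an object array.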
inline void ZBarrier::mark_barrier_on_oop_array(volatile oop* p, size_t length, bool finalizable) {
  for (volatile const oop* const end = p + length; p < end; p++) {
    mark_barrier_on_oop_field(p, finalizable);
  }
}

inline void ZBarrier::mark_barrier_on_root_oop_field(oop* p) {
  const oop o = *p;
  root_barrier<is_good_or_null_fast_path, mark_barrier_on_root_oop_slow_path>(p, o);
}

//
// Relocate barrier
//
inline void ZBarrier::relocate_barrier_on_root_oop_field(oop* p) {
  const oop o = *p;
  root_barrier<is_good_or_null_fast_path, relocate_barrier_on_root_oop_slow_path>(p, o);
}
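
// Illustrative sketch (hypothetical, assumed names): the root barriers above
// are typically applied from an OopClosure while the world is stopped, along
// the lines of:
//
//   class ZExampleRootsClosure : public OopClosure {
//   public:
//     virtual void do_oop(oop* p) {
//       // Heal the root slot in place with the good address.
//       ZBarrier::relocate_barrier_on_root_oop_field(p);
//     }
//     virtual void do_oop(narrowOop* p) {
//       // ZGC does not use compressed oops.
//       ShouldNotReachHere();
//     }
//   };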

#endif // SHARE_GC_Z_ZBARRIER_INLINE_HPP