340 // storage in the heap comes from a young region or not.
341 // See ReduceInitialCardMarks.
// Permits compilers to elide post-store card-mark barriers for stores that
// initialize a freshly TLAB-allocated object; unconditionally true for this
// barrier set (the slow-path hook below compensates for slow-path allocs).
342 virtual bool can_elide_tlab_store_barriers() const {
343 return true;
344 }
345
346 // If a compiler is eliding store barriers for TLAB-allocated objects,
347 // we will be informed of a slow-path allocation by a call
348 // to on_slowpath_allocation_exit() below. Such a call precedes the
349 // initialization of the object itself, and no post-store-barriers will
350 // be issued. Some heap types require that the barrier strictly follows
351 // the initializing stores. (This is currently implemented by deferring the
352 // barrier until the next slow-path allocation or gc-related safepoint.)
353 // This interface answers whether a particular barrier type needs the card
354 // mark to be thus strictly sequenced after the stores.
355 virtual bool card_mark_must_follow_store() const = 0;
356
// NOTE(review): presumably answers whether obj resides in a young region
// (cf. the ReduceInitialCardMarks discussion) — confirm against the
// concrete heap implementations that override this.
357 virtual bool is_in_young(oop obj) const = 0;
358
// Hook called on slow-path allocation exit, before new_obj is initialized
// (see the comment block above for the barrier-elision protocol).
359 virtual void on_slowpath_allocation_exit(JavaThread* thread, oop new_obj);
// NOTE(review): presumably flushes any barrier work deferred for this
// thread (e.g. a deferred card mark) — confirm in the .cpp definition.
360 virtual void flush_deferred_barriers(JavaThread* thread);
361
// Makes thread-local heap state parsable by delegating to
// flush_deferred_card_mark_barrier() for the given thread.
362 virtual void make_parsable(JavaThread* thread) { flush_deferred_card_mark_barrier(thread); }
363
// Access-API hook: inherits ModRefBarrierSet's AccessBarrier unchanged —
// this barrier set adds no extra per-access behavior at this level.
364 template <DecoratorSet decorators, typename BarrierSetT = CardTableModRefBS>
365 class AccessBarrier: public ModRefBarrierSet::AccessBarrier<decorators, BarrierSetT> {};
366 };
367
// Trait specialization: maps the CardTableModRefBS C++ type to its
// BarrierSet::Name enum value (the type -> name direction).
368 template<>
369 struct BarrierSet::GetName<CardTableModRefBS> {
370 static const BarrierSet::Name value = BarrierSet::CardTableModRef;
371 };
372
// Trait specialization: maps the BarrierSet::CardTableModRef enum value
// back to its C++ type (the name -> type direction, inverse of GetName).
373 template<>
374 struct BarrierSet::GetType<BarrierSet::CardTableModRef> {
375 typedef CardTableModRefBS type;
376 };
377
378 #endif // SHARE_VM_GC_SHARED_CARDTABLEMODREFBS_HPP
|
340 // storage in the heap comes from a young region or not.
341 // See ReduceInitialCardMarks.
// Permits compilers to elide post-store card-mark barriers for stores that
// initialize a freshly TLAB-allocated object; unconditionally true for this
// barrier set (the slow-path hook below compensates for slow-path allocs).
342 virtual bool can_elide_tlab_store_barriers() const {
343 return true;
344 }
345
346 // If a compiler is eliding store barriers for TLAB-allocated objects,
347 // we will be informed of a slow-path allocation by a call
348 // to on_slowpath_allocation_exit() below. Such a call precedes the
349 // initialization of the object itself, and no post-store-barriers will
350 // be issued. Some heap types require that the barrier strictly follows
351 // the initializing stores. (This is currently implemented by deferring the
352 // barrier until the next slow-path allocation or gc-related safepoint.)
353 // This interface answers whether a particular barrier type needs the card
354 // mark to be thus strictly sequenced after the stores.
355 virtual bool card_mark_must_follow_store() const = 0;
356
// NOTE(review): presumably answers whether obj resides in a young region
// (cf. the ReduceInitialCardMarks discussion) — confirm against the
// concrete heap implementations that override this.
357 virtual bool is_in_young(oop obj) const = 0;
358
// Hook called on slow-path allocation exit, before new_obj is initialized
// (see the comment block above for the barrier-elision protocol).
359 virtual void on_slowpath_allocation_exit(JavaThread* thread, oop new_obj);
// NOTE(review): presumably a thread-teardown hook that flushes this
// thread's outstanding barrier state — confirm in the .cpp definition.
360 virtual void on_thread_destroy(JavaThread* thread);
361
// Makes thread-local heap state parsable by delegating to
// flush_deferred_card_mark_barrier() for the given thread.
362 virtual void make_parsable(JavaThread* thread) { flush_deferred_card_mark_barrier(thread); }
363
// Access-API hook: inherits ModRefBarrierSet's AccessBarrier unchanged —
// this barrier set adds no extra per-access behavior at this level.
364 template <DecoratorSet decorators, typename BarrierSetT = CardTableModRefBS>
365 class AccessBarrier: public ModRefBarrierSet::AccessBarrier<decorators, BarrierSetT> {};
366 };
367
// Trait specialization: maps the CardTableModRefBS C++ type to its
// BarrierSet::Name enum value (the type -> name direction).
368 template<>
369 struct BarrierSet::GetName<CardTableModRefBS> {
370 static const BarrierSet::Name value = BarrierSet::CardTableModRef;
371 };
372
// Trait specialization: maps the BarrierSet::CardTableModRef enum value
// back to its C++ type (the name -> type direction, inverse of GetName).
373 template<>
374 struct BarrierSet::GetType<BarrierSet::CardTableModRef> {
375 typedef CardTableModRefBS type;
376 };
377
378 #endif // SHARE_VM_GC_SHARED_CARDTABLEMODREFBS_HPP
|