 * questions.
 *
 */

#ifndef SHARE_VM_RUNTIME_ATOMIC_HPP
#define SHARE_VM_RUNTIME_ATOMIC_HPP

#include "memory/allocation.hpp"
#include "metaprogramming/conditional.hpp"
#include "metaprogramming/enableIf.hpp"
#include "metaprogramming/isIntegral.hpp"
#include "metaprogramming/isPointer.hpp"
#include "metaprogramming/isSame.hpp"
#include "metaprogramming/primitiveConversions.hpp"
#include "metaprogramming/removeCV.hpp"
#include "metaprogramming/removePointer.hpp"
#include "utilities/align.hpp"
#include "utilities/macros.hpp"

enum cmpxchg_memory_order {
  memory_order_relaxed,
  // Use value which doesn't interfere with C++2011. We need to be more conservative.
  memory_order_conservative = 8
};

class Atomic : AllStatic {
 public:
  // Atomic operations on int64 types are not available on all 32-bit
  // platforms. If atomic ops on int64 are defined here they must only
  // be used from code that verifies they are available at runtime and
  // can provide an alternative action if not - see supports_cx8() for
  // a means to test availability.

  // The memory operations that are mentioned with each of the atomic
  // function families come from src/share/vm/runtime/orderAccess.hpp,
  // e.g., <fence> is described in that file and is implemented by the
  // OrderAccess::fence() function. See that file for the gory details
  // on the Memory Access Ordering Model.

  // All of the atomic operations that imply a read-modify-write action
  // guarantee a two-way memory barrier across that operation. Historically
  // these semantics reflect the strength of atomic operations that are
  // provided on SPARC/X86. We assume that strength is necessary unless
  // we can prove that a weaker form is sufficiently safe.

  // Atomically store to a location
  // The type T must be either a pointer type convertible to or equal
  // to D, an integral/enum type equal to D, or a type equal to D that
  // is primitive convertible using PrimitiveConversions.
  template<typename T, typename D>
  inline static void store(T store_value, volatile D* dest);

  // Atomically load from a location
  // The type T must be either a pointer type, an integral/enum type,
  // or a type that is primitive convertible using PrimitiveConversions.
  template<typename T>
  inline static T load(const volatile T* dest);

  // Atomically add to a location. Returns updated value. add*() provide:
  // <fence> add-value-to-dest <membar StoreLoad|StoreStore>

  template<typename I, typename D>
  inline static D add(I add_value, D volatile* dest);

  template<typename I, typename D>
  inline static D sub(I sub_value, D volatile* dest);

  // Atomically increment a location. inc() provides:
  // <fence> increment-dest <membar StoreLoad|StoreStore>
  // The type D may be either a pointer type, or an integral
  // type. If it is a pointer type, then the increment is
  // scaled to the size of the type pointed to by the pointer.
  template<typename D>
  inline static void inc(D volatile* dest);

  // Atomically decrement a location. dec() provides:
  // <fence> decrement-dest <membar StoreLoad|StoreStore>
  // The type D may be either a pointer type, or an integral
  // type. If it is a pointer type, then the decrement is
  // scaled to the size of the type pointed to by the pointer.
  template<typename D>
  inline static void dec(D volatile* dest);

// storing types that are pointer sized or smaller. If a platform still
// supports wide atomics, then it has to use specialization
// of Atomic::PlatformStore for that wider size class.
template<size_t byte_size>
struct Atomic::PlatformStore {
  template<typename T>
  void operator()(T new_value,
                  T volatile* dest) const {
    STATIC_ASSERT(sizeof(T) <= sizeof(void*)); // wide atomics need specialization
    (void)const_cast<T&>(*dest = new_value);
  }
};

// Define FetchAndAdd and AddAndFetch helper classes before including
// platform file, which may use these as base classes, requiring they
// be complete.

template<typename Derived>
struct Atomic::FetchAndAdd {
  template<typename I, typename D>
  D operator()(I add_value, D volatile* dest) const;
};

template<typename Derived>
struct Atomic::AddAndFetch {
  template<typename I, typename D>
  D operator()(I add_value, D volatile* dest) const;
};

template<typename D>
inline void Atomic::inc(D volatile* dest) {
  STATIC_ASSERT(IsPointer<D>::value || IsIntegral<D>::value);
  typedef typename Conditional<IsPointer<D>::value, ptrdiff_t, D>::type I;
  Atomic::add(I(1), dest);
}

template<typename D>
inline void Atomic::dec(D volatile* dest) {
  STATIC_ASSERT(IsPointer<D>::value || IsIntegral<D>::value);
  typedef typename Conditional<IsPointer<D>::value, ptrdiff_t, D>::type I;
  // Assumes two's complement integer representation.
  #pragma warning(suppress: 4146)
  Atomic::add(I(-1), dest);
}

template<typename I, typename D>
inline D Atomic::sub(I sub_value, D volatile* dest) {
#include OS_CPU_HEADER(atomic)

// shared in-line definitions

// size_t casts...
#if (SIZE_MAX != UINTPTR_MAX)
#error size_t is not WORD_SIZE, interesting platform, but missing implementation here
#endif

template<typename T>
inline T Atomic::load(const volatile T* dest) {
  return LoadImpl<T, PlatformLoad<sizeof(T)> >()(dest);
}

template<typename T, typename D>
inline void Atomic::store(T store_value, volatile D* dest) {
  StoreImpl<T, D, PlatformStore<sizeof(D)> >()(store_value, dest);
}

template<typename I, typename D>
inline D Atomic::add(I add_value, D volatile* dest) {
  return AddImpl<I, D>()(add_value, dest);
}

template<typename I, typename D>
struct Atomic::AddImpl<
  I, D,
  typename EnableIf<IsIntegral<I>::value &&
                    IsIntegral<D>::value &&
                    (sizeof(I) <= sizeof(D)) &&
                    (IsSigned<I>::value == IsSigned<D>::value)>::type>
{
  D operator()(I add_value, D volatile* dest) const {
    D addend = add_value;
    return PlatformAdd<sizeof(D)>()(addend, dest);
  }
};

template<typename I, typename P>
struct Atomic::AddImpl<
  I, P*,
  typename EnableIf<IsIntegral<I>::value && (sizeof(I) <= sizeof(P*))>::type>
{
  P* operator()(I add_value, P* volatile* dest) const {
    STATIC_ASSERT(sizeof(intptr_t) == sizeof(P*));
    STATIC_ASSERT(sizeof(uintptr_t) == sizeof(P*));
    typedef typename Conditional<IsSigned<I>::value,
                                 intptr_t,
                                 uintptr_t>::type CI;
    CI addend = add_value;
    return PlatformAdd<sizeof(P*)>()(addend, dest);
  }
};

// Most platforms do not support atomic add on a 2-byte value. However,
// if the value occupies the most significant 16 bits of an aligned 32-bit
// word, then we can do this with an atomic add of (add_value << 16)
// to the 32-bit word.
//
// The least significant parts of this 32-bit word will never be affected, even
// in case of overflow/underflow.
//
// Use the ATOMIC_SHORT_PAIR macro (see macros.hpp) to get the desired alignment.
template<>
struct Atomic::AddImpl<short, short> {
  short operator()(short add_value, short volatile* dest) const {
#ifdef VM_LITTLE_ENDIAN
    assert((intx(dest) & 0x03) == 0x02, "wrong alignment");
    int new_value = Atomic::add(add_value << 16, (volatile int*)(dest-1));
#else
    assert((intx(dest) & 0x03) == 0x00, "wrong alignment");
    int new_value = Atomic::add(add_value << 16, (volatile int*)(dest));
#endif
    return (short)(new_value >> 16); // preserves sign
  }
};

template<typename Derived>
template<typename I, typename D>
inline D Atomic::FetchAndAdd<Derived>::operator()(I add_value, D volatile* dest) const {
  I addend = add_value;
  // If D is a pointer type P*, scale by sizeof(P).
  if (IsPointer<D>::value) {
    addend *= sizeof(typename RemovePointer<D>::type);
  }
  D old = static_cast<const Derived*>(this)->fetch_and_add(addend, dest);
  return old + add_value;
}

template<typename Derived>
template<typename I, typename D>
inline D Atomic::AddAndFetch<Derived>::operator()(I add_value, D volatile* dest) const {
  // If D is a pointer type P*, scale by sizeof(P).
  if (IsPointer<D>::value) {
    add_value *= sizeof(typename RemovePointer<D>::type);
  }
  return static_cast<const Derived*>(this)->add_and_fetch(add_value, dest);
}

template<typename Type, typename Fn, typename I, typename D>
inline D Atomic::add_using_helper(Fn fn, I add_value, D volatile* dest) {
  return PrimitiveConversions::cast<D>(
    fn(PrimitiveConversions::cast<Type>(add_value),
       reinterpret_cast<Type volatile*>(dest)));
}

template<typename T, typename D, typename U>
inline D Atomic::cmpxchg(T exchange_value,
                         D volatile* dest,
                         U compare_value,
                         cmpxchg_memory_order order) {
  return CmpxchgImpl<T, D, U>()(exchange_value, dest, compare_value, order);
}

template<typename T, typename D>
inline bool Atomic::replace_if_null(T* value, D* volatile* dest,
                                    cmpxchg_memory_order order) {
 * questions.
 *
 */

#ifndef SHARE_VM_RUNTIME_ATOMIC_HPP
#define SHARE_VM_RUNTIME_ATOMIC_HPP

#include "memory/allocation.hpp"
#include "metaprogramming/conditional.hpp"
#include "metaprogramming/enableIf.hpp"
#include "metaprogramming/isIntegral.hpp"
#include "metaprogramming/isPointer.hpp"
#include "metaprogramming/isSame.hpp"
#include "metaprogramming/primitiveConversions.hpp"
#include "metaprogramming/removeCV.hpp"
#include "metaprogramming/removePointer.hpp"
#include "utilities/align.hpp"
#include "utilities/macros.hpp"

enum cmpxchg_memory_order {
  memory_order_relaxed = 0,
  memory_order_acquire = 2,
  memory_order_release = 3,
  memory_order_acq_rel = 4,
  // Use value which doesn't interfere with C++2011. We need to be more conservative.
  memory_order_conservative = 8
};

class Atomic : AllStatic {
 public:
  // Atomic operations on int64 types are not available on all 32-bit
  // platforms. If atomic ops on int64 are defined here they must only
  // be used from code that verifies they are available at runtime and
  // can provide an alternative action if not - see supports_cx8() for
  // a means to test availability.
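
  // Illustrative sketch only (assumes VM_Version::supports_cx8() and
  // MutexLocker from the surrounding runtime; the field and lock names
  // are hypothetical): callers guard 64-bit atomics on 32-bit platforms
  // along these lines.
  //
  //   if (VM_Version::supports_cx8()) {
  //     Atomic::add((int64_t)1, &_num_requests);   // lock-free path
  //   } else {
  //     MutexLocker ml(RequestCounter_lock);       // hypothetical fallback lock
  //     _num_requests += 1;
  //   }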

  // The memory operations that are mentioned with each of the atomic
  // function families come from src/share/vm/runtime/orderAccess.hpp,
  // e.g., <fence> is described in that file and is implemented by the
  // OrderAccess::fence() function. See that file for the gory details
  // on the Memory Access Ordering Model.

  // All of the atomic operations that imply a read-modify-write action
  // guarantee a two-way memory barrier across that operation. Historically
  // these semantics reflect the strength of atomic operations that are
  // provided on SPARC/X86. We assume that strength is necessary unless
  // we can prove that a weaker form is sufficiently safe.

  // Atomically store to a location
  // The type T must be either a pointer type convertible to or equal
  // to D, an integral/enum type equal to D, or a type equal to D that
  // is primitive convertible using PrimitiveConversions.
  template<typename T, typename D>
  inline static void store(T store_value, volatile D* dest);

  // Atomically load from a location
  // The type T must be either a pointer type, an integral/enum type,
  // or a type that is primitive convertible using PrimitiveConversions.
  template<typename T>
  inline static T load(const volatile T* dest);
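
  // Illustrative sketch (the field name is hypothetical; jint stands in
  // for any integral type meeting the requirements above):
  //
  //   volatile jint _state;                  // shared field
  //   jint s = Atomic::load(&_state);        // atomic read
  //   Atomic::store((jint)1, &_state);       // atomic write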

  // Atomically add to a location. Returns updated value. add*() provide:
  // <fence> add-value-to-dest <membar StoreLoad|StoreStore>

  template<typename I, typename D>
  inline static D add(I add_value, D volatile* dest,
                      cmpxchg_memory_order order = memory_order_acq_rel);

  template<typename I, typename D>
  inline static D sub(I sub_value, D volatile* dest);

  // Atomically increment a location. inc() provides:
  // <fence> increment-dest <membar StoreLoad|StoreStore>
  // The type D may be either a pointer type, or an integral
  // type. If it is a pointer type, then the increment is
  // scaled to the size of the type pointed to by the pointer.
  template<typename D>
  inline static void inc(D volatile* dest);

  // Atomically decrement a location. dec() provides:
  // <fence> decrement-dest <membar StoreLoad|StoreStore>
  // The type D may be either a pointer type, or an integral
  // type. If it is a pointer type, then the decrement is
  // scaled to the size of the type pointed to by the pointer.
  template<typename D>
  inline static void dec(D volatile* dest);
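
  // Illustrative sketch (counter and pointer names are hypothetical):
  // add() returns the updated value, and inc()/dec() scale by the
  // pointee size when D is a pointer type.
  //
  //   volatile intptr_t _hits;
  //   intptr_t n = Atomic::add((intptr_t)2, &_hits);  // n is the new value
  //
  //   HeapWord* volatile _top;
  //   Atomic::inc(&_top);                    // advances _top by sizeof(HeapWord)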

// storing types that are pointer sized or smaller. If a platform still
// supports wide atomics, then it has to use specialization
// of Atomic::PlatformStore for that wider size class.
template<size_t byte_size>
struct Atomic::PlatformStore {
  template<typename T>
  void operator()(T new_value,
                  T volatile* dest) const {
    STATIC_ASSERT(sizeof(T) <= sizeof(void*)); // wide atomics need specialization
    (void)const_cast<T&>(*dest = new_value);
  }
};
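
// Illustrative sketch of such a specialization (the body is a placeholder;
// a real platform supplies its own atomic 64-bit store sequence in its
// os_cpu atomic header):
//
//   template<>
//   struct Atomic::PlatformStore<8> {
//     template<typename T>
//     void operator()(T new_value, T volatile* dest) const {
//       STATIC_ASSERT(8 == sizeof(T));
//       // platform-specific atomic 64-bit store of new_value to *dest
//     }
//   };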

// Define FetchAndAdd and AddAndFetch helper classes before including
// platform file, which may use these as base classes, requiring they
// be complete.

template<typename Derived>
struct Atomic::FetchAndAdd {
  template<typename I, typename D>
  D operator()(I add_value, D volatile* dest, cmpxchg_memory_order order) const;
};

template<typename Derived>
struct Atomic::AddAndFetch {
  template<typename I, typename D>
  D operator()(I add_value, D volatile* dest, cmpxchg_memory_order order) const;
};
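
// Illustrative sketch of how a platform header is expected to use these
// (assuming an add primitive that returns the old value; platform details
// elided):
//
//   template<size_t byte_size>
//   struct Atomic::PlatformAdd
//     : Atomic::FetchAndAdd<Atomic::PlatformAdd<byte_size> >
//   {
//     template<typename I, typename D>
//     D fetch_and_add(I add_value, D volatile* dest, cmpxchg_memory_order order) const;
//   };
//
// A platform whose primitive returns the new value derives from AddAndFetch
// and supplies add_and_fetch() instead.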

template<typename D>
inline void Atomic::inc(D volatile* dest) {
  STATIC_ASSERT(IsPointer<D>::value || IsIntegral<D>::value);
  typedef typename Conditional<IsPointer<D>::value, ptrdiff_t, D>::type I;
  Atomic::add(I(1), dest);
}

template<typename D>
inline void Atomic::dec(D volatile* dest) {
  STATIC_ASSERT(IsPointer<D>::value || IsIntegral<D>::value);
  typedef typename Conditional<IsPointer<D>::value, ptrdiff_t, D>::type I;
  // Assumes two's complement integer representation.
  #pragma warning(suppress: 4146)
  Atomic::add(I(-1), dest);
}

template<typename I, typename D>
inline D Atomic::sub(I sub_value, D volatile* dest) {
#include OS_CPU_HEADER(atomic)

// shared in-line definitions

// size_t casts...
#if (SIZE_MAX != UINTPTR_MAX)
#error size_t is not WORD_SIZE, interesting platform, but missing implementation here
#endif

template<typename T>
inline T Atomic::load(const volatile T* dest) {
  return LoadImpl<T, PlatformLoad<sizeof(T)> >()(dest);
}

template<typename T, typename D>
inline void Atomic::store(T store_value, volatile D* dest) {
  StoreImpl<T, D, PlatformStore<sizeof(D)> >()(store_value, dest);
}

template<typename I, typename D>
inline D Atomic::add(I add_value, D volatile* dest,
                     cmpxchg_memory_order order) {
  return AddImpl<I, D>()(add_value, dest, order);
}

template<typename I, typename D>
struct Atomic::AddImpl<
  I, D,
  typename EnableIf<IsIntegral<I>::value &&
                    IsIntegral<D>::value &&
                    (sizeof(I) <= sizeof(D)) &&
                    (IsSigned<I>::value == IsSigned<D>::value)>::type>
{
  D operator()(I add_value, D volatile* dest, cmpxchg_memory_order order) const {
    D addend = add_value;
    return PlatformAdd<sizeof(D)>()(addend, dest, order);
  }
};

template<typename I, typename P>
struct Atomic::AddImpl<
  I, P*,
  typename EnableIf<IsIntegral<I>::value && (sizeof(I) <= sizeof(P*))>::type>
{
  P* operator()(I add_value, P* volatile* dest, cmpxchg_memory_order order) const {
    STATIC_ASSERT(sizeof(intptr_t) == sizeof(P*));
    STATIC_ASSERT(sizeof(uintptr_t) == sizeof(P*));
    typedef typename Conditional<IsSigned<I>::value,
                                 intptr_t,
                                 uintptr_t>::type CI;
    CI addend = add_value;
    return PlatformAdd<sizeof(P*)>()(addend, dest, order);
  }
};

// Most platforms do not support atomic add on a 2-byte value. However,
// if the value occupies the most significant 16 bits of an aligned 32-bit
// word, then we can do this with an atomic add of (add_value << 16)
// to the 32-bit word.
//
// The least significant parts of this 32-bit word will never be affected, even
// in case of overflow/underflow.
//
// Use the ATOMIC_SHORT_PAIR macro (see macros.hpp) to get the desired alignment.
template<>
struct Atomic::AddImpl<short, short> {
  short operator()(short add_value, short volatile* dest, cmpxchg_memory_order order) const {
#ifdef VM_LITTLE_ENDIAN
    assert((intx(dest) & 0x03) == 0x02, "wrong alignment");
    int new_value = Atomic::add(add_value << 16, (volatile int*)(dest-1), order);
#else
    assert((intx(dest) & 0x03) == 0x00, "wrong alignment");
    int new_value = Atomic::add(add_value << 16, (volatile int*)(dest), order);
#endif
    return (short)(new_value >> 16); // preserves sign
  }
};
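
// Illustrative sketch of the pairing (field names are hypothetical): the
// macro places the atomically-updated short in the half of the 32-bit word
// required by the endianness checks above.
//
//   ATOMIC_SHORT_PAIR(
//     volatile short _refcount,   // updated via Atomic::add
//     unsigned short _length      // never updated concurrently
//   );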

template<typename Derived>
template<typename I, typename D>
inline D Atomic::FetchAndAdd<Derived>::operator()(I add_value, D volatile* dest,
                                                  cmpxchg_memory_order order) const {
  I addend = add_value;
  // If D is a pointer type P*, scale by sizeof(P).
  if (IsPointer<D>::value) {
    addend *= sizeof(typename RemovePointer<D>::type);
  }
  D old = static_cast<const Derived*>(this)->fetch_and_add(addend, dest, order);
  return old + add_value;
}

template<typename Derived>
template<typename I, typename D>
inline D Atomic::AddAndFetch<Derived>::operator()(I add_value, D volatile* dest,
                                                  cmpxchg_memory_order order) const {
  // If D is a pointer type P*, scale by sizeof(P).
  if (IsPointer<D>::value) {
    add_value *= sizeof(typename RemovePointer<D>::type);
  }
  return static_cast<const Derived*>(this)->add_and_fetch(add_value, dest, order);
}

template<typename Type, typename Fn, typename I, typename D>
inline D Atomic::add_using_helper(Fn fn, I add_value, D volatile* dest) {
  return PrimitiveConversions::cast<D>(
    fn(PrimitiveConversions::cast<Type>(add_value),
       reinterpret_cast<Type volatile*>(dest)));
}

template<typename T, typename D, typename U>
inline D Atomic::cmpxchg(T exchange_value,
                         D volatile* dest,
                         U compare_value,
                         cmpxchg_memory_order order) {
  return CmpxchgImpl<T, D, U>()(exchange_value, dest, compare_value, order);
}
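
// Illustrative sketch of a retry loop built on cmpxchg(), which returns the
// value observed at dest (the field name is hypothetical):
//
//   jint old = Atomic::load(&_flags);
//   for (;;) {
//     jint assumed = old;
//     old = Atomic::cmpxchg(assumed | 1, &_flags, assumed, memory_order_conservative);
//     if (old == assumed) break;   // the update took effect
//   }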

template<typename T, typename D>
inline bool Atomic::replace_if_null(T* value, D* volatile* dest,
                                    cmpxchg_memory_order order) {