
src/hotspot/share/runtime/atomic.hpp

rev 49845 : 8202080: Introduce ordering semantics for Atomic::add
Reviewed-by:

@@ -36,11 +36,14 @@
 #include "metaprogramming/removePointer.hpp"
 #include "utilities/align.hpp"
 #include "utilities/macros.hpp"
 
 enum cmpxchg_memory_order {
-  memory_order_relaxed,
+  memory_order_relaxed = 0,
+  memory_order_acquire = 2,
+  memory_order_release = 3,
+  memory_order_acq_rel = 4,
   // Use value which doesn't interfere with C++2011. We need to be more conservative.
   memory_order_conservative = 8
 };
 
 class Atomic : AllStatic {

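The new enumerator values appear chosen to line up with the C++11 std::memory_order constants as laid out by GCC/Clang (relaxed = 0, acquire = 2, release = 3, acq_rel = 4, with consume = 1 skipped), which would let a backend forward the order argument straight to the __atomic builtins; memory_order_conservative = 8 stays clear of that range. A minimal sketch of that assumption, not part of this patch:

    // Hypothetical sanity checks, assuming a GCC/Clang backend:
    STATIC_ASSERT(memory_order_relaxed == __ATOMIC_RELAXED);  // 0
    STATIC_ASSERT(memory_order_acquire == __ATOMIC_ACQUIRE);  // 2
    STATIC_ASSERT(memory_order_release == __ATOMIC_RELEASE);  // 3
    STATIC_ASSERT(memory_order_acq_rel == __ATOMIC_ACQ_REL);  // 4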
@@ -78,11 +81,12 @@
 
   // Atomically add to a location. Returns updated value. add*() provide:
   // <fence> add-value-to-dest <membar StoreLoad|StoreStore>
 
   template<typename I, typename D>
-  inline static D add(I add_value, D volatile* dest);
+  inline static D add(I add_value, D volatile* dest,
+                      cmpxchg_memory_order order = memory_order_acq_rel);
 
   template<typename I, typename D>
   inline static D sub(I sub_value, D volatile* dest);
 
   // Atomically increment location. inc() provide:

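The defaulted parameter keeps every existing call site source-compatible while letting hot paths request weaker ordering explicitly. A brief usage sketch (_counter is a hypothetical field):

    volatile int _counter;
    Atomic::add(1, &_counter);                        // default: memory_order_acq_rel
    Atomic::add(1, &_counter, memory_order_relaxed);  // atomicity only, no ordering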
@@ -486,17 +490,17 @@
 // be complete.
 
 template<typename Derived>
 struct Atomic::FetchAndAdd {
   template<typename I, typename D>
-  D operator()(I add_value, D volatile* dest) const;
+  D operator()(I add_value, D volatile* dest, cmpxchg_memory_order order) const;
 };
 
 template<typename Derived>
 struct Atomic::AddAndFetch {
   template<typename I, typename D>
-  D operator()(I add_value, D volatile* dest) const;
+  D operator()(I add_value, D volatile* dest, cmpxchg_memory_order order) const;
 };
 
 template<typename D>
 inline void Atomic::inc(D volatile* dest) {
   STATIC_ASSERT(IsPointer<D>::value || IsIntegral<D>::value);

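These two CRTP adapters let each platform supply only its primitive (an old-value-returning fetch_and_add or a new-value-returning add_and_fetch) while the shared wrapper handles pointer scaling and result adjustment. A sketch of the shape a 4-byte backend takes under this patch, modeled on the existing linux_x86 style; the asm body is illustrative, not part of the change:

    template<>
    struct Atomic::PlatformAdd<4>
      : Atomic::FetchAndAdd<Atomic::PlatformAdd<4> >
    {
      template<typename I, typename D>
      D fetch_and_add(I add_value, D volatile* dest,
                      cmpxchg_memory_order order) const {
        STATIC_ASSERT(4 == sizeof(I));
        STATIC_ASSERT(4 == sizeof(D));
        D old_value;
        // lock xadd is a full fence on x86, so no extra barriers
        // are needed for any of the requested orderings.
        __asm__ volatile ("lock; xaddl %0,(%2)"
                          : "=r" (old_value)
                          : "0" (add_value), "r" (dest)
                          : "cc", "memory");
        return old_value;
      }
    };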
@@ -587,41 +591,42 @@
 inline void Atomic::store(T store_value, volatile D* dest) {
   StoreImpl<T, D, PlatformStore<sizeof(D)> >()(store_value, dest);
 }
 
 template<typename I, typename D>
-inline D Atomic::add(I add_value, D volatile* dest) {
-  return AddImpl<I, D>()(add_value, dest);
+inline D Atomic::add(I add_value, D volatile* dest,
+                     cmpxchg_memory_order order) {
+  return AddImpl<I, D>()(add_value, dest, order);
 }
 
 template<typename I, typename D>
 struct Atomic::AddImpl<
   I, D,
   typename EnableIf<IsIntegral<I>::value &&
                     IsIntegral<D>::value &&
                     (sizeof(I) <= sizeof(D)) &&
                     (IsSigned<I>::value == IsSigned<D>::value)>::type>
 {
-  D operator()(I add_value, D volatile* dest) const {
+  D operator()(I add_value, D volatile* dest, cmpxchg_memory_order order) const {
     D addend = add_value;
-    return PlatformAdd<sizeof(D)>()(addend, dest);
+    return PlatformAdd<sizeof(D)>()(addend, dest, order);
   }
 };
 
 template<typename I, typename P>
 struct Atomic::AddImpl<
   I, P*,
   typename EnableIf<IsIntegral<I>::value && (sizeof(I) <= sizeof(P*))>::type>
 {
-  P* operator()(I add_value, P* volatile* dest) const {
+  P* operator()(I add_value, P* volatile* dest, cmpxchg_memory_order order) const {
     STATIC_ASSERT(sizeof(intptr_t) == sizeof(P*));
     STATIC_ASSERT(sizeof(uintptr_t) == sizeof(P*));
     typedef typename Conditional<IsSigned<I>::value,
                                  intptr_t,
                                  uintptr_t>::type CI;
     CI addend = add_value;
-    return PlatformAdd<sizeof(P*)>()(addend, dest);
+    return PlatformAdd<sizeof(P*)>()(addend, dest, order);
   }
 };
 
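The EnableIf constraints on the two AddImpl specializations make misuse fail at compile time rather than silently convert: the integral form requires matching signedness and an addend no wider than the destination. Two hypothetical call sites showing the boundary:

    volatile size_t _bytes;
    Atomic::add(size_t(8), &_bytes);   // OK: both unsigned, same width
    // Atomic::add(-1, &_bytes);       // rejected: signed I vs unsigned D,
    //                                 // no AddImpl specialization applies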
 // Most platforms do not support atomic add on a 2-byte value. However,
 // if the value occupies the most significant 16 bits of an aligned 32-bit

@@ -632,42 +637,44 @@
 // in case of overflow/underflow.
 //
 // Use the ATOMIC_SHORT_PAIR macro (see macros.hpp) to get the desired alignment.
 template<>
 struct Atomic::AddImpl<short, short> {
-  short operator()(short add_value, short volatile* dest) const {
+  short operator()(short add_value, short volatile* dest, cmpxchg_memory_order order) const {
 #ifdef VM_LITTLE_ENDIAN
     assert((intx(dest) & 0x03) == 0x02, "wrong alignment");
-    int new_value = Atomic::add(add_value << 16, (volatile int*)(dest-1));
+    int new_value = Atomic::add(add_value << 16, (volatile int*)(dest-1), order);
 #else
     assert((intx(dest) & 0x03) == 0x00, "wrong alignment");
-    int new_value = Atomic::add(add_value << 16, (volatile int*)(dest));
+    int new_value = Atomic::add(add_value << 16, (volatile int*)(dest), order);
 #endif
     return (short)(new_value >> 16); // preserves sign
   }
 };
 
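The reason overflow in the short half cannot corrupt its neighbor: add_value << 16 has all-zero low bits, and binary addition only carries upward, so the dummy half below is never touched; on overflow the carry simply falls off bit 31. A small plain-arithmetic illustration (not the atomic path):

    // Little-endian layout: dummy 0x1234 in the low half,
    // the short value 0x00FF in the high half.
    uint32_t packed = (0x00FFu << 16) | 0x1234u;
    packed += (1u << 16);               // add 1 to the short half
    // High half is now 0x0100; low half is still 0x1234, because the
    // shifted addend has zero low bits and carries propagate upward only.
    short s = (short)(packed >> 16);    // 0x0100, sign preserved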
 template<typename Derived>
 template<typename I, typename D>
-inline D Atomic::FetchAndAdd<Derived>::operator()(I add_value, D volatile* dest) const {
+inline D Atomic::FetchAndAdd<Derived>::operator()(I add_value, D volatile* dest,
+                                                  cmpxchg_memory_order order) const {
   I addend = add_value;
   // If D is a pointer type P*, scale by sizeof(P).
   if (IsPointer<D>::value) {
     addend *= sizeof(typename RemovePointer<D>::type);
   }
-  D old = static_cast<const Derived*>(this)->fetch_and_add(addend, dest);
+  D old = static_cast<const Derived*>(this)->fetch_and_add(addend, dest, order);
   return old + add_value;
 }
 
 template<typename Derived>
 template<typename I, typename D>
-inline D Atomic::AddAndFetch<Derived>::operator()(I add_value, D volatile* dest) const {
+inline D Atomic::AddAndFetch<Derived>::operator()(I add_value, D volatile* dest,
+                                                  cmpxchg_memory_order order) const {
   // If D is a pointer type P*, scale by sizeof(P).
   if (IsPointer<D>::value) {
     add_value *= sizeof(typename RemovePointer<D>::type);
   }
-  return static_cast<const Derived*>(this)->add_and_fetch(add_value, dest);
+  return static_cast<const Derived*>(this)->add_and_fetch(add_value, dest, order);
 }
 
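Both wrappers scale the addend by the pointee size when D is a pointer type, so callers pass element counts rather than byte offsets; in FetchAndAdd the final old + add_value is pointer arithmetic, which re-applies the element scaling to yield the updated pointer. A hedged example (hypothetical field):

    HeapWord* volatile _top;
    // Advances _top by 2 * sizeof(HeapWord) bytes, returns the new pointer:
    HeapWord* new_top = Atomic::add(2, &_top);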
 template<typename Type, typename Fn, typename I, typename D>
 inline D Atomic::add_using_helper(Fn fn, I add_value, D volatile* dest) {
   return PrimitiveConversions::cast<D>(