
src/hotspot/os_cpu/linux_ppc/atomic_linux_ppc.hpp

rev 49845 : 8202080: Introduce ordering semantics for Atomic::add
Reviewed-by:

@@ -75,60 +75,87 @@
 #define strasm_acquire                    strasm_lwsync
 #define strasm_fence                      strasm_sync
 #define strasm_nobarrier                  ""
 #define strasm_nobarrier_clobber_memory   ""
 
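+// Barrier required before the atomic access for the requested ordering:
+// lwsync for release/acq_rel, a full sync for the conservative default,
+// and nothing for relaxed or acquire.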
+inline void pre_membar(cmpxchg_memory_order order) {
+  switch (order) {
+    case memory_order_relaxed:
+    case memory_order_acquire: break;
+    case memory_order_release:
+    case memory_order_acq_rel: __asm__ __volatile__ (strasm_lwsync); break;
+    default                  : __asm__ __volatile__ (strasm_sync); break;
+  }
+}
+
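+// Barrier required after the access: isync suffices for acquire/acq_rel
+// because it follows the bne- taken on stwcx./stdcx. failure (a control
+// dependency); the conservative default still issues a full sync.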
+inline void post_membar(cmpxchg_memory_order order) {
+  switch (order) {
+    case memory_order_relaxed:
+    case memory_order_release: break;
+    case memory_order_acquire:
+    case memory_order_acq_rel: __asm__ __volatile__ (strasm_isync); break;
+    default                  : __asm__ __volatile__ (strasm_sync); break;
+  }
+}
+
+
 template<size_t byte_size>
 struct Atomic::PlatformAdd
   : Atomic::AddAndFetch<Atomic::PlatformAdd<byte_size> >
 {
   template<typename I, typename D>
-  D add_and_fetch(I add_value, D volatile* dest) const;
+  D add_and_fetch(I add_value, D volatile* dest, cmpxchg_memory_order order) const;
 };
 
 template<>
 template<typename I, typename D>
-inline D Atomic::PlatformAdd<4>::add_and_fetch(I add_value, D volatile* dest) const {
+inline D Atomic::PlatformAdd<4>::add_and_fetch(I add_value, D volatile* dest,
+                                               cmpxchg_memory_order order) const {
   STATIC_ASSERT(4 == sizeof(I));
   STATIC_ASSERT(4 == sizeof(D));
 
   D result;
 
+  pre_membar(order);
+
   __asm__ __volatile__ (
-    strasm_lwsync
     "1: lwarx   %0,  0, %2    \n"
     "   add     %0, %0, %1    \n"
     "   stwcx.  %0,  0, %2    \n"
     "   bne-    1b            \n"
-    strasm_isync
     : /*%0*/"=&r" (result)
     : /*%1*/"r" (add_value), /*%2*/"r" (dest)
     : "cc", "memory" );
 
+  post_membar(order);
+
   return result;
 }
 
 
 template<>
 template<typename I, typename D>
-inline D Atomic::PlatformAdd<8>::add_and_fetch(I add_value, D volatile* dest) const {
+inline D Atomic::PlatformAdd<8>::add_and_fetch(I add_value, D volatile* dest,
+                                               cmpxchg_memory_order order) const {
   STATIC_ASSERT(8 == sizeof(I));
   STATIC_ASSERT(8 == sizeof(D));
 
   D result;
 
+  pre_membar(order);
+
   __asm__ __volatile__ (
-    strasm_lwsync
     "1: ldarx   %0,  0, %2    \n"
     "   add     %0, %0, %1    \n"
     "   stdcx.  %0,  0, %2    \n"
     "   bne-    1b            \n"
-    strasm_isync
     : /*%0*/"=&r" (result)
     : /*%1*/"r" (add_value), /*%2*/"r" (dest)
     : "cc", "memory" );
 
+  post_membar(order);
+
   return result;
 }
 
 template<>
 template<typename T>

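For comparison, the barrier placement above (lwsync before the lwarx/stwcx. loop, isync after it) is roughly what GCC generates for an acq_rel fetch-add on PPC64. A minimal sketch using the GCC builtin; the helper name add_and_fetch_acq_rel is invented for illustration and this is not the HotSpot code path:

    // Illustrative equivalent of add_and_fetch with memory_order_acq_rel.
    // GCC's builtin accepts the volatile-qualified pointer directly.
    static inline int add_and_fetch_acq_rel(volatile int* dest, int inc) {
      return __atomic_add_fetch(dest, inc, __ATOMIC_ACQ_REL);
    }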
@@ -205,28 +232,10 @@
     );
 
   return old_value;
 }
 
-inline void cmpxchg_pre_membar(cmpxchg_memory_order order) {
-  if (order != memory_order_relaxed) {
-    __asm__ __volatile__ (
-      /* fence */
-      strasm_sync
-      );
-  }
-}
-
-inline void cmpxchg_post_membar(cmpxchg_memory_order order) {
-  if (order != memory_order_relaxed) {
-    __asm__ __volatile__ (
-      /* fence */
-      strasm_sync
-      );
-  }
-}
-
 template<>
 template<typename T>
 inline T Atomic::PlatformCmpxchg<1>::operator()(T exchange_value,
                                                 T volatile* dest,
                                                 T compare_value,

@@ -249,11 +258,11 @@
                      masked_exchange_val = ((unsigned int)(unsigned char)exchange_value),
                      xor_value           = (masked_compare_val ^ masked_exchange_val) << shift_amount;
 
   unsigned int old_value, value32;
 
-  cmpxchg_pre_membar(order);
+  pre_membar(order);
 
   __asm__ __volatile__ (
     /* simple guard */
     "   lbz     %[old_value], 0(%[dest])                  \n"
     "   cmpw    %[masked_compare_val], %[old_value]       \n"

@@ -288,11 +297,11 @@
     /* clobber */
     : "cc",
       "memory"
     );
 
-  cmpxchg_post_membar(order);
+  post_membar(order);
 
   return PrimitiveConversions::cast<T>((unsigned char)old_value);
 }
 
 template<>

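The 1-byte variant above cannot use lwarx/stwcx. directly (those operate on 32-bit words), so it performs the CAS on the enclosing aligned word and splices the byte in with the precomputed xor_value. A rough portable sketch of the same masking idea, assuming a little-endian byte layout for brevity (the byte's shift within the word must account for endianness); cas_byte and its boolean signature are invented for illustration, whereas the function above returns the previously observed byte:

    #include <stdint.h>

    // Illustrative only: emulate a 1-byte CAS with a 4-byte CAS.
    inline bool cas_byte(volatile unsigned char* p, unsigned char cmp, unsigned char xchg) {
      volatile unsigned int* word = (volatile unsigned int*)((uintptr_t)p & ~(uintptr_t)3);
      const unsigned shift = ((uintptr_t)p & 3) * 8;   // byte's position within the word
      for (;;) {
        unsigned int old_word = *word;
        if (((old_word >> shift) & 0xff) != cmp)
          return false;                                // "simple guard", as in the asm above
        // Flipping exactly (cmp ^ xchg) at the byte's position replaces cmp with xchg.
        unsigned int new_word = old_word ^ (((unsigned int)(cmp ^ xchg)) << shift);
        if (__atomic_compare_exchange_n(word, &old_word, new_word,
                                        /*weak*/ false, __ATOMIC_RELAXED, __ATOMIC_RELAXED))
          return true;
      }
    }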
@@ -308,11 +317,11 @@
   // specified otherwise (see atomic.hpp).
 
   T old_value;
   const uint64_t zero = 0;
 
-  cmpxchg_pre_membar(order);
+  pre_membar(order);
 
   __asm__ __volatile__ (
     /* simple guard */
     "   lwz     %[old_value], 0(%[dest])                \n"
     "   cmpw    %[compare_value], %[old_value]          \n"

@@ -338,11 +347,11 @@
     /* clobber */
     : "cc",
       "memory"
     );
 
-  cmpxchg_post_membar(order);
+  post_membar(order);
 
   return old_value;
 }
 
 template<>

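With the shared helpers, each cmpxchg width now pays only for the ordering the caller requests. A hedged usage sketch; the field _state and the surrounding values are hypothetical:

    // A relaxed CAS runs just the load/compare/store-conditional sequence,
    // with no sync or isync around it; callers that omit the order keep
    // the conservative full fences.
    jint observed = Atomic::cmpxchg(new_val, &_state, expected, memory_order_relaxed);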
@@ -358,11 +367,11 @@
   // specified otherwise (see atomic.hpp).
 
   T old_value;
   const uint64_t zero = 0;
 
-  cmpxchg_pre_membar(order);
+  pre_membar(order);
 
   __asm__ __volatile__ (
     /* simple guard */
     "   ld      %[old_value], 0(%[dest])                \n"
     "   cmpd    %[compare_value], %[old_value]          \n"

@@ -388,11 +397,11 @@
     /* clobber */
     : "cc",
       "memory"
     );
 
-  cmpxchg_post_membar(order);
+  post_membar(order);
 
   return old_value;
 }
 
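Note the default: arm in both helpers: any order not listed, in particular the conservative default, still brackets the operation with a full sync on each side, so existing callers keep their previous two-way fence semantics. A hypothetical caller, assuming the new order parameter defaults to the conservative order in the shared atomic.hpp as cmpxchg's does:

    // Equivalent to: sync; lwarx/add/stwcx. loop; sync.
    Atomic::add(1, &_counter);   // _counter is hypothetical; default (conservative) order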
 #undef strasm_sync