
src/hotspot/os_cpu/linux_x86/atomic_linux_x86.hpp

rev 49845 : 8202080: Introduce ordering semantics for Atomic::add
Reviewed-by:
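
The hunks below thread a new cmpxchg_memory_order argument through the x86
fetch-and-add primitives. For readers outside HotSpot, here is a minimal
standalone sketch (GCC builtins, not HotSpot code) of the contract these
primitives implement: the platform routine returns the value *before* the
addition, and a FetchAndAdd-style CRTP wrapper is presumably what turns that
into add-and-fetch semantics for callers.

#include <cstdint>

// Standalone sketch of the assumed fetch-and-add contract: the primitive
// returns the old value, exactly like "lock xadd" does.
inline int32_t fetch_and_add32(int32_t add_value, int32_t volatile* dest) {
  return __atomic_fetch_add(dest, add_value, __ATOMIC_SEQ_CST);
}

// Add-and-fetch derived from it, the way a FetchAndAdd-style wrapper would.
inline int32_t add_and_fetch32(int32_t add_value, int32_t volatile* dest) {
  return fetch_and_add32(add_value, dest) + add_value;
}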

@@ -30,16 +30,17 @@
 template<size_t byte_size>
 struct Atomic::PlatformAdd
   : Atomic::FetchAndAdd<Atomic::PlatformAdd<byte_size> >
 {
   template<typename I, typename D>
-  D fetch_and_add(I add_value, D volatile* dest) const;
+  D fetch_and_add(I add_value, D volatile* dest, cmpxchg_memory_order order) const;
 };
 
 template<>
 template<typename I, typename D>
-inline D Atomic::PlatformAdd<4>::fetch_and_add(I add_value, D volatile* dest) const {
+inline D Atomic::PlatformAdd<4>::fetch_and_add(I add_value, D volatile* dest,
+                                               cmpxchg_memory_order order) const {
   STATIC_ASSERT(4 == sizeof(I));
   STATIC_ASSERT(4 == sizeof(D));
   D old_value;
   __asm__ volatile (  "lock xaddl %0,(%2)"
                     : "=r" (old_value)

@@ -90,11 +91,12 @@
 
 #ifdef AMD64
 
 template<>
 template<typename I, typename D>
-inline D Atomic::PlatformAdd<8>::fetch_and_add(I add_value, D volatile* dest) const {
+inline D Atomic::PlatformAdd<8>::fetch_and_add(I add_value, D volatile* dest,
+                                               cmpxchg_memory_order order) const {
   STATIC_ASSERT(8 == sizeof(I));
   STATIC_ASSERT(8 == sizeof(D));
   D old_value;
   __asm__ __volatile__ ("lock xaddq %0,(%2)"
                         : "=r" (old_value)

@@ -103,12 +105,11 @@
   return old_value;
 }
 
 template<>
 template<typename T>
-inline T Atomic::PlatformXchg<8>::operator()(T exchange_value,
-                                             T volatile* dest) const {
+inline T Atomic::PlatformXchg<8>::operator()(T exchange_value, T volatile* dest) const {
   STATIC_ASSERT(8 == sizeof(T));
   __asm__ __volatile__ ("xchgq (%2),%0"
                         : "=r" (exchange_value)
                         : "0" (exchange_value), "r" (dest)
                         : "memory");