rev 13281 : imported patch Atomic_refactoring
rev 13283 : imported patch Atomic_polishing_v2
rev 13284 : [mq]: Atomic_aliasing_1

/*
 * Copyright (c) 1999, 2017, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_RUNTIME_ATOMIC_HPP
#define SHARE_VM_RUNTIME_ATOMIC_HPP

#include "memory/allocation.hpp"
#include "metaprogramming/conditional.hpp"
#include "metaprogramming/enableIf.hpp"
#include "metaprogramming/integerTypes.hpp"
#include "metaprogramming/isIntegral.hpp"
#include "metaprogramming/isPointer.hpp"
#include "utilities/align.hpp"
#include "utilities/debug.hpp"
#include "utilities/macros.hpp"

enum cmpxchg_memory_order {
  memory_order_relaxed,
  // Use a value that does not collide with the C++11 std::memory_order
  // enumerators; the default ordering is more conservative than any of them.
  memory_order_conservative = 8
};

class Atomic : AllStatic {
  template<typename T> class Never: public FalseType {};
  typedef char* CanonicalPointer;

  // The type is CanonicalPointer for pointers, otherwise canonical integer
  template<typename T>
  struct CanonicalType : AllStatic {
    typedef typename Conditional<IsPointer<T>::value, CanonicalPointer, typename IntegerTypes::Signed<T>::type>::type type;
  };

  template<typename T>
  static typename EnableIf<IsPointer<T>::value, CanonicalPointer>::type
  cast_to_canonical(T value) { return reinterpret_cast<CanonicalPointer>(value); }

  template<typename T>
  static typename EnableIf<!IsPointer<T>::value, typename IntegerTypes::Signed<T>::type>::type
  cast_to_canonical(T value) { return IntegerTypes::cast_to_signed(value); }

  template<typename T, typename U>
  static typename EnableIf<IsPointer<U>::value, T>::type cast_from_canonical(U value) {
    return reinterpret_cast<T>(value);
  }

  template<typename T, typename U>
  static typename EnableIf<!IsPointer<U>::value, T>::type cast_from_canonical(U value) {
    return IntegerTypes::cast<T>(value);
  }

  template <typename T>
  inline static void specialized_store(T store_value, volatile T* dest) {
    STATIC_ASSERT(sizeof(T) <= size_t(BytesPerWord)); // Does the machine support atomic wide accesses?
    (void)const_cast<T&>(*dest = store_value);
  }

  template <typename T>
  inline static T specialized_load(const volatile T* dest) {
    STATIC_ASSERT(sizeof(T) <= size_t(BytesPerWord)); // Does the machine support atomic wide accesses?
    return *dest;
  }

  template <typename T>
  inline static T specialized_add(T add_value, volatile T* dest) {
    STATIC_ASSERT(Never<T>::value);
    return add_value;
  }

  template <typename T>
  inline static void specialized_inc(volatile T* dest) {
    add(1, dest);
  }

  template <typename T>
  inline static void specialized_dec(volatile T* dest) {
    add(-1, dest);
  }

  // If the platform does not offer a specialization for pointers,
  // try using the canonical pointer integer instead
  template <typename T>
  inline static typename EnableIf<IsPointer<T>::value, T>::type specialized_xchg(T exchange_value, volatile T* dest) {
    typedef typename IntegerTypes::Signed<T>::type Raw;
    Raw result = specialized_xchg(IntegerTypes::cast_to_signed(exchange_value), reinterpret_cast<volatile Raw*>(dest));
    return IntegerTypes::cast<T>(result);
  }

  template <typename T>
  inline static typename EnableIf<!IsPointer<T>::value, T>::type specialized_xchg(T exchange_value, volatile T* dest) {
    STATIC_ASSERT(Never<T>::value);
    return exchange_value;
  }

  template <typename T>
  inline static T specialized_cmpxchg(T exchange_value, volatile T* dest, T compare_value, cmpxchg_memory_order order) {
    STATIC_ASSERT(Never<T>::value);
    return exchange_value;
  }

 public:
  // Atomic operations on 64-bit types are not available on all 32-bit
  // platforms. If atomic ops on 64-bit types are defined here they must only
  // be used from code that verifies they are available at runtime and
  // can provide an alternative action if not - see supports_cx8() for
  // a means to test availability.
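  //
  // A typical guarded use looks roughly like the following sketch (the names
  // _state64, new_value and old_value are hypothetical; VM_Version::supports_cx8()
  // is the runtime query for 64-bit cmpxchg support):
  //
  //   volatile jlong _state64;
  //   ...
  //   if (VM_Version::supports_cx8()) {
  //     Atomic::cmpxchg(new_value, &_state64, old_value);
  //   } else {
  //     // fall back, e.g. update under a lock
  //   }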

  // The memory operations that are mentioned with each of the atomic
  // function families come from src/share/vm/runtime/orderAccess.hpp,
  // e.g., <fence> is described in that file and is implemented by the
  // OrderAccess::fence() function. See that file for the gory details
  // on the Memory Access Ordering Model.

  // All of the atomic operations that imply a read-modify-write action
  // guarantee a two-way memory barrier across that operation. Historically
  // these semantics reflect the strength of atomic operations that are
  // provided on SPARC/X86. We assume that strength is necessary unless
  // we can prove that a weaker form is sufficiently safe.

  // Atomically store to a location
  // See comment above about using 64-bit atomics on 32-bit platforms
  template <typename T, typename U>
  inline static void store(T store_value, volatile U* dest);

  // The store_ptr() member functions are deprecated. Use store() instead.
  static void store_ptr(intptr_t store_value, volatile intptr_t* dest) {
    store(store_value, dest);
  }

  static void store_ptr(void*    store_value, volatile void*     dest) {
    store((intptr_t)store_value, (volatile intptr_t*)dest);
  }

  // Atomically load from a location
  // See comment above about using 64-bit atomics on 32-bit platforms
  template <typename T>
  inline static T load(volatile T* src);
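  //
  // Illustrative sketch (the field name _flag is hypothetical). Plain
  // store()/load() guarantee atomicity of the access only; any required
  // ordering must come from OrderAccess (see orderAccess.hpp):
  //
  //   volatile intptr_t _flag;
  //   ...
  //   Atomic::store((intptr_t)1, &_flag);
  //   intptr_t v = Atomic::load(&_flag);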

  // Atomically add to a location. Returns updated value. add*() provide:
  // <fence> add-value-to-dest <membar StoreLoad|StoreStore>
  // add(I1 v, I* d)
  // add(I1 v, P* d)
  // where I, I1 are integral types, P is a pointer type.
  // Functional behavior is modelled on *dest += add_value.
  template <typename T, typename U>
  inline static U add(T add_value, volatile U* dst);

  template <typename T, typename U>
  inline static U* add(T add_value, U* volatile* dst);
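  //
  // Illustrative sketch (the names _counter, _top and count are hypothetical):
  //
  //   volatile jint _counter;
  //   jint updated = Atomic::add(1, &_counter);        // returns the new value
  //
  //   HeapWord* volatile _top;
  //   HeapWord* new_top = Atomic::add(count, &_top);   // pointer overload scales
  //                                                    // count by sizeof(HeapWord)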

  // The add_ptr() member functions are deprecated. Use add() instead.
  static intptr_t add_ptr(intptr_t add_value, volatile intptr_t* dest) {
    return add(add_value, dest);
  }

  static void*    add_ptr(intptr_t add_value, volatile void*     dest) {
    return (void*)add(add_value, (volatile intptr_t*)dest);
  }

  // Atomically increment a location. inc*() provide:
  // <fence> increment-dest <membar StoreLoad|StoreStore>
  // Functional behavior is modelled on ++(*dest).
  template <typename T>
  inline static void inc(volatile T* dest);

  template <typename T>
  inline static void inc(T* volatile* dest);

  // The inc_ptr member functions are deprecated. Use inc() instead.
  static void inc_ptr(volatile intptr_t* dest) {
    inc(dest);
  }

  static void inc_ptr(volatile void*     dest) {
    inc((volatile intptr_t*)dest);
  }

  // Atomically decrement a location. dec*() provide:
  // <fence> decrement-dest <membar StoreLoad|StoreStore>
  // Functional behavior is modelled on --(*dest).
  template <typename T>
  inline static void dec(volatile T* dest);

  template <typename T>
  inline static void dec(T* volatile* dest);

  // The dec_ptr member functions are deprecated. Use dec() instead.
  static void dec_ptr(volatile intptr_t* dest) {
    dec(dest);
  }

  static void dec_ptr(volatile void*     dest) {
    dec((volatile intptr_t*)dest);
  }

  // Performs atomic exchange of *dest with exchange_value. Returns the
  // prior value of *dest. xchg*() provide:
  // <fence> exchange-value-with-dest <membar StoreLoad|StoreStore>
  template <typename T, typename U>
  inline static U xchg(T exchange_value, volatile U* dest);
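  //
  // Illustrative sketch (Node, _head and new_head are hypothetical):
  //
  //   Node* volatile _head;
  //   Node* old_head = Atomic::xchg(new_head, &_head);   // returns the previous pointer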

  // The xchg_ptr() member functions are deprecated. Use xchg() instead.
  static intptr_t xchg_ptr(intptr_t exchange_value, volatile intptr_t* dest) {
    return xchg(exchange_value, dest);
  }

  static void*    xchg_ptr(void*    exchange_value, volatile void*     dest) {
    return (void*)xchg((intptr_t)exchange_value, (volatile intptr_t*)dest);
  }

  // Performs atomic compare of *dest and compare_value, and exchanges
  // *dest with exchange_value if the comparison succeeds. Returns the prior
  // value of *dest. cmpxchg*() provide:
  // <fence> compare-and-exchange <membar StoreLoad|StoreStore>
  // See comment above about using 64-bit atomics on 32-bit platforms
  template <typename T, typename U, typename V>
  inline static U cmpxchg(T exchange_value, volatile U* dest, V compare_value, cmpxchg_memory_order order = memory_order_conservative);
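  //
  // Illustrative compare-and-swap retry loop (the names _bits and mask are
  // hypothetical); pass memory_order_relaxed only when no ordering is required:
  //
  //   volatile intptr_t _bits;
  //   ...
  //   intptr_t old_bits = Atomic::load(&_bits);
  //   while (true) {
  //     intptr_t new_bits = old_bits | mask;
  //     intptr_t prev = Atomic::cmpxchg(new_bits, &_bits, old_bits);
  //     if (prev == old_bits) break;   // success
  //     old_bits = prev;               // lost the race; retry with the updated value
  //   }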

  // The cmpxchg_ptr member functions are deprecated. Use cmpxchg() instead.
  inline static intptr_t cmpxchg_ptr(intptr_t exchange_value, volatile intptr_t*  dest,
                                     intptr_t compare_value, cmpxchg_memory_order order = memory_order_conservative) {
    return cmpxchg(exchange_value, dest, compare_value, order);
  }

  inline static void*    cmpxchg_ptr(void*    exchange_value, volatile void*      dest,
                                     void*    compare_value, cmpxchg_memory_order order = memory_order_conservative) {
    return (void*)cmpxchg((intptr_t)exchange_value, (volatile intptr_t*)dest, (intptr_t)compare_value, order);
  }
};

// internal implementation

template <typename T, typename U>
inline void Atomic::store(T store_value, volatile U* dest) {
  typedef typename IntegerTypes::Signed<U>::type Raw;
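  // Convert to U first so that -Wconversion or the like can complain about
  // unsafe conversions (same rationale as in add() below).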
  U store_value_cast = store_value;
  specialized_store(IntegerTypes::cast_to_signed(store_value_cast), reinterpret_cast<volatile Raw*>(dest));
}

template <typename T>
inline T Atomic::load(volatile T* src) {
  typedef typename IntegerTypes::Signed<T>::type Raw;
  return IntegerTypes::cast<T>(specialized_load(reinterpret_cast<const volatile Raw*>(src)));
}

template <typename T, typename U>
inline U Atomic::add(T add_value, volatile U* dst) {
  STATIC_ASSERT(IsIntegral<T>::value);
  STATIC_ASSERT(IsIntegral<U>::value);
  typedef typename IntegerTypes::Signed<U>::type Raw;
  // Allow -Wconversion or the like to complain about unsafe conversions.
  U value = add_value;
  Raw raw_value = IntegerTypes::cast_to_signed(value);
  Raw result = specialized_add(raw_value, reinterpret_cast<volatile Raw*>(dst));
  return IntegerTypes::cast<U>(result);
}

template <typename T, typename U>
inline U* Atomic::add(T add_value, U* volatile* dst) {
  STATIC_ASSERT(IsIntegral<T>::value);
  typedef typename IntegerTypes::Signed<U*>::type Raw;
  ptrdiff_t value = add_value;
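  // Scale by the pointee size so the raw addition below behaves like pointer
  // arithmetic on U*, i.e. *dst advances by add_value elements, not bytes.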
  Raw raw_value = IntegerTypes::cast_to_signed(value * sizeof(U));
  Raw result = specialized_add(raw_value, reinterpret_cast<volatile Raw*>(dst));
  return IntegerTypes::cast<U*>(result);
}

template <typename T>
inline void Atomic::inc(volatile T* dest) {
  STATIC_ASSERT(IsIntegral<T>::value);
  typedef typename IntegerTypes::Signed<T>::type Raw;
  specialized_inc(reinterpret_cast<volatile Raw*>(dest));
}

template <typename T>
inline void Atomic::inc(T* volatile* dest) {
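  // For a pointee of size 1, advancing the pointer by one element equals
  // incrementing its integer representation by one, so the (potentially
  // cheaper) specialized_inc can be used directly; otherwise go through
  // add(), which scales by sizeof(T). The same reasoning applies to dec().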
  if (sizeof(T) != 1) {
    add(1, dest);
  } else {
    typedef typename IntegerTypes::Signed<T*>::type Raw;
    specialized_inc(reinterpret_cast<volatile Raw*>(dest));
  }
}

template <typename T>
inline void Atomic::dec(volatile T* dest) {
  STATIC_ASSERT(IsIntegral<T>::value);
  typedef typename IntegerTypes::Signed<T>::type Raw;
  specialized_dec(reinterpret_cast<volatile Raw*>(dest));
}

template <typename T>
inline void Atomic::dec(T* volatile* dest) {
  if (sizeof(T) != 1) {
    add(-1, dest);
  } else {
    typedef typename IntegerTypes::Signed<T*>::type Raw;
    specialized_dec(reinterpret_cast<volatile Raw*>(dest));
  }
}

template <typename T, typename U>
inline U Atomic::xchg(T exchange_value, volatile U* dest) {
  typedef typename CanonicalType<U>::type Raw;
  U exchange_value_cast = exchange_value;
  Raw result = specialized_xchg(cast_to_canonical(exchange_value_cast),
                                reinterpret_cast<volatile Raw*>(dest));
  return cast_from_canonical<U>(result);
}

template <typename T, typename U, typename V>
inline U Atomic::cmpxchg(T exchange_value, volatile U* dest, V compare_value, cmpxchg_memory_order order) {
  typedef typename IntegerTypes::Signed<U>::type Raw;
  U exchange_value_cast = exchange_value;
  U compare_value_cast = compare_value;
  Raw result = specialized_cmpxchg(IntegerTypes::cast_to_signed(exchange_value_cast),
                                   reinterpret_cast<volatile Raw*>(dest),
                                   IntegerTypes::cast_to_signed(compare_value_cast), order);
  return IntegerTypes::cast<U>(result);
}

// platform specific in-line definitions - must come before shared definitions

#include OS_CPU_HEADER(atomic)

// shared in-line definitions

#ifndef VM_HAS_SPECIALIZED_CMPXCHG_BYTE
/*
 * This is the default implementation of byte-sized cmpxchg. It emulates 8-bit cmpxchg
 * in terms of 32-bit cmpxchg. Platforms may override this by defining their own inline definition
 * as well as defining VM_HAS_SPECIALIZED_CMPXCHG_BYTE. This will cause the platform specific
 * implementation to be used instead.
 */
template <>
inline int8_t Atomic::specialized_cmpxchg<int8_t>(int8_t exchange_value, volatile int8_t* dest,
                                                  int8_t compare_value, cmpxchg_memory_order order) {
  volatile int32_t* dest_int =
      reinterpret_cast<volatile int32_t*>(align_down(dest, sizeof(int32_t)));
  size_t offset = pointer_delta(dest, dest_int, 1);
  int32_t cur = *dest_int;
  int8_t* cur_as_bytes = reinterpret_cast<int8_t*>(&cur);
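  // offset is the byte position of *dest within its aligned 32-bit word.
  // Because cur is a plain copy of that word, indexing its bytes by the same
  // memory offset addresses the same byte on both little- and big-endian
  // machines.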

  // current value may not be what we are looking for, so force it
  // to that value so the initial cmpxchg will fail if it is different
  cur_as_bytes[offset] = compare_value;

  // always execute a real cmpxchg so that we get the required memory
  // barriers even on initial failure
  do {
    // value to swap in matches current value ...
    int32_t new_value = cur;
    // ... except for the one byte we want to update
    reinterpret_cast<int8_t*>(&new_value)[offset] = exchange_value;

    int32_t res = cmpxchg(new_value, dest_int, cur, order);
    if (res == cur) break; // success

    // at least one byte in the int changed value, so update
    // our view of the current int
    cur = res;
    // if our byte still holds the expected value, loop and try again
  } while (cur_as_bytes[offset] == compare_value);

  return cur_as_bytes[offset];
}

#endif // VM_HAS_SPECIALIZED_CMPXCHG_BYTE

template <>
inline int16_t Atomic::specialized_add<int16_t>(int16_t add_value, volatile int16_t* dest) {
  // Most platforms do not support atomic add on a 2-byte value. However,
  // if the value occupies the most significant 16 bits of an aligned 32-bit
  // word, then we can do this with an atomic add of (add_value << 16)
  // to the 32-bit word.
  //
  // The least significant 16 bits of this 32-bit word will never be affected,
  // even in case of overflow/underflow.
  //
  // Use the ATOMIC_SHORT_PAIR macro (see macros.hpp) to get the desired alignment.
#ifdef VM_LITTLE_ENDIAN
  assert((intx(dest) & 0x03) == 0x02, "wrong alignment");
  int32_t new_value = Atomic::add(int32_t(add_value) << 16, (volatile int32_t*)(dest-1));
#else
  assert((intx(dest) & 0x03) == 0x00, "wrong alignment");
  int32_t new_value = Atomic::add(int32_t(add_value) << 16, (volatile int32_t*)(dest));
#endif
  return (int16_t)(new_value >> 16); // preserves sign
}
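
// For reference, ATOMIC_SHORT_PAIR declares the atomically updated short
// together with its non-atomic partner so that the former ends up in the
// most significant half of the aligned 32-bit word on either endianness.
// A sketch (field names are illustrative, modelled on uses such as Symbol):
//
//   ATOMIC_SHORT_PAIR(
//     volatile short _refcount,  // updated via Atomic::add/inc/dec
//     unsigned short _length     // plain field, never updated atomically
//   );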

template <>
inline void Atomic::specialized_inc<int16_t>(volatile int16_t* dest) {
  (void)add(int16_t(1), dest);
}

template <>
inline void Atomic::specialized_dec<int16_t>(volatile int16_t* dest) {
  (void)add(int16_t(-1), dest);
}

#endif // SHARE_VM_RUNTIME_ATOMIC_HPP