1 /* 2 * Copyright (c) 2017, 2018, Oracle and/or its affiliates. All rights reserved. 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 4 * 5 * This code is free software; you can redistribute it and/or modify it 6 * under the terms of the GNU General Public License version 2 only, as 7 * published by the Free Software Foundation. 8 * 9 * This code is distributed in the hope that it will be useful, but WITHOUT 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License 12 * version 2 for more details (a copy is included in the LICENSE file that 13 * accompanied this code). 14 * 15 * You should have received a copy of the GNU General Public License version 16 * 2 along with this work; if not, write to the Free Software Foundation, 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 18 * 19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA 20 * or visit www.oracle.com if you need additional information or have any 21 * questions. 22 * 23 */ 24 25 #ifndef SHARE_VM_RUNTIME_ACCESSBACKEND_HPP 26 #define SHARE_VM_RUNTIME_ACCESSBACKEND_HPP 27 28 #include "metaprogramming/conditional.hpp" 29 #include "metaprogramming/enableIf.hpp" 30 #include "metaprogramming/integralConstant.hpp" 31 #include "metaprogramming/isSame.hpp" 32 #include "utilities/debug.hpp" 33 #include "utilities/globalDefinitions.hpp" 34 35 // This metafunction returns either oop or narrowOop depending on whether 36 // an access needs to use compressed oops or not. 
template <DecoratorSet decorators>
struct HeapOopType: AllStatic {
  // Compression applies only when the access asks for compressed-oop
  // conversion AND the runtime actually uses compressed oops.
  static const bool needs_oop_compress = HasDecorator<decorators, INTERNAL_CONVERT_COMPRESSED_OOP>::value &&
                                         HasDecorator<decorators, INTERNAL_RT_USE_COMPRESSED_OOPS>::value;
  typedef typename Conditional<needs_oop_compress, narrowOop, oop>::type type;
};

namespace AccessInternal {
  // Tag for each access primitive that can be dispatched through a barrier.
  // The *_AT variants address a field as (base oop, byte offset) rather than
  // through a raw address.
  enum BarrierType {
    BARRIER_STORE,
    BARRIER_STORE_AT,
    BARRIER_LOAD,
    BARRIER_LOAD_AT,
    BARRIER_ATOMIC_CMPXCHG,
    BARRIER_ATOMIC_CMPXCHG_AT,
    BARRIER_ATOMIC_XCHG,
    BARRIER_ATOMIC_XCHG_AT,
    BARRIER_ARRAYCOPY,
    BARRIER_CLONE,
    BARRIER_RESOLVE
  };

  // True iff a value of type T must be converted to/from narrowOop for this
  // access: the value is an oop, the heap representation under these
  // decorators is narrowOop, and the caller-side type T is (uncompressed) oop.
  template <DecoratorSet decorators, typename T>
  struct MustConvertCompressedOop: public IntegralConstant<bool,
    HasDecorator<decorators, INTERNAL_VALUE_IS_OOP>::value &&
    IsSame<typename HeapOopType<decorators>::type, narrowOop>::value &&
    IsSame<T, oop>::value> {};

  // This metafunction returns an appropriate oop type if the value is oop-like
  // and otherwise returns the same type T.
  template <DecoratorSet decorators, typename T>
  struct EncodedType: AllStatic {
    typedef typename Conditional<
      HasDecorator<decorators, INTERNAL_VALUE_IS_OOP>::value,
      typename HeapOopType<decorators>::type, T>::type type;
  };

  // Address of the (possibly compressed) oop field located byte_offset bytes
  // from base. The pointee type follows HeapOopType for these decorators.
  template <DecoratorSet decorators>
  inline typename HeapOopType<decorators>::type*
  oop_field_addr(oop base, ptrdiff_t byte_offset) {
    return reinterpret_cast<typename HeapOopType<decorators>::type*>(
             reinterpret_cast<intptr_t>((void*)base) + byte_offset);
  }

  // This metafunction returns whether it is possible for a type T to require
  // locking to support wide atomics or not.
  // Without native 8-byte compare-and-swap (SUPPORTS_NATIVE_CX8 not defined),
  // accesses wider than 4 bytes may need a lock-based fallback; with native
  // CX8 support no type ever does.
  template <typename T>
#ifdef SUPPORTS_NATIVE_CX8
  struct PossiblyLockedAccess: public IntegralConstant<bool, false> {};
#else
  struct PossiblyLockedAccess: public IntegralConstant<bool, (sizeof(T) > 4)> {};
#endif

  // Function pointer signatures for every barrier primitive, in both the
  // (base oop, byte offset) "_at" flavor and the raw-address flavor.
  template <DecoratorSet decorators, typename T>
  struct AccessFunctionTypes {
    typedef T (*load_at_func_t)(oop base, ptrdiff_t offset);
    typedef void (*store_at_func_t)(oop base, ptrdiff_t offset, T value);
    typedef T (*atomic_cmpxchg_at_func_t)(T new_value, oop base, ptrdiff_t offset, T compare_value);
    typedef T (*atomic_xchg_at_func_t)(T new_value, oop base, ptrdiff_t offset);

    typedef T (*load_func_t)(void* addr);
    typedef void (*store_func_t)(void* addr, T value);
    typedef T (*atomic_cmpxchg_func_t)(T new_value, void* addr, T compare_value);
    typedef T (*atomic_xchg_func_t)(T new_value, void* addr);

    typedef bool (*arraycopy_func_t)(arrayOop src_obj, arrayOop dst_obj, T* src, T* dst, size_t length);
    typedef void (*clone_func_t)(oop src, oop dst, size_t size);
    typedef oop (*resolve_func_t)(oop obj);
  };

  // Maps a BarrierType tag to the matching function pointer type above; the
  // per-barrier specializations are generated by the macro below.
  template <DecoratorSet decorators, typename T, BarrierType barrier> struct AccessFunction {};

#define ACCESS_GENERATE_ACCESS_FUNCTION(bt, func)                   \
  template <DecoratorSet decorators, typename T>                    \
  struct AccessFunction<decorators, T, bt>: AllStatic{              \
    typedef typename AccessFunctionTypes<decorators, T>::func type; \
  }
  ACCESS_GENERATE_ACCESS_FUNCTION(BARRIER_STORE, store_func_t);
  ACCESS_GENERATE_ACCESS_FUNCTION(BARRIER_STORE_AT, store_at_func_t);
  ACCESS_GENERATE_ACCESS_FUNCTION(BARRIER_LOAD, load_func_t);
  ACCESS_GENERATE_ACCESS_FUNCTION(BARRIER_LOAD_AT, load_at_func_t);
  ACCESS_GENERATE_ACCESS_FUNCTION(BARRIER_ATOMIC_CMPXCHG, atomic_cmpxchg_func_t);
  ACCESS_GENERATE_ACCESS_FUNCTION(BARRIER_ATOMIC_CMPXCHG_AT, atomic_cmpxchg_at_func_t);
  ACCESS_GENERATE_ACCESS_FUNCTION(BARRIER_ATOMIC_XCHG, atomic_xchg_func_t);
  ACCESS_GENERATE_ACCESS_FUNCTION(BARRIER_ATOMIC_XCHG_AT, atomic_xchg_at_func_t);
  ACCESS_GENERATE_ACCESS_FUNCTION(BARRIER_ARRAYCOPY, arraycopy_func_t);
  ACCESS_GENERATE_ACCESS_FUNCTION(BARRIER_CLONE, clone_func_t);
  ACCESS_GENERATE_ACCESS_FUNCTION(BARRIER_RESOLVE, resolve_func_t);
#undef ACCESS_GENERATE_ACCESS_FUNCTION

  // Returns the function pointer implementing the given barrier for these
  // decorators; resolve_oop_barrier is the oop flavor. Defined out of line.
  template <DecoratorSet decorators, typename T, BarrierType barrier_type>
  typename AccessFunction<decorators, T, barrier_type>::type resolve_barrier();

  template <DecoratorSet decorators, typename T, BarrierType barrier_type>
  typename AccessFunction<decorators, T, barrier_type>::type resolve_oop_barrier();

  // Scoped guard used by the lock-based wide-atomic fallback; only the
  // constructor/destructor pair is declared here — presumably it locks on
  // construction and unlocks on destruction (implemented in the cpp file).
  class AccessLocker VALUE_OBJ_CLASS_SPEC {
  public:
    AccessLocker();
    ~AccessLocker();
  };
  bool wide_atomic_needs_locking();

  // Raw address of the field at byte offset from base (no oop compression).
  void* field_addr(oop base, ptrdiff_t offset);

  // Forward calls to Copy:: in the cpp file to reduce dependencies and allow
  // faster build times, given how frequently included access is.
  void arraycopy_arrayof_conjoint_oops(void* src, void* dst, size_t length);
  void arraycopy_conjoint_oops(oop* src, oop* dst, size_t length);
  void arraycopy_conjoint_oops(narrowOop* src, narrowOop* dst, size_t length);

  void arraycopy_disjoint_words(void* src, void* dst, size_t length);
  void arraycopy_disjoint_words_atomic(void* src, void* dst, size_t length);

  template<typename T>
  void arraycopy_conjoint(T* src, T* dst, size_t length);
  template<typename T>
  void arraycopy_arrayof_conjoint(T* src, T* dst, size_t length);
  template<typename T>
  void arraycopy_conjoint_atomic(T* src, T* dst, size_t length);
}

// This mask specifies what decorators are relevant for raw accesses. When passing
// accesses to the raw layer, irrelevant decorators are removed.
const DecoratorSet RAW_DECORATOR_MASK = INTERNAL_DECORATOR_MASK | MO_DECORATOR_MASK |
                                        ARRAYCOPY_DECORATOR_MASK | OOP_DECORATOR_MASK;

// The RawAccessBarrier performs raw accesses with additional knowledge of
// memory ordering, so that OrderAccess/Atomic is called when necessary.
// It additionally handles compressed oops, and hence is not completely "raw"
// strictly speaking.
template <DecoratorSet decorators>
class RawAccessBarrier: public AllStatic {
protected:
  static inline void* field_addr(oop base, ptrdiff_t byte_offset) {
    return AccessInternal::field_addr(base, byte_offset);
  }

protected:
  // Only encode if INTERNAL_VALUE_IS_OOP.
  // Compressing overload: declared here, defined out of line; selected via
  // EnableIf when MustConvertCompressedOop holds.
  template <DecoratorSet idecorators, typename T>
  static inline typename EnableIf<
    AccessInternal::MustConvertCompressedOop<idecorators, T>::value,
    typename HeapOopType<idecorators>::type>::type
  encode_internal(T value);

  // Non-compressing overload: identity.
  template <DecoratorSet idecorators, typename T>
  static inline typename EnableIf<
    !AccessInternal::MustConvertCompressedOop<idecorators, T>::value, T>::type
  encode_internal(T value) {
    return value;
  }

  // Encode a value into its heap representation (narrowOop iff compression
  // is required for these decorators, otherwise unchanged).
  template <typename T>
  static inline typename AccessInternal::EncodedType<decorators, T>::type
  encode(T value) {
    return encode_internal<decorators, T>(value);
  }

  // Only decode if INTERNAL_VALUE_IS_OOP.
  // Decompressing overload: declared here, defined out of line.
  template <DecoratorSet idecorators, typename T>
  static inline typename EnableIf<
    AccessInternal::MustConvertCompressedOop<idecorators, T>::value, T>::type
  decode_internal(typename HeapOopType<idecorators>::type value);

  // Non-decompressing overload: identity.
  template <DecoratorSet idecorators, typename T>
  static inline typename EnableIf<
    !AccessInternal::MustConvertCompressedOop<idecorators, T>::value, T>::type
  decode_internal(T value) {
    return value;
  }

  // Decode a heap representation back into a T value.
  template <typename T>
  static inline T decode(typename AccessInternal::EncodedType<decorators, T>::type value) {
    return decode_internal<decorators, T>(value);
  }

protected:
  // load_internal overload set, one overload per memory-ordering decorator.
  // The MO_SEQ_CST / MO_ACQUIRE / MO_RELAXED flavors are defined out of line;
  // MO_VOLATILE and MO_UNORDERED are defined inline below.
  template <DecoratorSet ds, typename T>
  static typename EnableIf<
    HasDecorator<ds, MO_SEQ_CST>::value, T>::type
  load_internal(void* addr);

  template <DecoratorSet ds, typename T>
  static typename EnableIf<
    HasDecorator<ds, MO_ACQUIRE>::value, T>::type
  load_internal(void* addr);

  template <DecoratorSet ds, typename T>
  static typename EnableIf<
    HasDecorator<ds, MO_RELAXED>::value, T>::type
  load_internal(void* addr);

  // MO_VOLATILE: plain load through a volatile-qualified pointer.
  template <DecoratorSet ds, typename T>
  static inline typename EnableIf<
    HasDecorator<ds, MO_VOLATILE>::value, T>::type
  load_internal(void* addr) {
    return *reinterpret_cast<const volatile T*>(addr);
  }

  // MO_UNORDERED: plain load, no ordering constraints.
  template <DecoratorSet ds, typename T>
  static inline typename EnableIf<
    HasDecorator<ds, MO_UNORDERED>::value, T>::type
  load_internal(void* addr) {
    return *reinterpret_cast<const T*>(addr);
  }

  // store_internal overload set, mirroring load_internal: MO_SEQ_CST /
  // MO_RELEASE / MO_RELAXED are defined out of line; MO_VOLATILE and
  // MO_UNORDERED are defined inline below.
  template <DecoratorSet ds, typename T>
  static typename EnableIf<
    HasDecorator<ds, MO_SEQ_CST>::value>::type
  store_internal(void* addr, T value);

  template <DecoratorSet ds, typename T>
  static typename EnableIf<
    HasDecorator<ds, MO_RELEASE>::value>::type
  store_internal(void* addr, T value);

  template <DecoratorSet ds, typename T>
  static typename EnableIf<
    HasDecorator<ds, MO_RELAXED>::value>::type
  store_internal(void* addr, T value);

  // MO_VOLATILE: plain store through a volatile-qualified pointer.
  template <DecoratorSet ds, typename T>
  static inline typename EnableIf<
    HasDecorator<ds, MO_VOLATILE>::value>::type
  store_internal(void* addr, T value) {
    (void)const_cast<T&>(*reinterpret_cast<volatile T*>(addr) = value);
  }

  // MO_UNORDERED: plain store, no ordering constraints.
  template <DecoratorSet ds, typename T>
  static inline typename EnableIf<
    HasDecorator<ds, MO_UNORDERED>::value>::type
  store_internal(void* addr, T value) {
    *reinterpret_cast<T*>(addr) = value;
  }

  // atomic_cmpxchg_internal / atomic_xchg_internal: out-of-line definitions
  // selected on the memory-ordering decorator.
  template <DecoratorSet ds, typename T>
  static typename EnableIf<
    HasDecorator<ds, MO_SEQ_CST>::value, T>::type
  atomic_cmpxchg_internal(T new_value, void* addr, T compare_value);

  template <DecoratorSet ds, typename T>
  static typename EnableIf<
    HasDecorator<ds, MO_RELAXED>::value, T>::type
  atomic_cmpxchg_internal(T new_value, void* addr, T compare_value);

  template <DecoratorSet ds, typename T>
  static typename EnableIf<
    HasDecorator<ds, MO_SEQ_CST>::value, T>::type
  atomic_xchg_internal(T new_value, void* addr);

  // The following *_locked mechanisms serve the purpose of handling atomic operations
  // that are larger than a machine can handle, and then possibly opt for using
  // a slower path using a mutex to perform the operation.

  // Fast path: the platform can always do this atomically; forward directly.
  template <DecoratorSet ds, typename T>
  static inline typename EnableIf<
    !AccessInternal::PossiblyLockedAccess<T>::value, T>::type
  atomic_cmpxchg_maybe_locked(T new_value, void* addr, T compare_value) {
    return atomic_cmpxchg_internal<ds>(new_value, addr, compare_value);
  }

  // Possibly-locked path: defined out of line.
  template <DecoratorSet ds, typename T>
  static typename EnableIf<
    AccessInternal::PossiblyLockedAccess<T>::value, T>::type
  atomic_cmpxchg_maybe_locked(T new_value, void* addr, T compare_value);

  // Fast path: forward directly to the atomic implementation.
  template <DecoratorSet ds, typename T>
  static inline typename EnableIf<
    !AccessInternal::PossiblyLockedAccess<T>::value, T>::type
  atomic_xchg_maybe_locked(T new_value, void* addr) {
    return atomic_xchg_internal<ds>(new_value, addr);
  }

  // Possibly-locked path: defined out of line.
  template <DecoratorSet ds, typename T>
  static typename EnableIf<
    AccessInternal::PossiblyLockedAccess<T>::value, T>::type
  atomic_xchg_maybe_locked(T new_value, void* addr);

public:
  // Public raw-address primitives; each dispatches to the *_internal overload
  // matching this barrier's decorators.
  template <typename T>
  static inline void store(void* addr, T value) {
    store_internal<decorators>(addr, value);
  }

  template <typename T>
  static inline T load(void* addr) {
    return load_internal<decorators, T>(addr);
  }

  template <typename T>
  static inline T atomic_cmpxchg(T new_value, void* addr, T compare_value) {
    return atomic_cmpxchg_maybe_locked<decorators>(new_value, addr, compare_value);
  }

  template <typename T>
  static inline T atomic_xchg(T new_value, void* addr) {
    return atomic_xchg_maybe_locked<decorators>(new_value, addr);
  }

  template <typename T>
  static bool arraycopy(T* src, T* dst, size_t length);

  // oop_* variants (defined out of line) additionally encode/decode between
  // the caller's T and the heap's oop representation.
  template <typename T>
  static void oop_store(void* addr, T value);
  template <typename T>
  static void oop_store_at(oop base, ptrdiff_t offset, T value);

  template <typename T>
  static T oop_load(void* addr);
  template <typename T>
  static T oop_load_at(oop base, ptrdiff_t offset);

  template <typename T>
  static T oop_atomic_cmpxchg(T new_value, void* addr, T compare_value);
  template <typename T>
  static T oop_atomic_cmpxchg_at(T new_value, oop base, ptrdiff_t offset, T compare_value);

  template <typename T>
  static T oop_atomic_xchg(T new_value, void* addr);
  template <typename T>
  static T oop_atomic_xchg_at(T new_value, oop base, ptrdiff_t offset);

  // "_at" convenience wrappers: compute the field address, then delegate to
  // the raw-address primitive above.
  template <typename T>
  static void store_at(oop base, ptrdiff_t offset, T value) {
    store(field_addr(base, offset), value);
  }

  template <typename T>
  static T load_at(oop base, ptrdiff_t offset) {
    return load<T>(field_addr(base, offset));
  }

  template <typename T>
  static T atomic_cmpxchg_at(T new_value, oop base, ptrdiff_t offset, T compare_value) {
    return atomic_cmpxchg(new_value, field_addr(base, offset), compare_value);
  }

  template <typename T>
  static T atomic_xchg_at(T new_value, oop base, ptrdiff_t offset) {
    return atomic_xchg(new_value, field_addr(base, offset));
  }

  template <typename T>
  static bool oop_arraycopy(arrayOop src_obj, arrayOop dst_obj, T* src, T* dst, size_t length);
  static bool oop_arraycopy(arrayOop src_obj, arrayOop dst_obj, HeapWord* src, HeapWord* dst, size_t length);

  static void clone(oop src, oop dst, size_t size);

  // At the raw layer, resolving an object is the identity.
  static oop resolve(oop obj) { return obj; }
};

#endif // SHARE_VM_RUNTIME_ACCESSBACKEND_HPP