
src/hotspot/share/oops/valueKlass.cpp

  30 #include "logging/log.hpp"
  31 #include "memory/metadataFactory.hpp"
  32 #include "oops/access.hpp"
  33 #include "oops/compressedOops.inline.hpp"
  34 #include "oops/fieldStreams.hpp"
  35 #include "oops/instanceKlass.inline.hpp"
  36 #include "oops/method.hpp"
  37 #include "oops/oop.inline.hpp"
  38 #include "oops/objArrayKlass.hpp"
  39 #include "oops/valueKlass.hpp"
  40 #include "oops/valueArrayKlass.hpp"
  41 #include "runtime/fieldDescriptor.inline.hpp"
  42 #include "runtime/handles.inline.hpp"
  43 #include "runtime/safepointVerifiers.hpp"
  44 #include "runtime/sharedRuntime.hpp"
  45 #include "runtime/signature.hpp"
  46 #include "runtime/thread.inline.hpp"
  47 #include "utilities/copy.hpp"
  48 
  49 int ValueKlass::first_field_offset() const {
  50 #ifdef ASSERT
  51   int first_offset = INT_MAX;
  52   for (JavaFieldStream fs(this); !fs.done(); fs.next()) {
  53     if (fs.offset() < first_offset) first_offset = fs.offset();
  54   }
  55 #endif
  56   int base_offset = instanceOopDesc::base_offset_in_bytes();
  57   // The first field of value types is aligned on a long boundary
  58   base_offset = align_up(base_offset, BytesPerLong);
  59   assert(base_offset == first_offset, "inconsistent offsets");
  60   return base_offset;
  61 }
  62 
  63 int ValueKlass::raw_value_byte_size() const {
  64   int heapOopAlignedSize = nonstatic_field_size() << LogBytesPerHeapOop;
  65   // If bigger than 64 bits or oop alignment is needed, use the heap-oop-aligned
  66   // size, which for values should also be jlong aligned; raw_field_copy asserts this
  67   if (heapOopAlignedSize >= longSize || contains_oops()) {
  68     return heapOopAlignedSize;
  69   }


 175   if (or_null) {
 176     return vak->array_klass_or_null(storage_props, rank);
 177   }
 178   return vak->array_klass(storage_props, rank, THREAD);
 179 }
 180 
 181 Klass* ValueKlass::allocate_value_array_klass(TRAPS) {
 182   if (flatten_array() && (is_atomic() || (!ValueArrayAtomicAccess))) {
 183     return ValueArrayKlass::allocate_klass(ArrayStorageProperties::flattened_and_null_free, this, THREAD);
 184   }
 185   return ObjArrayKlass::allocate_objArray_klass(ArrayStorageProperties::null_free, 1, this, THREAD);
 186 }
 187 
 188 void ValueKlass::array_klasses_do(void f(Klass* k)) {
 189   InstanceKlass::array_klasses_do(f);
 190   if (get_value_array_klass() != NULL)
 191     ArrayKlass::cast(get_value_array_klass())->array_klasses_do(f);
 192 }
 193 
 194 void ValueKlass::raw_field_copy(void* src, void* dst, size_t raw_byte_size) {
 195   /*
 196    * Try not to shear fields even if this is not an atomic store...
 197    *
 198    * The first 3 cases handle a value array store; otherwise this works on the
 199    * same basis as JVM_Clone, since at this size the data is aligned. The order
 200    * of primitive types is largest to smallest, and it is not possible for
 201    * fields to straddle long copy boundaries.
 202    *
 203    * If multi-threaded without exclusive access, it is possible to observe a
 204    * partial value store, but never partial primitive or reference field values.
 205    */
 206   switch (raw_byte_size) {
 207     case 1:
 208       *((jbyte*) dst) = *(jbyte*)src;
 209       break;
 210     case 2:
 211       *((jshort*) dst) = *(jshort*)src;
 212       break;
 213     case 4:
 214       *((jint*) dst) = *(jint*) src;
 215       break;
 216     default:
 217       assert(raw_byte_size % sizeof(jlong) == 0, "Unaligned raw_byte_size");
 218       Copy::conjoint_jlongs_atomic((jlong*)src, (jlong*)dst, raw_byte_size >> LogBytesPerLong);
 219   }
 220 }
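
For readers outside the VM, here is a hedged standalone sketch of the switch above (hypothetical names, plain memory, the VM copy routines replaced by ordinary stores): a payload of 1, 2 or 4 bytes is written with a single store of exactly that width, so no primitive or reference field can be seen half-written, and anything larger is copied as a sequence of 64-bit words.

    #include <cassert>
    #include <cstddef>
    #include <cstdint>

    // Hypothetical stand-in for ValueKlass::raw_field_copy: one store for small
    // payloads, otherwise 64-bit word copies (payload asserted long-aligned, as above).
    static void raw_field_copy_sketch(const void* src, void* dst, size_t raw_byte_size) {
      switch (raw_byte_size) {
        case 1: *static_cast<uint8_t*>(dst)  = *static_cast<const uint8_t*>(src);  break;
        case 2: *static_cast<uint16_t*>(dst) = *static_cast<const uint16_t*>(src); break;
        case 4: *static_cast<uint32_t*>(dst) = *static_cast<const uint32_t*>(src); break;
        default:
          assert(raw_byte_size % sizeof(uint64_t) == 0 && "Unaligned raw_byte_size");
          for (size_t i = 0; i < raw_byte_size / sizeof(uint64_t); i++) {
            static_cast<uint64_t*>(dst)[i] = static_cast<const uint64_t*>(src)[i];
          }
      }
    }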
 221 
 222 /*
 223  * Store the value of this klass, contained within src, into dst.
 224  *
 225  * This operation is appropriate for use from vastore, vaload and putfield (for values).
 226  *
 227  * GC barriers currently can lock with no safepoint check and allocate C-heap,
 228  * so a raw pointer is "safe" for now.
 229  *
 230  * Going forward, look to use a machine-generated (stub gen or bc) version for the most used klass layouts.
 231  *
 232  */
 233 void ValueKlass::value_store(void* src, void* dst, size_t raw_byte_size, bool dst_heap, bool dst_uninitialized) {
 234   if (contains_oops()) {
 235     if (dst_heap) {
 236       // src/dst aren't oops, need offset to adjust oop map offset
 237       const address dst_oop_addr = ((address) dst) - first_field_offset();
 238 
 239       ModRefBarrierSet* bs = barrier_set_cast<ModRefBarrierSet>(BarrierSet::barrier_set());




  30 #include "logging/log.hpp"
  31 #include "memory/metadataFactory.hpp"
  32 #include "oops/access.hpp"
  33 #include "oops/compressedOops.inline.hpp"
  34 #include "oops/fieldStreams.hpp"
  35 #include "oops/instanceKlass.inline.hpp"
  36 #include "oops/method.hpp"
  37 #include "oops/oop.inline.hpp"
  38 #include "oops/objArrayKlass.hpp"
  39 #include "oops/valueKlass.hpp"
  40 #include "oops/valueArrayKlass.hpp"
  41 #include "runtime/fieldDescriptor.inline.hpp"
  42 #include "runtime/handles.inline.hpp"
  43 #include "runtime/safepointVerifiers.hpp"
  44 #include "runtime/sharedRuntime.hpp"
  45 #include "runtime/signature.hpp"
  46 #include "runtime/thread.inline.hpp"
  47 #include "utilities/copy.hpp"
  48 
  49 int ValueKlass::first_field_offset() const {
  50   if (UseNewLayout) {
  51     return get_first_field_offset();
  52   }
  53 #ifdef ASSERT
  54   int first_offset = INT_MAX;
  55   for (JavaFieldStream fs(this); !fs.done(); fs.next()) {
  56     if (fs.offset() < first_offset) first_offset = fs.offset();
  57   }
  58 #endif
  59   int base_offset = instanceOopDesc::base_offset_in_bytes();
  60   // The first field of value types is aligned on a long boundary
  61   base_offset = align_up(base_offset, BytesPerLong);
  62   assert(base_offset == first_offset, "inconsistent offsets");
  63   return base_offset;
  64 }
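
As a worked illustration of the legacy branch above, here is a minimal self-contained sketch of the alignment step. The 12-byte object header assumed below (8-byte mark word plus 4-byte compressed klass pointer) is a hypothetical example value, not something this file specifies.

    #include <cassert>
    #include <cstdio>

    // Round 'value' up to the next multiple of 'alignment' (a power of two),
    // mirroring align_up(base_offset, BytesPerLong) above.
    static int align_up_sketch(int value, int alignment) {
      return (value + alignment - 1) & ~(alignment - 1);
    }

    int main() {
      const int bytes_per_long = 8;
      int base_offset = 12;  // hypothetical header: 8-byte mark word + 4-byte klass pointer
      base_offset = align_up_sketch(base_offset, bytes_per_long);
      assert(base_offset == 16);  // first value field lands on a long boundary
      printf("first field offset = %d\n", base_offset);
      return 0;
    }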
  65 
  66 int ValueKlass::raw_value_byte_size() const {
  67   int heapOopAlignedSize = nonstatic_field_size() << LogBytesPerHeapOop;
  68   // If bigger than 64 bits or oop alignment is needed, use the heap-oop-aligned
  69   // size, which for values should also be jlong aligned; raw_field_copy asserts this
  70   if (heapOopAlignedSize >= longSize || contains_oops()) {
  71     return heapOopAlignedSize;
  72   }


 178   if (or_null) {
 179     return vak->array_klass_or_null(storage_props, rank);
 180   }
 181   return vak->array_klass(storage_props, rank, THREAD);
 182 }
 183 
 184 Klass* ValueKlass::allocate_value_array_klass(TRAPS) {
 185   if (flatten_array() && (is_atomic() || (!ValueArrayAtomicAccess))) {
 186     return ValueArrayKlass::allocate_klass(ArrayStorageProperties::flattened_and_null_free, this, THREAD);
 187   }
 188   return ObjArrayKlass::allocate_objArray_klass(ArrayStorageProperties::null_free, 1, this, THREAD);
 189 }
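
The storage decision above reads more clearly as a predicate; the restatement below uses hypothetical names and simply mirrors the condition: flattened, null-free storage is chosen only when the layout is flattenable and either single-element access is naturally atomic or atomic access is not required, otherwise a null-free object array of references is allocated.

    // Hypothetical restatement of the condition in allocate_value_array_klass.
    static bool use_flattened_storage(bool flatten_array, bool is_atomic,
                                      bool value_array_atomic_access) {
      return flatten_array && (is_atomic || !value_array_atomic_access);
    }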
 190 
 191 void ValueKlass::array_klasses_do(void f(Klass* k)) {
 192   InstanceKlass::array_klasses_do(f);
 193   if (get_value_array_klass() != NULL)
 194     ArrayKlass::cast(get_value_array_klass())->array_klasses_do(f);
 195 }
 196 
 197 void ValueKlass::raw_field_copy(void* src, void* dst, size_t raw_byte_size) {
 198   if (!UseNewLayout) {
 199     /*
 200      * Try not to shear fields even if this is not an atomic store...
 201      *
 202      * The first 3 cases handle a value array store; otherwise this works on the
 203      * same basis as JVM_Clone, since at this size the data is aligned. The order
 204      * of primitive types is largest to smallest, and it is not possible for
 205      * fields to straddle long copy boundaries.
 206      *
 207      * If multi-threaded without exclusive access, it is possible to observe a
 208      * partial value store, but never partial primitive or reference field values.
 209      */
 210     switch (raw_byte_size) {
 211     case 1:
 212       *((jbyte*) dst) = *(jbyte*)src;
 213       break;
 214     case 2:
 215       *((jshort*) dst) = *(jshort*)src;
 216       break;
 217     case 4:
 218       *((jint*) dst) = *(jint*) src;
 219       break;
 220     default:
 221       assert(raw_byte_size % sizeof(jlong) == 0, "Unaligned raw_byte_size");
 222       Copy::conjoint_jlongs_atomic((jlong*)src, (jlong*)dst, raw_byte_size >> LogBytesPerLong);
 223     }
 224   } else {
 225     int size = this->get_exact_size_in_bytes();
 226     int length;
 227     switch (this->get_alignment()) {
 228     case BytesPerLong:
 229       length = size >> LogBytesPerLong;
 230       if (length > 0) {
 231         Copy::conjoint_jlongs_atomic((jlong*)src, (jlong*)dst, length);
 232         size -= length << LogBytesPerLong;
 233         src = (jlong*)src + length;
 234         dst = (jlong*)dst + length;
 235       }
 236       // Fallthrough
 237     case BytesPerInt:
 238       length = size >> LogBytesPerInt;
 239       if (length > 0) {
 240         Copy::conjoint_jints_atomic((jint*)src, (jint*)dst, length);
 241         size -= length << LogBytesPerInt;
 242         src = (jint*)src + length;
 243         dst = (jint*)dst + length;
 244       }
 245       // Fallthrough
 246     case BytesPerShort:
 247       length = size >> LogBytesPerShort;
 248       if (length > 0) {
 249         Copy::conjoint_jshorts_atomic((jshort*)src, (jshort*)dst, length);
 250         size -= length << LogBytesPerShort;
 251         src = (jshort*)src + length;
 252         dst = (jshort*)dst + length;
 253       }
 254       // Fallthrough
 255     case 1:
 256       if (size > 0) Copy::conjoint_jbytes_atomic((jbyte*)src, (jbyte*)dst, size);
 257       break;
 258     default:
 259       fatal("Unsupported alignment");
 260     }
 261   }
 262 }
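
To make the new-layout cascade easier to follow, here is a hedged standalone sketch: all names are illustrative and plain stores stand in for the Copy::conjoint_*_atomic routines. Starting at the value's alignment, it copies as many whole chunks as fit at each width, then falls through to the next smaller width for the remainder, so no field of a given alignment is ever split across a copy unit.

    #include <cstddef>
    #include <cstdint>

    // Illustrative cascade: copy whole 8-, 4-, 2-, then 1-byte chunks, each with
    // a store of exactly that width, halving the chunk size for any remainder.
    static void raw_copy_cascade_sketch(const void* src, void* dst,
                                        size_t size, size_t alignment) {
      const char* s = static_cast<const char*>(src);
      char* d = static_cast<char*>(dst);
      for (size_t chunk = alignment; chunk >= 1; chunk /= 2) {
        size_t n = size / chunk;  // whole chunks at this width
        for (size_t i = 0; i < n; i++) {
          switch (chunk) {
            case 8: *reinterpret_cast<uint64_t*>(d) = *reinterpret_cast<const uint64_t*>(s); break;
            case 4: *reinterpret_cast<uint32_t*>(d) = *reinterpret_cast<const uint32_t*>(s); break;
            case 2: *reinterpret_cast<uint16_t*>(d) = *reinterpret_cast<const uint16_t*>(s); break;
            default: *d = *s; break;
          }
          s += chunk; d += chunk;
        }
        size -= n * chunk;
      }
    }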
 263 
 264 /*
 265  * Store the value of this klass, contained within src, into dst.
 266  *
 267  * This operation is appropriate for use from vastore, vaload and putfield (for values).
 268  *
 269  * GC barriers currently can lock with no safepoint check and allocate C-heap,
 270  * so a raw pointer is "safe" for now.
 271  *
 272  * Going forward, look to use a machine-generated (stub gen or bc) version for the most used klass layouts.
 273  *
 274  */
 275 void ValueKlass::value_store(void* src, void* dst, size_t raw_byte_size, bool dst_heap, bool dst_uninitialized) {
 276   if (contains_oops()) {
 277     if (dst_heap) {
 278       // src/dst aren't oops, need offset to adjust oop map offset
 279       const address dst_oop_addr = ((address) dst) - first_field_offset();
 280 
 281       ModRefBarrierSet* bs = barrier_set_cast<ModRefBarrierSet>(BarrierSet::barrier_set());

