
src/hotspot/share/oops/valueKlass.cpp

  29 #include "interpreter/interpreter.hpp"
  30 #include "logging/log.hpp"
  31 #include "memory/metadataFactory.hpp"
  32 #include "oops/access.hpp"
  33 #include "oops/compressedOops.inline.hpp"
  34 #include "oops/fieldStreams.hpp"
  35 #include "oops/instanceKlass.inline.hpp"
  36 #include "oops/method.hpp"
  37 #include "oops/oop.inline.hpp"
  38 #include "oops/objArrayKlass.hpp"
  39 #include "oops/valueKlass.hpp"
  40 #include "oops/valueArrayKlass.hpp"
  41 #include "runtime/fieldDescriptor.inline.hpp"
  42 #include "runtime/handles.inline.hpp"
  43 #include "runtime/safepointVerifiers.hpp"
  44 #include "runtime/sharedRuntime.hpp"
  45 #include "runtime/signature.hpp"
  46 #include "runtime/thread.inline.hpp"
  47 #include "utilities/copy.hpp"
  48 
  49 int ValueKlass::first_field_offset() const {
  50 #ifdef ASSERT
  51   int first_offset = INT_MAX;
  52   for (JavaFieldStream fs(this); !fs.done(); fs.next()) {
  53     if (fs.offset() < first_offset) first_offset = fs.offset();
  54   }
  55 #endif
  56   int base_offset = instanceOopDesc::base_offset_in_bytes();
  57   // The first field of value types is aligned on a long boundary
  58   base_offset = align_up(base_offset, BytesPerLong);
  59   assert(base_offset == first_offset, "inconsistent offsets");
  60   return base_offset;
  61 }
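
As a worked example of the alignment above: a minimal standalone sketch, assuming a 12-byte object header (8-byte mark word plus 4-byte narrow klass pointer; the actual base offset depends on build flags):

#include <cassert>

// Same rounding HotSpot's align_up performs for power-of-two alignments.
static int align_up_sketch(int x, int alignment) {
  return (x + alignment - 1) & ~(alignment - 1);
}

int main() {
  int base_offset = 12;                        // assumed instanceOopDesc header size
  int first = align_up_sketch(base_offset, 8); // BytesPerLong
  assert(first == 16);                         // first field lands on a long boundary
  return 0;
}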
  62 
  63 int ValueKlass::raw_value_byte_size() const {
  64   int heapOopAlignedSize = nonstatic_field_size() << LogBytesPerHeapOop;
  65   // If bigger than 64 bits or contains oops, use the heap-oop-aligned size,
  66   // which for values should also be jlong aligned (raw_field_copy asserts otherwise)
  67   if (heapOopAlignedSize >= longSize || contains_oops()) {
  68     return heapOopAlignedSize;
  69   }
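
A worked instance of the sizing rule above, assuming 4-byte heap oops (LogBytesPerHeapOop == 2) and longSize == 8; the field count is illustrative:

#include <cassert>

int main() {
  int nonstatic_field_size = 2;                        // two heap-oop-sized slots (assumed)
  int heapOopAlignedSize = nonstatic_field_size << 2;  // << LogBytesPerHeapOop
  assert(heapOopAlignedSize == 8);                     // >= longSize, so returned as-is
  return 0;
}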


 175   if (or_null) {
 176     return vak->array_klass_or_null(storage_props, rank);
 177   }
 178   return vak->array_klass(storage_props, rank, THREAD);
 179 }
 180 
 181 Klass* ValueKlass::allocate_value_array_klass(TRAPS) {
 182   if (flatten_array() && (is_atomic() || (!ValueArrayAtomicAccess))) {
 183     return ValueArrayKlass::allocate_klass(ArrayStorageProperties::flattened_and_null_free, this, THREAD);
 184   }
 185   return ObjArrayKlass::allocate_objArray_klass(ArrayStorageProperties::null_free, 1, this, THREAD);
 186 }
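
The storage choice above condenses to: flat, null-free storage only when the layout is flattenable and either element access is already atomic or atomicity is not required (ValueArrayAtomicAccess disabled); otherwise a null-free array of references. An illustrative restatement, not HotSpot code:

enum class Storage { FlattenedNullFree, NullFreeRefs };

static Storage choose_storage(bool flatten_array, bool is_atomic,
                              bool value_array_atomic_access) {
  if (flatten_array && (is_atomic || !value_array_atomic_access)) {
    return Storage::FlattenedNullFree;  // flat elements, nulls excluded
  }
  return Storage::NullFreeRefs;         // array of references, nulls excluded
}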
 187 
 188 void ValueKlass::array_klasses_do(void f(Klass* k)) {
 189   InstanceKlass::array_klasses_do(f);
 190   if (get_value_array_klass() != NULL)
 191     ArrayKlass::cast(get_value_array_klass())->array_klasses_do(f);
 192 }
 193 
 194 void ValueKlass::raw_field_copy(void* src, void* dst, size_t raw_byte_size) {
 195   /*
 196    * Try not to shear fields even if this is not an atomic store...
 197    *
 198    * The first 3 cases handle value array stores; otherwise this works on the
 199    * same basis as JVM_Clone, since at this size the data is aligned. Primitive
 200    * types are laid out largest to smallest, so it is not possible for fields
 201    * to straddle long copy boundaries.
 202    *
 203    * Under concurrent access without exclusion, a partially written value may
 204    * be observed, but never torn primitive or reference field values.
 205    */
 206   switch (raw_byte_size) {
 207     case 1:
 208       *((jbyte*) dst) = *(jbyte*)src;
 209       break;
 210     case 2:
 211       *((jshort*) dst) = *(jshort*)src;
 212       break;
 213     case 4:
 214       *((jint*) dst) = *(jint*) src;
 215       break;
 216     default:
 217       assert(raw_byte_size % sizeof(jlong) == 0, "Unaligned raw_byte_size");
 218       Copy::conjoint_jlongs_atomic((jlong*)src, (jlong*)dst, raw_byte_size >> LogBytesPerLong);
 219   }
 220 }
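
A standalone analogue of the dispatch above, as a sketch only; plain 64-bit stores stand in for Copy::conjoint_jlongs_atomic, and the payload is assumed naturally aligned:

#include <cassert>
#include <cstdint>
#include <cstddef>

static void raw_copy_sketch(const void* src, void* dst, size_t size) {
  switch (size) {
    case 1: *(uint8_t*)dst  = *(const uint8_t*)src;  break;
    case 2: *(uint16_t*)dst = *(const uint16_t*)src; break;
    case 4: *(uint32_t*)dst = *(const uint32_t*)src; break;
    default:
      assert(size % sizeof(uint64_t) == 0);
      // 64-bit chunks: no field straddles a chunk, so a racing reader may
      // observe a stale field value but never a torn one.
      for (size_t i = 0; i < size / sizeof(uint64_t); i++) {
        ((uint64_t*)dst)[i] = ((const uint64_t*)src)[i];
      }
  }
}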
 221 
 222 /*
 223  * Store the value of this klass contained in src into dst.
 224  *
 225  * This operation is appropriate for use from vastore, vaload and putfield (for values).
 226  *
 227  * GC barriers can currently lock with no safepoint check and allocate C-heap,
 228  * so a raw pointer is "safe" for now.
 229  *
 230  * Going forward, look to use a machine-generated (stub gen or bc) version for the most used klass layouts.
 231  *
 232  */
 233 void ValueKlass::value_store(void* src, void* dst, size_t raw_byte_size, bool dst_heap, bool dst_uninitialized) {
 234   if (contains_oops()) {
 235     if (dst_heap) {
 236       // src/dst aren't oops, need offset to adjust oop map offset
 237       const address dst_oop_addr = ((address) dst) - first_field_offset();
 238 
 239       ModRefBarrierSet* bs = barrier_set_cast<ModRefBarrierSet>(BarrierSet::barrier_set());




  29 #include "interpreter/interpreter.hpp"
  30 #include "logging/log.hpp"
  31 #include "memory/metadataFactory.hpp"
  32 #include "oops/access.hpp"
  33 #include "oops/compressedOops.inline.hpp"
  34 #include "oops/fieldStreams.hpp"
  35 #include "oops/instanceKlass.inline.hpp"
  36 #include "oops/method.hpp"
  37 #include "oops/oop.inline.hpp"
  38 #include "oops/objArrayKlass.hpp"
  39 #include "oops/valueKlass.hpp"
  40 #include "oops/valueArrayKlass.hpp"
  41 #include "runtime/fieldDescriptor.inline.hpp"
  42 #include "runtime/handles.inline.hpp"
  43 #include "runtime/safepointVerifiers.hpp"
  44 #include "runtime/sharedRuntime.hpp"
  45 #include "runtime/signature.hpp"
  46 #include "runtime/thread.inline.hpp"
  47 #include "utilities/copy.hpp"
  48 
  49 int ValueKlass::first_field_offset_old() const {
  50 #ifdef ASSERT
  51   int first_offset = INT_MAX;
  52   for (JavaFieldStream fs(this); !fs.done(); fs.next()) {
  53     if (fs.offset() < first_offset) first_offset = fs.offset();
  54   }
  55 #endif
  56   int base_offset = instanceOopDesc::base_offset_in_bytes();
  57   // The first field of value types is aligned on a long boundary
  58   base_offset = align_up(base_offset, BytesPerLong);
  59   assert(base_offset == first_offset, "inconsistent offsets");
  60   return base_offset;
  61 }
  62 
  63 int ValueKlass::raw_value_byte_size() const {
  64   int heapOopAlignedSize = nonstatic_field_size() << LogBytesPerHeapOop;
  65   // If bigger than 64 bits or contains oops, use the heap-oop-aligned size,
  66   // which for values should also be jlong aligned (raw_field_copy asserts otherwise)
  67   if (heapOopAlignedSize >= longSize || contains_oops()) {
  68     return heapOopAlignedSize;
  69   }


 175   if (or_null) {
 176     return vak->array_klass_or_null(storage_props, rank);
 177   }
 178   return vak->array_klass(storage_props, rank, THREAD);
 179 }
 180 
 181 Klass* ValueKlass::allocate_value_array_klass(TRAPS) {
 182   if (flatten_array() && (is_atomic() || (!ValueArrayAtomicAccess))) {
 183     return ValueArrayKlass::allocate_klass(ArrayStorageProperties::flattened_and_null_free, this, THREAD);
 184   }
 185   return ObjArrayKlass::allocate_objArray_klass(ArrayStorageProperties::null_free, 1, this, THREAD);
 186 }
 187 
 188 void ValueKlass::array_klasses_do(void f(Klass* k)) {
 189   InstanceKlass::array_klasses_do(f);
 190   if (get_value_array_klass() != NULL)
 191     ArrayKlass::cast(get_value_array_klass())->array_klasses_do(f);
 192 }
 193 
 194 void ValueKlass::raw_field_copy(void* src, void* dst, size_t raw_byte_size) {
 195   if (!UseNewLayout) {
 196     /*
 197      * Try not to shear fields even if this is not an atomic store...
 198      *
 199      * The first 3 cases handle value array stores; otherwise this works on the
 200      * same basis as JVM_Clone, since at this size the data is aligned. Primitive
 201      * types are laid out largest to smallest, so it is not possible for fields
 202      * to straddle long copy boundaries.
 203      *
 204      * Under concurrent access without exclusion, a partially written value may
 205      * be observed, but never torn primitive or reference field values.
 206      */
 207     switch (raw_byte_size) {
 208     case 1:
 209       *((jbyte*) dst) = *(jbyte*)src;
 210       break;
 211     case 2:
 212       *((jshort*) dst) = *(jshort*)src;
 213       break;
 214     case 4:
 215       *((jint*) dst) = *(jint*) src;
 216       break;
 217     default:
 218       assert(raw_byte_size % sizeof(jlong) == 0, "Unaligned raw_byte_size");
 219       Copy::conjoint_jlongs_atomic((jlong*)src, (jlong*)dst, raw_byte_size >> LogBytesPerLong);
 220     }
 221   } else {
 222     int size = this->get_exact_size_in_bytes();
 223     int length;
 224     switch (this->get_alignment()) {
 225     case BytesPerLong:
 226       length = size >> LogBytesPerLong;
 227       if (length > 0) {
 228         Copy::conjoint_jlongs_atomic((jlong*)src, (jlong*)dst, length);
 229         size -= length << LogBytesPerLong;
 230         src = (jlong*)src + length;
 231         dst = (jlong*)dst + length;
 232       }
 233       // Fallthrough
 234     case BytesPerInt:
 235       length = size >> LogBytesPerInt;
 236       if (length > 0) {
 237         Copy::conjoint_jints_atomic((jint*)src, (jint*)dst, length);
 238         size -= length << LogBytesPerInt;
 239         src = (jint*)src + length;
 240         dst = (jint*)dst + length;
 241       }
 242       // Fallthrough
 243     case BytesPerShort:
 244       length = size >> LogBytesPerShort;
 245       if (length > 0) {
 246         Copy::conjoint_jshorts_atomic((jshort*)src, (jshort*)dst, length);
 247         size -= length << LogBytesPerShort;
 248         src = (jshort*)src + length;
 249         dst = (jshort*)dst + length;
 250       }
 251       // Fallthrough
 252     case 1:
 253       if (size > 0) Copy::conjoint_jbytes_atomic((jbyte*)src, (jbyte*)dst, size);
 254       break;
 255     default:
 256       fatal("Unsupported alignment");
 257     }
 258   }
 259 }
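
The UseNewLayout branch above copies in descending chunk sizes so that every chunk stays naturally aligned. A standalone sketch of the same cascade; memcpy stands in for the per-chunk Copy::conjoint_*_atomic calls and does not itself guarantee atomicity:

#include <cstring>
#include <cstddef>

static void copy_cascade_sketch(const void* src, void* dst,
                                size_t size, size_t alignment) {
  const char* s = (const char*)src;
  char* d = (char*)dst;
  // Largest chunks first (bounded by the value's alignment), then
  // progressively smaller chunks for the remaining tail.
  for (size_t chunk = alignment; chunk >= 1 && size > 0; chunk /= 2) {
    size_t n = size / chunk;
    memcpy(d, s, n * chunk);
    s += n * chunk; d += n * chunk; size -= n * chunk;
  }
}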
 260 
 261 /*
 262  * Store the value of this klass contained in src into dst.
 263  *
 264  * This operation is appropriate for use from vastore, vaload and putfield (for values).
 265  *
 266  * GC barriers can currently lock with no safepoint check and allocate C-heap,
 267  * so a raw pointer is "safe" for now.
 268  *
 269  * Going forward, look to use a machine-generated (stub gen or bc) version for the most used klass layouts.
 270  *
 271  */
 272 void ValueKlass::value_store(void* src, void* dst, size_t raw_byte_size, bool dst_heap, bool dst_uninitialized) {
 273   if (contains_oops()) {
 274     if (dst_heap) {
 275       // src/dst aren't oops, need offset to adjust oop map offset
 276       const address dst_oop_addr = ((address) dst) - first_field_offset();
 277 
 278       ModRefBarrierSet* bs = barrier_set_cast<ModRefBarrierSet>(BarrierSet::barrier_set());

