--- old/src/hotspot/share/classfile/classFileParser.cpp 2019-07-19 10:38:27.000000000 -0400 +++ new/src/hotspot/share/classfile/classFileParser.cpp 2019-07-19 10:38:26.000000000 -0400 @@ -21,6 +21,7 @@ * questions. * */ +#include #include "precompiled.hpp" #include "jvm.h" #include "aot/aotLoader.hpp" @@ -60,6 +61,7 @@ #include "prims/jvmtiExport.hpp" #include "prims/jvmtiThreadState.hpp" #include "runtime/arguments.hpp" +#include "runtime/fieldDescriptor.inline.hpp" #include "runtime/handles.inline.hpp" #include "runtime/javaCalls.hpp" #include "runtime/os.hpp" @@ -1084,58 +1086,6 @@ } } -class AnnotationCollector : public ResourceObj{ -public: - enum Location { _in_field, _in_method, _in_class }; - enum ID { - _unknown = 0, - _method_CallerSensitive, - _method_ForceInline, - _method_DontInline, - _method_InjectedProfile, - _method_LambdaForm_Compiled, - _method_Hidden, - _method_HotSpotIntrinsicCandidate, - _jdk_internal_vm_annotation_Contended, - _field_Stable, - _jdk_internal_vm_annotation_ReservedStackAccess, - _annotation_LIMIT - }; - const Location _location; - int _annotations_present; - u2 _contended_group; - - AnnotationCollector(Location location) - : _location(location), _annotations_present(0) - { - assert((int)_annotation_LIMIT <= (int)sizeof(_annotations_present) * BitsPerByte, ""); - } - // If this annotation name has an ID, report it (or _none). - ID annotation_index(const ClassLoaderData* loader_data, const Symbol* name); - // Set the annotation name: - void set_annotation(ID id) { - assert((int)id >= 0 && (int)id < (int)_annotation_LIMIT, "oob"); - _annotations_present |= nth_bit((int)id); - } - - void remove_annotation(ID id) { - assert((int)id >= 0 && (int)id < (int)_annotation_LIMIT, "oob"); - _annotations_present &= ~nth_bit((int)id); - } - - // Report if the annotation is present. - bool has_any_annotations() const { return _annotations_present != 0; } - bool has_annotation(ID id) const { return (nth_bit((int)id) & _annotations_present) != 0; } - - void set_contended_group(u2 group) { _contended_group = group; } - u2 contended_group() const { return _contended_group; } - - bool is_contended() const { return has_annotation(_jdk_internal_vm_annotation_Contended); } - - void set_stable(bool stable) { set_annotation(_field_Stable); } - bool is_stable() const { return has_annotation(_field_Stable); } -}; - // This class also doubles as a holder for metadata cleanup. 
class ClassFileParser::FieldAnnotationCollector : public AnnotationCollector { private: @@ -1163,12 +1113,6 @@ void apply_to(const methodHandle& m); }; -class ClassFileParser::ClassAnnotationCollector : public AnnotationCollector{ -public: - ClassAnnotationCollector() : AnnotationCollector(_in_class) { } - void apply_to(InstanceKlass* ik); -}; - static int skip_annotation_value(const u1*, int, int); // fwd decl @@ -3886,141 +3830,121 @@ } #endif -// Values needed for oopmap and InstanceKlass creation -class ClassFileParser::FieldLayoutInfo : public ResourceObj { - public: - OopMapBlocksBuilder* oop_map_blocks; - int instance_size; - int nonstatic_field_size; - int static_field_size; - bool has_nonstatic_fields; -}; - -// Utility to collect and compact oop maps during layout -class ClassFileParser::OopMapBlocksBuilder : public ResourceObj { - public: - OopMapBlock* nonstatic_oop_maps; - unsigned int nonstatic_oop_map_count; - unsigned int max_nonstatic_oop_maps; - - public: - OopMapBlocksBuilder(unsigned int max_blocks, TRAPS) { - max_nonstatic_oop_maps = max_blocks; - nonstatic_oop_map_count = 0; - if (max_blocks == 0) { - nonstatic_oop_maps = NULL; - } else { - nonstatic_oop_maps = NEW_RESOURCE_ARRAY_IN_THREAD( +OopMapBlocksBuilder::OopMapBlocksBuilder(unsigned int max_blocks, TRAPS) { + max_nonstatic_oop_maps = max_blocks; + nonstatic_oop_map_count = 0; + if (max_blocks == 0) { + nonstatic_oop_maps = NULL; + } else { + nonstatic_oop_maps = NEW_RESOURCE_ARRAY_IN_THREAD( THREAD, OopMapBlock, max_nonstatic_oop_maps); - memset(nonstatic_oop_maps, 0, sizeof(OopMapBlock) * max_blocks); - } + memset(nonstatic_oop_maps, 0, sizeof(OopMapBlock) * max_blocks); } +} - OopMapBlock* last_oop_map() const { - assert(nonstatic_oop_map_count > 0, "Has no oop maps"); - return nonstatic_oop_maps + (nonstatic_oop_map_count - 1); - } +OopMapBlock* OopMapBlocksBuilder::last_oop_map() const { + assert(nonstatic_oop_map_count > 0, "Has no oop maps"); + return nonstatic_oop_maps + (nonstatic_oop_map_count - 1); +} + +// addition of super oop maps +void OopMapBlocksBuilder::initialize_inherited_blocks(OopMapBlock* blocks, unsigned int nof_blocks) { + assert(nof_blocks && nonstatic_oop_map_count == 0 && + nof_blocks <= max_nonstatic_oop_maps, "invariant"); - // addition of super oop maps - void initialize_inherited_blocks(OopMapBlock* blocks, unsigned int nof_blocks) { - assert(nof_blocks && nonstatic_oop_map_count == 0 && - nof_blocks <= max_nonstatic_oop_maps, "invariant"); + memcpy(nonstatic_oop_maps, blocks, sizeof(OopMapBlock) * nof_blocks); + nonstatic_oop_map_count += nof_blocks; +} - memcpy(nonstatic_oop_maps, blocks, sizeof(OopMapBlock) * nof_blocks); - nonstatic_oop_map_count += nof_blocks; +// collection of oops +void OopMapBlocksBuilder::add(int offset, int count) { + if (nonstatic_oop_map_count == 0) { + nonstatic_oop_map_count++; + } + OopMapBlock* nonstatic_oop_map = last_oop_map(); + if (nonstatic_oop_map->count() == 0) { // Unused map, set it up + nonstatic_oop_map->set_offset(offset); + nonstatic_oop_map->set_count(count); + } else if (nonstatic_oop_map->is_contiguous(offset)) { // contiguous, add + nonstatic_oop_map->increment_count(count); + } else { // Need a new one... 
+ nonstatic_oop_map_count++; + assert(nonstatic_oop_map_count <= max_nonstatic_oop_maps, "range check"); + nonstatic_oop_map = last_oop_map(); + nonstatic_oop_map->set_offset(offset); + nonstatic_oop_map->set_count(count); } +} - // collection of oops - void add(int offset, int count) { - if (nonstatic_oop_map_count == 0) { - nonstatic_oop_map_count++; - } - OopMapBlock* nonstatic_oop_map = last_oop_map(); - if (nonstatic_oop_map->count() == 0) { // Unused map, set it up - nonstatic_oop_map->set_offset(offset); - nonstatic_oop_map->set_count(count); - } else if (nonstatic_oop_map->is_contiguous(offset)) { // contiguous, add - nonstatic_oop_map->increment_count(count); - } else { // Need a new one... - nonstatic_oop_map_count++; - assert(nonstatic_oop_map_count <= max_nonstatic_oop_maps, "range check"); - nonstatic_oop_map = last_oop_map(); - nonstatic_oop_map->set_offset(offset); - nonstatic_oop_map->set_count(count); - } +// general purpose copy, e.g. into allocated instanceKlass +void OopMapBlocksBuilder::copy(OopMapBlock* dst) { + if (nonstatic_oop_map_count != 0) { + memcpy(dst, nonstatic_oop_maps, sizeof(OopMapBlock) * nonstatic_oop_map_count); + } +} - // general purpose copy, e.g. into allocated instanceKlass - void copy(OopMapBlock* dst) { - if (nonstatic_oop_map_count != 0) { - memcpy(dst, nonstatic_oop_maps, sizeof(OopMapBlock) * nonstatic_oop_map_count); - } +// Sort and compact adjacent blocks +void OopMapBlocksBuilder::compact(TRAPS) { + if (nonstatic_oop_map_count <= 1) { + return; + } + /* + * Since field layout sneaks in oops before values, we will be able to condense + * blocks. There is potential to compact between super, own refs and values + * containing refs. + * + * Currently compaction is slightly limited due to values being 8 byte aligned. + * This may well change: FIXME: if it doesn't, the code below is fairly general purpose + * and maybe it doesn't need to be. + */ + qsort(nonstatic_oop_maps, nonstatic_oop_map_count, sizeof(OopMapBlock), + (_sort_Fn)OopMapBlock::compare_offset); + if (nonstatic_oop_map_count < 2) { + return; } - // Sort and compact adjacent blocks - void compact(TRAPS) { - if (nonstatic_oop_map_count <= 1) { - return; - } - /* - * Since field layout sneeks in oops before values, we will be able to condense - * blocks. There is potential to compact between super, own refs and values - * containing refs. - * - * Currently compaction is slightly limited due to values being 8 byte aligned. - * This may well change: FixMe if doesn't, the code below is fairly general purpose - * and maybe it doesn't need to be. 
- */ - qsort(nonstatic_oop_maps, nonstatic_oop_map_count, sizeof(OopMapBlock), - (_sort_Fn)OopMapBlock::compare_offset); - if (nonstatic_oop_map_count < 2) { - return; + //Make a temp copy, and iterate through and copy back into the orig + ResourceMark rm(THREAD); + OopMapBlock* oop_maps_copy = NEW_RESOURCE_ARRAY_IN_THREAD(THREAD, OopMapBlock, + nonstatic_oop_map_count); + OopMapBlock* oop_maps_copy_end = oop_maps_copy + nonstatic_oop_map_count; + copy(oop_maps_copy); + OopMapBlock* nonstatic_oop_map = nonstatic_oop_maps; + unsigned int new_count = 1; + oop_maps_copy++; + while(oop_maps_copy < oop_maps_copy_end) { + assert(nonstatic_oop_map->offset() < oop_maps_copy->offset(), "invariant"); + if (nonstatic_oop_map->is_contiguous(oop_maps_copy->offset())) { + nonstatic_oop_map->increment_count(oop_maps_copy->count()); + } else { + nonstatic_oop_map++; + new_count++; + nonstatic_oop_map->set_offset(oop_maps_copy->offset()); + nonstatic_oop_map->set_count(oop_maps_copy->count()); } - - //Make a temp copy, and iterate through and copy back into the orig - ResourceMark rm(THREAD); - OopMapBlock* oop_maps_copy = NEW_RESOURCE_ARRAY_IN_THREAD(THREAD, OopMapBlock, - nonstatic_oop_map_count); - OopMapBlock* oop_maps_copy_end = oop_maps_copy + nonstatic_oop_map_count; - copy(oop_maps_copy); - OopMapBlock* nonstatic_oop_map = nonstatic_oop_maps; - unsigned int new_count = 1; oop_maps_copy++; - while(oop_maps_copy < oop_maps_copy_end) { - assert(nonstatic_oop_map->offset() < oop_maps_copy->offset(), "invariant"); - if (nonstatic_oop_map->is_contiguous(oop_maps_copy->offset())) { - nonstatic_oop_map->increment_count(oop_maps_copy->count()); - } else { - nonstatic_oop_map++; - new_count++; - nonstatic_oop_map->set_offset(oop_maps_copy->offset()); - nonstatic_oop_map->set_count(oop_maps_copy->count()); - } - oop_maps_copy++; - } - assert(new_count <= nonstatic_oop_map_count, "end up with more maps after compact() ?"); - nonstatic_oop_map_count = new_count; } + assert(new_count <= nonstatic_oop_map_count, "end up with more maps after compact() ?"); + nonstatic_oop_map_count = new_count; +} - void print_on(outputStream* st) const { - st->print_cr(" OopMapBlocks: %3d /%3d", nonstatic_oop_map_count, max_nonstatic_oop_maps); - if (nonstatic_oop_map_count > 0) { - OopMapBlock* map = nonstatic_oop_maps; - OopMapBlock* last_map = last_oop_map(); - assert(map <= last_map, "Last less than first"); - while (map <= last_map) { - st->print_cr(" Offset: %3d -%3d Count: %3d", map->offset(), - map->offset() + map->offset_span() - heapOopSize, map->count()); - map++; - } +void OopMapBlocksBuilder::print_on(outputStream* st) const { + st->print_cr(" OopMapBlocks: %3d /%3d", nonstatic_oop_map_count, max_nonstatic_oop_maps); + if (nonstatic_oop_map_count > 0) { + OopMapBlock* map = nonstatic_oop_maps; + OopMapBlock* last_map = last_oop_map(); + assert(map <= last_map, "Last less than first"); + while (map <= last_map) { + st->print_cr(" Offset: %3d -%3d Count: %3d", map->offset(), + map->offset() + map->offset_span() - heapOopSize, map->count()); + map++; } } +} - void print_value_on(outputStream* st) const { - print_on(st); - } - -}; +void OopMapBlocksBuilder::print_value_on(outputStream* st) const { + print_on(st); +} void ClassFileParser::throwValueTypeLimitation(THREAD_AND_LOCATION_DECL, const char* msg, @@ -4668,6 +4592,11 @@ static_fields_end); nonstatic_oop_maps->print_on(tty); tty->print("\n"); + tty->print_cr("Instance size = %d", instance_size); + tty->print_cr("Nonstatic_field_size = %d", nonstatic_field_size); + 
tty->print_cr("Static_field_size = %d", static_field_size); + tty->print_cr("Has nonstatic fields = %d", has_nonstatic_fields); + tty->print_cr("---"); } #endif @@ -6042,6 +5971,9 @@ } if (is_value_type()) { + ValueKlass::cast(ik)->set_alignment(_alignment); + ValueKlass::cast(ik)->set_first_field_offset(_first_field_offset); + ValueKlass::cast(ik)->set_exact_size_in_bytes(_exact_size_in_bytes); ValueKlass::cast(ik)->initialize_calling_convention(CHECK); } @@ -6734,9 +6666,21 @@ } _field_info = new FieldLayoutInfo(); - layout_fields(cp, _fac, _parsed_annotations, _field_info, CHECK); + if (UseNewLayout) { + FieldLayoutBuilder lb(this, _field_info); + if (this->is_value_type()) { + lb.compute_inline_class_layout(CHECK); + _alignment = lb.get_alignment(); + _first_field_offset = lb.get_first_field_offset(); + _exact_size_in_bytes = lb.get_exact_size_in_byte(); + } else { + lb.compute_regular_layout(CHECK); + } + } else { + layout_fields(cp, _fac, _parsed_annotations, _field_info, CHECK); +} - // Compute reference typ + // Compute reference type _rt = (NULL ==_super_klass) ? REF_NONE : _super_klass->reference_type(); } --- old/src/hotspot/share/classfile/classFileParser.hpp 2019-07-19 10:38:28.000000000 -0400 +++ new/src/hotspot/share/classfile/classFileParser.hpp 2019-07-19 10:38:28.000000000 -0400 @@ -45,18 +45,105 @@ class InstanceKlass; class Symbol; class TempNewSymbol; +class FieldLayoutBuilder; + + +class AnnotationCollector : public ResourceObj{ +public: + enum Location { _in_field, _in_method, _in_class }; + enum ID { + _unknown = 0, + _method_CallerSensitive, + _method_ForceInline, + _method_DontInline, + _method_InjectedProfile, + _method_LambdaForm_Compiled, + _method_Hidden, + _method_HotSpotIntrinsicCandidate, + _jdk_internal_vm_annotation_Contended, + _field_Stable, + _jdk_internal_vm_annotation_ReservedStackAccess, + _annotation_LIMIT + }; + const Location _location; + int _annotations_present; + u2 _contended_group; + + AnnotationCollector(Location location) + : _location(location), _annotations_present(0) + { + assert((int)_annotation_LIMIT <= (int)sizeof(_annotations_present) * BitsPerByte, ""); + } + // If this annotation name has an ID, report it (or _none). + ID annotation_index(const ClassLoaderData* loader_data, const Symbol* name); + // Set the annotation name: + void set_annotation(ID id) { + assert((int)id >= 0 && (int)id < (int)_annotation_LIMIT, "oob"); + _annotations_present |= nth_bit((int)id); + } + + void remove_annotation(ID id) { + assert((int)id >= 0 && (int)id < (int)_annotation_LIMIT, "oob"); + _annotations_present &= ~nth_bit((int)id); + } + + // Report if the annotation is present. 
+ bool has_any_annotations() const { return _annotations_present != 0; } + bool has_annotation(ID id) const { return (nth_bit((int)id) & _annotations_present) != 0; } + + void set_contended_group(u2 group) { _contended_group = group; } + u2 contended_group() const { return _contended_group; } + + bool is_contended() const { return has_annotation(_jdk_internal_vm_annotation_Contended); } + + void set_stable(bool stable) { set_annotation(_field_Stable); } + bool is_stable() const { return has_annotation(_field_Stable); } +}; + +// Utility to collect and compact oop maps during layout +class OopMapBlocksBuilder : public ResourceObj { +public: + OopMapBlock* nonstatic_oop_maps; + unsigned int nonstatic_oop_map_count; + unsigned int max_nonstatic_oop_maps; + + public: + OopMapBlocksBuilder(unsigned int max_blocks, TRAPS); + OopMapBlock* last_oop_map() const; + void initialize_inherited_blocks(OopMapBlock* blocks, unsigned int nof_blocks); + void add(int offset, int count); + void copy(OopMapBlock* dst); + void compact(TRAPS); + void print_on(outputStream* st) const; + void print_value_on(outputStream* st) const; +}; + +// Values needed for oopmap and InstanceKlass creation +class FieldLayoutInfo : public ResourceObj { + public: + OopMapBlocksBuilder* oop_map_blocks; + int instance_size; + int nonstatic_field_size; + int static_field_size; + bool has_nonstatic_fields; +}; // Parser for .class files // // The bytes describing the class file structure are read from a Stream object class ClassFileParser { + friend class FieldLayoutBuilder; + friend class FieldLayout; + - class ClassAnnotationCollector; + class ClassAnnotationCollector : public AnnotationCollector { + public: + ClassAnnotationCollector() : AnnotationCollector(_in_class) { } + void apply_to(InstanceKlass* ik); + }; class FieldAllocationCount; class FieldAnnotationCollector; - class FieldLayoutInfo; - class OopMapBlocksBuilder; public: // The ClassFileParser has an associated "publicity" level @@ -125,6 +212,10 @@ int _num_miranda_methods; + int _alignment; + int _first_field_offset; + int _exact_size_in_bytes; + ReferenceType _rt; Handle _protection_domain; AccessFlags _access_flags; --- old/src/hotspot/share/memory/heapInspection.cpp 2019-07-19 10:38:29.000000000 -0400 +++ new/src/hotspot/share/memory/heapInspection.cpp 2019-07-19 10:38:28.000000000 -0400 @@ -33,7 +33,9 @@ #include "memory/universe.hpp" #include "oops/oop.inline.hpp" #include "oops/reflectionAccessorImplKlassHelper.hpp" +#include "oops/valueKlass.hpp" #include "runtime/os.hpp" +#include "runtime/fieldDescriptor.inline.hpp" #include "utilities/globalDefinitions.hpp" #include "utilities/macros.hpp" #include "utilities/stack.inline.hpp" @@ -690,6 +692,137 @@ } }; + +class FindClassByNameClosure : public KlassInfoClosure { + private: + GrowableArray<Klass*>* _klasses; + Symbol* _classname; + public: + FindClassByNameClosure(GrowableArray<Klass*>* klasses, Symbol* classname) : + _klasses(klasses), _classname(classname) { } + + void do_cinfo(KlassInfoEntry* cie) { + if (cie->klass()->name() == _classname) { + _klasses->append(cie->klass()); + } + } +}; + +class FieldDesc { +private: + Symbol* _name; + Symbol* _signature; + int _offset; + int _index; + InstanceKlass* _holder; + AccessFlags _access_flags; + public: + FieldDesc() { + _name = NULL; + _signature = NULL; + _offset = -1; + _index = -1; + _holder = NULL; + _access_flags = AccessFlags(); + } + FieldDesc(fieldDescriptor& fd) { + _name = fd.name(); + _signature = fd.signature(); + _offset = fd.offset(); + _index = fd.index(); + 
_holder = fd.field_holder(); + _access_flags = fd.access_flags(); + } + Symbol* name() { return _name;} + Symbol* signature() { return _signature; } + int offset() { return _offset; } + int index() { return _index; } + InstanceKlass* holder() { return _holder; } + AccessFlags access_flags() { return _access_flags; } +}; + +static int compare_offset(FieldDesc* f1, FieldDesc* f2) { + return f1->offset() > f2->offset() ? 1 : -1; +} + +static void print_field(outputStream* st, int level, int offset, FieldDesc& fd, bool flattenable, bool flattened ) { + char* flattened_msg = (char*)""; + if (flattenable) { + flattened_msg = flattened ? (char*)"and flattened" : (char*)"not flattened"; + } + st->print_cr(" @ %d %*s \"%s\" %s %s %s", + offset, level * 3, "", + fd.name()->as_C_string(), + fd.signature()->as_C_string(), + flattenable ? " // flattenable" : "", + flattened_msg); +} + +static void print_flattened_field(outputStream* st, int level, int offset, InstanceKlass* klass) { + assert(klass->is_value(), "Only value classes can be flattened"); + ValueKlass* vklass = ValueKlass::cast(klass); + GrowableArray<FieldDesc>* fields = new (ResourceObj::C_HEAP, mtInternal) GrowableArray<FieldDesc>(100, true); + for (FieldStream fd(klass, false, false); !fd.eos(); fd.next()) { + if (!fd.access_flags().is_static()) { + fields->append(FieldDesc(fd.field_descriptor())); + } + } + fields->sort(compare_offset); + for(int i = 0; i < fields->length(); i++) { + FieldDesc fd = fields->at(i); + int offset2 = offset + fd.offset() - vklass->first_field_offset(); + print_field(st, level, offset2, fd, + fd.access_flags().is_flattenable(), fd.holder()->field_is_flattened(fd.index())); + if (fd.holder()->field_is_flattened(fd.index())) { + print_flattened_field(st, level + 1, offset2 , + InstanceKlass::cast(fd.holder()->get_value_field_klass(fd.index()))); + } + } +} + +void PrintClassLayout::print_class_layout(outputStream* st, char* class_name) { + KlassInfoTable cit(true); + if (cit.allocation_failed()) { + st->print_cr("ERROR: Ran out of C-heap; class layout not generated"); + return; + } + + Thread* THREAD = Thread::current(); + + Symbol* classname = SymbolTable::probe(class_name, strlen(class_name)); + + GrowableArray<Klass*>* klasses = new (ResourceObj::C_HEAP, mtInternal) GrowableArray<Klass*>(100, true); + + FindClassByNameClosure fbnc(klasses, classname); + cit.iterate(&fbnc); + + for(int i = 0; i < klasses->length(); i++) { + Klass* klass = klasses->at(i); + if (!klass->is_instance_klass()) continue; // Skip + InstanceKlass* ik = InstanceKlass::cast(klass); + int tab = 1; + st->print_cr("Class %s [@%s]:", klass->name()->as_C_string(), + klass->class_loader_data()->name()->as_C_string()); + ResourceMark rm; + GrowableArray<FieldDesc>* fields = new (ResourceObj::C_HEAP, mtInternal) GrowableArray<FieldDesc>(100, true); + for (FieldStream fd(ik, false, false); !fd.eos(); fd.next()) { + if (!fd.access_flags().is_static()) { + fields->append(FieldDesc(fd.field_descriptor())); + } + } + fields->sort(compare_offset); + for(int i = 0; i < fields->length(); i++) { + FieldDesc fd = fields->at(i); + print_field(st, 0, fd.offset(), fd, fd.access_flags().is_flattenable(), fd.holder()->field_is_flattened(fd.index())); + if (fd.holder()->field_is_flattened(fd.index())) { + print_flattened_field(st, 1, fd.offset(), + InstanceKlass::cast(fd.holder()->get_value_field_klass(fd.index()))); + } + } + } + st->cr(); +} + class RecordInstanceClosure : public ObjectClosure { private: KlassInfoTable* _cit; --- old/src/hotspot/share/memory/heapInspection.hpp 2019-07-19 10:38:30.000000000 -0400 +++ 
new/src/hotspot/share/memory/heapInspection.hpp 2019-07-19 10:38:29.000000000 -0400 @@ -341,6 +341,11 @@ void sort(); }; +class PrintClassLayout : AllStatic { + public: + static void print_class_layout(outputStream* st, char* classname); +}; + #endif // INCLUDE_SERVICES // These declarations are needed since the declaration of KlassInfoTable and --- old/src/hotspot/share/oops/instanceKlass.cpp 2019-07-19 10:38:30.000000000 -0400 +++ new/src/hotspot/share/oops/instanceKlass.cpp 2019-07-19 10:38:30.000000000 -0400 @@ -1123,7 +1123,16 @@ { for (AllFieldStream fs(this); !fs.done(); fs.next()) { if (fs.is_flattenable()) { - InstanceKlass* field_klass = InstanceKlass::cast(this->get_value_field_klass(fs.index())); + Klass* klass = this->get_value_field_klass_or_null(fs.index()); + if (klass == NULL) { + klass = SystemDictionary::resolve_or_fail(fs.signature()->fundamental_name(THREAD), Handle(THREAD, class_loader()), + Handle(THREAD, protection_domain()), true, CHECK); + this->set_value_field_klass(fs.index(), InstanceKlass::cast(klass)); + } + if (!klass->is_value()) { + THROW(vmSymbols::java_lang_IncompatibleClassChangeError()); + } + InstanceKlass* field_klass = InstanceKlass::cast(klass); field_klass->initialize(CHECK); } } --- old/src/hotspot/share/oops/instanceKlass.hpp 2019-07-19 10:38:32.000000000 -0400 +++ new/src/hotspot/share/oops/instanceKlass.hpp 2019-07-19 10:38:31.000000000 -0400 @@ -143,6 +143,9 @@ address* _unpack_handler; int* _default_value_offset; Klass** _value_array_klass; + int _alignment; + int _first_field_offset; + int _exact_size_in_bytes; friend class ValueKlass; }; @@ -175,7 +178,7 @@ loaded, // loaded and inserted in class hierarchy (but not linked yet) linked, // successfully linked/verified (but not initialized yet) being_initialized, // currently running class initializer - fully_initialized, // initialized (successfull final state) + fully_initialized, // initialized (successful final state) initialization_error // error happened during initialization }; --- old/src/hotspot/share/oops/instanceOop.hpp 2019-07-19 10:38:32.000000000 -0400 +++ new/src/hotspot/share/oops/instanceOop.hpp 2019-07-19 10:38:32.000000000 -0400 @@ -46,7 +46,7 @@ static bool contains_field_offset(int offset, int nonstatic_field_size, bool is_value) { int base_in_bytes = base_offset_in_bytes(); - if (is_value) { + if (is_value && !UseNewLayout) { // The first field of value types is aligned on a long boundary base_in_bytes = align_up(base_in_bytes, BytesPerLong); } --- old/src/hotspot/share/oops/valueKlass.cpp 2019-07-19 10:38:33.000000000 -0400 +++ new/src/hotspot/share/oops/valueKlass.cpp 2019-07-19 10:38:33.000000000 -0400 @@ -47,6 +47,9 @@ #include "utilities/copy.hpp" int ValueKlass::first_field_offset() const { + if (UseNewLayout) { + return get_first_field_offset(); + } #ifdef ASSERT int first_offset = INT_MAX; for (JavaFieldStream fs(this); !fs.done(); fs.next()) { @@ -192,18 +195,19 @@ } void ValueKlass::raw_field_copy(void* src, void* dst, size_t raw_byte_size) { - /* - * Try not to shear fields even if not an atomic store... - * - * First 3 cases handle value array store, otherwise works on the same basis - * as JVM_Clone, at this size data is aligned. The order of primitive types - * is largest to smallest, and it not possible for fields to stradle long - * copy boundaries. 
- * - * If MT without exclusive access, possible to observe partial value store, - * but not partial primitive and reference field values - */ - switch (raw_byte_size) { + if (!UseNewLayout) { + /* + * Try not to shear fields even if not an atomic store... + * + * First 3 cases handle value array store, otherwise works on the same basis + * as JVM_Clone, at this size data is aligned. The order of primitive types + * is largest to smallest, and it is not possible for fields to straddle long + * copy boundaries. + * + * If MT without exclusive access, possible to observe partial value store, + * but not partial primitive and reference field values + */ + switch (raw_byte_size) { case 1: *((jbyte*) dst) = *(jbyte*)src; break; @@ -216,6 +220,44 @@ default: assert(raw_byte_size % sizeof(jlong) == 0, "Unaligned raw_byte_size"); Copy::conjoint_jlongs_atomic((jlong*)src, (jlong*)dst, raw_byte_size >> LogBytesPerLong); + } + } else { + int size = this->get_exact_size_in_bytes(); + int length; + switch (this->get_alignment()) { + case BytesPerLong: + length = size >> LogBytesPerLong; + if (length > 0) { + Copy::conjoint_jlongs_atomic((jlong*)src, (jlong*)dst, length); + size -= length << LogBytesPerLong; + src = (jlong*)src + length; + dst = (jlong*)dst + length; + } + // Fallthrough + case BytesPerInt: + length = size >> LogBytesPerInt; + if (length > 0) { + Copy::conjoint_jints_atomic((jint*)src, (jint*)dst, length); + size -= length << LogBytesPerInt; + src = (jint*)src + length; + dst = (jint*)dst + length; + } + // Fallthrough + case BytesPerShort: + length = size >> LogBytesPerShort; + if (length > 0) { + Copy::conjoint_jshorts_atomic((jshort*)src, (jshort*)dst, length); + size -= length << LogBytesPerShort; + src = (jshort*)src + length; + dst = (jshort*)dst + length; + } + // Fallthrough + case 1: + if (size > 0) Copy::conjoint_jbytes_atomic((jbyte*)src, (jbyte*)dst, size); + break; + default: + fatal("Unsupported alignment"); + } } } --- old/src/hotspot/share/oops/valueKlass.hpp 2019-07-19 10:38:34.000000000 -0400 +++ new/src/hotspot/share/oops/valueKlass.hpp 2019-07-19 10:38:34.000000000 -0400 @@ -119,6 +119,49 @@ Klass* allocate_value_array_klass(TRAPS); + address adr_alignment() const { + assert(_adr_valueklass_fixed_block != NULL, "Should have been initialized"); + return ((address)_adr_valueklass_fixed_block) + in_bytes(byte_offset_of(ValueKlassFixedBlock, _alignment)); + } + + address adr_first_field_offset() const { + assert(_adr_valueklass_fixed_block != NULL, "Should have been initialized"); + return ((address)_adr_valueklass_fixed_block) + in_bytes(byte_offset_of(ValueKlassFixedBlock, _first_field_offset)); + } + + address adr_exact_size_in_bytes() const { + assert(_adr_valueklass_fixed_block != NULL, "Should have been initialized"); + return ((address)_adr_valueklass_fixed_block) + in_bytes(byte_offset_of(ValueKlassFixedBlock, _exact_size_in_bytes)); + } + + public: + int get_alignment() const { + return *(int*)adr_alignment(); + } + + void set_alignment(int alignment) { + *(int*)adr_alignment() = alignment; + } + + int get_first_field_offset() const { + int offset = *(int*)adr_first_field_offset(); + assert(offset != 0, "Must be initialized before use"); + return offset; + } + + void set_first_field_offset(int offset) { + *(int*)adr_first_field_offset() = offset; + } + + int get_exact_size_in_bytes() { + return *(int*)adr_exact_size_in_bytes(); + } + + void set_exact_size_in_bytes(int exact_size) { + *(int*)adr_exact_size_in_bytes() = exact_size; + } + + 
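+ // Example with illustrative values (not from the original patch): for an inline + // type where get_alignment() == BytesPerInt and get_exact_size_in_bytes() == 14, + // ValueKlass::raw_field_copy() above copies three jints (12 bytes) and then one + // jshort (2 bytes), so primitive fields are copied without tearing even though + // the total size is not a multiple of BytesPerLong. + 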
private: int collect_fields(GrowableArray<SigEntry>* sig, int base_off = 0) const; void cleanup_blobs(); --- old/src/hotspot/share/opto/compile.cpp 2019-07-19 10:38:35.000000000 -0400 +++ new/src/hotspot/share/opto/compile.cpp 2019-07-19 10:38:35.000000000 -0400 @@ -1944,6 +1944,15 @@ else t = TypeOopPtr::make_from_klass_raw(field->holder()); AliasType* atp = alias_type(t->add_offset(field->offset_in_bytes()), field); + if(!((field->is_final() || field->is_stable()) == !atp->is_rewritable())) { + ResourceMark rm; + tty->print_cr("Problematic field: %s %s.%s", + field->signature()->as_utf8(), + field->holder()->name()->as_utf8(), + field->name()->as_utf8()); + tty->print_cr("is_final = %d is_stable = %d is_rewritable = %d", + field->is_final(), field->is_stable(), atp->is_rewritable()); + } assert((field->is_final() || field->is_stable()) == !atp->is_rewritable(), "must get the rewritable bits correct"); return atp; } --- old/src/hotspot/share/runtime/globals.hpp 2019-07-19 10:38:36.000000000 -0400 +++ new/src/hotspot/share/runtime/globals.hpp 2019-07-19 10:38:36.000000000 -0400 @@ -2499,6 +2499,17 @@ \ develop(bool, ScalarizeValueTypes, true, \ "Scalarize value types in compiled code") \ + \ + product(bool, PrintNewLayout, false, \ + "Print layouts computed by the new algorithm") \ + \ + product(bool, PrintFlattenableLayouts, false, \ + "Print layout of inline classes and classes with " \ + "flattenable fields") \ + \ + product(bool, UseNewLayout, true, \ + "Use new algorithm to compute layouts") \ + \ --- old/src/hotspot/share/runtime/vmOperations.cpp 2019-07-19 10:38:38.000000000 -0400 +++ new/src/hotspot/share/runtime/vmOperations.cpp 2019-07-19 10:38:37.000000000 -0400 @@ -523,4 +523,8 @@ void VM_PrintClassHierarchy::doit() { KlassHierarchy::print_class_hierarchy(_out, _print_interfaces, _print_subclasses, _classname); } + +void VM_PrintClassLayout::doit() { + PrintClassLayout::print_class_layout(_out, _class_name); +} #endif --- old/src/hotspot/share/runtime/vmOperations.hpp 2019-07-19 10:38:38.000000000 -0400 +++ new/src/hotspot/share/runtime/vmOperations.hpp 2019-07-19 10:38:38.000000000 -0400 @@ -127,7 +127,7 @@ template(ScavengeMonitors) \ template(PrintMetadata) \ template(GTestExecuteAtSafepoint) \ - template(VTBufferStats) \ + template(ClassPrintLayout) \ class VM_Operation: public CHeapObj<mtInternal> { public: @@ -505,6 +505,16 @@ void doit(); }; +class VM_PrintClassLayout: public VM_Operation { + private: + outputStream* _out; + char* _class_name; + public: + VM_PrintClassLayout(outputStream* st, char* class_name): _out(st), _class_name(class_name) {} + VMOp_Type type() const { return VMOp_ClassPrintLayout; } + void doit(); +}; + #if INCLUDE_SERVICES class VM_PrintClassHierarchy: public VM_Operation { private: --- old/src/hotspot/share/services/diagnosticCommand.cpp 2019-07-19 10:38:39.000000000 -0400 +++ new/src/hotspot/share/services/diagnosticCommand.cpp 2019-07-19 10:38:39.000000000 -0400 @@ -96,6 +96,7 @@ DCmdFactory::register_DCmdFactory(new DCmdFactoryImpl(full_export, true, false)); DCmdFactory::register_DCmdFactory(new DCmdFactoryImpl(full_export, true, false)); DCmdFactory::register_DCmdFactory(new DCmdFactoryImpl(full_export, true, false)); + DCmdFactory::register_DCmdFactory(new DCmdFactoryImpl<PrintClassLayoutDCmd>(full_export, true, false)); DCmdFactory::register_DCmdFactory(new DCmdFactoryImpl(full_export, true, false)); #if INCLUDE_JVMTI // Both JVMTI and SERVICES have to be enabled to have this dcmd DCmdFactory::register_DCmdFactory(new DCmdFactoryImpl(full_export, true, false)); @@ -1077,6 +1078,29 @@ 
#endif +PrintClassLayoutDCmd::PrintClassLayoutDCmd(outputStream* output, bool heap) : + DCmdWithParser(output, heap), + _classname("classname", "Name of class whose layout should be printed. ", + "STRING", true) { + _dcmdparser.add_dcmd_argument(&_classname); +} + +void PrintClassLayoutDCmd::execute(DCmdSource source, TRAPS) { + VM_PrintClassLayout printClassLayoutOp(output(), _classname.value()); + VMThread::execute(&printClassLayoutOp); +} + +int PrintClassLayoutDCmd::num_arguments() { + ResourceMark rm; + PrintClassLayoutDCmd* dcmd = new PrintClassLayoutDCmd(NULL, false); + if (dcmd != NULL) { + DCmdMark mark(dcmd); + return dcmd->_dcmdparser.num_arguments(); + } else { + return 0; + } +} + class VM_DumpTouchedMethods : public VM_Operation { private: outputStream* _out; --- old/src/hotspot/share/services/diagnosticCommand.hpp 2019-07-19 10:38:40.000000000 -0400 +++ new/src/hotspot/share/services/diagnosticCommand.hpp 2019-07-19 10:38:40.000000000 -0400 @@ -425,6 +425,32 @@ virtual void execute(DCmdSource source, TRAPS); }; +class PrintClassLayoutDCmd : public DCmdWithParser { +protected: + DCmdArgument<char*> _classname; // Class name whose layout should be printed. +public: + PrintClassLayoutDCmd(outputStream* output, bool heap); + static const char* name() { + return "VM.class_print_layout"; + } + static const char* description() { + return "Print the layout of an instance of a class, including flattened fields. " + "The name of each class is followed by the ClassLoaderData* of its ClassLoader, " + "or \"null\" if loaded by the bootstrap class loader."; + } + static const char* impact() { + return "Medium: Depends on number of loaded classes."; + } + static const JavaPermission permission() { + JavaPermission p = {"java.lang.management.ManagementPermission", + "monitor", NULL}; + return p; + } + static int num_arguments(); + virtual void execute(DCmdSource source, TRAPS); +}; + + class TouchedMethodsDCmd : public DCmdWithParser { public: TouchedMethodsDCmd(outputStream* output, bool heap); --- /dev/null 2019-07-19 10:38:41.000000000 -0400 +++ new/src/hotspot/share/classfile/fieldLayoutBuilder.cpp 2019-07-19 10:38:41.000000000 -0400 @@ -0,0 +1,837 @@ +/* + * Copyright (c) 2019, 2019, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ * + */ + +#include +#include "precompiled.hpp" +#include "jvm.h" +#include "classfile/classFileParser.hpp" +#include "memory/resourceArea.hpp" +#include "oops/array.hpp" +#include "oops/instanceMirrorKlass.hpp" +#include "oops/valueKlass.hpp" +#include "runtime/fieldDescriptor.inline.hpp" + +RawBlock::RawBlock(Kind kind, int size, int alignment) { + _next_field = NULL; + _prev_field = NULL; + _next_block = NULL; + _prev_block = NULL; + _field_index = -1; // no field + assert(kind != REGULAR && kind != FLATTENED, + "Otherwise, should use the constructor with a field index argument"); + _kind = kind; + _size = size; + _alignment = alignment; + _offset = -1; + _is_reference = false; + _value_klass = NULL; + assert(_alignment > 0, "Sanity check"); +} + +RawBlock::RawBlock(int index, Kind kind, int size, int alignment, bool is_reference) { + _next_field = NULL; + _prev_field = NULL; + _next_block = NULL; + _prev_block = NULL; + _field_index = index; + assert(kind == REGULAR || kind == FLATTENED || kind == INHERITED, + "Other kinds do not have a field index"); + _kind = kind; + _size = size; + _alignment = alignment; + _offset = -1; + _is_reference = is_reference; + _value_klass = NULL; + assert(_size > 0, "Sanity check"); + assert(_alignment > 0, "Sanity check"); +} + +bool RawBlock::fit(int size, int alignment) { + int adjustment = _offset % alignment; + return _size >= size + adjustment; +} + +FieldGroup::FieldGroup(int contended_group) { + _next = NULL; + _primitive_fields = NULL; + _oop_fields = NULL; + _flattened_fields = NULL; + _contended_group = contended_group; // -1 means no contended group, 0 means default contended group + _oop_count = 0; +} + +void FieldGroup::add_primitive_field(AllFieldStream fs, BasicType type) { + int size = type2aelembytes(type); + RawBlock* block = new RawBlock(fs.index(), RawBlock::REGULAR, size, size /* alignment == size for primitive types */, false); + add_block(&_primitive_fields, block); +} + +void FieldGroup::add_oop_field(AllFieldStream fs) { + int size = type2aelembytes(T_OBJECT); + RawBlock* block = new RawBlock(fs.index(), RawBlock::REGULAR, size, size /* alignment == size for oops */, true); + add_block(&_oop_fields, block); + _oop_count++; +} + +void FieldGroup::add_flattened_field(AllFieldStream fs, ValueKlass* vk) { + // _flattened_fields list might be merged with the _primitive_fields list in the future + RawBlock* block = new RawBlock(fs.index(), RawBlock::FLATTENED, vk->get_exact_size_in_bytes(), vk->get_alignment(), false); + block->set_value_klass(vk); + add_block(&_flattened_fields, block); +} + +/* Adds a field to a field group. Inside a field group, fields are sorted by + * decreasing sizes. Fields with the same size are sorted according to their + * order of insertion (easy hack to respect field order for classes with + * hard coded offsets). 
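+ * For example, inserting fields of sizes 4, 8, 2 and then 4 produces the list + * 8, 4, 4, 2, with the two 4-byte fields kept in their declaration order.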
+ */ +void FieldGroup::add_block(RawBlock** list, RawBlock* block) { + if (*list == NULL) { + *list = block; + } else { + if (block->size() > (*list)->size()) { // cannot be >= to respect order of field (for classes with hard coded offsets) + block->set_next_field(*list); + (*list)->set_prev_field(block); + *list = block; + } else { + RawBlock* b = *list; + while (b->next_field() != NULL) { + if (b->next_field()->size() < block->size()) { + break; + } + b = b->next_field(); + } + block->set_next_field(b->next_field()); + block->set_prev_field(b); + b->set_next_field(block); + if (b->next_field() != NULL) { + b->next_field()->set_prev_field(block); + } + } + } +} + +FieldLayout::FieldLayout(Array<u2>* fields, ConstantPool* cp) { + _fields = fields; + _cp = cp; + _blocks = NULL; + _start = _blocks; + _last = _blocks; +} + +void FieldLayout::initialize_static_layout() { + _blocks = new RawBlock(RawBlock::EMPTY, INT_MAX); + _blocks->set_offset(0); + _last = _blocks; + _start = _blocks; + // Note: at this stage, InstanceMirrorKlass::offset_of_static_fields() is zero, because + // during bootstrapping, the size of the java.lang.Class is still not known when the layout + // of static fields is computed. Field offsets are fixed later when the size is known + // (see java_lang_Class::fixup_mirror()) + insert(first_empty_block(), new RawBlock(RawBlock::RESERVED, InstanceMirrorKlass::offset_of_static_fields())); + _blocks->set_offset(0); +} + +void FieldLayout::initialize_instance_layout(const InstanceKlass* super_klass) { + if (super_klass == NULL) { + _blocks = new RawBlock(RawBlock::EMPTY, INT_MAX); + _blocks->set_offset(0); + _last = _blocks; + _start = _blocks; + insert(first_empty_block(), new RawBlock(RawBlock::RESERVED, instanceOopDesc::base_offset_in_bytes())); + } else { + // The JVM could reconstruct the layouts of the super classes, in order to use the + // empty slots in these layouts to allocate current class' fields. However, some code + // in the JVM is not ready yet to find fields allocated this way, so the optimization + // is not enabled yet. +#if 0 + reconstruct_layout(super_klass); + fill_holes(super_klass); + // _start = _last; // uncomment to fill holes in super classes layouts +#else + _blocks = new RawBlock(RawBlock::EMPTY, INT_MAX); + _blocks->set_offset(0); + _last = _blocks; + insert(_last, new RawBlock(RawBlock::RESERVED, instanceOopDesc::base_offset_in_bytes())); + if (super_klass->nonstatic_field_size() > 0) { + // To take into account the space allocated to super classes' fields, this code + // uses the nonstatic_field_size() value to allocate a single INHERITED RawBlock. + // The drawback is that nonstatic_field_size() expresses the size of non-static + // fields in heapOopSize, which implies that some space could be lost at the + // end because of the rounding up of the real size. Using the exact size, with + // no rounding up, would be possible, but would require modifications to other + // code in the JVM performing field lookups (as it often expects this rounding + // to be applied). 
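+ // For example, a super class with 6 bytes of instance fields and heapOopSize == 4 + // reports nonstatic_field_size() == 2, so 8 bytes are reserved here and the + // trailing 2 bytes cannot be reused by this class's own fields.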
+ RawBlock* inherited = new RawBlock(RawBlock::INHERITED, + super_klass->nonstatic_field_size() * heapOopSize); + insert(_last, inherited); + } + _start = _last; +#endif + } +} + +RawBlock* FieldLayout::first_field_block() { + RawBlock* block = _start; + // Not sure the condition below will work well when inheriting layout with contended padding + while (block->kind() != RawBlock::INHERITED && block->kind() != RawBlock::REGULAR + && block->kind() != RawBlock::FLATTENED && block->kind() != RawBlock::PADDING) { + block = block->next_block(); + } + return block; +} + +/* The allocation logic uses a first fit strategy: the field is allocated in the + * first empty slot big enough to contain it (including padding to fit alignment + * constraints). + */ +void FieldLayout::add(RawBlock* blocks, RawBlock* start) { + if (start == NULL) { + // start = this->_blocks; + start = this->_start; + } + RawBlock* b = blocks; + RawBlock* candidate = NULL; + while (b != NULL) { + RawBlock* candidate = start; + while (candidate->kind() != RawBlock::EMPTY || !candidate->fit(b->size(), b->alignment())) candidate = candidate->next_block(); + assert(candidate != NULL && candidate->fit(b->size(), b->alignment()), "paranoid check"); + insert_field_block(candidate, b); + b = b->next_field(); + } +} + +/* The allocation logic uses a first fit strategy: the set of fields is allocated + * in the first empty slot big enough to contain the whole set (including padding + * to fit alignment constraints). + */ +void FieldLayout::add_contiguously(RawBlock* blocks, RawBlock* start) { + if (blocks == NULL) return; + if (start == NULL) { + start = _start; + } + // This code assumes that if the first block is well aligned, the following + // blocks would naturally be well aligned (no need for adjustment) + int size = 0; + RawBlock* b = blocks; + while (b != NULL) { + size += b->size(); + b = b->next_field(); + } + RawBlock* candidate = start; + while (candidate->kind() != RawBlock::EMPTY || !candidate->fit(size, blocks->alignment())) candidate = candidate->next_block(); + b = blocks; + while (b != NULL) { + insert_field_block(candidate, b); + b = b->next_field(); + assert(b == NULL || (candidate->offset() % b->alignment() == 0), "Contiguous blocks must be naturally well aligned"); + } +} + +RawBlock* FieldLayout::insert_field_block(RawBlock* slot, RawBlock* block) { + assert(slot->kind() == RawBlock::EMPTY, "Blocks can only be inserted in empty blocks"); + if (slot->offset() % block->alignment() != 0) { + int adjustment = block->alignment() - (slot->offset() % block->alignment()); + RawBlock* adj = new RawBlock(RawBlock::EMPTY, adjustment); + insert(slot, adj); + } + insert(slot, block); + if (slot->size() == 0) { + remove(slot); + } + if (UseNewLayout) { + FieldInfo::from_field_array(_fields, block->field_index())->set_offset(block->offset()); + } + return block; +} + +void FieldLayout::reconstruct_layout(const InstanceKlass* ik) { + // TODO: it makes no sense to support static fields, static fields go to + // the mirror, and are not impacted by static fields of the parent class + if (ik->super() != NULL) { + reconstruct_layout(InstanceKlass::cast(ik->super())); + } else { + _blocks = new RawBlock(RawBlock::RESERVED, instanceOopDesc::base_offset_in_bytes()); + _blocks->set_offset(0); + _last = _blocks; + _start = _blocks; + } + for (AllFieldStream fs(ik->fields(), ik->constants()); !fs.done(); fs.next()) { + BasicType type = vmSymbols::signature_type(fs.signature()); + // distinction between static and non-static fields is 
missing + if (fs.access_flags().is_static()) continue; + ik->fields_annotations(); + if (type != T_VALUETYPE) { + int size = type2aelembytes(type); + // INHERITED blocks are marked as non-reference because oop_maps are handled by their holder class + RawBlock* block = new RawBlock(fs.index(), RawBlock::INHERITED, size, size, false); + block->set_offset(fs.offset()); + insert_per_offset(block); + } else { + fatal("Not supported yet"); + } + } +} + +void FieldLayout::fill_holes(const InstanceKlass* super_klass) { + assert(_blocks != NULL, "Sanity check"); + assert(_blocks->offset() == 0, "first block must be at offset zero"); + RawBlock* b = _blocks; + while (b->next_block() != NULL) { + if (b->next_block()->offset() > (b->offset() + b->size())) { + int size = b->next_block()->offset() - (b->offset() + b->size()); + RawBlock* empty = new RawBlock(RawBlock::EMPTY, size); + empty->set_offset(b->offset() + b->size()); + empty->set_next_block(b->next_block()); + b->next_block()->set_prev_block(empty); + b->set_next_block(empty); + empty->set_prev_block(b); + } + b = b->next_block(); + } + assert(b->next_block() == NULL, "Invariant at this point"); + if (b->kind() != RawBlock::EMPTY) { + RawBlock* last = new RawBlock(RawBlock::EMPTY, INT_MAX); + last->set_offset(b->offset() + b->size()); + assert(last->offset() > 0, "Sanity check"); + b->set_next_block(last); + last->set_prev_block(b); + _last = last; + } + // Still doing the padding to have a size that can be expressed in heapOopSize + int super_end = instanceOopDesc::base_offset_in_bytes() + super_klass->nonstatic_field_size() * heapOopSize; + if (_last->offset() < super_end) { + RawBlock* padding = new RawBlock(RawBlock::PADDING, super_end - _last->offset()); + insert(_last, padding); + } +} + +RawBlock* FieldLayout::insert(RawBlock* slot, RawBlock* block) { + assert(slot->kind() == RawBlock::EMPTY, "Blocks can only be inserted in empty blocks"); + assert(slot->offset() % block->alignment() == 0, "Incompatible alignment"); + block->set_offset(slot->offset()); + slot->set_offset(slot->offset() + block->size()); + slot->set_size(slot->size() - block->size()); + block->set_prev_block(slot->prev_block()); + block->set_next_block(slot); + slot->set_prev_block(block); + if (block->prev_block() != NULL) { // suspicious test + block->prev_block()->set_next_block(block); + } + if (_blocks == slot) { + _blocks = block; + } + if (_start == slot) { + _start = block; + } + return block; +} + +void FieldLayout::insert_per_offset(RawBlock* block) { + if (_blocks == NULL) { + _blocks = block; + } else if (_blocks->offset() > block->offset()) { + block->set_next_block(_blocks); + _blocks->set_prev_block(block); + _blocks = block; + } else { + RawBlock* b = _blocks; + while (b->next_block() != NULL && b->next_block()->offset() < block->offset()) b = b->next_block(); + if (b->next_block() == NULL) { + b->set_next_block(block); + block->set_prev_block(b); + } else { + assert(b->next_block()->offset() >= block->offset(), "Sanity check"); + assert(b->next_block()->offset() > block->offset() || b->next_block()->kind() == RawBlock::EMPTY, "Sanity check"); + block->set_next_block(b->next_block()); + b->next_block()->set_prev_block(block); + block->set_prev_block(b); + b->set_next_block(block); + } + } +} + +void FieldLayout::remove(RawBlock* block) { + assert(block != NULL, "Sanity check"); + assert(block != _last, "Sanity check"); + if (_blocks == block) { + _blocks = block->next_block(); + if (_blocks != NULL) { + _blocks->set_prev_block(NULL); + } + } else { + 
assert(block->prev_block() != NULL, "_prev should be set for non-head blocks"); + block->prev_block()->set_next_block(block->next_block()); + block->next_block()->set_prev_block(block->prev_block()); + } + if (block == _start) { + _start = block->prev_block(); + } +} + +void FieldLayout::print(outputStream* output) { + ResourceMark rm; + RawBlock* b = _blocks; + while(b != _last) { + switch(b->kind()) { + case RawBlock::REGULAR: { + FieldInfo* fi = FieldInfo::from_field_array(_fields, b->field_index()); + output->print_cr(" %d %s %d %d %s %s", + b->offset(), + "REGULAR", + b->size(), + b->alignment(), + fi->signature(_cp)->as_C_string(), + fi->name(_cp)->as_C_string()); + break; + } + case RawBlock::FLATTENED: { + FieldInfo* fi = FieldInfo::from_field_array(_fields, b->field_index()); + output->print_cr(" %d %s %d %d %s %s", + b->offset(), + "FLATTENED", + b->size(), + b->alignment(), + fi->signature(_cp)->as_C_string(), + fi->name(_cp)->as_C_string()); + break; + } + case RawBlock::RESERVED: + output->print_cr(" %d %s %d", + b->offset(), + "RESERVED", + b->size()); + break; + case RawBlock::INHERITED: + output->print_cr(" %d %s %d", + b->offset(), + "INHERITED", + b->size()); + break; + case RawBlock::EMPTY: + output->print_cr(" %d %s %d", + b->offset(), + "EMPTY", + b->size()); + break; + case RawBlock::PADDING: + output->print_cr(" %d %s %d", + b->offset(), + "PADDING", + b->size()); + break; + } + b = b->next_block(); + } +} + + +FieldLayoutBuilder::FieldLayoutBuilder(ClassFileParser* cfp, FieldLayoutInfo* info) { + _cfp = cfp; + _info = info; + _fields = NULL; + _root_group = NULL; + _contended_groups = NULL; + _static_fields = NULL; + _layout = NULL; + _static_layout = NULL; + _nonstatic_oopmap_count = 0; + // Inline class specific information + _alignment = -1; + _first_field_offset = -1; + _exact_size_in_bytes = -1; + _has_nonstatic_fields = false; + _has_flattening_information = _cfp->is_value_type(); +} + +FieldGroup* FieldLayoutBuilder::get_contended_group(int g) { + assert(g>0, "must only be called for named contended groups"); + if (_contended_groups == NULL) { + _contended_groups = new FieldGroup(g); + return _contended_groups; + } + FieldGroup* group = _contended_groups; + while(group->next() != NULL) { + if (group->contended_group() == g) break; + group = group->next(); + } + if (group->contended_group() == g) return group; + group->set_next(new FieldGroup(g)); + return group->next(); +} + +void FieldLayoutBuilder::prologue() { + _layout = new FieldLayout(_cfp->_fields, _cfp->_cp); + const InstanceKlass* super_klass = _cfp->_super_klass; + _layout->initialize_instance_layout(super_klass); + if (super_klass != NULL) { + _has_nonstatic_fields = super_klass->has_nonstatic_fields(); + } + _static_layout = new FieldLayout(_cfp->_fields, _cfp->_cp); + _static_layout->initialize_static_layout(); + _static_fields = new FieldGroup(); + _root_group = new FieldGroup(); + _contended_groups = NULL; +} + +/* Field sorting for regular (non-inline) classes: + * - fields are sorted into static and non-static fields + * - non-static fields are also sorted according to their contention group + * (support of the @Contended annotation) + * - @Contended annotation is ignored for static fields + * - field flattening decisions are taken in this method + */ +void FieldLayoutBuilder::regular_field_sorting(TRAPS) { + assert(!_cfp->is_value_type(), "Should only be used for non-inline classes"); + for (AllFieldStream fs(_cfp->_fields, _cfp->_cp); !fs.done(); fs.next()) { + FieldGroup* group = NULL; + if 
(fs.access_flags().is_static()) { + group = _static_fields; + } else { + _has_nonstatic_fields = true; + if (fs.is_contended()) { + int g = fs.contended_group(); + if (g == 0) { + // default group means the field is alone in its contended group + group = new FieldGroup(true); + group->set_next(_contended_groups); + _contended_groups = group; + } else { + group = get_contended_group(g); + } + } else { + group = _root_group; + } + } + assert(group != NULL, "invariant"); + BasicType type = vmSymbols::signature_type(fs.signature()); + switch(type) { + case T_BYTE: + case T_CHAR: + case T_DOUBLE: + case T_FLOAT: + case T_INT: + case T_LONG: + case T_SHORT: + case T_BOOLEAN: + group->add_primitive_field(fs, type); + break; + case T_OBJECT: + case T_ARRAY: + if (group != _static_fields) _nonstatic_oopmap_count++; + group->add_oop_field(fs); + break; + case T_VALUETYPE: { + if (group == _static_fields) { + // static fields are never flattened + group->add_oop_field(fs); + } else { + _has_flattening_information = true; + // Flattening decision to be taken here + // This code assumes all verifications have been performed before + // (field is a flattenable field, field's type has been loaded + // and it is an inline klass) + Klass* klass = + SystemDictionary::resolve_flattenable_field_or_fail(&fs, + Handle(THREAD, _cfp->_loader_data->class_loader()), + _cfp->_protection_domain, true, CHECK); + assert(klass != NULL, "Sanity check"); + ValueKlass* vk = ValueKlass::cast(klass); + bool flattened = (ValueFieldMaxFlatSize < 0) + || (vk->size_helper() * HeapWordSize) <= ValueFieldMaxFlatSize; + if (flattened) { + group->add_flattened_field(fs, vk); + _nonstatic_oopmap_count += vk->nonstatic_oop_map_count(); + fs.set_flattened(true); + } else { + _nonstatic_oopmap_count++; + group->add_oop_field(fs); + } + } + break; + } + default: + fatal("Something wrong?"); + } + } +} +/* Field sorting for inline classes: + * - because inline classes are immutable, the @Contended annotation is ignored + * when computing their layout (with only read operations, there's no false + * sharing issue) + * - this method also records the alignment of the field with the most + * constraining alignment; this value is then used as the alignment + * constraint when flattening this inline type into another container + * - field flattening decisions are taken in this method (those decisions are + * currently only based on the size of the fields to be flattened, the size + * of the resulting instance is not considered) + */ +void FieldLayoutBuilder::inline_class_field_sorting(TRAPS) { + assert(_cfp->is_value_type(), "Should only be used for inline classes"); + int alignment = 1; + for (AllFieldStream fs(_cfp->_fields, _cfp->_cp); !fs.done(); fs.next()) { + FieldGroup* group = NULL; + int field_alignment = 1; + if (fs.access_flags().is_static()) { + group = _static_fields; + } else { + _has_nonstatic_fields = true; + group = _root_group; + } + assert(group != NULL, "invariant"); + BasicType type = vmSymbols::signature_type(fs.signature()); + switch(type) { + case T_BYTE: + case T_CHAR: + case T_DOUBLE: + case T_FLOAT: + case T_INT: + case T_LONG: + case T_SHORT: + case T_BOOLEAN: + if (group != _static_fields) { + field_alignment = type2aelembytes(type); // alignment == size for primitive types + } + group->add_primitive_field(fs, type); + break; + case T_OBJECT: + case T_ARRAY: + if (group != _static_fields) { + _nonstatic_oopmap_count++; + field_alignment = type2aelembytes(type); // alignment == size for oops + } + 
group->add_oop_field(fs); + break; + case T_VALUETYPE: { + if (group == _static_fields) { + // static fields are never flattened + group->add_oop_field(fs); + } else { + // Flattening decision to be taken here + // This code assumes all verifications have been performed before + // (field is a flattenable field, field's type has been loaded + // and it is an inline klass) + Klass* klass = + SystemDictionary::resolve_flattenable_field_or_fail(&fs, + Handle(THREAD, _cfp->_loader_data->class_loader()), + _cfp->_protection_domain, true, CHECK); + assert(klass != NULL, "Sanity check"); + ValueKlass* vk = ValueKlass::cast(klass); + bool flattened = (ValueFieldMaxFlatSize < 0) + || (vk->size_helper() * HeapWordSize) <= ValueFieldMaxFlatSize; + if (flattened) { + group->add_flattened_field(fs, vk); + _nonstatic_oopmap_count += vk->nonstatic_oop_map_count(); + field_alignment = vk->get_alignment(); + fs.set_flattened(true); + } else { + _nonstatic_oopmap_count++; + field_alignment = type2aelembytes(T_OBJECT); + group->add_oop_field(fs); + } + } + break; + } + default: + fatal("Unexpected BasicType"); + } + if (!fs.access_flags().is_static() && field_alignment > alignment) alignment = field_alignment; + } + _alignment = alignment; + if (_cfp->is_value_type() && (!_has_nonstatic_fields)) { + // There are a number of fixes required throughout the type system and JIT + _cfp->throwValueTypeLimitation(THREAD_AND_LOCATION, "Value Types do not support zero instance size yet"); + return; + } +} + +/* Computation of the layout of regular classes is an evolution of the previous default layout + * (FieldAllocationStyle 1): + * - flattened fields are allocated first (because they have potentially the + * least regular shapes, and are more likely to create empty slots between them, + * which can then be used to allocate primitive or oop fields). Allocation is + * performed from the biggest to the smallest flattened field. + * - then primitive fields (from the biggest to the smallest) + * - then oop fields are allocated contiguously (to reduce the number of oopmaps + * and reduce the work of the GC). 
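+ * For example, a class declaring fields {long l; byte b; Object o} and having + * no flattened fields gets l allocated first, then b, then o in the trailing + * contiguous oop block.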
+ */
+void FieldLayoutBuilder::compute_regular_layout(TRAPS) {
+  bool need_tail_padding = false;
+  prologue();
+  regular_field_sorting(CHECK);
+  const bool is_contended_class = _cfp->_parsed_annotations->is_contended();
+  if (is_contended_class) {
+    RawBlock* padding = new RawBlock(RawBlock::PADDING, ContendedPaddingWidth);
+    // Insertion is currently easy because the current strategy doesn't try to
+    // fill holes in super class layouts; as a consequence, the _start block is
+    // also the _last_block.
+    _layout->insert(_layout->start(), padding);
+    need_tail_padding = true;
+  }
+  _layout->add(_root_group->flattened_fields());
+  _layout->add(_root_group->primitive_fields());
+  _layout->add_contiguously(_root_group->oop_fields());
+  FieldGroup* cg = _contended_groups;
+  while (cg != NULL) {
+    RawBlock* start = _layout->last_block();
+    RawBlock* padding = new RawBlock(RawBlock::PADDING, ContendedPaddingWidth);
+    _layout->insert(start, padding);
+    _layout->add(cg->flattened_fields(), start);
+    _layout->add(cg->primitive_fields(), start);
+    _layout->add(cg->oop_fields(), start);
+    need_tail_padding = true;
+    cg = cg->next();
+  }
+  if (need_tail_padding) {
+    RawBlock* padding = new RawBlock(RawBlock::PADDING, ContendedPaddingWidth);
+    _layout->insert(_layout->last_block(), padding);
+  }
+  _static_layout->add_contiguously(this->_static_fields->oop_fields());
+  _static_layout->add(this->_static_fields->primitive_fields());
+
+  epilogue();
+}
+
+/* The layout computation for inline classes uses a slightly different strategy
+ * than the one for regular classes. Regular classes have their oop fields
+ * allocated at the end of the layout to improve GC performance. Unfortunately,
+ * this strategy increases the number of empty slots inside an instance.
+ * Because the purpose of inline classes is to be embedded into other
+ * containers, it is critical to keep their size as small as possible. For
+ * this reason, the allocation strategy is:
+ * - flattened fields are allocated first (because they have potentially the
+ *   least regular shapes, and are more likely to create empty slots between
+ *   them, which can then be used to allocate primitive or oop fields).
+ *   Allocation is performed from the biggest to the smallest flattened field.
+ * - then oop fields are allocated contiguously (to reduce the number of
+ *   oopmaps and reduce the work of the GC)
+ * - then primitive fields (from the biggest to the smallest)
+ */
+void FieldLayoutBuilder::compute_inline_class_layout(TRAPS) {
+  prologue();
+  inline_class_field_sorting(CHECK);
+  if (_layout->start()->offset() % _alignment != 0) {
+    RawBlock* padding = new RawBlock(RawBlock::PADDING,
+                                     _alignment - (_layout->start()->offset() % _alignment));
+    _layout->insert(_layout->start(), padding);
+    _layout->set_start(padding->next_block());
+  }
+  _first_field_offset = _layout->start()->offset();
+  _layout->add(_root_group->flattened_fields());
+  _layout->add_contiguously(_root_group->oop_fields());
+  _layout->add(_root_group->primitive_fields());
+  _exact_size_in_bytes = _layout->last_block()->offset() - _layout->start()->offset();
+
+  _static_layout->add_contiguously(this->_static_fields->oop_fields());
+  _static_layout->add(this->_static_fields->primitive_fields());
+
+  epilogue();
+}
+
+void FieldLayoutBuilder::epilogue() {
+  // Computing oopmaps
+  int super_oop_map_count = (_cfp->_super_klass == NULL) ?
+    0 : _cfp->_super_klass->nonstatic_oop_map_count();
+  int max_oop_map_count = super_oop_map_count + _nonstatic_oopmap_count;
+
+  OopMapBlocksBuilder* nonstatic_oop_maps =
+    new OopMapBlocksBuilder(max_oop_map_count, Thread::current());
+  if (super_oop_map_count > 0) {
+    nonstatic_oop_maps->initialize_inherited_blocks(_cfp->_super_klass->start_of_nonstatic_oop_maps(),
+                                                    _cfp->_super_klass->nonstatic_oop_map_count());
+  }
+  if (_root_group->oop_fields() != NULL) {
+    nonstatic_oop_maps->add(_root_group->oop_fields()->offset(), _root_group->oop_count());
+  }
+  RawBlock* ff = _root_group->flattened_fields();
+  while (ff != NULL) {
+    ValueKlass* vklass = ff->value_klass();
+    assert(vklass != NULL, "Should have been initialized");
+    if (vklass->contains_oops()) { // add the oop maps of the flattened field
+      int diff = ff->offset() - vklass->first_field_offset();
+      const OopMapBlock* map = vklass->start_of_nonstatic_oop_maps();
+      const OopMapBlock* const last_map = map + vklass->nonstatic_oop_map_count();
+      while (map < last_map) {
+        nonstatic_oop_maps->add(map->offset() + diff, map->count());
+        map++;
+      }
+    }
+    ff = ff->next_field();
+  }
+  FieldGroup* cg = _contended_groups;
+  while (cg != NULL) {
+    if (cg->oop_count() > 0) {
+      nonstatic_oop_maps->add(cg->oop_fields()->offset(), cg->oop_count());
+    }
+    RawBlock* ff = cg->flattened_fields();
+    while (ff != NULL) {
+      ValueKlass* vklass = ff->value_klass();
+      assert(vklass != NULL, "Should have been initialized");
+      if (vklass->contains_oops()) { // add the oop maps of the flattened field
+        int diff = ff->offset() - vklass->first_field_offset();
+        const OopMapBlock* map = vklass->start_of_nonstatic_oop_maps();
+        const OopMapBlock* const last_map = map + vklass->nonstatic_oop_map_count();
+        while (map < last_map) {
+          nonstatic_oop_maps->add(map->offset() + diff, map->count());
+          map++;
+        }
+      }
+      ff = ff->next_field();
+    }
+    cg = cg->next();
+  }
+
+  // nonstatic_oop_maps->compact(Thread::current());
+
+  int instance_end = align_up(_layout->last_block()->offset(), wordSize);
+  int static_fields_end = align_up(_static_layout->last_block()->offset(), wordSize);
+  int static_fields_size = (static_fields_end -
+                            InstanceMirrorKlass::offset_of_static_fields()) / wordSize;
+  int nonstatic_field_end = align_up(_layout->last_block()->offset(), heapOopSize);
+
+  // Pass back information needed for InstanceKlass creation
+
+  _info->oop_map_blocks = nonstatic_oop_maps;
+  _info->instance_size = align_object_size(instance_end / wordSize);
+  _info->static_field_size = static_fields_size;
+  _info->nonstatic_field_size = (nonstatic_field_end - instanceOopDesc::base_offset_in_bytes()) / heapOopSize;
+  _info->has_nonstatic_fields = _has_nonstatic_fields;
+
+  if (PrintNewLayout || (PrintFlattenableLayouts && _has_flattening_information)) {
+    ResourceMark rm;
+    tty->print_cr("Layout of class %s", _cfp->_class_name->as_C_string());
+    tty->print_cr("|offset|kind|size|alignment|signature|name|");
+    tty->print_cr("Instance fields:");
+    _layout->print(tty);
+    tty->print_cr("Static fields:");
+    _static_layout->print(tty);
+    nonstatic_oop_maps->print_on(tty);
+    tty->print_cr("Instance size = %d * heapWordSize", _info->instance_size);
+    tty->print_cr("Non-static field size = %d * heapWordSize", _info->nonstatic_field_size);
+    tty->print_cr("Static field size = %d * heapWordSize", _info->static_field_size);
+    if (_cfp->is_value_type()) {
+      tty->print_cr("alignment = %d", _alignment);
+      tty->print_cr("exact_size_in_bytes = %d", _exact_size_in_bytes);
+      tty->print_cr("first_field_offset = %d", _first_field_offset);
+    }
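+    // The "---" line terminates the layout dump for this class.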
+    tty->print_cr("---");
+  }
+}
--- /dev/null 2019-07-19 10:38:42.000000000 -0400
+++ new/src/hotspot/share/classfile/fieldLayoutBuilder.hpp 2019-07-19 10:38:42.000000000 -0400
@@ -0,0 +1,267 @@
+/*
+ * Copyright (c) 2019, 2019, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_CLASSFILE_FIELDLAYOUTBUILDER_HPP
+#define SHARE_CLASSFILE_FIELDLAYOUTBUILDER_HPP
+
+#include "classfile/classFileParser.hpp"
+#include "classfile/classLoaderData.inline.hpp"
+#include "utilities/growableArray.hpp"
+
+/* The classes below are used to compute the field layout of classes. */
+
+/* A RawBlock describes an element of a layout.
+ * Each field is represented by a RawBlock.
+ * RawBlocks can also represent elements injected by the JVM:
+ * padding, empty blocks, inherited fields, etc.
+ * Every RawBlock must have a size and an alignment. The size is the
+ * exact size of the field expressed in bytes. The alignment is
+ * the alignment constraint of the field (1 for byte, 2 for short,
+ * 4 for int, 8 for long, etc.)
+ *
+ * RawBlocks are designed to be inserted into two linked lists:
+ * - a field group (using _next_field, _prev_field)
+ * - a layout (using _next_block, _prev_block)
+ *
+ * The next/prev pointers are included in the RawBlock class to reduce
+ * the number of allocations required during the computation of a layout.
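+ *
+ * For example, the RawBlock of a flattened field is linked both into its
+ * FieldGroup's flattened-field list during sorting and, once placed, into
+ * the layout's offset-ordered block list.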
+ */
+class RawBlock : public ResourceObj {
+ public:
+  enum Kind {
+    EMPTY,     // empty slot, space is taken from this to allocate fields
+    RESERVED,  // reserved for JVM usage (for instance the object header)
+    PADDING,   // padding (because of alignment constraints or @Contended)
+    REGULAR,   // primitive or oop field (including non-flattened inline fields)
+    FLATTENED, // flattened field
+    INHERITED  // field(s) inherited from super classes
+  };
+
+ private:
+  RawBlock* _next_field;
+  RawBlock* _prev_field;
+  RawBlock* _next_block;
+  RawBlock* _prev_block;
+  Kind _kind;
+  int _offset;
+  int _alignment;
+  int _size;
+  int _field_index;
+  bool _is_reference;
+  ValueKlass* _value_klass;
+
+ public:
+  RawBlock(Kind kind, int size = -1, int alignment = 1);
+  RawBlock(int index, Kind kind, int size = -1, int alignment = -1, bool is_reference = false);
+  RawBlock* next_field() const { return _next_field; }
+  void set_next_field(RawBlock* next) { _next_field = next; }
+  RawBlock* prev_field() const { return _prev_field; }
+  void set_prev_field(RawBlock* prev) { _prev_field = prev; }
+  RawBlock* next_block() const { return _next_block; }
+  void set_next_block(RawBlock* next) { _next_block = next; }
+  RawBlock* prev_block() const { return _prev_block; }
+  void set_prev_block(RawBlock* prev) { _prev_block = prev; }
+  Kind kind() const { return _kind; }
+  int offset() const {
+    assert(_offset >= 0, "Must be initialized");
+    return _offset;
+  }
+  void set_offset(int offset) { _offset = offset; }
+  int alignment() const { return _alignment; }
+  int size() const { return _size; }
+  void set_size(int size) { _size = size; }
+  int field_index() const {
+    assert(_field_index != -1, "Must be initialized");
+    return _field_index;
+  }
+  bool is_reference() const { return _is_reference; }
+  ValueKlass* value_klass() const {
+    assert(_value_klass != NULL, "Must be initialized");
+    return _value_klass;
+  }
+  void set_value_klass(ValueKlass* value_klass) { _value_klass = value_klass; }
+
+  bool fit(int size, int alignment);
+};
+
+/* A FieldGroup represents a set of fields that have to be allocated together;
+ * this is how the @Contended annotation is supported.
+ * Inside a FieldGroup, fields are sorted based on their kind: primitive,
+ * oop, or flattened.
+ */
+class FieldGroup : public ResourceObj {
+ private:
+  FieldGroup* _next;
+  RawBlock* _primitive_fields;
+  RawBlock* _oop_fields;
+  RawBlock* _flattened_fields;
+  int _contended_group;
+  int _oop_count;
+
+ public:
+  FieldGroup(int contended_group = -1);
+
+  FieldGroup* next() const { return _next; }
+  void set_next(FieldGroup* next) { _next = next; }
+  RawBlock* primitive_fields() const { return _primitive_fields; }
+  RawBlock* oop_fields() const { return _oop_fields; }
+  RawBlock* flattened_fields() const { return _flattened_fields; }
+  int contended_group() const { return _contended_group; }
+  int oop_count() const { return _oop_count; }
+
+  void add_primitive_field(AllFieldStream fs, BasicType type);
+  void add_oop_field(AllFieldStream fs);
+  void add_flattened_field(AllFieldStream fs, ValueKlass* vk);
+  void add_block(RawBlock** list, RawBlock* block);
+};
+
+/* The FieldLayout class represents a set of fields organized
+ * in a layout.
+ * An instance of FieldLayout can either represent the layout
+ * of non-static fields (used in an instance object) or the
+ * layout of static fields (to be included in the class mirror).
+ *
+ * _blocks is a pointer to a list of RawBlocks ordered by increasing
+ * offset.
+ * _start points to the RawBlock with the first offset that can
+ * be used to allocate fields of the current class.
+ * _last points to the last RawBlock of the list. In order to
+ * simplify the code, the RawBlock list always ends with an
+ * EMPTY block (the kind of RawBlock from which space is taken
+ * to allocate fields) whose size is big enough to satisfy all
+ * field allocations.
+ */
+class FieldLayout : public ResourceObj {
+ private:
+  Array<u2>* _fields;
+  ConstantPool* _cp;
+  RawBlock* _blocks;
+  RawBlock* _start;
+  RawBlock* _last;
+
+ public:
+  FieldLayout(Array<u2>* fields, ConstantPool* cp);
+  void initialize_static_layout();
+  void initialize_instance_layout(const InstanceKlass* ik);
+
+  RawBlock* first_empty_block() {
+    RawBlock* block = _start;
+    while (block->kind() != RawBlock::EMPTY) {
+      block = block->next_block();
+    }
+    return block;
+  }
+
+  RawBlock* start() { return _start; }
+  void set_start(RawBlock* start) { _start = start; }
+  RawBlock* last_block() { return _last; }
+
+  RawBlock* first_field_block();
+  void add(RawBlock* blocks, RawBlock* start = NULL);
+  void add_contiguously(RawBlock* blocks, RawBlock* start = NULL);
+  RawBlock* insert_field_block(RawBlock* slot, RawBlock* block);
+  void reconstruct_layout(const InstanceKlass* ik);
+  void fill_holes(const InstanceKlass* ik);
+  RawBlock* insert(RawBlock* slot, RawBlock* block);
+  void insert_per_offset(RawBlock* block);
+  void remove(RawBlock* block);
+  void print(outputStream* output);
+};
+
+/* FieldLayoutBuilder is the main entry point for layout computation.
+ * This class has two methods to generate a layout: one for identity
+ * classes and one for inline classes. The rationale for having two
+ * methods is that each kind of class has a different set of goals
+ * regarding its layout, so instead of mixing two layout strategies
+ * into a single method, each kind has its own method (see the comments
+ * below for more details about the allocation strategies).
+ *
+ * Computing the layout of a class always goes through 4 steps:
+ *   1 - Prologue: preparation of data structures and gathering of
+ *       layout information inherited from super classes
+ *   2 - Field sorting: fields are sorted according to their
+ *       kind (oop, primitive, inline class) and their contention
+ *       annotation (if any)
+ *   3 - Layout computation: the layout is computed from the set of
+ *       lists generated during step 2
+ *   4 - Epilogue: oopmaps are generated and layout information is
+ *       prepared so other VM components can use it (instance size,
+ *       static field size, non-static field size, etc.)
+ *
+ * Steps 1 and 4 are common to all layout computations. Steps 2 and 3
+ * differ for inline classes and identity classes.
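+ *
+ * For a regular class, the sequence therefore looks roughly like:
+ *   prologue();                   // step 1
+ *   regular_field_sorting(CHECK); // step 2
+ *   _layout->add(...);            // step 3, once per field list
+ *   epilogue();                   // step 4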
+ */
+class FieldLayoutBuilder : public ResourceObj {
+ private:
+  ClassFileParser* _cfp;
+  FieldLayoutInfo* _info;
+  RawBlock* _fields;
+  FieldGroup* _root_group;
+  FieldGroup* _contended_groups;
+  FieldGroup* _static_fields;
+  FieldLayout* _layout;
+  FieldLayout* _static_layout;
+  int _nonstatic_oopmap_count;
+  int _alignment;
+  int _first_field_offset;
+  int _exact_size_in_bytes;
+  bool _has_nonstatic_fields;
+  bool _has_flattening_information;
+
+  FieldGroup* get_contended_group(int g);
+
+ public:
+  FieldLayoutBuilder(ClassFileParser* cfp, FieldLayoutInfo* info);
+
+  int get_alignment() {
+    assert(_alignment != -1, "Uninitialized");
+    return _alignment;
+  }
+
+  int get_first_field_offset() {
+    assert(_first_field_offset != -1, "Uninitialized");
+    return _first_field_offset;
+  }
+
+  int get_exact_size_in_byte() {
+    assert(_exact_size_in_bytes != -1, "Uninitialized");
+    return _exact_size_in_bytes;
+  }
+
+  void compute_regular_layout(TRAPS);
+  void compute_inline_class_layout(TRAPS);
+
+ protected:
+  void prologue();
+  void epilogue();
+  void regular_field_sorting(TRAPS);
+  void inline_class_field_sorting(TRAPS);
+};
+
+#endif // SHARE_CLASSFILE_FIELDLAYOUTBUILDER_HPP
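For reference, a minimal sketch of how a caller such as ClassFileParser could drive the builder declared above; the `parser` variable and the surrounding TRAPS/CHECK context are assumed for illustration and are not part of the patch:

    FieldLayoutInfo info;
    FieldLayoutBuilder builder(parser, &info);
    if (parser->is_value_type()) {
      builder.compute_inline_class_layout(CHECK);
      // Inline classes additionally expose flattening metadata:
      int alignment    = builder.get_alignment();           // alignment constraint when flattened
      int first_offset = builder.get_first_field_offset();  // start of the field payload
    } else {
      builder.compute_regular_layout(CHECK);
    }
    // info now carries oop_map_blocks, instance_size, static_field_size,
    // nonstatic_field_size and has_nonstatic_fields for InstanceKlass creation.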