
src/hotspot/share/classfile/fieldLayoutBuilder.cpp

rev 59083 : DRAFT 8236522: NonTearable marker interface for inline classes to enforce atomicity


 522                                        Handle protection_domain, FieldLayoutInfo* info) :
 523   _classname(classname),
 524   _super_klass(super_klass),
 525   _constant_pool(constant_pool),
 526   _fields(fields),
 527   _info(info),
 528   _root_group(NULL),
 529   _contended_groups(GrowableArray<FieldGroup*>(8)),
 530   _static_fields(NULL),
 531   _layout(NULL),
 532   _static_layout(NULL),
 533   _class_loader_data(class_loader_data),
 534   _protection_domain(protection_domain),
 535   _nonstatic_oopmap_count(0),
 536   _alignment(-1),
 537   _first_field_offset(-1),
 538   _exact_size_in_bytes(-1),
 539   _has_nonstatic_fields(false),
 540   _is_contended(is_contended),
 541   _is_value_type(is_value_type),
 542   _has_flattening_information(is_value_type) {}
 543 
 544 FieldGroup* FieldLayoutBuilder::get_or_create_contended_group(int g) {
 545   assert(g > 0, "must only be called for named contended groups");
 546   FieldGroup* fg = NULL;
 547   for (int i = 0; i < _contended_groups.length(); i++) {
 548     fg = _contended_groups.at(i);
 549     if (fg->contended_group() == g) return fg;
 550   }
 551   fg = new FieldGroup(g);
 552   _contended_groups.append(fg);
 553   return fg;
 554 }
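
The lookup above is a plain get-or-create: named @Contended groups (g > 0) must be shared, so equal group ids have to resolve to the same FieldGroup. A standalone sketch of the same pattern using STL types rather than HotSpot's GrowableArray (all names here are hypothetical):

    #include <vector>

    struct Group { int id; };

    // Return the group with this id, creating it on first use; equal
    // ids always resolve to the same Group instance.
    Group* get_or_create(std::vector<Group*>& groups, int id) {
      for (Group* g : groups) {
        if (g->id == id) return g;
      }
      Group* g = new Group{id};
      groups.push_back(g);
      return g;
    }

Anonymous @Contended fields (group id 0) deliberately bypass this lookup, as regular_field_sorting() below shows: each one gets a fresh FieldGroup of its own.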
 555 
 556 void FieldLayoutBuilder::prologue() {
 557   _layout = new FieldLayout(_fields, _constant_pool);
 558   const InstanceKlass* super_klass = _super_klass;
 559   _layout->initialize_instance_layout(super_klass);
 560   if (super_klass != NULL) {
 561     _has_nonstatic_fields = super_klass->has_nonstatic_fields();
 562   }
 563   _static_layout = new FieldLayout(_fields, _constant_pool);
 564   _static_layout->initialize_static_layout();
 565   _static_fields = new FieldGroup();
 566   _root_group = new FieldGroup();
 567 }
 568 
 569 // Field sorting for regular (non-inline) classes:
 570 //   - fields are sorted into static and non-static fields
 571 //   - non-static fields are also sorted according to their contention
 572 //     group (supporting the @Contended annotation)
 573 //   - the @Contended annotation is ignored for static fields
 574 //   - field flattening decisions are made in this method
 575 void FieldLayoutBuilder::regular_field_sorting() {
 576   for (AllFieldStream fs(_fields, _constant_pool); !fs.done(); fs.next()) {
 577     FieldGroup* group = NULL;
 578     if (fs.access_flags().is_static()) {
 579       group = _static_fields;
 580     } else {
 581       _has_nonstatic_fields = true;
 582       if (fs.is_contended()) {
 583         int g = fs.contended_group();
 584         if (g == 0) {
 585           group = new FieldGroup(true);
 586           _contended_groups.append(group);
 587         } else {
 588           group = get_or_create_contended_group(g);
 589         }
 590       } else {
 591         group = _root_group;
 592       }
 593     }
 594     assert(group != NULL, "invariant");
 595     BasicType type = Signature::basic_type(fs.signature());
 596     switch(type) {
 597     case T_BYTE:
 598     case T_CHAR:
 599     case T_DOUBLE:
 600     case T_FLOAT:
 601     case T_INT:


 609       if (group != _static_fields) _nonstatic_oopmap_count++;
 610       group->add_oop_field(fs);
 611       break;
 612     case T_VALUETYPE:
 613       if (group == _static_fields) {
 614         // static fields are never flattened
 615         group->add_oop_field(fs);
 616       } else {
 617         _has_flattening_information = true;
 618         // The flattening decision is made here.
 619         // This code assumes all verifications have been performed before
 620         // (the field is a flattenable field, the field's type has been loaded,
 621         // and it is an inline klass).
 622         Thread* THREAD = Thread::current();
 623         Klass* klass =
 624             SystemDictionary::resolve_flattenable_field_or_fail(&fs,
 625                                                                 Handle(THREAD, _class_loader_data->class_loader()),
 626                                                                 _protection_domain, true, THREAD);
 627         assert(klass != NULL, "Sanity check");
 628         ValueKlass* vk = ValueKlass::cast(klass);
 629         bool has_flattenable_size = (ValueFieldMaxFlatSize < 0)
 630                                    || (vk->size_helper() * HeapWordSize) <= ValueFieldMaxFlatSize;
 631         // volatile fields are currently never flattened; this could change in the future
 632         bool flattened = !fs.access_flags().is_volatile() && has_flattenable_size;
 633         if (flattened) {
 634           group->add_flattened_field(fs, vk);
 635           _nonstatic_oopmap_count += vk->nonstatic_oop_map_count();
 636           fs.set_flattened(true);
 637         } else {
 638           _nonstatic_oopmap_count++;
 639           group->add_oop_field(fs);
 640         }
 641       }
 642       break;
 643     default:
 644       fatal("Unexpected BasicType");
 645     }
 646   }
 647   _root_group->sort_by_size();
 648   _static_fields->sort_by_size();
 649   if (!_contended_groups.is_empty()) {
 650     for (int i = 0; i < _contended_groups.length(); i++) {
 651       _contended_groups.at(i)->sort_by_size();
 652     }
 653   }
 654 }
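
In this version, the size gate in the T_VALUETYPE branch is essentially the whole flattening policy: a non-static value field is flattened unless it is volatile or its type's payload exceeds ValueFieldMaxFlatSize. A minimal standalone restatement of that arithmetic, assuming a 64-bit VM (HeapWordSize == 8); the helper name is hypothetical:

    // A negative -XX:ValueFieldMaxFlatSize disables the size check.
    bool has_flattenable_size(int size_in_words, long max_flat_size) {
      const int HeapWordSize = 8;  // assumption: 64-bit heap words
      return max_flat_size < 0
          || (long)size_in_words * HeapWordSize <= max_flat_size;
    }
    // e.g. with a 128-byte limit, a 2-word (16-byte) value is flattened,
    // while a 20-word (160-byte) value stays behind an oop indirection.

Note that vk->size_helper() measures the full instance size in heap words, header included, so the gate compares a little more than the raw field payload against the limit.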
 655 
 656 /* Field sorting for inline classes:
 657  *   - because inline classes are immutable, the @Contended annotation is
 658  *     ignored when computing their layout (with only read operations,
 659  *     there is no false-sharing issue)
 660  *   - this method also records the most constraining field alignment;
 661  *     that value is then used as the alignment constraint when flattening
 662  *     this inline type into another container
 663  *   - field flattening decisions are made in this method (those decisions
 664  *     are currently based only on the size of the fields to be flattened;
 665  *     the size of the resulting instance is not considered)
 666  */
 667 void FieldLayoutBuilder::inline_class_field_sorting(TRAPS) {
 668   assert(_is_value_type, "Should only be used for inline classes");
 669   int alignment = 1;
 670   for (AllFieldStream fs(_fields, _constant_pool); !fs.done(); fs.next()) {
 671     FieldGroup* group = NULL;
 672     int field_alignment = 1;
 673     if (fs.access_flags().is_static()) {
 674       group = _static_fields;
 675     } else {
 676       _has_nonstatic_fields = true;
 677       group = _root_group;
 678     }
 679     assert(group != NULL, "invariant");
 680     BasicType type = Signature::basic_type(fs.signature());
 681     switch(type) {
 682     case T_BYTE:
 683     case T_CHAR:
 684     case T_DOUBLE:
 685     case T_FLOAT:
 686     case T_INT:
 687     case T_LONG:
 688     case T_SHORT:
 689     case T_BOOLEAN:
 690       if (group != _static_fields) {
 691         field_alignment = type2aelembytes(type); // alignment == size for primitive types
 692       }
 693       group->add_primitive_field(fs, type);
 694       break;
 695     case T_OBJECT:
 696     case T_ARRAY:


 699         field_alignment = type2aelembytes(type); // alignment == size for oops
 700       }
 701       group->add_oop_field(fs);
 702       break;
 703     case T_VALUETYPE: {
 704       if (group == _static_fields) {
 705         // static fields are never flattened
 706         group->add_oop_field(fs);
 707       } else {
 708         // The flattening decision is made here.
 709         // This code assumes all verifications have been performed before
 710         // (the field is a flattenable field, the field's type has been loaded,
 711         // and it is an inline klass).
 712         Thread* THREAD = Thread::current();
 713         Klass* klass =
 714             SystemDictionary::resolve_flattenable_field_or_fail(&fs,
 715                 Handle(THREAD, _class_loader_data->class_loader()),
 716                 _protection_domain, true, CHECK);
 717         assert(klass != NULL, "Sanity check");
 718         ValueKlass* vk = ValueKlass::cast(klass);
 719         bool flattened = (ValueFieldMaxFlatSize < 0)
 720                          || (vk->size_helper() * HeapWordSize) <= ValueFieldMaxFlatSize;
 721         if (flattened) {
 722           group->add_flattened_field(fs, vk);
 723           _nonstatic_oopmap_count += vk->nonstatic_oop_map_count();
 724           field_alignment = vk->get_alignment();
 725           fs.set_flattened(true);
 726         } else {
 727           _nonstatic_oopmap_count++;
 728           field_alignment = type2aelembytes(T_OBJECT);
 729           group->add_oop_field(fs);
 730         }
 731       }
 732       break;
 733     }
 734     default:
 735       fatal("Unexpected BasicType");
 736     }
 737     if (!fs.access_flags().is_static() && field_alignment > alignment) alignment = field_alignment;
 738   }
 739   _alignment = alignment;
 740   if (!_has_nonstatic_fields) {
 741     // There are a number of fixes required throughout the type system and JIT
 742     Exceptions::fthrow(THREAD_AND_LOCATION,
 743                        vmSymbols::java_lang_ClassFormatError(),
 744                        "Value Types do not support zero instance size yet");
 745     return;


 965         nonstatic_oop_maps->add(cg->oop_fields()->at(0)->offset(), cg->oop_count());
 966       }
 967     }
 968   }
 969 
 970   nonstatic_oop_maps->compact();
 971 
 972   int instance_end = align_up(_layout->last_block()->offset(), wordSize);
 973   int static_fields_end = align_up(_static_layout->last_block()->offset(), wordSize);
 974   int static_fields_size = (static_fields_end -
 975       InstanceMirrorKlass::offset_of_static_fields()) / wordSize;
 976   int nonstatic_field_end = align_up(_layout->last_block()->offset(), heapOopSize);
 977 
 978   // Pass back information needed for InstanceKlass creation
 979 
 980   _info->oop_map_blocks = nonstatic_oop_maps;
 981   _info->_instance_size = align_object_size(instance_end / wordSize);
 982   _info->_static_field_size = static_fields_size;
 983   _info->_nonstatic_field_size = (nonstatic_field_end - instanceOopDesc::base_offset_in_bytes()) / heapOopSize;
 984   _info->_has_nonstatic_fields = _has_nonstatic_fields;

 985 
 986   if (PrintFieldLayout) {
 987     ResourceMark rm;
 988     tty->print_cr("Layout of class %s", _classname->as_C_string());
 989     tty->print_cr("Instance fields:");
 990     _layout->print(tty, false, _super_klass);
 991     tty->print_cr("Static fields:");
 992     _static_layout->print(tty, true, NULL);
 993     tty->print_cr("Instance size = %d bytes", _info->_instance_size * wordSize);
 994     if (_is_value_type) {
 995       tty->print_cr("First field offset = %d", _first_field_offset);
 996       tty->print_cr("Alignment = %d bytes", _alignment);
 997       tty->print_cr("Exact size = %d bytes", _exact_size_in_bytes);
 998     }
 999     tty->print_cr("---");
1000   }
1001 }
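
The epilogue's size bookkeeping above is plain alignment arithmetic. A worked example under common 64-bit assumptions (wordSize == 8, heapOopSize == 8 with uncompressed oops); align_up_bytes is written out here only for illustration:

    // Round value up to the next multiple of alignment (a power of two).
    int align_up_bytes(int value, int alignment) {
      return (value + alignment - 1) & ~(alignment - 1);
    }
    // Suppose the last field block ends at byte offset 28. Then:
    //   instance_end   = align_up_bytes(28, 8) = 32 bytes
    //   _instance_size = 32 / 8 = 4 words, further rounded by
    //                    align_object_size() to the object grain
    // Static field size is measured from offset_of_static_fields() in
    // the class mirror; non-static size is counted in heapOopSize units
    // starting at instanceOopDesc::base_offset_in_bytes().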
1002 
1003 void FieldLayoutBuilder::build_layout(TRAPS) {
1004   if (_classname == vmSymbols::java_lang_ref_Reference()) {


 522                                        Handle protection_domain, FieldLayoutInfo* info) :
 523   _classname(classname),
 524   _super_klass(super_klass),
 525   _constant_pool(constant_pool),
 526   _fields(fields),
 527   _info(info),
 528   _root_group(NULL),
 529   _contended_groups(GrowableArray<FieldGroup*>(8)),
 530   _static_fields(NULL),
 531   _layout(NULL),
 532   _static_layout(NULL),
 533   _class_loader_data(class_loader_data),
 534   _protection_domain(protection_domain),
 535   _nonstatic_oopmap_count(0),
 536   _alignment(-1),
 537   _first_field_offset(-1),
 538   _exact_size_in_bytes(-1),
 539   _has_nonstatic_fields(false),
 540   _is_contended(is_contended),
 541   _is_value_type(is_value_type),
 542   _has_flattening_information(is_value_type),
 543   _has_nonatomic_values(false),
 544   _atomic_field_count(0)
 545  {}
 546 
 547 FieldGroup* FieldLayoutBuilder::get_or_create_contended_group(int g) {
 548   assert(g > 0, "must only be called for named contended groups");
 549   FieldGroup* fg = NULL;
 550   for (int i = 0; i < _contended_groups.length(); i++) {
 551     fg = _contended_groups.at(i);
 552     if (fg->contended_group() == g) return fg;
 553   }
 554   fg = new FieldGroup(g);
 555   _contended_groups.append(fg);
 556   return fg;
 557 }
 558 
 559 void FieldLayoutBuilder::prologue() {
 560   _layout = new FieldLayout(_fields, _constant_pool);
 561   const InstanceKlass* super_klass = _super_klass;
 562   _layout->initialize_instance_layout(super_klass);
 563   if (super_klass != NULL) {
 564     _has_nonstatic_fields = super_klass->has_nonstatic_fields();
 565   }
 566   _static_layout = new FieldLayout(_fields, _constant_pool);
 567   _static_layout->initialize_static_layout();
 568   _static_fields = new FieldGroup();
 569   _root_group = new FieldGroup();
 570 }
 571 
 572 // Field sorting for regular (non-inline) classes:
 573 //   - fields are sorted into static and non-static fields
 574 //   - non-static fields are also sorted according to their contention
 575 //     group (supporting the @Contended annotation)
 576 //   - the @Contended annotation is ignored for static fields
 577 //   - field flattening decisions are made in this method
 578 void FieldLayoutBuilder::regular_field_sorting() {
 579   for (AllFieldStream fs(_fields, _constant_pool); !fs.done(); fs.next()) {
 580     FieldGroup* group = NULL;
 581     if (fs.access_flags().is_static()) {
 582       group = _static_fields;
 583     } else {
 584       _has_nonstatic_fields = true;
 585       _atomic_field_count++;  // we might decrement this
 586       if (fs.is_contended()) {
 587         int g = fs.contended_group();
 588         if (g == 0) {
 589           group = new FieldGroup(true);
 590           _contended_groups.append(group);
 591         } else {
 592           group = get_or_create_contended_group(g);
 593         }
 594       } else {
 595         group = _root_group;
 596       }
 597     }
 598     assert(group != NULL, "invariant");
 599     BasicType type = Signature::basic_type(fs.signature());
 600     switch(type) {
 601     case T_BYTE:
 602     case T_CHAR:
 603     case T_DOUBLE:
 604     case T_FLOAT:
 605     case T_INT:


 613       if (group != _static_fields) _nonstatic_oopmap_count++;
 614       group->add_oop_field(fs);
 615       break;
 616     case T_VALUETYPE:
 617       if (group == _static_fields) {
 618         // static fields are never flattened
 619         group->add_oop_field(fs);
 620       } else {
 621         _has_flattening_information = true;
 622         // The flattening decision is made here.
 623         // This code assumes all verifications have been performed before
 624         // (the field is a flattenable field, the field's type has been loaded,
 625         // and it is an inline klass).
 626         Thread* THREAD = Thread::current();
 627         Klass* klass =
 628             SystemDictionary::resolve_flattenable_field_or_fail(&fs,
 629                                                                 Handle(THREAD, _class_loader_data->class_loader()),
 630                                                                 _protection_domain, true, THREAD);
 631         assert(klass != NULL, "Sanity check");
 632         ValueKlass* vk = ValueKlass::cast(klass);
 633         bool too_big_to_flatten = (ValueFieldMaxFlatSize >= 0 &&
 634                                    (vk->size_helper() * HeapWordSize) > ValueFieldMaxFlatSize);
 635         bool too_atomic_to_flatten = vk->is_declared_atomic();
 636         bool too_volatile_to_flatten = fs.access_flags().is_volatile();
 637         if (vk->is_naturally_atomic()) {
 638           too_atomic_to_flatten = false;
 639           //too_volatile_to_flatten = false; //FIXME
 640           // volatile fields are currently never flattened; this could change in the future
 641         }
 642         if (!(too_big_to_flatten | too_atomic_to_flatten | too_volatile_to_flatten)) {
 643           group->add_flattened_field(fs, vk);
 644           _nonstatic_oopmap_count += vk->nonstatic_oop_map_count();
 645           fs.set_flattened(true);
 646           if (!vk->is_atomic()) {  // flat and non-atomic: take note
 647             _has_nonatomic_values = true;
 648             _atomic_field_count--;  // every other field is atomic but this one
 649           }
 650         } else {
 651           _nonstatic_oopmap_count++;
 652           group->add_oop_field(fs);
 653         }
 654       }
 655       break;
 656     default:
 657       fatal("Unexpected BasicType");
 658     }
 659   }
 660   _root_group->sort_by_size();
 661   _static_fields->sort_by_size();
 662   if (!_contended_groups.is_empty()) {
 663     for (int i = 0; i < _contended_groups.length(); i++) {
 664       _contended_groups.at(i)->sort_by_size();
 665     }
 666   }
 667 }
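
The new flattening policy replaces the single size-and-volatility test with three independent gates, and exempts naturally atomic value classes (those that cannot tear) from the atomicity gate introduced for NonTearable types. A hedged standalone restatement, with a hypothetical helper whose inputs mirror the locals above:

    bool should_flatten(bool too_big_to_flatten,
                        bool declared_atomic,    // type asks for atomicity
                        bool field_is_volatile,
                        bool naturally_atomic) { // one simple field only
      bool too_atomic_to_flatten   = declared_atomic;
      bool too_volatile_to_flatten = field_is_volatile;
      if (naturally_atomic) {
        // A naturally atomic value cannot tear, so its atomicity
        // request costs nothing; the volatile gate is deliberately
        // left in place (see the FIXME in the code above).
        too_atomic_to_flatten = false;
      }
      return !(too_big_to_flatten
             | too_atomic_to_flatten
             | too_volatile_to_flatten);
    }

Note the accompanying bookkeeping: every non-static field is first counted in _atomic_field_count, and a field that ends up flat and non-atomic decrements the count and sets _has_nonatomic_values; the epilogue uses both to decide whether the class as a whole is naturally atomic.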
 668 
 669 /* Field sorting for inline classes:
 670  *   - because inline classes are immutable, the @Contended annotation is
 671  *     ignored when computing their layout (with only read operations,
 672  *     there is no false-sharing issue)
 673  *   - this method also records the most constraining field alignment;
 674  *     that value is then used as the alignment constraint when flattening
 675  *     this inline type into another container
 676  *   - field flattening decisions are made in this method (those decisions
 677  *     are currently based only on the size of the fields to be flattened;
 678  *     the size of the resulting instance is not considered)
 679  */
 680 void FieldLayoutBuilder::inline_class_field_sorting(TRAPS) {
 681   assert(_is_value_type, "Should only be used for inline classes");
 682   int alignment = 1;
 683   for (AllFieldStream fs(_fields, _constant_pool); !fs.done(); fs.next()) {
 684     FieldGroup* group = NULL;
 685     int field_alignment = 1;
 686     if (fs.access_flags().is_static()) {
 687       group = _static_fields;
 688     } else {
 689       _has_nonstatic_fields = true;
 690       _atomic_field_count++;  // we might decrement this
 691       group = _root_group;
 692     }
 693     assert(group != NULL, "invariant");
 694     BasicType type = Signature::basic_type(fs.signature());
 695     switch(type) {
 696     case T_BYTE:
 697     case T_CHAR:
 698     case T_DOUBLE:
 699     case T_FLOAT:
 700     case T_INT:
 701     case T_LONG:
 702     case T_SHORT:
 703     case T_BOOLEAN:
 704       if (group != _static_fields) {
 705         field_alignment = type2aelembytes(type); // alignment == size for primitive types
 706       }
 707       group->add_primitive_field(fs, type);
 708       break;
 709     case T_OBJECT:
 710     case T_ARRAY:


 713         field_alignment = type2aelembytes(type); // alignment == size for oops
 714       }
 715       group->add_oop_field(fs);
 716       break;
 717     case T_VALUETYPE: {
 718       if (group == _static_fields) {
 719         // static fields are never flattened
 720         group->add_oop_field(fs);
 721       } else {
 722         // The flattening decision is made here.
 723         // This code assumes all verifications have been performed before
 724         // (the field is a flattenable field, the field's type has been loaded,
 725         // and it is an inline klass).
 726         Thread* THREAD = Thread::current();
 727         Klass* klass =
 728             SystemDictionary::resolve_flattenable_field_or_fail(&fs,
 729                 Handle(THREAD, _class_loader_data->class_loader()),
 730                 _protection_domain, true, CHECK);
 731         assert(klass != NULL, "Sanity check");
 732         ValueKlass* vk = ValueKlass::cast(klass);
 733         bool too_big_to_flatten = (ValueFieldMaxFlatSize >= 0 &&
 734                                    (vk->size_helper() * HeapWordSize) > ValueFieldMaxFlatSize);
 735         bool too_atomic_to_flatten = vk->is_declared_atomic();
 736         bool too_volatile_to_flatten = fs.access_flags().is_volatile();
 737         if (vk->is_naturally_atomic()) {
 738           too_atomic_to_flatten = false;
 739           //too_volatile_to_flatten = false; //FIXME
 740           // volatile fields are currently never flattened; this could change in the future
 741         }
 742         if (!(too_big_to_flatten | too_atomic_to_flatten | too_volatile_to_flatten)) {
 743           group->add_flattened_field(fs, vk);
 744           _nonstatic_oopmap_count += vk->nonstatic_oop_map_count();
 745           field_alignment = vk->get_alignment();
 746           fs.set_flattened(true);
 747           if (!vk->is_atomic()) {  // flat and non-atomic: take note
 748             _has_nonatomic_values = true;
 749             _atomic_field_count--;  // every other field is atomic but this one
 750           }
 751         } else {
 752           _nonstatic_oopmap_count++;
 753           field_alignment = type2aelembytes(T_OBJECT);
 754           group->add_oop_field(fs);
 755         }
 756       }
 757       break;
 758     }
 759     default:
 760       fatal("Unexpected BasicType");
 761     }
 762     if (!fs.access_flags().is_static() && field_alignment > alignment) alignment = field_alignment;
 763   }
 764   _alignment = alignment;
 765   if (!_has_nonstatic_fields) {
 766     // There are a number of fixes required throughout the type system and JIT
 767     Exceptions::fthrow(THREAD_AND_LOCATION,
 768                        vmSymbols::java_lang_ClassFormatError(),
 769                        "Value Types do not support zero instance size yet");
 770     return;


 990         nonstatic_oop_maps->add(cg->oop_fields()->at(0)->offset(), cg->oop_count());
 991       }
 992     }
 993   }
 994 
 995   nonstatic_oop_maps->compact();
 996 
 997   int instance_end = align_up(_layout->last_block()->offset(), wordSize);
 998   int static_fields_end = align_up(_static_layout->last_block()->offset(), wordSize);
 999   int static_fields_size = (static_fields_end -
1000       InstanceMirrorKlass::offset_of_static_fields()) / wordSize;
1001   int nonstatic_field_end = align_up(_layout->last_block()->offset(), heapOopSize);
1002 
1003   // Pass back information needed for InstanceKlass creation
1004 
1005   _info->oop_map_blocks = nonstatic_oop_maps;
1006   _info->_instance_size = align_object_size(instance_end / wordSize);
1007   _info->_static_field_size = static_fields_size;
1008   _info->_nonstatic_field_size = (nonstatic_field_end - instanceOopDesc::base_offset_in_bytes()) / heapOopSize;
1009   _info->_has_nonstatic_fields = _has_nonstatic_fields;
1010 
1011   // A value type is naturally atomic if it has just one field, and
1012   // that field is simple enough.
1013   _info->_is_naturally_atomic = (_is_value_type &&
1014                                  (_atomic_field_count <= 1) &&
1015                                  !_has_nonatomic_values &&
1016                                  _contended_groups.is_empty());
1017   // This may be too restrictive, since if all the fields fit in 64
1018   // bits we could make the decision to align instances of this class
1019   // to 64-bit boundaries, and load and store them as single words.
1020   // And on machines that support larger atomics we could similarly
1021   // allow larger values to be atomic, if properly aligned.
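
Restated as a standalone predicate (a hypothetical helper; the inputs correspond to the builder state used above):

    bool is_naturally_atomic(bool is_value_type,
                             int  atomic_field_count,
                             bool has_nonatomic_values,
                             bool has_contended_groups) {
      return is_value_type
          && atomic_field_count <= 1   // at most one independent unit
          && !has_nonatomic_values     // no flat non-atomic payloads
          && !has_contended_groups;    // no padding between fields
    }
    // e.g. an inline class holding one int is naturally atomic; one
    // holding two ints is not under this rule, even though both fit
    // in 64 bits; that is why the comment above calls the rule
    // possibly too restrictive.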
1022 
1023 
1024   if (PrintFieldLayout) {
1025     ResourceMark rm;
1026     tty->print_cr("Layout of class %s", _classname->as_C_string());
1027     tty->print_cr("Instance fields:");
1028     _layout->print(tty, false, _super_klass);
1029     tty->print_cr("Static fields:");
1030     _static_layout->print(tty, true, NULL);
1031     tty->print_cr("Instance size = %d bytes", _info->_instance_size * wordSize);
1032     if (_is_value_type) {
1033       tty->print_cr("First field offset = %d", _first_field_offset);
1034       tty->print_cr("Alignment = %d bytes", _alignment);
1035       tty->print_cr("Exact size = %d bytes", _exact_size_in_bytes);
1036     }
1037     tty->print_cr("---");
1038   }
1039 }
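
One more piece of the inline-class path worth spelling out: inline_class_field_sorting() keeps the most constraining alignment seen across non-static fields, and the epilogue publishes it as _alignment for use when this value is flattened into another container. A standalone sketch under the alignment == size convention the code applies to primitives and oops (the helper is hypothetical):

    // Most constraining alignment across non-static field sizes, as
    // given by type2aelembytes for primitives and oops.
    int required_alignment(const int* field_sizes, int count) {
      int alignment = 1;
      for (int i = 0; i < count; i++) {
        if (field_sizes[i] > alignment) alignment = field_sizes[i];
      }
      return alignment;
    }
    // e.g. fields {byte(1), int(4), long(8)} give alignment 8, so the
    // inline type must sit on an 8-byte boundary inside its holder.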
1040 
1041 void FieldLayoutBuilder::build_layout(TRAPS) {
1042   if (_classname == vmSymbols::java_lang_ref_Reference()) {