/*
 * Copyright (c) 2019, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "jvm.h"
#include "classfile/classFileParser.hpp"
#include "classfile/fieldLayoutBuilder.hpp"
#include "memory/resourceArea.hpp"
#include "oops/array.hpp"
#include "oops/instanceMirrorKlass.hpp"
#include "oops/valueKlass.hpp"
#include "runtime/fieldDescriptor.inline.hpp"

RawBlock::RawBlock(Kind kind, int size, int alignment) {
  _next_field = NULL;
  _prev_field = NULL;
  _next_block = NULL;
  _prev_block = NULL;
  _field_index = -1; // no field
  assert(kind != REGULAR && kind != FLATTENED,
      "Otherwise, should use the constructor with a field index argument");
  _kind = kind;
  _size = size;
  _alignment = alignment;
  _offset = -1;
  _is_reference = false;
  _value_klass = NULL;
  assert(_alignment > 0, "Sanity check");
}

RawBlock::RawBlock(int index, Kind kind, int size, int alignment, bool is_reference) {
  _next_field = NULL;
  _prev_field = NULL;
  _next_block = NULL;
  _prev_block = NULL;
  _field_index = index;
  assert(kind == REGULAR || kind == FLATTENED || kind == INHERITED,
      "Other kinds do not have a field index");
  _kind = kind;
  _size = size;
  _alignment = alignment;
  _offset = -1;
  _is_reference = is_reference;
  _value_klass = NULL;
  assert(_size > 0, "Sanity check");
  assert(_alignment > 0, "Sanity check");
}

bool RawBlock::fit(int size, int alignment) {
  // The adjustment is the number of bytes needed to reach the next offset
  // compatible with the requested alignment
  int adjustment = 0;
  if ((_offset % alignment) != 0) {
    adjustment = alignment - (_offset % alignment);
  }
  return _size >= size + adjustment;
}
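
// Worked example (illustrative): a block at _offset = 12 with _size = 20
// fits a field of size 8 and alignment 8, because reaching the next 8-byte
// boundary costs 4 bytes and 20 >= 8 + 4; with _size = 10 the same request
// would not fit.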

FieldGroup::FieldGroup(int contended_group) {
  _next = NULL;
  _primitive_fields = NULL;
  _oop_fields = NULL;
  _flattened_fields = NULL;
  _contended_group = contended_group; // -1 means no contended group, 0 means default contended group
  _oop_count = 0;
}

void FieldGroup::add_primitive_field(AllFieldStream fs, BasicType type) {
  int size = type2aelembytes(type);
  RawBlock* block = new RawBlock(fs.index(), RawBlock::REGULAR, size, size /* alignment == size for primitive types */, false);
  add_block(&_primitive_fields, block);
}

void FieldGroup::add_oop_field(AllFieldStream fs) {
  int size = type2aelembytes(T_OBJECT);
  RawBlock* block = new RawBlock(fs.index(), RawBlock::REGULAR, size, size /* alignment == size for oops */, true);
  add_block(&_oop_fields, block);
  _oop_count++;
}

void FieldGroup::add_flattened_field(AllFieldStream fs, ValueKlass* vk) {
  // _flattened_fields list might be merged with the _primitive_fields list in the future
  RawBlock* block = new RawBlock(fs.index(), RawBlock::FLATTENED, vk->get_exact_size_in_bytes(), vk->get_alignment(), false);
  block->set_value_klass(vk);
  add_block(&_flattened_fields, block);
}

/* Adds a field to a field group. Inside a field group, fields are sorted by
 * decreasing size. Fields with the same size are sorted according to their
 * order of insertion (an easy way to preserve the field order for classes
 * with hard-coded offsets).
 */
void FieldGroup::add_block(RawBlock** list, RawBlock* block) {
  if (*list == NULL) {
    *list = block;
  } else {
    if (block->size() > (*list)->size()) {  // must be a strict comparison to preserve field order (for classes with hard-coded offsets)
      block->set_next_field(*list);
      (*list)->set_prev_field(block);
      *list = block;
    } else {
      RawBlock* b = *list;
      while (b->next_field() != NULL) {
        if (b->next_field()->size() < block->size()) {
          break;
        }
        b = b->next_field();
      }
      block->set_next_field(b->next_field());
      block->set_prev_field(b);
      b->set_next_field(block);
      if (block->next_field() != NULL) {
        block->next_field()->set_prev_field(block);
      }
    }
  }
}
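
// Worked example (illustrative): adding fields of sizes 4, 8, 4 and 2, in
// that order, produces the list 8 -> 4 -> 4 -> 2, with the two 4-byte
// fields kept in their insertion order (hence the strict comparison above).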

FieldLayout::FieldLayout(Array<u2>* fields, ConstantPool* cp) {
  _fields = fields;
  _cp = cp;
  _blocks = NULL;
  _start = _blocks;
  _last = _blocks;
}

void FieldLayout::initialize_static_layout() {
  _blocks = new RawBlock(RawBlock::EMPTY, INT_MAX);
  _blocks->set_offset(0);
  _last = _blocks;
  _start = _blocks;
  // Note: at this stage, InstanceMirrorKlass::offset_of_static_fields() is zero, because
  // during bootstrapping, the size of java.lang.Class is not known yet when the layout
  // of static fields is computed. Field offsets are fixed later, when the size is known
  // (see java_lang_Class::fixup_mirror()).
  insert(first_empty_block(), new RawBlock(RawBlock::RESERVED, InstanceMirrorKlass::offset_of_static_fields()));
  _blocks->set_offset(0);
}

void FieldLayout::initialize_instance_layout(const InstanceKlass* super_klass) {
  if (super_klass == NULL) {
    _blocks = new RawBlock(RawBlock::EMPTY, INT_MAX);
    _blocks->set_offset(0);
    _last = _blocks;
    _start = _blocks;
    insert(first_empty_block(), new RawBlock(RawBlock::RESERVED, instanceOopDesc::base_offset_in_bytes()));
  } else {
    // The JVM could reconstruct the layouts of the super classes, in order to use the
    // empty slots in these layouts to allocate the current class' fields. However, some
    // code in the JVM is not ready yet to find fields allocated this way, so the
    // optimization is not enabled yet.
#if 0
    reconstruct_layout(super_klass);
    fill_holes(super_klass);
    // _start = _last;  // uncomment to fill holes in super classes layouts
#else
    _blocks = new RawBlock(RawBlock::EMPTY, INT_MAX);
    _blocks->set_offset(0);
    _last = _blocks;
    insert(_last, new RawBlock(RawBlock::RESERVED, instanceOopDesc::base_offset_in_bytes()));
    if (super_klass->nonstatic_field_size() > 0) {
      // To take into account the space allocated to super classes' fields, this code
      // uses the nonstatic_field_size() value to allocate a single INHERITED RawBlock.
      // The drawback is that nonstatic_field_size() expresses the size of non-static
      // fields in heapOopSize units, which implies that some space could be lost at the
      // end because the real size is rounded up. Using the exact size, with no rounding
      // up, would be possible, but would require modifications to other code in the JVM
      // performing field lookups (as it often expects this rounding to be applied).
      RawBlock* inherited = new RawBlock(RawBlock::INHERITED,
          super_klass->nonstatic_field_size() * heapOopSize);
      insert(_last, inherited);
    }
    _start = _last;
#endif
  }
}
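
// Illustrative layout right after initialization, assuming a 16-byte object
// header (instanceOopDesc::base_offset_in_bytes() == 16) and a super class
// whose non-static fields cover 16 bytes:
//   [RESERVED 0..15] -> [INHERITED 16..31] -> [EMPTY 32..]
// with _start pointing at the trailing EMPTY block.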

RawBlock* FieldLayout::first_field_block() {
  RawBlock* block = _start;
  // Not sure the condition below will work well when inheriting a layout with contended padding
  while (block->kind() != RawBlock::INHERITED && block->kind() != RawBlock::REGULAR
      && block->kind() != RawBlock::FLATTENED && block->kind() != RawBlock::PADDING) {
    block = block->next_block();
  }
  return block;
}

/* The allocation logic uses a first fit strategy: each field is allocated in
 * the first empty slot big enough to contain it (including padding to fit
 * alignment constraints).
 */
void FieldLayout::add(RawBlock* blocks, RawBlock* start) {
  if (start == NULL) {
    // start = this->_blocks;
    start = this->_start;
  }
  RawBlock* b = blocks;
  while (b != NULL) {
    RawBlock* candidate = start;
    while (candidate->kind() != RawBlock::EMPTY || !candidate->fit(b->size(), b->alignment())) candidate = candidate->next_block();
    assert(candidate != NULL && candidate->fit(b->size(), b->alignment()), "paranoid check");
    insert_field_block(candidate, b);
    b = b->next_field();
  }
}
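
// Worked example (illustrative) of the first fit strategy: given the list
// [RESERVED 0..15] -> [EMPTY 16..19] -> [INHERITED 20..23] -> [EMPTY 24..],
// a 4-byte field with 4-byte alignment lands in the first EMPTY block, at
// offset 16, while an 8-byte field skips it and lands at offset 24.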

/* The allocation logic uses a first fit strategy: the set of fields is allocated
 * in the first empty slot big enough to contain the whole set (including padding
 * to fit alignment constraints).
 */
void FieldLayout::add_contiguously(RawBlock* blocks, RawBlock* start) {
  if (blocks == NULL) return;
  if (start == NULL) {
    start = _start;
  }
  // This code assumes that if the first block is well aligned, the following
  // blocks would naturally be well aligned (no need for adjustment)
  int size = 0;
  RawBlock* b = blocks;
  while (b != NULL) {
    size += b->size();
    b = b->next_field();
  }
  RawBlock* candidate = start;
  while (candidate->kind() != RawBlock::EMPTY || !candidate->fit(size, blocks->alignment())) candidate = candidate->next_block();
  b = blocks;
  while (b != NULL) {
    insert_field_block(candidate, b);
    b = b->next_field();
    assert(b == NULL || (candidate->offset() % b->alignment() == 0), "Contiguous blocks must be naturally well aligned");
  }
}
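
// Worked example (illustrative), with compressed oops (4-byte oops): three
// oop fields allocated contiguously at offsets 16, 20 and 24 can later be
// covered by a single oop map entry {offset = 16, count = 3}, which is the
// point of grouping them.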

RawBlock* FieldLayout::insert_field_block(RawBlock* slot, RawBlock* block) {
  assert(slot->kind() == RawBlock::EMPTY, "Blocks can only be inserted in empty blocks");
  if (slot->offset() % block->alignment() != 0) {
    int adjustment = block->alignment() - (slot->offset() % block->alignment());
    RawBlock* adj = new RawBlock(RawBlock::EMPTY, adjustment);
    insert(slot, adj);
  }
  insert(slot, block);
  if (slot->size() == 0) {
    remove(slot);
  }
  if (UseNewLayout) {
    FieldInfo::from_field_array(_fields, block->field_index())->set_offset(block->offset());
  }
  return block;
}
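
// Worked example (illustrative): inserting a field of size 8 and alignment 8
// into an EMPTY slot covering offsets 12..31 first inserts a 4-byte EMPTY
// adjustment block at offset 12, then the field at offset 16, leaving the
// shrunk slot as [EMPTY 24..31].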

void FieldLayout::reconstruct_layout(const InstanceKlass* ik) {
  // TODO: static fields should not be considered here: they are laid out in
  // the mirror and are not impacted by the static fields of the parent class
  if (ik->super() != NULL) {
    reconstruct_layout(InstanceKlass::cast(ik->super()));
  } else {
    _blocks = new RawBlock(RawBlock::RESERVED, instanceOopDesc::base_offset_in_bytes());
    _blocks->set_offset(0);
    _last = _blocks;
    _start = _blocks;
  }
  for (AllFieldStream fs(ik->fields(), ik->constants()); !fs.done(); fs.next()) {
    BasicType type = vmSymbols::signature_type(fs.signature());
    // static fields are skipped, only non-static fields matter for the instance layout
    if (fs.access_flags().is_static()) continue;
    if (type != T_VALUETYPE) {
      int size = type2aelembytes(type);
      // INHERITED blocks are marked as non-reference because oop maps are handled by their holder class
      RawBlock* block = new RawBlock(fs.index(), RawBlock::INHERITED, size, size, false);
      block->set_offset(fs.offset());
      insert_per_offset(block);
    } else {
      fatal("Not supported yet");
    }
  }
}

void FieldLayout::fill_holes(const InstanceKlass* super_klass) {
  assert(_blocks != NULL, "Sanity check");
  assert(_blocks->offset() == 0, "first block must be at offset zero");
  RawBlock* b = _blocks;
  while (b->next_block() != NULL) {
    if (b->next_block()->offset() > (b->offset() + b->size())) {
      int size = b->next_block()->offset() - (b->offset() + b->size());
      RawBlock* empty = new RawBlock(RawBlock::EMPTY, size);
      empty->set_offset(b->offset() + b->size());
      empty->set_next_block(b->next_block());
      b->next_block()->set_prev_block(empty);
      b->set_next_block(empty);
      empty->set_prev_block(b);
    }
    b = b->next_block();
  }
  assert(b->next_block() == NULL, "Invariant at this point");
  if (b->kind() != RawBlock::EMPTY) {
    RawBlock* last = new RawBlock(RawBlock::EMPTY, INT_MAX);
    last->set_offset(b->offset() + b->size());
    assert(last->offset() > 0, "Sanity check");
    b->set_next_block(last);
    last->set_prev_block(b);
    _last = last;
  }
  // Padding is still added at the end, so that the size of the super class
  // layout can be expressed in heapOopSize units
  int super_end = instanceOopDesc::base_offset_in_bytes() + super_klass->nonstatic_field_size() * heapOopSize;
  if (_last->offset() < super_end) {
    RawBlock* padding = new RawBlock(RawBlock::PADDING, super_end - _last->offset());
    insert(_last, padding);
  }
}
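
// Worked example (illustrative): if the reconstructed super class layout is
// [RESERVED 0..15] -> [INHERITED 16..19] -> [INHERITED 24..27], the gap at
// offsets 20..23 becomes an EMPTY block usable by the current class' fields,
// and a trailing EMPTY block of size INT_MAX is appended starting at offset 28.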

RawBlock* FieldLayout::insert(RawBlock* slot, RawBlock* block) {
  assert(slot->kind() == RawBlock::EMPTY, "Blocks can only be inserted in empty blocks");
  assert(slot->offset() % block->alignment() == 0, "Incompatible alignment");
  block->set_offset(slot->offset());
  slot->set_offset(slot->offset() + block->size());
  slot->set_size(slot->size() - block->size());
  block->set_prev_block(slot->prev_block());
  block->set_next_block(slot);
  slot->set_prev_block(block);
  if (block->prev_block() != NULL) {       // suspicious test
    block->prev_block()->set_next_block(block);
  }
  if (_blocks == slot) {
    _blocks = block;
  }
  if (_start == slot) {
    _start = block;
  }
  return block;
}

void FieldLayout::insert_per_offset(RawBlock* block) {
  if (_blocks == NULL) {
    _blocks = block;
  } else if (_blocks->offset() > block->offset()) {
    block->set_next_block(_blocks);
    _blocks->set_prev_block(block);
    _blocks = block;
  } else {
    RawBlock* b = _blocks;
    while (b->next_block() != NULL && b->next_block()->offset() < block->offset()) b = b->next_block();
    if (b->next_block() == NULL) {
      b->set_next_block(block);
      block->set_prev_block(b);
    } else {
      assert(b->next_block()->offset() >= block->offset(), "Sanity check");
      assert(b->next_block()->offset() > block->offset() || b->next_block()->kind() == RawBlock::EMPTY, "Sanity check");
      block->set_next_block(b->next_block());
      b->next_block()->set_prev_block(block);
      block->set_prev_block(b);
      b->set_next_block(block);
    }
  }
}

void FieldLayout::remove(RawBlock* block) {
  assert(block != NULL, "Sanity check");
  assert(block != _last, "Sanity check");
  if (_blocks == block) {
    _blocks = block->next_block();
    if (_blocks != NULL) {
      _blocks->set_prev_block(NULL);
    }
  } else {
    assert(block->prev_block() != NULL, "_prev should be set for non-head blocks");
    block->prev_block()->set_next_block(block->next_block());
    block->next_block()->set_prev_block(block->prev_block());
  }
  if (block == _start) {
    _start = block->prev_block();
  }
}

void FieldLayout::print(outputStream* output) {
  ResourceMark rm;
  RawBlock* b = _blocks;
  while (b != _last) {
    switch (b->kind()) {
    case RawBlock::REGULAR: {
      FieldInfo* fi = FieldInfo::from_field_array(_fields, b->field_index());
      output->print_cr("  %d %s %d %d %s %s",
          b->offset(),
          "REGULAR",
          b->size(),
          b->alignment(),
          fi->signature(_cp)->as_C_string(),
          fi->name(_cp)->as_C_string());
      break;
    }
    case RawBlock::FLATTENED: {
      FieldInfo* fi = FieldInfo::from_field_array(_fields, b->field_index());
      output->print_cr("  %d %s %d %d %s %s",
          b->offset(),
          "FLATTENED",
          b->size(),
          b->alignment(),
          fi->signature(_cp)->as_C_string(),
          fi->name(_cp)->as_C_string());
      break;
    }
    case RawBlock::RESERVED:
      output->print_cr("  %d %s %d",
          b->offset(),
          "RESERVED",
          b->size());
      break;
    case RawBlock::INHERITED:
      output->print_cr("  %d %s %d",
          b->offset(),
          "INHERITED",
          b->size());
      break;
    case RawBlock::EMPTY:
      output->print_cr("  %d %s %d",
          b->offset(),
          "EMPTY",
          b->size());
      break;
    case RawBlock::PADDING:
      output->print_cr("  %d %s %d",
          b->offset(),
          "PADDING",
          b->size());
      break;
    }
    b = b->next_block();
  }
}
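
// Hypothetical output (field name and offsets made up for illustration),
// following the "offset kind size [alignment signature name]" format above,
// for a class with a 16-byte header and a single int field; note that the
// trailing EMPTY block (_last) is not printed:
//   0 RESERVED 16
//   16 REGULAR 4 4 I count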


FieldLayoutBuilder::FieldLayoutBuilder(ClassFileParser* cfp, FieldLayoutInfo* info) {
  _cfp = cfp;
  _info = info;
  _fields = NULL;
  _root_group = NULL;
  _contended_groups = NULL;
  _static_fields = NULL;
  _layout = NULL;
  _static_layout = NULL;
  _nonstatic_oopmap_count = 0;
  // Inline class specific information
  _alignment = -1;
  _first_field_offset = -1;
  _exact_size_in_bytes = -1;
  _has_nonstatic_fields = false;
  _has_flattening_information = _cfp->is_value_type();
}

FieldGroup* FieldLayoutBuilder::get_contended_group(int g) {
  assert(g > 0, "must only be called for named contended groups");
  if (_contended_groups == NULL) {
    _contended_groups = new FieldGroup(g);
    return _contended_groups;
  }
  FieldGroup* group = _contended_groups;
  while (group->next() != NULL) {
    if (group->contended_group() == g) break;
    group = group->next();
  }
  if (group->contended_group() == g) return group;
  group->set_next(new FieldGroup(g));
  return group->next();
}

void FieldLayoutBuilder::prologue() {
  _layout = new FieldLayout(_cfp->_fields, _cfp->_cp);
  const InstanceKlass* super_klass = _cfp->_super_klass;
  _layout->initialize_instance_layout(super_klass);
  if (super_klass != NULL) {
    _has_nonstatic_fields = super_klass->has_nonstatic_fields();
  }
  _static_layout = new FieldLayout(_cfp->_fields, _cfp->_cp);
  _static_layout->initialize_static_layout();
  _static_fields = new FieldGroup();
  _root_group = new FieldGroup();
  _contended_groups = NULL;
}

/* Field sorting for regular (non-inline) classes:
 *   - fields are sorted into static and non-static fields
 *   - non-static fields are also sorted according to their contention group
 *     (support of the @Contended annotation)
 *   - the @Contended annotation is ignored for static fields
 *   - field flattening decisions are taken in this method
 */
void FieldLayoutBuilder::regular_field_sorting(TRAPS) {
  assert(!_cfp->is_value_type(), "Should only be used for non-inline classes");
  for (AllFieldStream fs(_cfp->_fields, _cfp->_cp); !fs.done(); fs.next()) {
    FieldGroup* group = NULL;
    if (fs.access_flags().is_static()) {
      group = _static_fields;
    } else {
      _has_nonstatic_fields = true;
      if (fs.is_contended()) {
        int g = fs.contended_group();
        if (g == 0) {
          // default group means the field is alone in its contended group
          group = new FieldGroup(g);
          group->set_next(_contended_groups);
          _contended_groups = group;
        } else {
          group = get_contended_group(g);
        }
      } else {
        group = _root_group;
      }
    }
    assert(group != NULL, "invariant");
    BasicType type = vmSymbols::signature_type(fs.signature());
    switch (type) {
    case T_BYTE:
    case T_CHAR:
    case T_DOUBLE:
    case T_FLOAT:
    case T_INT:
    case T_LONG:
    case T_SHORT:
    case T_BOOLEAN:
      group->add_primitive_field(fs, type);
      break;
    case T_OBJECT:
    case T_ARRAY:
      if (group != _static_fields) _nonstatic_oopmap_count++;
      group->add_oop_field(fs);
      break;
    case T_VALUETYPE: {
      if (group == _static_fields) {
        // static fields are never flattened
        group->add_oop_field(fs);
      } else {
        _has_flattening_information = true;
        // The flattening decision is taken here. This code assumes all
        // verifications have been performed before: the field is a
        // flattenable field, and the field's type has been loaded and is an
        // inline klass (the size threshold is illustrated after this method).
        Klass* klass =
            SystemDictionary::resolve_flattenable_field_or_fail(&fs,
                Handle(THREAD, _cfp->_loader_data->class_loader()),
                _cfp->_protection_domain, true, CHECK);
        assert(klass != NULL, "Sanity check");
        ValueKlass* vk = ValueKlass::cast(klass);
        bool flattened = (ValueFieldMaxFlatSize < 0)
                         || (vk->size_helper() * HeapWordSize) <= ValueFieldMaxFlatSize;
        if (flattened) {
          group->add_flattened_field(fs, vk);
          _nonstatic_oopmap_count += vk->nonstatic_oop_map_count();
          fs.set_flattened(true);
        } else {
          _nonstatic_oopmap_count++;
          group->add_oop_field(fs);
        }
      }
      break;
    }
    default:
      fatal("Unexpected BasicType");
    }
  }
}
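
// Illustrative flattening decision, matching the check above: with
// -XX:ValueFieldMaxFlatSize=128, an inline type whose heap instance size
// (vk->size_helper() * HeapWordSize) is 96 bytes is flattened, while one of
// 160 bytes stays a reference; a negative value means fields are always
// flattened.
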
/* Field sorting for inline classes:
 *   - because inline classes are immutable, the @Contended annotation is
 *     ignored when computing their layout (with only read operations, there
 *     is no false sharing issue)
 *   - this method also records the alignment of the field with the most
 *     constraining alignment; this value is then used as the alignment
 *     constraint when flattening this inline type into another container
 *   - field flattening decisions are taken in this method (those decisions
 *     are currently based only on the size of the fields to be flattened;
 *     the size of the resulting instance is not considered)
 */
void FieldLayoutBuilder::inline_class_field_sorting(TRAPS) {
  assert(_cfp->is_value_type(), "Should only be used for inline classes");
  int alignment = 1;
  for (AllFieldStream fs(_cfp->_fields, _cfp->_cp); !fs.done(); fs.next()) {
    FieldGroup* group = NULL;
    int field_alignment = 1;
    if (fs.access_flags().is_static()) {
      group = _static_fields;
    } else {
      _has_nonstatic_fields = true;
      group = _root_group;
    }
    assert(group != NULL, "invariant");
    BasicType type = vmSymbols::signature_type(fs.signature());
    switch (type) {
    case T_BYTE:
    case T_CHAR:
    case T_DOUBLE:
    case T_FLOAT:
    case T_INT:
    case T_LONG:
    case T_SHORT:
    case T_BOOLEAN:
      if (group != _static_fields) {
        field_alignment = type2aelembytes(type); // alignment == size for primitive types
      }
      group->add_primitive_field(fs, type);
      break;
    case T_OBJECT:
    case T_ARRAY:
      if (group != _static_fields) {
        _nonstatic_oopmap_count++;
        field_alignment = type2aelembytes(type); // alignment == size for oops
      }
      group->add_oop_field(fs);
      break;
    case T_VALUETYPE: {
      if (group == _static_fields) {
        // static fields are never flattened
        group->add_oop_field(fs);
      } else {
        // The flattening decision is taken here. This code assumes all
        // verifications have been performed before: the field is a
        // flattenable field, and the field's type has been loaded and is an
        // inline klass.
        Klass* klass =
            SystemDictionary::resolve_flattenable_field_or_fail(&fs,
                Handle(THREAD, _cfp->_loader_data->class_loader()),
                _cfp->_protection_domain, true, CHECK);
        assert(klass != NULL, "Sanity check");
        ValueKlass* vk = ValueKlass::cast(klass);
        bool flattened = (ValueFieldMaxFlatSize < 0)
                         || (vk->size_helper() * HeapWordSize) <= ValueFieldMaxFlatSize;
        if (flattened) {
          group->add_flattened_field(fs, vk);
          _nonstatic_oopmap_count += vk->nonstatic_oop_map_count();
          field_alignment = vk->get_alignment();
          fs.set_flattened(true);
        } else {
          _nonstatic_oopmap_count++;
          field_alignment = type2aelembytes(T_OBJECT);
          group->add_oop_field(fs);
        }
      }
      break;
    }
    default:
      fatal("Unexpected BasicType");
    }
    if (!fs.access_flags().is_static() && field_alignment > alignment) alignment = field_alignment;
  }
  _alignment = alignment;
  if (_cfp->is_value_type() && (!_has_nonstatic_fields)) {
    // There are a number of fixes required throughout the type system and JIT
    // before empty inline classes can be supported
    _cfp->throwValueTypeLimitation(THREAD_AND_LOCATION, "Value Types do not support zero instance size yet");
    return;
  }
}
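
// Worked example (illustrative): an inline class with byte, long and int
// fields records _alignment = 8 (from the long field), so flattening this
// type into a container requires an 8-byte aligned offset.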

/* Computation of regular classes layout is an evolution of the previous default layout
 * (FieldAllocationStyle 1):
 *   - flattened fields are allocated first (because they have potentially the
 *     least regular shapes, and are more likely to create empty slots between them,
 *     which can then be used to allocate primitive or oop fields). Allocation is
 *     performed from the biggest to the smallest flattened field.
 *   - then primitive fields (from the biggest to the smallest)
 *   - then oop fields are allocated contiguously (to reduce the number of oop maps
 *     and reduce the work of the GC).
 */
void FieldLayoutBuilder::compute_regular_layout(TRAPS) {
  bool need_tail_padding = false;
  prologue();
  regular_field_sorting(CHECK);
  const bool is_contended_class = _cfp->_parsed_annotations->is_contended();
  if (is_contended_class) {
    RawBlock* padding = new RawBlock(RawBlock::PADDING, ContendedPaddingWidth);
    // Insertion is easy here because the current strategy doesn't try to fill
    // holes in super class layouts, so the _start block is consequently the _last block
    _layout->insert(_layout->start(), padding);
    need_tail_padding = true;
  }
  _layout->add(_root_group->flattened_fields());
  _layout->add(_root_group->primitive_fields());
  _layout->add_contiguously(_root_group->oop_fields());
  FieldGroup* cg = _contended_groups;
  while (cg != NULL) {
    RawBlock* start = _layout->last_block();
    RawBlock* padding = new RawBlock(RawBlock::PADDING, ContendedPaddingWidth);
    _layout->insert(start, padding);
    _layout->add(cg->flattened_fields(), start);
    _layout->add(cg->primitive_fields(), start);
    _layout->add(cg->oop_fields(), start);
    need_tail_padding = true;
    cg = cg->next();
  }
  if (need_tail_padding) {
    RawBlock* padding = new RawBlock(RawBlock::PADDING, ContendedPaddingWidth);
    _layout->insert(_layout->last_block(), padding);
  }
  _static_layout->add_contiguously(this->_static_fields->oop_fields());
  _static_layout->add(this->_static_fields->primitive_fields());

  epilogue();
}

/* Computation of inline classes layout uses a slightly different strategy than
 * for regular classes. Regular classes have their oop fields allocated at the
 * end of the layout to improve GC performance. Unfortunately, this strategy
 * increases the number of empty slots inside an instance. Because the purpose
 * of inline classes is to be embedded into other containers, it is critical
 * to keep their size as small as possible. For this reason, the allocation
 * strategy is:
 *   - flattened fields are allocated first (because they have potentially the
 *     least regular shapes, and are more likely to create empty slots between them,
 *     which can then be used to allocate primitive or oop fields). Allocation is
 *     performed from the biggest to the smallest flattened field.
 *   - then oop fields are allocated contiguously (to reduce the number of oop maps
 *     and reduce the work of the GC)
 *   - then primitive fields (from the biggest to the smallest)
 */
void FieldLayoutBuilder::compute_inline_class_layout(TRAPS) {
  prologue();
  inline_class_field_sorting(CHECK);
  if (_layout->start()->offset() % _alignment != 0) {
    RawBlock* padding = new RawBlock(RawBlock::PADDING, _alignment - (_layout->start()->offset() % _alignment));
    _layout->insert(_layout->start(), padding);
    _layout->set_start(padding->next_block());
  }
  _first_field_offset = _layout->start()->offset();
  _layout->add(_root_group->flattened_fields());
  _layout->add_contiguously(_root_group->oop_fields());
  _layout->add(_root_group->primitive_fields());
  _exact_size_in_bytes = _layout->last_block()->offset() - _layout->start()->offset();

  _static_layout->add_contiguously(this->_static_fields->oop_fields());
  _static_layout->add(this->_static_fields->primitive_fields());

  epilogue();
}
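
// Worked example (illustrative): if the first field is placed at offset 16
// and the last allocated byte is at offset 27, the trailing EMPTY block
// starts at offset 28, so _first_field_offset == 16 and
// _exact_size_in_bytes == 28 - 16 == 12.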

void FieldLayoutBuilder::epilogue() {
  // Computing oop maps
  int super_oop_map_count = (_cfp->_super_klass == NULL) ? 0 : _cfp->_super_klass->nonstatic_oop_map_count();
  int max_oop_map_count = super_oop_map_count + _nonstatic_oopmap_count;

  OopMapBlocksBuilder* nonstatic_oop_maps =
      new OopMapBlocksBuilder(max_oop_map_count, Thread::current());
  if (super_oop_map_count > 0) {
    nonstatic_oop_maps->initialize_inherited_blocks(_cfp->_super_klass->start_of_nonstatic_oop_maps(),
        _cfp->_super_klass->nonstatic_oop_map_count());
  }
  if (_root_group->oop_fields() != NULL) {
    nonstatic_oop_maps->add(_root_group->oop_fields()->offset(), _root_group->oop_count());
  }
  RawBlock* ff = _root_group->flattened_fields();
  while (ff != NULL) {
    ValueKlass* vklass = ff->value_klass();
    assert(vklass != NULL, "Should have been initialized");
    if (vklass->contains_oops()) { // add oop maps for flattened fields
      int diff = ff->offset() - vklass->first_field_offset();
      const OopMapBlock* map = vklass->start_of_nonstatic_oop_maps();
      const OopMapBlock* const last_map = map + vklass->nonstatic_oop_map_count();
      while (map < last_map) {
        nonstatic_oop_maps->add(map->offset() + diff, map->count());
        map++;
      }
    }
    ff = ff->next_field();
  }
  FieldGroup* cg = _contended_groups;
  while (cg != NULL) {
    if (cg->oop_count() > 0) {
      nonstatic_oop_maps->add(cg->oop_fields()->offset(), cg->oop_count());
    }
    RawBlock* ff = cg->flattened_fields();
    while (ff != NULL) {
      ValueKlass* vklass = ff->value_klass();
      assert(vklass != NULL, "Should have been initialized");
      if (vklass->contains_oops()) { // add oop maps for flattened fields
        int diff = ff->offset() - vklass->first_field_offset();
        const OopMapBlock* map = vklass->start_of_nonstatic_oop_maps();
        const OopMapBlock* const last_map = map + vklass->nonstatic_oop_map_count();
        while (map < last_map) {
          nonstatic_oop_maps->add(map->offset() + diff, map->count());
          map++;
        }
      }
      ff = ff->next_field();
    }
    cg = cg->next();
  }

  // nonstatic_oop_maps->compact(Thread::current());

  int instance_end = align_up(_layout->last_block()->offset(), wordSize);
  int static_fields_end = align_up(_static_layout->last_block()->offset(), wordSize);
  int static_fields_size = (static_fields_end -
      InstanceMirrorKlass::offset_of_static_fields()) / wordSize;
  int nonstatic_field_end = align_up(_layout->last_block()->offset(), heapOopSize);

  // Pass back information needed for InstanceKlass creation

  _info->oop_map_blocks = nonstatic_oop_maps;
  _info->instance_size = align_object_size(instance_end / wordSize);
  _info->static_field_size = static_fields_size;
  _info->nonstatic_field_size = (nonstatic_field_end - instanceOopDesc::base_offset_in_bytes()) / heapOopSize;
  _info->has_nonstatic_fields = _has_nonstatic_fields;

  if (PrintNewLayout || (PrintFlattenableLayouts && _has_flattening_information)) {
    ResourceMark rm;
    tty->print_cr("Layout of class %s", _cfp->_class_name->as_C_string());
    tty->print_cr("|offset|kind|size|alignment|signature|name|");
    tty->print_cr("Instance fields:");
    _layout->print(tty);
    tty->print_cr("Static fields:");
    _static_layout->print(tty);
    nonstatic_oop_maps->print_on(tty);
    tty->print_cr("Instance size = %d * heapWordSize", _info->instance_size);
    tty->print_cr("Non-static field size = %d * heapWordSize", _info->nonstatic_field_size);
    tty->print_cr("Static field size = %d * heapWordSize", _info->static_field_size);
    if (_cfp->is_value_type()) {
      tty->print_cr("alignment = %d", _alignment);
      tty->print_cr("exact_size_in_bytes = %d", _exact_size_in_bytes);
      tty->print_cr("first_field_offset = %d", _first_field_offset);
    }
    tty->print_cr("---");
  }
}