1 /*
   2  * Copyright (c) 2019, 2020, Oracle and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 #include "jvm.h"
  27 #include "classfile/classFileParser.hpp"
  28 #include "classfile/fieldLayoutBuilder.hpp"
  29 #include "memory/resourceArea.hpp"
  30 #include "oops/array.hpp"
  31 #include "oops/fieldStreams.inline.hpp"
  32 #include "oops/instanceMirrorKlass.hpp"
  33 #include "oops/klass.inline.hpp"
  34 #include "oops/valueKlass.inline.hpp"
  35 #include "runtime/fieldDescriptor.inline.hpp"
  36 
// Constructor for blocks that do not represent a declared field: EMPTY,
// RESERVED, PADDING and INHERITED blocks describe layout structure only,
// so they carry no field index and no alignment constraint.
LayoutRawBlock::LayoutRawBlock(Kind kind, int size) :
  _next_block(NULL),
  _prev_block(NULL),
  _value_klass(NULL),
  _kind(kind),
  _offset(-1),        // offset is assigned later, when the block is placed in a layout
  _alignment(1),      // structural blocks have no alignment requirement
  _size(size),
  _field_index(-1),   // no associated field
  _is_reference(false) {
  assert(kind == EMPTY || kind == RESERVED || kind == PADDING || kind == INHERITED,
         "Otherwise, should use the constructor with a field index argument");
  assert(size > 0, "Sanity check");
}
  51 
  52 
// Constructor for blocks that represent a field (REGULAR, FLATTENED) or a
// field inherited from a super class (INHERITED). The index identifies the
// field in the class' field array; alignment is the placement constraint
// that insert() will enforce.
LayoutRawBlock::LayoutRawBlock(int index, Kind kind, int size, int alignment, bool is_reference) :
 _next_block(NULL),
 _prev_block(NULL),
 _value_klass(NULL),
 _kind(kind),
 _offset(-1),        // offset is assigned later, when the block is placed in a layout
 _alignment(alignment),
 _size(size),
 _field_index(index),
 _is_reference(is_reference) {
  assert(kind == REGULAR || kind == FLATTENED || kind == INHERITED,
         "Other kind do not have a field index");
  assert(size > 0, "Sanity check");
  assert(alignment > 0, "Sanity check");
}
  68 
  69 bool LayoutRawBlock::fit(int size, int alignment) {
  70   int adjustment = 0;
  71   if ((_offset % alignment) != 0) {
  72     adjustment = alignment - (_offset % alignment);
  73   }
  74   return _size >= size + adjustment;
  75 }
  76 
// A FieldGroup gathers fields that must be laid out together: the default
// (root) group, the group of static fields, or one @Contended group.
// Field lists are allocated lazily by the add_*_field() methods.
FieldGroup::FieldGroup(int contended_group) :
  _next(NULL),
  _primitive_fields(NULL),
  _oop_fields(NULL),
  _flattened_fields(NULL),
  _contended_group(contended_group),  // -1 means no contended group, 0 means default contended group
  _oop_count(0) {}
  84 
  85 void FieldGroup::add_primitive_field(AllFieldStream fs, BasicType type) {
  86   int size = type2aelembytes(type);
  87   LayoutRawBlock* block = new LayoutRawBlock(fs.index(), LayoutRawBlock::REGULAR, size, size /* alignment == size for primitive types */, false);
  88   if (_primitive_fields == NULL) {
  89     _primitive_fields = new(ResourceObj::RESOURCE_AREA, mtInternal) GrowableArray<LayoutRawBlock*>(INITIAL_LIST_SIZE);
  90   }
  91   _primitive_fields->append(block);
  92 }
  93 
  94 void FieldGroup::add_oop_field(AllFieldStream fs) {
  95   int size = type2aelembytes(T_OBJECT);
  96   LayoutRawBlock* block = new LayoutRawBlock(fs.index(), LayoutRawBlock::REGULAR, size, size /* alignment == size for oops */, true);
  97   if (_oop_fields == NULL) {
  98     _oop_fields = new(ResourceObj::RESOURCE_AREA, mtInternal) GrowableArray<LayoutRawBlock*>(INITIAL_LIST_SIZE);
  99   }
 100   _oop_fields->append(block);
 101   _oop_count++;
 102 }
 103 
 104 void FieldGroup::add_flattened_field(AllFieldStream fs, ValueKlass* vk) {
 105   // _flattened_fields list might be merged with the _primitive_fields list in the future
 106   LayoutRawBlock* block = new LayoutRawBlock(fs.index(), LayoutRawBlock::FLATTENED, vk->get_exact_size_in_bytes(), vk->get_alignment(), false);
 107   block->set_value_klass(vk);
 108   if (_flattened_fields == NULL) {
 109     _flattened_fields = new(ResourceObj::RESOURCE_AREA, mtInternal) GrowableArray<LayoutRawBlock*>(INITIAL_LIST_SIZE);
 110   }
 111   _flattened_fields->append(block);
 112 }
 113 
 114 void FieldGroup::sort_by_size() {
 115   if (_primitive_fields != NULL) {
 116     _primitive_fields->sort(LayoutRawBlock::compare_size_inverted);
 117   }
 118   if (_flattened_fields != NULL) {
 119     _flattened_fields->sort(LayoutRawBlock::compare_size_inverted);
 120   }
 121 }
 122 
// A FieldLayout is a doubly-linked list of LayoutRawBlocks covering the whole
// layout (fields, empty slots, reserved and padding areas) without gaps.
FieldLayout::FieldLayout(Array<u2>* fields, ConstantPool* cp) :
  _fields(fields),
  _cp(cp),
  _blocks(NULL),
  _start(_blocks),  // == NULL here; the real list head is built by the initialize_*_layout() methods
  _last(_blocks) {} // == NULL here as well
 129 
// Creates the initial layout used for static fields: one unbounded EMPTY
// block, preceded (when its size is already known) by a RESERVED block
// covering the java.lang.Class instance fields at the start of the mirror.
void FieldLayout::initialize_static_layout() {
  _blocks = new LayoutRawBlock(LayoutRawBlock::EMPTY, INT_MAX);
  _blocks->set_offset(0);
  _last = _blocks;
  _start = _blocks;
  // Note: at this stage, InstanceMirrorKlass::offset_of_static_fields() could be zero, because
  // during bootstrapping, the size of the java.lang.Class is still not known when layout
  // of static field is computed. Field offsets are fixed later when the size is known
  // (see java_lang_Class::fixup_mirror())
  if (InstanceMirrorKlass::offset_of_static_fields() > 0) {
    insert(first_empty_block(), new LayoutRawBlock(LayoutRawBlock::RESERVED, InstanceMirrorKlass::offset_of_static_fields()));
    _blocks->set_offset(0);
  }
}
 144 
// Creates the initial layout for instance fields. Without a super class, the
// layout is a RESERVED block for the object header followed by an unbounded
// EMPTY block. With a super class, the super's layout is reconstructed first
// and _start decides whether this class' fields may reuse empty slots
// (including padding holes) left in the super class' layout.
void FieldLayout::initialize_instance_layout(const InstanceKlass* super_klass) {
  if (super_klass == NULL) {
    _blocks = new LayoutRawBlock(LayoutRawBlock::EMPTY, INT_MAX);
    _blocks->set_offset(0);
    _last = _blocks;
    _start = _blocks;
    insert(first_empty_block(), new LayoutRawBlock(LayoutRawBlock::RESERVED, instanceOopDesc::base_offset_in_bytes()));
  } else {
    bool has_fields = reconstruct_layout(super_klass);
    fill_holes(super_klass);
    // Reusing empty slots of a @Contended super class would defeat its padding
    if ((UseEmptySlotsInSupers && !super_klass->has_contended_annotations()) || !has_fields) {
      _start = _blocks; // Setting _start to _blocks instead of _last would allow subclasses
      // to allocate fields in empty slots of their super classes
    } else {
      _start = _last;
    }
  }
}
 163 
 164 LayoutRawBlock* FieldLayout::first_field_block() {
 165   LayoutRawBlock* block = _blocks;
 166   while (block != NULL
 167          && block->kind() != LayoutRawBlock::INHERITED
 168          && block->kind() != LayoutRawBlock::REGULAR
 169          && block->kind() != LayoutRawBlock::FLATTENED) {
 170     block = block->next_block();
 171   }
 172   return block;
 173 }
 174 
 175 // Insert a set of fields into a layout.
 176 // For each field, search for an empty slot able to fit the field
 177 // (satisfying both size and alignment requirements), if none is found,
 178 // add the field at the end of the layout.
 179 // Fields cannot be inserted before the block specified in the "start" argument
// Inserts each field of 'list' into the layout, using a best-fit search over
// the EMPTY blocks between 'start' and the trailing block; fields that fit
// nowhere are appended at the end (the trailing unbounded EMPTY block).
// A one-entry cache of the last (size, alignment) pair avoids repeating a
// search that is known to fail.
void FieldLayout::add(GrowableArray<LayoutRawBlock*>* list, LayoutRawBlock* start) {
  if (list == NULL) return;
  if (start == NULL) start = this->_start;
  bool last_search_success = false;
  int last_size = 0;
  int last_alignment = 0;
  for (int i = 0; i < list->length(); i ++) {
    LayoutRawBlock* b = list->at(i);
    LayoutRawBlock* cursor = NULL;
    LayoutRawBlock* candidate = NULL;
    // if start is the last block, just append the field
    if (start == last_block()) {
      candidate = last_block();
    }
    // Before iterating over the layout to find an empty slot fitting the field's requirements,
    // check if the previous field had the same requirements and if the search for a fitting slot
    // was successful. If the requirements were the same but the search failed, a new search will
    // fail the same way, so just append the field at the end of the layout.
    else  if (b->size() == last_size && b->alignment() == last_alignment && !last_search_success) {
      candidate = last_block();
    } else {
      // Iterate over the layout to find an empty slot fitting the field's requirements
      last_size = b->size();
      last_alignment = b->alignment();
      cursor = last_block()->prev_block();
      assert(cursor != NULL, "Sanity check");
      last_search_success = true;

      // Walk backwards from the end to 'start', keeping the smallest fitting
      // EMPTY block (best fit).
      while (cursor != start) {
        if (cursor->kind() == LayoutRawBlock::EMPTY && cursor->fit(b->size(), b->alignment())) {
          if (candidate == NULL || cursor->size() < candidate->size()) {
            candidate = cursor;
          }
        }
        cursor = cursor->prev_block();
      }
      if (candidate == NULL) {
        candidate = last_block();
        last_search_success = false;
      }
      assert(candidate != NULL, "Candidate must not be null");
      assert(candidate->kind() == LayoutRawBlock::EMPTY, "Candidate must be an empty block");
      assert(candidate->fit(b->size(), b->alignment()), "Candidate must be able to store the block");
    }
    insert_field_block(candidate, b);
  }
}
 227 
// Used for classes with hard-coded field offsets: inserts a field at the specified offset.
void FieldLayout::add_field_at_offset(LayoutRawBlock* block, int offset, LayoutRawBlock* start) {
  assert(block != NULL, "Sanity check");
  block->set_offset(offset);
  if (start == NULL) {
    start = this->_start;
  }
  // Scan forward for the block whose [offset, offset+size) range contains the
  // requested offset (or the trailing unbounded block).
  LayoutRawBlock* slot = start;
  while (slot != NULL) {
    if ((slot->offset() <= block->offset() && (slot->offset() + slot->size()) > block->offset()) ||
        slot == _last){
      assert(slot->kind() == LayoutRawBlock::EMPTY, "Matching slot must be an empty slot");
      // NOTE(review): this compares slot->size() against block->offset() + block->size(),
      // an absolute offset rather than a size; presumably it should account for
      // slot->offset() as well — confirm against callers using hard-coded offsets.
      assert(slot->size() >= block->offset() + block->size() ,"Matching slot must be big enough");
      if (slot->offset() < block->offset()) {
        // Split off the leading part of the slot so 'block' lands exactly at 'offset'
        int adjustment = block->offset() - slot->offset();
        LayoutRawBlock* adj = new LayoutRawBlock(LayoutRawBlock::EMPTY, adjustment);
        insert(slot, adj);
      }
      insert(slot, block);
      if (slot->size() == 0) {
        remove(slot);
      }
      // Publish the final offset to the field's FieldInfo record
      FieldInfo::from_field_array(_fields, block->field_index())->set_offset(block->offset());
      return;
    }
    slot = slot->next_block();
  }
  fatal("Should have found a matching slot above, corrupted layout or invalid offset");
}
 257 
// The allocation logic uses a best-fit strategy: the set of fields is allocated
// in the first empty slot big enough to contain the whole set (including padding
// to fit alignment constraints).
void FieldLayout::add_contiguously(GrowableArray<LayoutRawBlock*>* list, LayoutRawBlock* start) {
  if (list == NULL) return;
  if (start == NULL) {
    start = _start;
  }
  // This code assumes that if the first block is well aligned, the following
  // blocks would naturally be well aligned (no need for adjustment)
  // Total size of the whole set, used to find a single slot that fits all fields
  int size = 0;
  for (int i = 0; i < list->length(); i++) {
    size += list->at(i)->size();
  }

  LayoutRawBlock* candidate = NULL;
  if (start == last_block()) {
    candidate = last_block();
  } else {
    // First-fit search, walking backwards from the end down to 'start';
    // fall back to appending at the end if no slot fits the whole set
    LayoutRawBlock* first = list->at(0);
    candidate = last_block()->prev_block();
    while (candidate->kind() != LayoutRawBlock::EMPTY || !candidate->fit(size, first->alignment())) {
      if (candidate == start) {
        candidate = last_block();
        break;
      }
      candidate = candidate->prev_block();
    }
    assert(candidate != NULL, "Candidate must not be null");
    assert(candidate->kind() == LayoutRawBlock::EMPTY, "Candidate must be an empty block");
    assert(candidate->fit(size, first->alignment()), "Candidate must be able to store the whole contiguous block");
  }

  // Insert every field at the head of the chosen slot; each insertion shrinks
  // the slot, so the fields end up back to back
  for (int i = 0; i < list->length(); i++) {
    LayoutRawBlock* b = list->at(i);
    insert_field_block(candidate, b);
    assert((candidate->offset() % b->alignment() == 0), "Contiguous blocks must be naturally well aligned");
  }
}
 297 
// Inserts a field block into an EMPTY slot, first inserting an EMPTY
// adjustment block if the slot's current offset does not satisfy the field's
// alignment constraint. The field's final offset is written back to its
// FieldInfo record. Returns the inserted block.
LayoutRawBlock* FieldLayout::insert_field_block(LayoutRawBlock* slot, LayoutRawBlock* block) {
  assert(slot->kind() == LayoutRawBlock::EMPTY, "Blocks can only be inserted in empty blocks");
  if (slot->offset() % block->alignment() != 0) {
    int adjustment = block->alignment() - (slot->offset() % block->alignment());
    LayoutRawBlock* adj = new LayoutRawBlock(LayoutRawBlock::EMPTY, adjustment);
    insert(slot, adj);
  }
  insert(slot, block);
  // The block consumed the slot entirely, drop the now zero-sized slot
  if (slot->size() == 0) {
    remove(slot);
  }
  FieldInfo::from_field_array(_fields, block->field_index())->set_offset(block->offset());
  return block;
}
 312 
// Rebuilds the layout of 'ik' and all its super classes as a list of
// INHERITED blocks (preceded by a RESERVED block for the object header),
// sorted by offset. Holes between inherited fields are filled later by
// fill_holes(). Returns true if at least one instance field was found.
bool FieldLayout::reconstruct_layout(const InstanceKlass* ik) {
  bool has_instance_fields = false;
  GrowableArray<LayoutRawBlock*>* all_fields = new GrowableArray<LayoutRawBlock*>(32);
  // Collect the instance fields of ik and every super class
  while (ik != NULL) {
    for (AllFieldStream fs(ik->fields(), ik->constants()); !fs.done(); fs.next()) {
      BasicType type = Signature::basic_type(fs.signature());
      // distinction between static and non-static fields is missing
      if (fs.access_flags().is_static()) continue;
      has_instance_fields = true;
      LayoutRawBlock* block;
      if (type == T_VALUETYPE) {
        // Flattened inline-type field: use the inline klass' exact size/alignment
        ValueKlass* vk = ValueKlass::cast(ik->get_value_field_klass(fs.index()));
        block = new LayoutRawBlock(fs.index(), LayoutRawBlock::INHERITED, vk->get_exact_size_in_bytes(),
                                   vk->get_alignment(), false);

      } else {
        int size = type2aelembytes(type);
        // INHERITED blocks are marked as non-reference because oop_maps are handled by their holder class
        block = new LayoutRawBlock(fs.index(), LayoutRawBlock::INHERITED, size, size, false);
      }
      block->set_offset(fs.offset());
      all_fields->append(block);
    }
    ik = ik->super() == NULL ? NULL : InstanceKlass::cast(ik->super());
  }
  // Chain the blocks in offset order behind the header's RESERVED block
  all_fields->sort(LayoutRawBlock::compare_offset);
  _blocks = new LayoutRawBlock(LayoutRawBlock::RESERVED, instanceOopDesc::base_offset_in_bytes());
  _blocks->set_offset(0);
  _last = _blocks;
  for(int i = 0; i < all_fields->length(); i++) {
    LayoutRawBlock* b = all_fields->at(i);
    _last->set_next_block(b);
    b->set_prev_block(_last);
    _last = b;
  }
  _start = _blocks;
  return has_instance_fields;
}
 351 
// Called during the reconstruction of a layout, after fields from super
// classes have been inserted. It fills unused slots between inserted fields
// with EMPTY blocks, so the regular field insertion methods would work.
// This method handles classes with @Contended annotations differently
// by inserting PADDING blocks instead of EMPTY blocks to prevent subclasses'
// fields from interfering with contended fields/classes.
void FieldLayout::fill_holes(const InstanceKlass* super_klass) {
  assert(_blocks != NULL, "Sanity check");
  assert(_blocks->offset() == 0, "first block must be at offset zero");
  // Holes in a @Contended super class become PADDING (not reusable by
  // subclasses); otherwise they become EMPTY (reusable)
  LayoutRawBlock::Kind filling_type = super_klass->has_contended_annotations() ? LayoutRawBlock::PADDING: LayoutRawBlock::EMPTY;
  LayoutRawBlock* b = _blocks;
  while (b->next_block() != NULL) {
    // Any gap between consecutive blocks gets a filler block of the gap's size
    if (b->next_block()->offset() > (b->offset() + b->size())) {
      int size = b->next_block()->offset() - (b->offset() + b->size());
      LayoutRawBlock* empty = new LayoutRawBlock(filling_type, size);
      empty->set_offset(b->offset() + b->size());
      empty->set_next_block(b->next_block());
      b->next_block()->set_prev_block(empty);
      b->set_next_block(empty);
      empty->set_prev_block(b);
    }
    b = b->next_block();
  }
  assert(b->next_block() == NULL, "Invariant at this point");
  assert(b->kind() != LayoutRawBlock::EMPTY, "Sanity check");
  // If the super class has @Contended annotation, a padding block is
  // inserted at the end to ensure that fields from the subclasses won't share
  // the cache line of the last field of the contended class
  if (super_klass->has_contended_annotations()) {
    LayoutRawBlock* p = new LayoutRawBlock(LayoutRawBlock::PADDING, ContendedPaddingWidth);
    p->set_offset(b->offset() + b->size());
    b->set_next_block(p);
    p->set_prev_block(b);
    b = p;
  }
  if (!UseEmptySlotsInSupers) {
    // Add an empty slots to align fields of the subclass on a heapOopSize boundary
    // in order to emulate the behavior of the previous algorithm
    int align = (b->offset() + b->size()) % heapOopSize;
    if (align != 0) {
      int sz = heapOopSize - align;
      LayoutRawBlock* p = new LayoutRawBlock(LayoutRawBlock::EMPTY, sz);
      p->set_offset(b->offset() + b->size());
      b->set_next_block(p);
      p->set_prev_block(b);
      b = p;
    }
  }
  // Terminate the layout with the unbounded EMPTY block where new fields are
  // appended when no fitting slot is found
  LayoutRawBlock* last = new LayoutRawBlock(LayoutRawBlock::EMPTY, INT_MAX);
  last->set_offset(b->offset() + b->size());
  assert(last->offset() > 0, "Sanity check");
  b->set_next_block(last);
  last->set_prev_block(b);
  _last = last;
}
 407 
// Inserts 'block' at the head of the EMPTY 'slot': the slot's offset advances
// and its size shrinks by the block's size, and the block is linked into the
// list just before the slot. The caller must ensure the slot's offset already
// satisfies the block's alignment. Returns the inserted block.
LayoutRawBlock* FieldLayout::insert(LayoutRawBlock* slot, LayoutRawBlock* block) {
  assert(slot->kind() == LayoutRawBlock::EMPTY, "Blocks can only be inserted in empty blocks");
  assert(slot->offset() % block->alignment() == 0, "Incompatible alignment");
  block->set_offset(slot->offset());
  slot->set_offset(slot->offset() + block->size());
  // both checks rely on block->size() > 0, guaranteed by the constructors
  assert((slot->size() - block->size()) < slot->size(), "underflow checking");
  assert(slot->size() - block->size() >= 0, "no negative size allowed");
  slot->set_size(slot->size() - block->size());
  block->set_prev_block(slot->prev_block());
  block->set_next_block(slot);
  slot->set_prev_block(block);
  if (block->prev_block() != NULL) {
    block->prev_block()->set_next_block(block);
  }
  // Inserting before the list head makes the block the new head
  if (_blocks == slot) {
    _blocks = block;
  }
  return block;
}
 427 
// Unlinks 'block' from the layout list, updating the head (_blocks) and the
// search start point (_start) if they pointed at the removed block. The
// trailing block (_last) can never be removed.
void FieldLayout::remove(LayoutRawBlock* block) {
  assert(block != NULL, "Sanity check");
  assert(block != _last, "Sanity check");
  if (_blocks == block) {
    _blocks = block->next_block();
    if (_blocks != NULL) {
      _blocks->set_prev_block(NULL);
    }
  } else {
    assert(block->prev_block() != NULL, "_prev should be set for non-head blocks");
    block->prev_block()->set_next_block(block->next_block());
    block->next_block()->set_prev_block(block->prev_block());
  }
  if (block == _start) {
    _start = block->prev_block();
  }
}
 445 
// Prints one line per block of the layout (excluding the trailing unbounded
// EMPTY block), in the form " @offset \"name\" signature size/alignment KIND".
// 'super' is needed to resolve names of INHERITED fields, which are declared
// in a super class; it may be NULL when printing a static layout.
void FieldLayout::print(outputStream* output, bool is_static, const InstanceKlass* super) {
  ResourceMark rm;
  LayoutRawBlock* b = _blocks;
  while(b != _last) {
    switch(b->kind()) {
    case LayoutRawBlock::REGULAR: {
      FieldInfo* fi = FieldInfo::from_field_array(_fields, b->field_index());
      output->print_cr(" @%d \"%s\" %s %d/%d %s",
                       b->offset(),
                       fi->name(_cp)->as_C_string(),
                       fi->signature(_cp)->as_C_string(),
                       b->size(),
                       b->alignment(),
                       "REGULAR");
      break;
    }
    case LayoutRawBlock::FLATTENED: {
      FieldInfo* fi = FieldInfo::from_field_array(_fields, b->field_index());
      output->print_cr(" @%d \"%s\" %s %d/%d %s",
                       b->offset(),
                       fi->name(_cp)->as_C_string(),
                       fi->signature(_cp)->as_C_string(),
                       b->size(),
                       b->alignment(),
                       "FLATTENED");
      break;
    }
    case LayoutRawBlock::RESERVED: {
      output->print_cr(" @%d %d/- %s",
                       b->offset(),
                       b->size(),
                       "RESERVED");
      break;
    }
    case LayoutRawBlock::INHERITED: {
      assert(!is_static, "Static fields are not inherited in layouts");
      assert(super != NULL, "super klass must be provided to retrieve inherited fields info");
      bool found = false;
      const InstanceKlass* ik = super;
      // Walk up the super class chain until the field at this offset is found
      while (!found && ik != NULL) {
        for (AllFieldStream fs(ik->fields(), ik->constants()); !fs.done(); fs.next()) {
          if (fs.offset() == b->offset()) {
            output->print_cr(" @%d \"%s\" %s %d/%d %s",
                b->offset(),
                fs.name()->as_C_string(),
                fs.signature()->as_C_string(),
                b->size(),
                b->size(), // so far, alignment constraint == size, will change with Valhalla
                "INHERITED");
            found = true;
            break;
          }
        }
        ik = ik->java_super();
      }
      break;
    }
    case LayoutRawBlock::EMPTY:
      output->print_cr(" @%d %d/1 %s",
                       b->offset(),
                       b->size(),
                       "EMPTY");
      break;
    case LayoutRawBlock::PADDING:
      output->print_cr(" @%d %d/1 %s",
                       b->offset(),
                       b->size(),
                       "PADDING");
      break;
    }
    b = b->next_block();
  }
}
 519 
// Gathers everything class file parsing knows about the class being laid out;
// results of the layout computation are published through 'info'. The layouts
// themselves (_layout, _static_layout) are created later, in prologue().
FieldLayoutBuilder::FieldLayoutBuilder(const Symbol* classname, const InstanceKlass* super_klass, ConstantPool* constant_pool,
                                       Array<u2>* fields, bool is_contended, bool is_value_type, ClassLoaderData* class_loader_data,
                                       Handle protection_domain, FieldLayoutInfo* info) :
  _classname(classname),
  _super_klass(super_klass),
  _constant_pool(constant_pool),
  _fields(fields),
  _info(info),
  _root_group(NULL),
  _contended_groups(GrowableArray<FieldGroup*>(8)),
  _static_fields(NULL),
  _layout(NULL),
  _static_layout(NULL),
  _class_loader_data(class_loader_data),
  _protection_domain(protection_domain),
  _nonstatic_oopmap_count(0),
  _alignment(-1),             // computed by inline_class_field_sorting() for inline classes
  _first_field_offset(-1),
  _exact_size_in_bytes(-1),
  _has_nonstatic_fields(false),
  _is_contended(is_contended),
  _is_value_type(is_value_type),
  _has_flattening_information(is_value_type),
  _has_nonatomic_values(false),
  _atomic_field_count(0)
 {}
 546 
 547 FieldGroup* FieldLayoutBuilder::get_or_create_contended_group(int g) {
 548   assert(g > 0, "must only be called for named contended groups");
 549   FieldGroup* fg = NULL;
 550   for (int i = 0; i < _contended_groups.length(); i++) {
 551     fg = _contended_groups.at(i);
 552     if (fg->contended_group() == g) return fg;
 553   }
 554   fg = new FieldGroup(g);
 555   _contended_groups.append(fg);
 556   return fg;
 557 }
 558 
// Creates the instance and static layouts and the default field groups,
// seeding the instance layout from the super class (if any).
void FieldLayoutBuilder::prologue() {
  _layout = new FieldLayout(_fields, _constant_pool);
  const InstanceKlass* super_klass = _super_klass;
  _layout->initialize_instance_layout(super_klass);
  if (super_klass != NULL) {
    // Inherited instance fields count as non-static fields of this class
    _has_nonstatic_fields = super_klass->has_nonstatic_fields();
  }
  _static_layout = new FieldLayout(_fields, _constant_pool);
  _static_layout->initialize_static_layout();
  _static_fields = new FieldGroup();
  _root_group = new FieldGroup();
}
 571 
 572 // Field sorting for regular (non-inline) classes:
 573 //   - fields are sorted in static and non-static fields
 574 //   - non-static fields are also sorted according to their contention group
 575 //     (support of the @Contended annotation)
 576 //   - @Contended annotation is ignored for static fields
 577 //   - field flattening decisions are taken in this method
void FieldLayoutBuilder::regular_field_sorting() {
  for (AllFieldStream fs(_fields, _constant_pool); !fs.done(); fs.next()) {
    // First pick the group the field belongs to ...
    FieldGroup* group = NULL;
    if (fs.access_flags().is_static()) {
      group = _static_fields;
    } else {
      _has_nonstatic_fields = true;
      _atomic_field_count++;  // we might decrement this
      if (fs.is_contended()) {
        int g = fs.contended_group();
        if (g == 0) {
          // Anonymous @Contended field: gets its own group
          // NOTE(review): 'true' converts to contended group id 1 here —
          // presumably intentional as a non-default marker, but confirm it
          // cannot collide with a named group of id 1 in get_or_create_contended_group()
          group = new FieldGroup(true);
          _contended_groups.append(group);
        } else {
          group = get_or_create_contended_group(g);
        }
      } else {
        group = _root_group;
      }
    }
    assert(group != NULL, "invariant");
    // ... then dispatch the field to the group's list matching its type
    BasicType type = Signature::basic_type(fs.signature());
    switch(type) {
    case T_BYTE:
    case T_CHAR:
    case T_DOUBLE:
    case T_FLOAT:
    case T_INT:
    case T_LONG:
    case T_SHORT:
    case T_BOOLEAN:
      group->add_primitive_field(fs, type);
      break;
    case T_OBJECT:
    case T_ARRAY:
      if (group != _static_fields) _nonstatic_oopmap_count++;
      group->add_oop_field(fs);
      break;
    case T_VALUETYPE:
      if (group == _static_fields) {
        // static fields are never flattened
        group->add_oop_field(fs);
      } else {
        _has_flattening_information = true;
        // Flattening decision to be taken here
        // This code assumes all verification have been performed before
        // (field is a flattenable field, field's type has been loaded
        // and it is an inline klass
        Thread* THREAD = Thread::current();
        Klass* klass =
            SystemDictionary::resolve_flattenable_field_or_fail(&fs,
                                                                Handle(THREAD, _class_loader_data->class_loader()),
                                                                _protection_domain, true, THREAD);
        assert(klass != NULL, "Sanity check");
        ValueKlass* vk = ValueKlass::cast(klass);
        bool too_big_to_flatten = (ValueFieldMaxFlatSize >= 0 &&
                                   (vk->size_helper() * HeapWordSize) > ValueFieldMaxFlatSize);
        bool too_atomic_to_flatten = vk->is_declared_atomic();
        bool too_volatile_to_flatten = fs.access_flags().is_volatile();
        if (vk->is_naturally_atomic()) {
          too_atomic_to_flatten = false;
          //too_volatile_to_flatten = false; //FIXME
          // volatile fields are currently never flattened, this could change in the future
        }
        // Non-short-circuit '|' is fine: all three operands are already computed bools
        if (!(too_big_to_flatten | too_atomic_to_flatten | too_volatile_to_flatten)) {
          group->add_flattened_field(fs, vk);
          _nonstatic_oopmap_count += vk->nonstatic_oop_map_count();
          fs.set_flattened(true);
          if (!vk->is_atomic()) {  // flat and non-atomic: take note
            _has_nonatomic_values = true;
            _atomic_field_count--;  // every other field is atomic but this one
          }
        } else {
          // Not flattened: stored as a reference instead
          _nonstatic_oopmap_count++;
          group->add_oop_field(fs);
        }
      }
      break;
    default:
      fatal("Something wrong?");
    }
  }
  // Pre-sort each group by decreasing field size for the best-fit allocator
  _root_group->sort_by_size();
  _static_fields->sort_by_size();
  if (!_contended_groups.is_empty()) {
    for (int i = 0; i < _contended_groups.length(); i++) {
      _contended_groups.at(i)->sort_by_size();
    }
  }
}
 668 
 669 /* Field sorting for inline classes:
 670  *   - because inline classes are immutable, the @Contended annotation is ignored
 671  *     when computing their layout (with only read operation, there's no false
 672  *     sharing issue)
 673  *   - this method also records the alignment of the field with the most
 674  *     constraining alignment, this value is then used as the alignment
 675  *     constraint when flattening this inline type into another container
 676  *   - field flattening decisions are taken in this method (those decisions are
 677  *     currently only based in the size of the fields to be flattened, the size
 678  *     of the resulting instance is not considered)
 679  */
 680 void FieldLayoutBuilder::inline_class_field_sorting(TRAPS) {
 681   assert(_is_value_type, "Should only be used for inline classes");
 682   int alignment = 1;
 683   for (AllFieldStream fs(_fields, _constant_pool); !fs.done(); fs.next()) {
 684     FieldGroup* group = NULL;
 685     int field_alignment = 1;
 686     if (fs.access_flags().is_static()) {
 687       group = _static_fields;
 688     } else {
 689       _has_nonstatic_fields = true;
 690       _atomic_field_count++;  // we might decrement this
 691       group = _root_group;
 692     }
 693     assert(group != NULL, "invariant");
 694     BasicType type = Signature::basic_type(fs.signature());
 695     switch(type) {
 696     case T_BYTE:
 697     case T_CHAR:
 698     case T_DOUBLE:
 699     case T_FLOAT:
 700     case T_INT:
 701     case T_LONG:
 702     case T_SHORT:
 703     case T_BOOLEAN:
 704       if (group != _static_fields) {
 705         field_alignment = type2aelembytes(type); // alignment == size for primitive types
 706       }
 707       group->add_primitive_field(fs, type);
 708       break;
 709     case T_OBJECT:
 710     case T_ARRAY:
 711       if (group != _static_fields) {
 712         _nonstatic_oopmap_count++;
 713         field_alignment = type2aelembytes(type); // alignment == size for oops
 714       }
 715       group->add_oop_field(fs);
 716       break;
 717     case T_VALUETYPE: {
 718       if (group == _static_fields) {
 719         // static fields are never flattened
 720         group->add_oop_field(fs);
 721       } else {
 722         // Flattening decision to be taken here
 723         // This code assumes all verifications have been performed before
 724         // (field is a flattenable field, field's type has been loaded
 725         // and it is an inline klass
 726         Thread* THREAD = Thread::current();
 727         Klass* klass =
 728             SystemDictionary::resolve_flattenable_field_or_fail(&fs,
 729                 Handle(THREAD, _class_loader_data->class_loader()),
 730                 _protection_domain, true, CHECK);
 731         assert(klass != NULL, "Sanity check");
 732         ValueKlass* vk = ValueKlass::cast(klass);
 733         bool too_big_to_flatten = (ValueFieldMaxFlatSize >= 0 &&
 734                                    (vk->size_helper() * HeapWordSize) > ValueFieldMaxFlatSize);
 735         bool too_atomic_to_flatten = vk->is_declared_atomic();
 736         bool too_volatile_to_flatten = fs.access_flags().is_volatile();
 737         if (vk->is_naturally_atomic()) {
 738           too_atomic_to_flatten = false;
 739           //too_volatile_to_flatten = false; //FIXME
 740           // volatile fields are currently never flattened, this could change in the future
 741         }
 742         if (!(too_big_to_flatten | too_atomic_to_flatten | too_volatile_to_flatten)) {
 743           group->add_flattened_field(fs, vk);
 744           _nonstatic_oopmap_count += vk->nonstatic_oop_map_count();
 745           field_alignment = vk->get_alignment();
 746           fs.set_flattened(true);
 747           if (!vk->is_atomic()) {  // flat and non-atomic: take note
 748             _has_nonatomic_values = true;
 749             _atomic_field_count--;  // every other field is atomic but this one
 750           }
 751         } else {
 752           _nonstatic_oopmap_count++;
 753           field_alignment = type2aelembytes(T_OBJECT);
 754           group->add_oop_field(fs);
 755         }
 756       }
 757       break;
 758     }
 759     default:
 760       fatal("Unexpected BasicType");
 761     }
 762     if (!fs.access_flags().is_static() && field_alignment > alignment) alignment = field_alignment;
 763   }
 764   _alignment = alignment;
 765   if (!_has_nonstatic_fields) {
 766     // There are a number of fixes required throughout the type system and JIT
 767     Exceptions::fthrow(THREAD_AND_LOCATION,
 768                        vmSymbols::java_lang_ClassFormatError(),
 769                        "Value Types do not support zero instance size yet");
 770     return;
 771   }
 772 }
 773 
 774 void FieldLayoutBuilder::insert_contended_padding(LayoutRawBlock* slot) {
 775   if (ContendedPaddingWidth > 0) {
 776     LayoutRawBlock* padding = new LayoutRawBlock(LayoutRawBlock::PADDING, ContendedPaddingWidth);
 777     _layout->insert(slot, padding);
 778   }
 779 }
 780 
 781 /* Computation of regular classes layout is an evolution of the previous default layout
 782  * (FieldAllocationStyle 1):
 783  *   - flattened fields are allocated first (because they have potentially the
 784  *     least regular shapes, and are more likely to create empty slots between them,
 *     which can then be used to allocate primitive or oop fields). Allocation is
 786  *     performed from the biggest to the smallest flattened field.
 787  *   - then primitive fields (from the biggest to the smallest)
 788  *   - then oop fields are allocated contiguously (to reduce the number of oopmaps
 789  *     and reduce the work of the GC).
 790  */
 791 void FieldLayoutBuilder::compute_regular_layout() {
 792   bool need_tail_padding = false;
 793   prologue();
 794   regular_field_sorting();
 795   if (_is_contended) {
 796     _layout->set_start(_layout->last_block());
 797     // insertion is currently easy because the current strategy doesn't try to fill holes
 798     // in super classes layouts => the _start block is by consequence the _last_block
 799     insert_contended_padding(_layout->start());
 800     need_tail_padding = true;
 801   }
 802   _layout->add(_root_group->flattened_fields());
 803   _layout->add(_root_group->primitive_fields());
 804   _layout->add(_root_group->oop_fields());
 805 
 806   if (!_contended_groups.is_empty()) {
 807     for (int i = 0; i < _contended_groups.length(); i++) {
 808       FieldGroup* cg = _contended_groups.at(i);
 809       LayoutRawBlock* start = _layout->last_block();
 810       insert_contended_padding(start);
 811       _layout->add(_root_group->flattened_fields());
 812       _layout->add(cg->primitive_fields(), start);
 813       _layout->add(cg->oop_fields(), start);
 814       need_tail_padding = true;
 815     }
 816   }
 817 
 818   if (need_tail_padding) {
 819     insert_contended_padding(_layout->last_block());
 820   }
 821   _static_layout->add(_static_fields->flattened_fields());
 822   _static_layout->add_contiguously(_static_fields->oop_fields());
 823   _static_layout->add(_static_fields->primitive_fields());
 824 
 825   epilogue();
 826 }
 827 
 828 /* Computation of inline classes has a slightly different strategy than for
 829  * regular classes. Regular classes have their oop fields allocated at the end
 * of the layout to increase GC performance. Unfortunately, this strategy
 831  * increases the number of empty slots inside an instance. Because the purpose
 832  * of inline classes is to be embedded into other containers, it is critical
 833  * to keep their size as small as possible. For this reason, the allocation
 834  * strategy is:
 835  *   - flattened fields are allocated first (because they have potentially the
 836  *     least regular shapes, and are more likely to create empty slots between them,
 *     which can then be used to allocate primitive or oop fields). Allocation is
 838  *     performed from the biggest to the smallest flattened field.
 839  *   - then oop fields are allocated contiguously (to reduce the number of oopmaps
 840  *     and reduce the work of the GC)
 841  *   - then primitive fields (from the biggest to the smallest)
 842  */
 843 void FieldLayoutBuilder::compute_inline_class_layout(TRAPS) {
 844   prologue();
 845   inline_class_field_sorting(CHECK);
 846   // Inline types are not polymorphic, so they cannot inherit fields.
 847   // By consequence, at this stage, the layout must be composed of a RESERVED
 848   // block, followed by an EMPTY block.
 849   assert(_layout->start()->kind() == LayoutRawBlock::RESERVED, "Unexpected");
 850   assert(_layout->start()->next_block()->kind() == LayoutRawBlock::EMPTY, "Unexpected");
 851   LayoutRawBlock* first_empty = _layout->start()->next_block();
 852   if (first_empty->offset() % _alignment != 0) {
 853     LayoutRawBlock* padding = new LayoutRawBlock(LayoutRawBlock::PADDING, _alignment - (first_empty->offset() % _alignment));
 854     _layout->insert(first_empty, padding);
 855     _layout->set_start(padding->next_block());
 856   }
 857 
 858   _layout->add(_root_group->flattened_fields());
 859   _layout->add(_root_group->oop_fields());
 860   _layout->add(_root_group->primitive_fields());
 861 
 862   LayoutRawBlock* first_field = _layout->first_field_block();
 863    if (first_field != NULL) {
 864      _first_field_offset = _layout->first_field_block()->offset();
 865      _exact_size_in_bytes = _layout->last_block()->offset() - _layout->first_field_block()->offset();
 866    } else {
 867      // special case for empty value types
 868      _first_field_offset = _layout->blocks()->size();
 869      _exact_size_in_bytes = 0;
 870    }
 871   _exact_size_in_bytes = _layout->last_block()->offset() - _layout->first_field_block()->offset();
 872 
 873   _static_layout->add(_static_fields->flattened_fields());
 874   _static_layout->add_contiguously(_static_fields->oop_fields());
 875   _static_layout->add(_static_fields->primitive_fields());
 876 
 877 
 878   epilogue();
 879 }
 880 
 881 // Compute layout of the java/lang/ref/Reference class according
 882 // to the hard coded offsets of its fields
 883 void FieldLayoutBuilder::compute_java_lang_ref_Reference_layout() {
 884   prologue();
 885   regular_field_sorting();
 886 
 887   assert(_contended_groups.is_empty(), "java.lang.Reference has no @Contended annotations");
 888   assert(_root_group->primitive_fields() == NULL, "java.lang.Reference has no nonstatic primitive fields");
 889   int field_count = 0;
 890   int offset = -1;
 891   for (int i = 0; i < _root_group->oop_fields()->length(); i++) {
 892     LayoutRawBlock* b = _root_group->oop_fields()->at(i);
 893     FieldInfo* fi = FieldInfo::from_field_array(_fields, b->field_index());
 894     if (fi->name(_constant_pool)->equals("referent")) {
 895       offset = java_lang_ref_Reference::referent_offset;
 896     } else if (fi->name(_constant_pool)->equals("queue")) {
 897       offset = java_lang_ref_Reference::queue_offset;
 898     } else if (fi->name(_constant_pool)->equals("next")) {
 899       offset = java_lang_ref_Reference::next_offset;
 900     } else if (fi->name(_constant_pool)->equals("discovered")) {
 901       offset = java_lang_ref_Reference::discovered_offset;
 902     }
 903     assert(offset != -1, "Unknown field");
 904     _layout->add_field_at_offset(b, offset);
 905     field_count++;
 906   }
 907   assert(field_count == 4, "Wrong number of fields in java.lang.ref.Reference");
 908 
 909   _static_layout->add_contiguously(this->_static_fields->oop_fields());
 910   _static_layout->add(this->_static_fields->primitive_fields());
 911 
 912   epilogue();
 913 }
 914 
 915 // Compute layout of the boxing class according
 916 // to the hard coded offsets of their fields
 917 void FieldLayoutBuilder::compute_boxing_class_layout() {
 918   prologue();
 919   regular_field_sorting();
 920 
 921   assert(_contended_groups.is_empty(), "Boxing classes have no @Contended annotations");
 922   assert(_root_group->oop_fields() == NULL, "Boxing classes have no nonstatic oops fields");
 923   int field_count = 0;
 924   int offset = -1;
 925   for (int i = 0; i < _root_group->primitive_fields()->length(); i++) {
 926     LayoutRawBlock* b = _root_group->primitive_fields()->at(i);
 927     FieldInfo* fi = FieldInfo::from_field_array(_fields, b->field_index());
 928     assert(fi->name(_constant_pool)->equals("value"), "Boxing classes have a single nonstatic field named 'value'");
 929     BasicType type = Signature::basic_type(fi->signature(_constant_pool));
 930     offset = java_lang_boxing_object::value_offset_in_bytes(type);
 931     assert(offset != -1, "Unknown field");
 932     _layout->add_field_at_offset(b, offset);
 933     field_count++;
 934   }
 935   assert(field_count == 1, "Wrong number of fields for a boxing class");
 936 
 937   _static_layout->add_contiguously(this->_static_fields->oop_fields());
 938   _static_layout->add(this->_static_fields->primitive_fields());
 939 
 940   epilogue();
 941 }
 942 
 943 void FieldLayoutBuilder::add_flattened_field_oopmap(OopMapBlocksBuilder* nonstatic_oop_maps,
 944                 ValueKlass* vklass, int offset) {
 945   int diff = offset - vklass->first_field_offset();
 946   const OopMapBlock* map = vklass->start_of_nonstatic_oop_maps();
 947   const OopMapBlock* last_map = map + vklass->nonstatic_oop_map_count();
 948   while (map < last_map) {
 949     nonstatic_oop_maps->add(map->offset() + diff, map->count());
 950     map++;
 951   }
 952 }
 953 
// Finalizes a layout computation: builds the nonstatic oop maps (inherited
// blocks plus the ones declared by this class), computes instance and static
// field sizes, and fills the _info structure consumed by InstanceKlass
// creation. Called at the end of every compute_*_layout() variant.
void FieldLayoutBuilder::epilogue() {
  // Computing oopmaps
  int super_oop_map_count = (_super_klass == NULL) ? 0 :_super_klass->nonstatic_oop_map_count();
  int max_oop_map_count = super_oop_map_count + _nonstatic_oopmap_count;

  OopMapBlocksBuilder* nonstatic_oop_maps =
      new OopMapBlocksBuilder(max_oop_map_count);
  if (super_oop_map_count > 0) {
    // Seed the builder with the super class's maps; this class's own entries
    // are added below.
    nonstatic_oop_maps->initialize_inherited_blocks(_super_klass->start_of_nonstatic_oop_maps(),
    _super_klass->nonstatic_oop_map_count());
  }

  // One single-oop entry per nonstatic oop field declared by this class.
  if (_root_group->oop_fields() != NULL) {
    for (int i = 0; i < _root_group->oop_fields()->length(); i++) {
      LayoutRawBlock* b = _root_group->oop_fields()->at(i);
      nonstatic_oop_maps->add(b->offset(), 1);
    }
  }

  // Flattened fields contribute the oop maps of their inline class, rebased
  // to the field's offset within this instance.
  GrowableArray<LayoutRawBlock*>* ff = _root_group->flattened_fields();
  if (ff != NULL) {
    for (int i = 0; i < ff->length(); i++) {
      LayoutRawBlock* f = ff->at(i);
      ValueKlass* vk = f->value_klass();
      assert(vk != NULL, "Should have been initialized");
      if (vk->contains_oops()) {
        add_flattened_field_oopmap(nonstatic_oop_maps, vk, f->offset());
      }
    }
  }

  // A @Contended group's oop fields get one map entry covering oop_count()
  // oops starting at the first field's offset (assumes the group's oops were
  // laid out contiguously — TODO confirm against the layout strategy).
  if (!_contended_groups.is_empty()) {
    for (int i = 0; i < _contended_groups.length(); i++) {
      FieldGroup* cg = _contended_groups.at(i);
      if (cg->oop_count() > 0) {
        assert(cg->oop_fields() != NULL && cg->oop_fields()->at(0) != NULL, "oop_count > 0 but no oop fields found");
        nonstatic_oop_maps->add(cg->oop_fields()->at(0)->offset(), cg->oop_count());
      }
    }
  }

  // Compact the collected blocks before handing them back.
  nonstatic_oop_maps->compact();

  // Sizes: instance end is word-aligned; static field size is measured from
  // the mirror's static field area; nonstatic field end is heapOop-aligned.
  int instance_end = align_up(_layout->last_block()->offset(), wordSize);
  int static_fields_end = align_up(_static_layout->last_block()->offset(), wordSize);
  int static_fields_size = (static_fields_end -
      InstanceMirrorKlass::offset_of_static_fields()) / wordSize;
  int nonstatic_field_end = align_up(_layout->last_block()->offset(), heapOopSize);

  // Pass back information needed for InstanceKlass creation

  _info->oop_map_blocks = nonstatic_oop_maps;
  _info->_instance_size = align_object_size(instance_end / wordSize);
  _info->_static_field_size = static_fields_size;
  _info->_nonstatic_field_size = (nonstatic_field_end - instanceOopDesc::base_offset_in_bytes()) / heapOopSize;
  _info->_has_nonstatic_fields = _has_nonstatic_fields;

  // A value type is naturally atomic if it has just one field, and
  // that field is simple enough.
  _info->_is_naturally_atomic = (_is_value_type &&
                                 (_atomic_field_count <= 1) &&
                                 !_has_nonatomic_values &&
                                 _contended_groups.is_empty());
  // This may be too restrictive, since if all the fields fit in 64
  // bits we could make the decision to align instances of this class
  // to 64-bit boundaries, and load and store them as single words.
  // And on machines which supported larger atomics we could similarly
  // allow larger values to be atomic, if properly aligned.


  if (PrintFieldLayout) {
    ResourceMark rm;
    tty->print_cr("Layout of class %s", _classname->as_C_string());
    tty->print_cr("Instance fields:");
    _layout->print(tty, false, _super_klass);
    tty->print_cr("Static fields:");
    _static_layout->print(tty, true, NULL);
    tty->print_cr("Instance size = %d bytes", _info->_instance_size * wordSize);
    if (_is_value_type) {
      tty->print_cr("First field offset = %d", _first_field_offset);
      tty->print_cr("Alignment = %d bytes", _alignment);
      tty->print_cr("Exact size = %d bytes", _exact_size_in_bytes);
    }
    tty->print_cr("---");
  }
}
1040 
1041 void FieldLayoutBuilder::build_layout(TRAPS) {
1042   if (_classname == vmSymbols::java_lang_ref_Reference()) {
1043     compute_java_lang_ref_Reference_layout();
1044   } else if (_classname == vmSymbols::java_lang_Boolean() ||
1045              _classname == vmSymbols::java_lang_Character() ||
1046              _classname == vmSymbols::java_lang_Float() ||
1047              _classname == vmSymbols::java_lang_Double() ||
1048              _classname == vmSymbols::java_lang_Byte() ||
1049              _classname == vmSymbols::java_lang_Short() ||
1050              _classname == vmSymbols::java_lang_Integer() ||
1051              _classname == vmSymbols::java_lang_Long()) {
1052       compute_boxing_class_layout();
1053   } else if (_is_value_type) {
1054     compute_inline_class_layout(CHECK);
1055   } else {
1056     compute_regular_layout();
1057   }
1058 }