1 /*
   2  * Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 #include "gc_implementation/concurrentMarkSweep/cmsLockVerifier.hpp"
  27 #include "gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.hpp"
  28 #include "gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.inline.hpp"
  29 #include "gc_implementation/concurrentMarkSweep/concurrentMarkSweepThread.hpp"
  30 #include "gc_implementation/shared/liveRange.hpp"
  31 #include "gc_implementation/shared/spaceDecorator.hpp"
  32 #include "gc_interface/collectedHeap.inline.hpp"
  33 #include "memory/allocation.inline.hpp"
  34 #include "memory/blockOffsetTable.inline.hpp"
  35 #include "memory/resourceArea.hpp"
  36 #include "memory/universe.inline.hpp"
  37 #include "oops/oop.inline.hpp"
  38 #include "runtime/globals.hpp"
  39 #include "runtime/handles.inline.hpp"
  40 #include "runtime/init.hpp"
  41 #include "runtime/java.hpp"
  42 #include "runtime/vmThread.hpp"
  43 #include "utilities/copy.hpp"
  44 
  45 /////////////////////////////////////////////////////////////////////////
  46 //// CompactibleFreeListSpace
  47 /////////////////////////////////////////////////////////////////////////
  48 
   49 // The highest of the lock ranks used by the free list locks
  50 int CompactibleFreeListSpace::_lockRank = Mutex::leaf + 3;
  51 
  52 // Defaults are 0 so things will break badly if incorrectly initialized.
  53 size_t CompactibleFreeListSpace::IndexSetStart  = 0;
  54 size_t CompactibleFreeListSpace::IndexSetStride = 0;
  55 
  56 size_t MinChunkSize = 0;
  57 
  58 void CompactibleFreeListSpace::set_cms_values() {
  59   // Set CMS global values
  60   assert(MinChunkSize == 0, "already set");
  61 
  62   // MinChunkSize should be a multiple of MinObjAlignment and be large enough
  63   // for chunks to contain a FreeChunk.
  64   size_t min_chunk_size_in_bytes = align_size_up(sizeof(FreeChunk), MinObjAlignmentInBytes);
  65   MinChunkSize = min_chunk_size_in_bytes / BytesPerWord;
  66 
  67   assert(IndexSetStart == 0 && IndexSetStride == 0, "already set");
  68   IndexSetStart  = MinChunkSize;
  69   IndexSetStride = MinObjAlignment;
  70 }
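
// For illustration only (the real values are platform dependent): assuming a
// 64-bit VM with MinObjAlignmentInBytes == 8, BytesPerWord == 8 and a
// FreeChunk of 16 bytes, the computation above yields
//
//   min_chunk_size_in_bytes = align_size_up(16, 8) = 16
//   MinChunkSize            = 16 / 8               = 2 (words)
//   IndexSetStart           = 2,  IndexSetStride   = 1
//
// so under those assumptions no free block smaller than two words can exist,
// which is what lets a FreeChunk header be overlaid on any free block.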
  71 
  72 // Constructor
  73 CompactibleFreeListSpace::CompactibleFreeListSpace(BlockOffsetSharedArray* bs,
  74   MemRegion mr, bool use_adaptive_freelists,
  75   FreeBlockDictionary<FreeChunk>::DictionaryChoice dictionaryChoice) :
  76   _dictionaryChoice(dictionaryChoice),
  77   _adaptive_freelists(use_adaptive_freelists),
  78   _bt(bs, mr),
  79   // free list locks are in the range of values taken by _lockRank
  80   // This range currently is [_leaf+2, _leaf+3]
  81   // Note: this requires that CFLspace c'tors
   82   // are called serially in the order in which the locks are
   83   // acquired in the program text. This is true today.
  84   _freelistLock(_lockRank--, "CompactibleFreeListSpace._lock", true),
  85   _parDictionaryAllocLock(Mutex::leaf - 1,  // == rank(ExpandHeap_lock) - 1
  86                           "CompactibleFreeListSpace._dict_par_lock", true),
  87   _rescan_task_size(CardTableModRefBS::card_size_in_words * BitsPerWord *
  88                     CMSRescanMultiple),
  89   _marking_task_size(CardTableModRefBS::card_size_in_words * BitsPerWord *
  90                     CMSConcMarkMultiple),
  91   _collector(NULL)
  92 {
  93   assert(sizeof(FreeChunk) / BytesPerWord <= MinChunkSize,
  94          "FreeChunk is larger than expected");
  95   _bt.set_space(this);
  96   initialize(mr, SpaceDecorator::Clear, SpaceDecorator::Mangle);
  97   // We have all of "mr", all of which we place in the dictionary
  98   // as one big chunk. We'll need to decide here which of several
  99   // possible alternative dictionary implementations to use. For
 100   // now the choice is easy, since we have only one working
 101   // implementation, namely, the simple binary tree (splaying
 102   // temporarily disabled).
 103   switch (dictionaryChoice) {
 104     case FreeBlockDictionary<FreeChunk>::dictionaryBinaryTree:
 105       _dictionary = new AFLBinaryTreeDictionary(mr);
 106       break;
 107     case FreeBlockDictionary<FreeChunk>::dictionarySplayTree:
 108     case FreeBlockDictionary<FreeChunk>::dictionarySkipList:
 109     default:
 110       warning("dictionaryChoice: selected option not understood; using"
 111               " default BinaryTreeDictionary implementation instead.");
 112   }
 113   assert(_dictionary != NULL, "CMS dictionary initialization");
 114   // The indexed free lists are initially all empty and are lazily
 115   // filled in on demand. Initialize the array elements to NULL.
 116   initializeIndexedFreeListArray();
 117 
 118   // Not using adaptive free lists assumes that allocation is first
 119   // from the linAB's.  Also a cms perm gen which can be compacted
 120   // has to have the klass's klassKlass allocated at a lower
 121   // address in the heap than the klass so that the klassKlass is
 122   // moved to its new location before the klass is moved.
 123   // Set the _refillSize for the linear allocation blocks
 124   if (!use_adaptive_freelists) {
 125     FreeChunk* fc = _dictionary->get_chunk(mr.word_size(),
 126                                            FreeBlockDictionary<FreeChunk>::atLeast);
 127     // The small linAB initially has all the space and will allocate
 128     // a chunk of any size.
 129     HeapWord* addr = (HeapWord*) fc;
  130     _smallLinearAllocBlock.set(addr, fc->size(),
 131       1024*SmallForLinearAlloc, fc->size());
 132     // Note that _unallocated_block is not updated here.
 133     // Allocations from the linear allocation block should
 134     // update it.
 135   } else {
 136     _smallLinearAllocBlock.set(0, 0, 1024*SmallForLinearAlloc,
 137                                SmallForLinearAlloc);
 138   }
 139   // CMSIndexedFreeListReplenish should be at least 1
 140   CMSIndexedFreeListReplenish = MAX2((uintx)1, CMSIndexedFreeListReplenish);
 141   _promoInfo.setSpace(this);
 142   if (UseCMSBestFit) {
 143     _fitStrategy = FreeBlockBestFitFirst;
 144   } else {
 145     _fitStrategy = FreeBlockStrategyNone;
 146   }
 147   check_free_list_consistency();
 148 
 149   // Initialize locks for parallel case.
 150 
 151   if (CollectedHeap::use_parallel_gc_threads()) {
 152     for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
  153       _indexedFreeListParLocks[i] = new Mutex(Mutex::leaf - 1, // == rank(ExpandHeap_lock) - 1
 154                                               "a freelist par lock",
 155                                               true);
 156       DEBUG_ONLY(
 157         _indexedFreeList[i].set_protecting_lock(_indexedFreeListParLocks[i]);
 158       )
 159     }
 160     _dictionary->set_par_lock(&_parDictionaryAllocLock);
 161   }
 162 }
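
// Illustrative note on the lock ranks (example values, not taken from a
// running VM): the initializer "_freelistLock(_lockRank--, ...)" hands the
// current value of _lockRank to each newly constructed space and then lowers
// the shared counter.  If, say, two CompactibleFreeListSpace instances are
// constructed in program-text order, their free list locks get
//
//   first space:  Mutex::leaf + 3
//   second space: Mutex::leaf + 2
//
// which is the [_leaf+2, _leaf+3] range mentioned above and is why the
// constructors must run serially in that order.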
 163 
 164 // Like CompactibleSpace forward() but always calls cross_threshold() to
 165 // update the block offset table.  Removed initialize_threshold call because
 166 // CFLS does not use a block offset array for contiguous spaces.
 167 HeapWord* CompactibleFreeListSpace::forward(oop q, size_t size,
 168                                     CompactPoint* cp, HeapWord* compact_top) {
 169   // q is alive
 170   // First check if we should switch compaction space
 171   assert(this == cp->space, "'this' should be current compaction space.");
 172   size_t compaction_max_size = pointer_delta(end(), compact_top);
 173   assert(adjustObjectSize(size) == cp->space->adjust_object_size_v(size),
 174     "virtual adjustObjectSize_v() method is not correct");
 175   size_t adjusted_size = adjustObjectSize(size);
 176   assert(compaction_max_size >= MinChunkSize || compaction_max_size == 0,
 177          "no small fragments allowed");
 178   assert(minimum_free_block_size() == MinChunkSize,
 179          "for de-virtualized reference below");
  180   // Can't leave a nonzero-size residual fragment smaller than MinChunkSize.
 181   if (adjusted_size + MinChunkSize > compaction_max_size &&
 182       adjusted_size != compaction_max_size) {
 183     do {
 184       // switch to next compaction space
 185       cp->space->set_compaction_top(compact_top);
 186       cp->space = cp->space->next_compaction_space();
 187       if (cp->space == NULL) {
 188         cp->gen = GenCollectedHeap::heap()->prev_gen(cp->gen);
 189         assert(cp->gen != NULL, "compaction must succeed");
 190         cp->space = cp->gen->first_compaction_space();
 191         assert(cp->space != NULL, "generation must have a first compaction space");
 192       }
 193       compact_top = cp->space->bottom();
 194       cp->space->set_compaction_top(compact_top);
  195       // The correct adjusted_size may not be the same as that for this method
  196       // (i.e., cp->space may no longer be "this"), so adjust the size again.
  197       // Use the virtual method here; the non-virtual version was used above
  198       // to save the virtual dispatch.
 199       adjusted_size = cp->space->adjust_object_size_v(size);
 200       compaction_max_size = pointer_delta(cp->space->end(), compact_top);
 201       assert(cp->space->minimum_free_block_size() == 0, "just checking");
 202     } while (adjusted_size > compaction_max_size);
 203   }
 204 
 205   // store the forwarding pointer into the mark word
 206   if ((HeapWord*)q != compact_top) {
 207     q->forward_to(oop(compact_top));
 208     assert(q->is_gc_marked(), "encoding the pointer should preserve the mark");
 209   } else {
 210     // if the object isn't moving we can just set the mark to the default
 211     // mark and handle it specially later on.
 212     q->init_mark();
 213     assert(q->forwardee() == NULL, "should be forwarded to NULL");
 214   }
 215 
 216   compact_top += adjusted_size;
 217 
 218   // we need to update the offset table so that the beginnings of objects can be
 219   // found during scavenge.  Note that we are updating the offset table based on
 220   // where the object will be once the compaction phase finishes.
 221 
  222   // Always call cross_threshold().  A contiguous space can call it only when
  223   // the compaction_top exceeds the current threshold, but that shortcut does
  224   // not apply to a non-contiguous space.
 225   cp->threshold =
 226     cp->space->cross_threshold(compact_top - adjusted_size, compact_top);
 227   return compact_top;
 228 }
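
// Illustrative note (hypothetical numbers): the test
// "adjusted_size + MinChunkSize > compaction_max_size" above prevents
// compaction from stranding a residual fragment smaller than MinChunkSize.
// With MinChunkSize == 2 words, if 10 words remain in the current compaction
// space and the next object needs 9 of them, placing it here would leave a
// 1-word remainder too small to hold a FreeChunk, so forward() advances to
// the next compaction space instead.  An exact fit
// (adjusted_size == compaction_max_size) is still accepted because it leaves
// no remainder at all.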
 229 
 230 // A modified copy of OffsetTableContigSpace::cross_threshold() with _offsets -> _bt
 231 // and use of single_block instead of alloc_block.  The name here is not really
 232 // appropriate - maybe a more general name could be invented for both the
 233 // contiguous and noncontiguous spaces.
 234 
 235 HeapWord* CompactibleFreeListSpace::cross_threshold(HeapWord* start, HeapWord* the_end) {
 236   _bt.single_block(start, the_end);
 237   return end();
 238 }
 239 
 240 // Initialize them to NULL.
 241 void CompactibleFreeListSpace::initializeIndexedFreeListArray() {
 242   for (size_t i = 0; i < IndexSetSize; i++) {
 243     // Note that on platforms where objects are double word aligned,
 244     // the odd array elements are not used.  It is convenient, however,
 245     // to map directly from the object size to the array element.
 246     _indexedFreeList[i].reset(IndexSetSize);
 247     _indexedFreeList[i].set_size(i);
 248     assert(_indexedFreeList[i].count() == 0, "reset check failed");
 249     assert(_indexedFreeList[i].head() == NULL, "reset check failed");
 250     assert(_indexedFreeList[i].tail() == NULL, "reset check failed");
 251     assert(_indexedFreeList[i].hint() == IndexSetSize, "reset check failed");
 252   }
 253 }
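
// Illustrative note: the array is addressed directly by chunk size in words,
// so list i (when used) holds chunks of exactly i words.  On a platform where
// IndexSetStride == 2 (double-word alignment), only the even slots are ever
// populated; for example, a freed 6-word block would be pushed onto
// _indexedFreeList[6] while _indexedFreeList[5] stays permanently empty.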
 254 
 255 void CompactibleFreeListSpace::resetIndexedFreeListArray() {
 256   for (size_t i = 1; i < IndexSetSize; i++) {
 257     assert(_indexedFreeList[i].size() == (size_t) i,
 258       "Indexed free list sizes are incorrect");
 259     _indexedFreeList[i].reset(IndexSetSize);
 260     assert(_indexedFreeList[i].count() == 0, "reset check failed");
 261     assert(_indexedFreeList[i].head() == NULL, "reset check failed");
 262     assert(_indexedFreeList[i].tail() == NULL, "reset check failed");
 263     assert(_indexedFreeList[i].hint() == IndexSetSize, "reset check failed");
 264   }
 265 }
 266 
 267 void CompactibleFreeListSpace::reset(MemRegion mr) {
 268   resetIndexedFreeListArray();
 269   dictionary()->reset();
 270   if (BlockOffsetArrayUseUnallocatedBlock) {
 271     assert(end() == mr.end(), "We are compacting to the bottom of CMS gen");
 272     // Everything's allocated until proven otherwise.
 273     _bt.set_unallocated_block(end());
 274   }
 275   if (!mr.is_empty()) {
 276     assert(mr.word_size() >= MinChunkSize, "Chunk size is too small");
 277     _bt.single_block(mr.start(), mr.word_size());
 278     FreeChunk* fc = (FreeChunk*) mr.start();
 279     fc->set_size(mr.word_size());
  280     if (mr.word_size() >= IndexSetSize) {
 281       returnChunkToDictionary(fc);
 282     } else {
 283       _bt.verify_not_unallocated((HeapWord*)fc, fc->size());
 284       _indexedFreeList[mr.word_size()].return_chunk_at_head(fc);
 285     }
 286   }
 287   _promoInfo.reset();
 288   _smallLinearAllocBlock._ptr = NULL;
 289   _smallLinearAllocBlock._word_size = 0;
 290 }
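
// Illustrative note: reset() routes the rebuilt free chunk purely by size.
// A chunk of at least IndexSetSize words goes to the dictionary; a smaller
// one goes onto the exact-size indexed list for its word size (for example,
// a hypothetical 10-word region, with 10 < IndexSetSize, would end up at the
// head of _indexedFreeList[10]).  The usual post-compaction case is a large
// tail region, which takes the dictionary path.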
 291 
 292 void CompactibleFreeListSpace::reset_after_compaction() {
 293   // Reset the space to the new reality - one free chunk.
 294   MemRegion mr(compaction_top(), end());
 295   reset(mr);
 296   // Now refill the linear allocation block(s) if possible.
 297   if (_adaptive_freelists) {
 298     refillLinearAllocBlocksIfNeeded();
 299   } else {
 300     // Place as much of mr in the linAB as we can get,
 301     // provided it was big enough to go into the dictionary.
 302     FreeChunk* fc = dictionary()->find_largest_dict();
 303     if (fc != NULL) {
 304       assert(fc->size() == mr.word_size(),
 305              "Why was the chunk broken up?");
 306       removeChunkFromDictionary(fc);
 307       HeapWord* addr = (HeapWord*) fc;
  308       _smallLinearAllocBlock.set(addr, fc->size(),
 309         1024*SmallForLinearAlloc, fc->size());
 310       // Note that _unallocated_block is not updated here.
 311     }
 312   }
 313 }
 314 
 315 // Walks the entire dictionary, returning a coterminal
 316 // chunk, if it exists. Use with caution since it involves
 317 // a potentially complete walk of a potentially large tree.
 318 FreeChunk* CompactibleFreeListSpace::find_chunk_at_end() {
 319 
 320   assert_lock_strong(&_freelistLock);
 321 
 322   return dictionary()->find_chunk_ends_at(end());
 323 }
 324 
 325 
 326 #ifndef PRODUCT
 327 void CompactibleFreeListSpace::initializeIndexedFreeListArrayReturnedBytes() {
 328   for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
 329     _indexedFreeList[i].allocation_stats()->set_returned_bytes(0);
 330   }
 331 }
 332 
 333 size_t CompactibleFreeListSpace::sumIndexedFreeListArrayReturnedBytes() {
 334   size_t sum = 0;
 335   for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
 336     sum += _indexedFreeList[i].allocation_stats()->returned_bytes();
 337   }
 338   return sum;
 339 }
 340 
 341 size_t CompactibleFreeListSpace::totalCountInIndexedFreeLists() const {
 342   size_t count = 0;
 343   for (size_t i = IndexSetStart; i < IndexSetSize; i++) {
 344     debug_only(
 345       ssize_t total_list_count = 0;
 346       for (FreeChunk* fc = _indexedFreeList[i].head(); fc != NULL;
 347          fc = fc->next()) {
 348         total_list_count++;
 349       }
 350       assert(total_list_count ==  _indexedFreeList[i].count(),
 351         "Count in list is incorrect");
 352     )
 353     count += _indexedFreeList[i].count();
 354   }
 355   return count;
 356 }
 357 
 358 size_t CompactibleFreeListSpace::totalCount() {
 359   size_t num = totalCountInIndexedFreeLists();
 360   num +=  dictionary()->total_count();
 361   if (_smallLinearAllocBlock._word_size != 0) {
 362     num++;
 363   }
 364   return num;
 365 }
 366 #endif
 367 
 368 bool CompactibleFreeListSpace::is_free_block(const HeapWord* p) const {
 369   FreeChunk* fc = (FreeChunk*) p;
 370   return fc->is_free();
 371 }
 372 
 373 size_t CompactibleFreeListSpace::used() const {
 374   return capacity() - free();
 375 }
 376 
 377 size_t CompactibleFreeListSpace::free() const {
 378   // "MT-safe, but not MT-precise"(TM), if you will: i.e.
 379   // if you do this while the structures are in flux you
 380   // may get an approximate answer only; for instance
 381   // because there is concurrent allocation either
 382   // directly by mutators or for promotion during a GC.
 383   // It's "MT-safe", however, in the sense that you are guaranteed
 384   // not to crash and burn, for instance, because of walking
 385   // pointers that could disappear as you were walking them.
 386   // The approximation is because the various components
 387   // that are read below are not read atomically (and
 388   // further the computation of totalSizeInIndexedFreeLists()
  389   // is itself a non-atomic computation). The normal use of
 390   // this is during a resize operation at the end of GC
 391   // and at that time you are guaranteed to get the
 392   // correct actual value. However, for instance, this is
 393   // also read completely asynchronously by the "perf-sampler"
 394   // that supports jvmstat, and you are apt to see the values
 395   // flicker in such cases.
 396   assert(_dictionary != NULL, "No _dictionary?");
 397   return (_dictionary->total_chunk_size(DEBUG_ONLY(freelistLock())) +
 398           totalSizeInIndexedFreeLists() +
 399           _smallLinearAllocBlock._word_size) * HeapWordSize;
 400 }
 401 
 402 size_t CompactibleFreeListSpace::max_alloc_in_words() const {
 403   assert(_dictionary != NULL, "No _dictionary?");
 404   assert_locked();
 405   size_t res = _dictionary->max_chunk_size();
 406   res = MAX2(res, MIN2(_smallLinearAllocBlock._word_size,
 407                        (size_t) SmallForLinearAlloc - 1));
 408   // XXX the following could potentially be pretty slow;
  409   // should one, pessimally for the rare cases when res
  410   // calculated above is less than IndexSetSize,
 411   // just return res calculated above? My reasoning was that
 412   // those cases will be so rare that the extra time spent doesn't
 413   // really matter....
 414   // Note: do not change the loop test i >= res + IndexSetStride
 415   // to i > res below, because i is unsigned and res may be zero.
 416   for (size_t i = IndexSetSize - 1; i >= res + IndexSetStride;
 417        i -= IndexSetStride) {
 418     if (_indexedFreeList[i].head() != NULL) {
 419       assert(_indexedFreeList[i].count() != 0, "Inconsistent FreeList");
 420       return i;
 421     }
 422   }
 423   return res;
 424 }
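
// Illustrative note (hypothetical numbers): the downward scan above only
// improves on "res" when some indexed list of a larger exact size is
// non-empty.  For instance, if the dictionary's largest chunk were 3 words
// but _indexedFreeList[40] held a chunk, the method would return 40.
// Scanning from IndexSetSize - 1 down to res + IndexSetStride keeps the loop
// well defined even when res == 0, as the comment above explains.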
 425 
 426 void LinearAllocBlock::print_on(outputStream* st) const {
 427   st->print_cr(" LinearAllocBlock: ptr = " PTR_FORMAT ", word_size = " SIZE_FORMAT
 428             ", refillsize = " SIZE_FORMAT ", allocation_size_limit = " SIZE_FORMAT,
 429             _ptr, _word_size, _refillSize, _allocation_size_limit);
 430 }
 431 
 432 void CompactibleFreeListSpace::print_on(outputStream* st) const {
 433   st->print_cr("COMPACTIBLE FREELIST SPACE");
 434   st->print_cr(" Space:");
 435   Space::print_on(st);
 436 
 437   st->print_cr("promoInfo:");
 438   _promoInfo.print_on(st);
 439 
 440   st->print_cr("_smallLinearAllocBlock");
 441   _smallLinearAllocBlock.print_on(st);
 442 
 443   // dump_memory_block(_smallLinearAllocBlock->_ptr, 128);
 444 
 445   st->print_cr(" _fitStrategy = %s, _adaptive_freelists = %s",
  446                _fitStrategy ? "true" : "false", _adaptive_freelists ? "true" : "false");
 447 }
 448 
 449 void CompactibleFreeListSpace::print_indexed_free_lists(outputStream* st)
 450 const {
 451   reportIndexedFreeListStatistics();
 452   gclog_or_tty->print_cr("Layout of Indexed Freelists");
 453   gclog_or_tty->print_cr("---------------------------");
 454   AdaptiveFreeList<FreeChunk>::print_labels_on(st, "size");
 455   for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
 456     _indexedFreeList[i].print_on(gclog_or_tty);
 457     for (FreeChunk* fc = _indexedFreeList[i].head(); fc != NULL;
 458          fc = fc->next()) {
 459       gclog_or_tty->print_cr("\t[" PTR_FORMAT "," PTR_FORMAT ")  %s",
 460                           fc, (HeapWord*)fc + i,
 461                           fc->cantCoalesce() ? "\t CC" : "");
 462     }
 463   }
 464 }
 465 
 466 void CompactibleFreeListSpace::print_promo_info_blocks(outputStream* st)
 467 const {
 468   _promoInfo.print_on(st);
 469 }
 470 
 471 void CompactibleFreeListSpace::print_dictionary_free_lists(outputStream* st)
 472 const {
 473   _dictionary->report_statistics();
 474   st->print_cr("Layout of Freelists in Tree");
 475   st->print_cr("---------------------------");
 476   _dictionary->print_free_lists(st);
 477 }
 478 
 479 class BlkPrintingClosure: public BlkClosure {
 480   const CMSCollector*             _collector;
 481   const CompactibleFreeListSpace* _sp;
 482   const CMSBitMap*                _live_bit_map;
 483   const bool                      _post_remark;
 484   outputStream*                   _st;
 485 public:
 486   BlkPrintingClosure(const CMSCollector* collector,
 487                      const CompactibleFreeListSpace* sp,
 488                      const CMSBitMap* live_bit_map,
 489                      outputStream* st):
 490     _collector(collector),
 491     _sp(sp),
 492     _live_bit_map(live_bit_map),
 493     _post_remark(collector->abstract_state() > CMSCollector::FinalMarking),
 494     _st(st) { }
 495   size_t do_blk(HeapWord* addr);
 496 };
 497 
 498 size_t BlkPrintingClosure::do_blk(HeapWord* addr) {
 499   size_t sz = _sp->block_size_no_stall(addr, _collector);
 500   assert(sz != 0, "Should always be able to compute a size");
 501   if (_sp->block_is_obj(addr)) {
 502     const bool dead = _post_remark && !_live_bit_map->isMarked(addr);
 503     _st->print_cr(PTR_FORMAT ": %s object of size " SIZE_FORMAT "%s",
 504       addr,
 505       dead ? "dead" : "live",
 506       sz,
 507       (!dead && CMSPrintObjectsInDump) ? ":" : ".");
 508     if (CMSPrintObjectsInDump && !dead) {
 509       oop(addr)->print_on(_st);
 510       _st->print_cr("--------------------------------------");
 511     }
 512   } else { // free block
 513     _st->print_cr(PTR_FORMAT ": free block of size " SIZE_FORMAT "%s",
 514       addr, sz, CMSPrintChunksInDump ? ":" : ".");
 515     if (CMSPrintChunksInDump) {
 516       ((FreeChunk*)addr)->print_on(_st);
 517       _st->print_cr("--------------------------------------");
 518     }
 519   }
 520   return sz;
 521 }
 522 
 523 void CompactibleFreeListSpace::dump_at_safepoint_with_locks(CMSCollector* c,
 524   outputStream* st) {
 525   st->print_cr("\n=========================");
 526   st->print_cr("Block layout in CMS Heap:");
 527   st->print_cr("=========================");
 528   BlkPrintingClosure  bpcl(c, this, c->markBitMap(), st);
 529   blk_iterate(&bpcl);
 530 
 531   st->print_cr("\n=======================================");
 532   st->print_cr("Order & Layout of Promotion Info Blocks");
 533   st->print_cr("=======================================");
 534   print_promo_info_blocks(st);
 535 
  536   st->print_cr("\n===========================");
  537   st->print_cr("Order of Indexed Free Lists");
  538   st->print_cr("===========================");
 539   print_indexed_free_lists(st);
 540 
 541   st->print_cr("\n=================================");
 542   st->print_cr("Order of Free Lists in Dictionary");
 543   st->print_cr("=================================");
 544   print_dictionary_free_lists(st);
 545 }
 546 
 547 
 548 void CompactibleFreeListSpace::reportFreeListStatistics() const {
 549   assert_lock_strong(&_freelistLock);
 550   assert(PrintFLSStatistics != 0, "Reporting error");
 551   _dictionary->report_statistics();
 552   if (PrintFLSStatistics > 1) {
 553     reportIndexedFreeListStatistics();
 554     size_t total_size = totalSizeInIndexedFreeLists() +
 555                        _dictionary->total_chunk_size(DEBUG_ONLY(freelistLock()));
 556     gclog_or_tty->print(" free=" SIZE_FORMAT " frag=%1.4f\n", total_size, flsFrag());
 557   }
 558 }
 559 
 560 void CompactibleFreeListSpace::reportIndexedFreeListStatistics() const {
 561   assert_lock_strong(&_freelistLock);
 562   gclog_or_tty->print("Statistics for IndexedFreeLists:\n"
 563                       "--------------------------------\n");
 564   size_t total_size = totalSizeInIndexedFreeLists();
 565   size_t   free_blocks = numFreeBlocksInIndexedFreeLists();
  566   gclog_or_tty->print("Total Free Space: " SIZE_FORMAT "\n", total_size);
  567   gclog_or_tty->print("Max   Chunk Size: " SIZE_FORMAT "\n", maxChunkSizeInIndexedFreeLists());
  568   gclog_or_tty->print("Number of Blocks: " SIZE_FORMAT "\n", free_blocks);
  569   if (free_blocks != 0) {
  570     gclog_or_tty->print("Av.  Block  Size: " SIZE_FORMAT "\n", total_size/free_blocks);
 571   }
 572 }
 573 
 574 size_t CompactibleFreeListSpace::numFreeBlocksInIndexedFreeLists() const {
 575   size_t res = 0;
 576   for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
 577     debug_only(
 578       ssize_t recount = 0;
 579       for (FreeChunk* fc = _indexedFreeList[i].head(); fc != NULL;
 580          fc = fc->next()) {
 581         recount += 1;
 582       }
 583       assert(recount == _indexedFreeList[i].count(),
 584         "Incorrect count in list");
 585     )
 586     res += _indexedFreeList[i].count();
 587   }
 588   return res;
 589 }
 590 
 591 size_t CompactibleFreeListSpace::maxChunkSizeInIndexedFreeLists() const {
 592   for (size_t i = IndexSetSize - 1; i != 0; i -= IndexSetStride) {
 593     if (_indexedFreeList[i].head() != NULL) {
 594       assert(_indexedFreeList[i].count() != 0, "Inconsistent FreeList");
 595       return (size_t)i;
 596     }
 597   }
 598   return 0;
 599 }
 600 
 601 void CompactibleFreeListSpace::set_end(HeapWord* value) {
 602   HeapWord* prevEnd = end();
 603   assert(prevEnd != value, "unnecessary set_end call");
 604   assert(prevEnd == NULL || !BlockOffsetArrayUseUnallocatedBlock || value >= unallocated_block(),
 605         "New end is below unallocated block");
 606   _end = value;
 607   if (prevEnd != NULL) {
 608     // Resize the underlying block offset table.
 609     _bt.resize(pointer_delta(value, bottom()));
 610     if (value <= prevEnd) {
 611       assert(!BlockOffsetArrayUseUnallocatedBlock || value >= unallocated_block(),
 612              "New end is below unallocated block");
 613     } else {
 614       // Now, take this new chunk and add it to the free blocks.
 615       // Note that the BOT has not yet been updated for this block.
 616       size_t newFcSize = pointer_delta(value, prevEnd);
 617       // XXX This is REALLY UGLY and should be fixed up. XXX
 618       if (!_adaptive_freelists && _smallLinearAllocBlock._ptr == NULL) {
 619         // Mark the boundary of the new block in BOT
 620         _bt.mark_block(prevEnd, value);
 621         // put it all in the linAB
 622         if (ParallelGCThreads == 0) {
 623           _smallLinearAllocBlock._ptr = prevEnd;
 624           _smallLinearAllocBlock._word_size = newFcSize;
 625           repairLinearAllocBlock(&_smallLinearAllocBlock);
 626         } else { // ParallelGCThreads > 0
 627           MutexLockerEx x(parDictionaryAllocLock(),
 628                           Mutex::_no_safepoint_check_flag);
 629           _smallLinearAllocBlock._ptr = prevEnd;
 630           _smallLinearAllocBlock._word_size = newFcSize;
 631           repairLinearAllocBlock(&_smallLinearAllocBlock);
 632         }
 633         // Births of chunks put into a LinAB are not recorded.  Births
 634         // of chunks as they are allocated out of a LinAB are.
 635       } else {
 636         // Add the block to the free lists, if possible coalescing it
 637         // with the last free block, and update the BOT and census data.
 638         addChunkToFreeListsAtEndRecordingStats(prevEnd, newFcSize);
 639       }
 640     }
 641   }
 642 }
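
// Descriptive note on set_end() above: once the space has been initialized
// there are two cases.  Shrinking (value <= prevEnd) only resizes the block
// offset table, relying on the assertion that nothing below
// unallocated_block() is being cut off.  Growing creates a brand-new free
// block [prevEnd, value): when adaptive free lists are off and the small
// linAB is currently empty, the new block is parked in the linAB (taking
// parDictionaryAllocLock() when parallel GC threads may also touch it);
// otherwise it is handed to addChunkToFreeListsAtEndRecordingStats(), which
// may coalesce it with the last free block and updates the BOT and census.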
 643 
 644 class FreeListSpace_DCTOC : public Filtering_DCTOC {
 645   CompactibleFreeListSpace* _cfls;
 646   CMSCollector* _collector;
 647 protected:
 648   // Override.
 649 #define walk_mem_region_with_cl_DECL(ClosureType)                       \
 650   virtual void walk_mem_region_with_cl(MemRegion mr,                    \
 651                                        HeapWord* bottom, HeapWord* top, \
 652                                        ClosureType* cl);                \
 653       void walk_mem_region_with_cl_par(MemRegion mr,                    \
 654                                        HeapWord* bottom, HeapWord* top, \
 655                                        ClosureType* cl);                \
 656     void walk_mem_region_with_cl_nopar(MemRegion mr,                    \
 657                                        HeapWord* bottom, HeapWord* top, \
 658                                        ClosureType* cl)
 659   walk_mem_region_with_cl_DECL(ExtendedOopClosure);
 660   walk_mem_region_with_cl_DECL(FilteringClosure);
 661 
 662 public:
 663   FreeListSpace_DCTOC(CompactibleFreeListSpace* sp,
 664                       CMSCollector* collector,
 665                       ExtendedOopClosure* cl,
 666                       CardTableModRefBS::PrecisionStyle precision,
 667                       HeapWord* boundary) :
 668     Filtering_DCTOC(sp, cl, precision, boundary),
 669     _cfls(sp), _collector(collector) {}
 670 };
 671 
 672 // We de-virtualize the block-related calls below, since we know that our
 673 // space is a CompactibleFreeListSpace.
 674 
 675 #define FreeListSpace_DCTOC__walk_mem_region_with_cl_DEFN(ClosureType)          \
 676 void FreeListSpace_DCTOC::walk_mem_region_with_cl(MemRegion mr,                 \
 677                                                  HeapWord* bottom,              \
 678                                                  HeapWord* top,                 \
 679                                                  ClosureType* cl) {             \
 680    bool is_par = SharedHeap::heap()->n_par_threads() > 0;                       \
 681    if (is_par) {                                                                \
 682      assert(SharedHeap::heap()->n_par_threads() ==                              \
 683             SharedHeap::heap()->workers()->active_workers(), "Mismatch");       \
 684      walk_mem_region_with_cl_par(mr, bottom, top, cl);                          \
 685    } else {                                                                     \
 686      walk_mem_region_with_cl_nopar(mr, bottom, top, cl);                        \
 687    }                                                                            \
 688 }                                                                               \
 689 void FreeListSpace_DCTOC::walk_mem_region_with_cl_par(MemRegion mr,             \
 690                                                       HeapWord* bottom,         \
 691                                                       HeapWord* top,            \
 692                                                       ClosureType* cl) {        \
 693   /* Skip parts that are before "mr", in case "block_start" sent us             \
 694      back too far. */                                                           \
 695   HeapWord* mr_start = mr.start();                                              \
 696   size_t bot_size = _cfls->CompactibleFreeListSpace::block_size(bottom);        \
 697   HeapWord* next = bottom + bot_size;                                           \
 698   while (next < mr_start) {                                                     \
 699     bottom = next;                                                              \
 700     bot_size = _cfls->CompactibleFreeListSpace::block_size(bottom);             \
 701     next = bottom + bot_size;                                                   \
 702   }                                                                             \
 703                                                                                 \
 704   while (bottom < top) {                                                        \
 705     if (_cfls->CompactibleFreeListSpace::block_is_obj(bottom) &&                \
 706         !_cfls->CompactibleFreeListSpace::obj_allocated_since_save_marks(       \
 707                     oop(bottom)) &&                                             \
 708         !_collector->CMSCollector::is_dead_obj(oop(bottom))) {                  \
 709       size_t word_sz = oop(bottom)->oop_iterate(cl, mr);                        \
 710       bottom += _cfls->adjustObjectSize(word_sz);                               \
 711     } else {                                                                    \
 712       bottom += _cfls->CompactibleFreeListSpace::block_size(bottom);            \
 713     }                                                                           \
 714   }                                                                             \
 715 }                                                                               \
 716 void FreeListSpace_DCTOC::walk_mem_region_with_cl_nopar(MemRegion mr,           \
 717                                                         HeapWord* bottom,       \
 718                                                         HeapWord* top,          \
 719                                                         ClosureType* cl) {      \
 720   /* Skip parts that are before "mr", in case "block_start" sent us             \
 721      back too far. */                                                           \
 722   HeapWord* mr_start = mr.start();                                              \
 723   size_t bot_size = _cfls->CompactibleFreeListSpace::block_size_nopar(bottom);  \
 724   HeapWord* next = bottom + bot_size;                                           \
 725   while (next < mr_start) {                                                     \
 726     bottom = next;                                                              \
 727     bot_size = _cfls->CompactibleFreeListSpace::block_size_nopar(bottom);       \
 728     next = bottom + bot_size;                                                   \
 729   }                                                                             \
 730                                                                                 \
 731   while (bottom < top) {                                                        \
 732     if (_cfls->CompactibleFreeListSpace::block_is_obj_nopar(bottom) &&          \
 733         !_cfls->CompactibleFreeListSpace::obj_allocated_since_save_marks(       \
 734                     oop(bottom)) &&                                             \
 735         !_collector->CMSCollector::is_dead_obj(oop(bottom))) {                  \
 736       size_t word_sz = oop(bottom)->oop_iterate(cl, mr);                        \
 737       bottom += _cfls->adjustObjectSize(word_sz);                               \
 738     } else {                                                                    \
 739       bottom += _cfls->CompactibleFreeListSpace::block_size_nopar(bottom);      \
 740     }                                                                           \
 741   }                                                                             \
 742 }
 743 
 744 // (There are only two of these, rather than N, because the split is due
 745 // only to the introduction of the FilteringClosure, a local part of the
 746 // impl of this abstraction.)
 747 FreeListSpace_DCTOC__walk_mem_region_with_cl_DEFN(ExtendedOopClosure)
 748 FreeListSpace_DCTOC__walk_mem_region_with_cl_DEFN(FilteringClosure)
 749 
 750 DirtyCardToOopClosure*
 751 CompactibleFreeListSpace::new_dcto_cl(ExtendedOopClosure* cl,
 752                                       CardTableModRefBS::PrecisionStyle precision,
 753                                       HeapWord* boundary) {
 754   return new FreeListSpace_DCTOC(this, _collector, cl, precision, boundary);
 755 }
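
// Illustrative note on the "de-virtualize" comment above: an explicitly
// qualified call such as
//
//   _cfls->CompactibleFreeListSpace::block_size(bottom);
//
// is bound statically, so no virtual dispatch through Space::block_size()
// occurs.  Since _cfls is known to be a CompactibleFreeListSpace, this is
// safe and saves an indirect call on every block visited during card
// scanning.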
 756 
 757 
 758 // Note on locking for the space iteration functions:
 759 // since the collector's iteration activities are concurrent with
 760 // allocation activities by mutators, absent a suitable mutual exclusion
  761 // mechanism the iterators may go awry. For instance, a block being iterated
 762 // may suddenly be allocated or divided up and part of it allocated and
 763 // so on.
 764 
 765 // Apply the given closure to each block in the space.
 766 void CompactibleFreeListSpace::blk_iterate_careful(BlkClosureCareful* cl) {
 767   assert_lock_strong(freelistLock());
 768   HeapWord *cur, *limit;
 769   for (cur = bottom(), limit = end(); cur < limit;
 770        cur += cl->do_blk_careful(cur));
 771 }
 772 
 773 // Apply the given closure to each block in the space.
 774 void CompactibleFreeListSpace::blk_iterate(BlkClosure* cl) {
 775   assert_lock_strong(freelistLock());
 776   HeapWord *cur, *limit;
 777   for (cur = bottom(), limit = end(); cur < limit;
 778        cur += cl->do_blk(cur));
 779 }
 780 
 781 // Apply the given closure to each oop in the space.
 782 void CompactibleFreeListSpace::oop_iterate(ExtendedOopClosure* cl) {
 783   assert_lock_strong(freelistLock());
 784   HeapWord *cur, *limit;
 785   size_t curSize;
 786   for (cur = bottom(), limit = end(); cur < limit;
 787        cur += curSize) {
 788     curSize = block_size(cur);
 789     if (block_is_obj(cur)) {
 790       oop(cur)->oop_iterate(cl);
 791     }
 792   }
 793 }
 794 
 795 // Apply the given closure to each oop in the space \intersect memory region.
 796 void CompactibleFreeListSpace::oop_iterate(MemRegion mr, ExtendedOopClosure* cl) {
 797   assert_lock_strong(freelistLock());
 798   if (is_empty()) {
 799     return;
 800   }
 801   MemRegion cur = MemRegion(bottom(), end());
 802   mr = mr.intersection(cur);
 803   if (mr.is_empty()) {
 804     return;
 805   }
 806   if (mr.equals(cur)) {
 807     oop_iterate(cl);
 808     return;
 809   }
 810   assert(mr.end() <= end(), "just took an intersection above");
 811   HeapWord* obj_addr = block_start(mr.start());
 812   HeapWord* t = mr.end();
 813 
 814   SpaceMemRegionOopsIterClosure smr_blk(cl, mr);
 815   if (block_is_obj(obj_addr)) {
 816     // Handle first object specially.
 817     oop obj = oop(obj_addr);
 818     obj_addr += adjustObjectSize(obj->oop_iterate(&smr_blk));
 819   } else {
 820     FreeChunk* fc = (FreeChunk*)obj_addr;
 821     obj_addr += fc->size();
 822   }
 823   while (obj_addr < t) {
 824     HeapWord* obj = obj_addr;
 825     obj_addr += block_size(obj_addr);
  826     // If "obj_addr" is not greater than t (the end of the region), then
  827     // the entire object "obj" is within the region.
 828     if (obj_addr <= t) {
 829       if (block_is_obj(obj)) {
 830         oop(obj)->oop_iterate(cl);
 831       }
 832     } else {
 833       // "obj" extends beyond end of region
 834       if (block_is_obj(obj)) {
 835         oop(obj)->oop_iterate(&smr_blk);
 836       }
 837       break;
 838     }
 839   }
 840 }
 841 
 842 // NOTE: In the following methods, in order to safely be able to
 843 // apply the closure to an object, we need to be sure that the
 844 // object has been initialized. We are guaranteed that an object
 845 // is initialized if we are holding the Heap_lock with the
 846 // world stopped.
 847 void CompactibleFreeListSpace::verify_objects_initialized() const {
 848   if (is_init_completed()) {
 849     assert_locked_or_safepoint(Heap_lock);
 850     if (Universe::is_fully_initialized()) {
 851       guarantee(SafepointSynchronize::is_at_safepoint(),
 852                 "Required for objects to be initialized");
 853     }
 854   } // else make a concession at vm start-up
 855 }
 856 
 857 // Apply the given closure to each object in the space
 858 void CompactibleFreeListSpace::object_iterate(ObjectClosure* blk) {
 859   assert_lock_strong(freelistLock());
 860   NOT_PRODUCT(verify_objects_initialized());
 861   HeapWord *cur, *limit;
 862   size_t curSize;
 863   for (cur = bottom(), limit = end(); cur < limit;
 864        cur += curSize) {
 865     curSize = block_size(cur);
 866     if (block_is_obj(cur)) {
 867       blk->do_object(oop(cur));
 868     }
 869   }
 870 }
 871 
 872 // Apply the given closure to each live object in the space
 873 //   The usage of CompactibleFreeListSpace
  874 //   The use of CompactibleFreeListSpace
  875 // by the ConcurrentMarkSweepGeneration for concurrent GCs allows
  876 // objects in the space to hold references to objects that are no longer
  877 // valid.  For example, an object may reference another object
  878 // that has already been swept up (collected).  This method uses
 879 // an object.  See obj_is_alive() for details on how liveness of an
 880 // object is decided.
 881 
 882 void CompactibleFreeListSpace::safe_object_iterate(ObjectClosure* blk) {
 883   assert_lock_strong(freelistLock());
 884   NOT_PRODUCT(verify_objects_initialized());
 885   HeapWord *cur, *limit;
 886   size_t curSize;
 887   for (cur = bottom(), limit = end(); cur < limit;
 888        cur += curSize) {
 889     curSize = block_size(cur);
 890     if (block_is_obj(cur) && obj_is_alive(cur)) {
 891       blk->do_object(oop(cur));
 892     }
 893   }
 894 }
 895 
 896 void CompactibleFreeListSpace::object_iterate_mem(MemRegion mr,
 897                                                   UpwardsObjectClosure* cl) {
 898   assert_locked(freelistLock());
 899   NOT_PRODUCT(verify_objects_initialized());
 900   Space::object_iterate_mem(mr, cl);
 901 }
 902 
 903 // Callers of this iterator beware: The closure application should
 904 // be robust in the face of uninitialized objects and should (always)
 905 // return a correct size so that the next addr + size below gives us a
 906 // valid block boundary. [See for instance,
 907 // ScanMarkedObjectsAgainCarefullyClosure::do_object_careful()
 908 // in ConcurrentMarkSweepGeneration.cpp.]
 909 HeapWord*
 910 CompactibleFreeListSpace::object_iterate_careful(ObjectClosureCareful* cl) {
 911   assert_lock_strong(freelistLock());
 912   HeapWord *addr, *last;
 913   size_t size;
 914   for (addr = bottom(), last  = end();
 915        addr < last; addr += size) {
 916     FreeChunk* fc = (FreeChunk*)addr;
 917     if (fc->is_free()) {
 918       // Since we hold the free list lock, which protects direct
 919       // allocation in this generation by mutators, a free object
 920       // will remain free throughout this iteration code.
 921       size = fc->size();
 922     } else {
 923       // Note that the object need not necessarily be initialized,
 924       // because (for instance) the free list lock does NOT protect
 925       // object initialization. The closure application below must
 926       // therefore be correct in the face of uninitialized objects.
 927       size = cl->do_object_careful(oop(addr));
 928       if (size == 0) {
  929         // An unparsable object was found. Signal early termination.
 930         return addr;
 931       }
 932     }
 933   }
 934   return NULL;
 935 }
 936 
 937 // Callers of this iterator beware: The closure application should
 938 // be robust in the face of uninitialized objects and should (always)
 939 // return a correct size so that the next addr + size below gives us a
 940 // valid block boundary. [See for instance,
 941 // ScanMarkedObjectsAgainCarefullyClosure::do_object_careful()
 942 // in ConcurrentMarkSweepGeneration.cpp.]
 943 HeapWord*
 944 CompactibleFreeListSpace::object_iterate_careful_m(MemRegion mr,
 945   ObjectClosureCareful* cl) {
 946   assert_lock_strong(freelistLock());
 947   // Can't use used_region() below because it may not necessarily
 948   // be the same as [bottom(),end()); although we could
 949   // use [used_region().start(),round_to(used_region().end(),CardSize)),
 950   // that appears too cumbersome, so we just do the simpler check
 951   // in the assertion below.
 952   assert(!mr.is_empty() && MemRegion(bottom(),end()).contains(mr),
 953          "mr should be non-empty and within used space");
 954   HeapWord *addr, *end;
 955   size_t size;
 956   for (addr = block_start_careful(mr.start()), end  = mr.end();
 957        addr < end; addr += size) {
 958     FreeChunk* fc = (FreeChunk*)addr;
 959     if (fc->is_free()) {
 960       // Since we hold the free list lock, which protects direct
 961       // allocation in this generation by mutators, a free object
 962       // will remain free throughout this iteration code.
 963       size = fc->size();
 964     } else {
 965       // Note that the object need not necessarily be initialized,
 966       // because (for instance) the free list lock does NOT protect
 967       // object initialization. The closure application below must
 968       // therefore be correct in the face of uninitialized objects.
 969       size = cl->do_object_careful_m(oop(addr), mr);
 970       if (size == 0) {
  971         // An unparsable object was found. Signal early termination.
 972         return addr;
 973       }
 974     }
 975   }
 976   return NULL;
 977 }
 978 
 979 
 980 HeapWord* CompactibleFreeListSpace::block_start_const(const void* p) const {
 981   NOT_PRODUCT(verify_objects_initialized());
 982   return _bt.block_start(p);
 983 }
 984 
 985 HeapWord* CompactibleFreeListSpace::block_start_careful(const void* p) const {
 986   return _bt.block_start_careful(p);
 987 }
 988 
 989 size_t CompactibleFreeListSpace::block_size(const HeapWord* p) const {
 990   NOT_PRODUCT(verify_objects_initialized());
 991   // This must be volatile, or else there is a danger that the compiler
 992   // will compile the code below into a sometimes-infinite loop, by keeping
 993   // the value read the first time in a register.
 994   while (true) {
 995     // We must do this until we get a consistent view of the object.
 996     if (FreeChunk::indicatesFreeChunk(p)) {
 997       volatile FreeChunk* fc = (volatile FreeChunk*)p;
 998       size_t res = fc->size();
 999       // If the object is still a free chunk, return the size, else it
1000       // has been allocated so try again.
1001       if (FreeChunk::indicatesFreeChunk(p)) {
1002         assert(res != 0, "Block size should not be 0");
1003         return res;
1004       }
1005     } else {
1006       // must read from what 'p' points to in each loop.
1007       Klass* k = ((volatile oopDesc*)p)->klass_or_null();
1008       if (k != NULL) {
1009         assert(k->is_klass(), "Should really be klass oop.");
1010         oop o = (oop)p;
1011         assert(o->is_oop(true /* ignore mark word */), "Should be an oop.");
1012         size_t res = o->size_given_klass(k);
1013         res = adjustObjectSize(res);
1014         assert(res != 0, "Block size should not be 0");
1015         return res;
1016       }
1017     }
1018   }
1019 }
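
// Descriptive note on block_size() above: it is a lock-free read racing with
// concurrent allocation, hence the double check.
//   1. If p looks like a free chunk, read its size and then re-test
//      indicatesFreeChunk(p); if it still looks free the size read was a
//      consistent snapshot, otherwise the chunk was just allocated and we
//      retry.
//   2. Otherwise p should be an object; loop until klass_or_null() is
//      non-NULL, i.e. until the allocator has published the klass, and then
//      compute the size from the klass.
// The volatile casts keep the compiler from caching either read across loop
// iterations, as the comment at the top of the method warns.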
1020 
1021 // TODO: Now that is_parsable is gone, we should combine these two functions.
1022 // A variant of the above that uses the Printezis bits for
1023 // unparsable but allocated objects. This avoids any possible
1024 // stalls waiting for mutators to initialize objects, and is
1025 // thus potentially faster than the variant above. However,
1026 // this variant may return a zero size for a block that is
1027 // under mutation and for which a consistent size cannot be
1028 // inferred without stalling; see CMSCollector::block_size_if_printezis_bits().
1029 size_t CompactibleFreeListSpace::block_size_no_stall(HeapWord* p,
1030                                                      const CMSCollector* c)
1031 const {
1032   assert(MemRegion(bottom(), end()).contains(p), "p not in space");
1033   // This must be volatile, or else there is a danger that the compiler
1034   // will compile the code below into a sometimes-infinite loop, by keeping
1035   // the value read the first time in a register.
1036   DEBUG_ONLY(uint loops = 0;)
1037   while (true) {
1038     // We must do this until we get a consistent view of the object.
1039     if (FreeChunk::indicatesFreeChunk(p)) {
1040       volatile FreeChunk* fc = (volatile FreeChunk*)p;
1041       size_t res = fc->size();
1042       if (FreeChunk::indicatesFreeChunk(p)) {
1043         assert(res != 0, "Block size should not be 0");
1044         assert(loops == 0, "Should be 0");
1045         return res;
1046       }
1047     } else {
1048       // must read from what 'p' points to in each loop.
1049       Klass* k = ((volatile oopDesc*)p)->klass_or_null();
1050       // We trust the size of any object that has a non-NULL
1051       // klass and (for those in the perm gen) is parsable
1052       // -- irrespective of its conc_safe-ty.
1053       if (k != NULL) {
1054         assert(k->is_klass(), "Should really be klass oop.");
1055         oop o = (oop)p;
1056         assert(o->is_oop(), "Should be an oop");
1057         size_t res = o->size_given_klass(k);
1058         res = adjustObjectSize(res);
1059         assert(res != 0, "Block size should not be 0");
1060         return res;
1061       } else {
1062         // May return 0 if P-bits not present.
1063         return c->block_size_if_printezis_bits(p);
1064       }
1065     }
1066     assert(loops == 0, "Can loop at most once");
1067     DEBUG_ONLY(loops++;)
1068   }
1069 }
1070 
1071 size_t CompactibleFreeListSpace::block_size_nopar(const HeapWord* p) const {
1072   NOT_PRODUCT(verify_objects_initialized());
1073   assert(MemRegion(bottom(), end()).contains(p), "p not in space");
1074   FreeChunk* fc = (FreeChunk*)p;
1075   if (fc->is_free()) {
1076     return fc->size();
1077   } else {
1078     // Ignore mark word because this may be a recently promoted
1079     // object whose mark word is used to chain together grey
1080     // objects (the last one would have a null value).
1081     assert(oop(p)->is_oop(true), "Should be an oop");
1082     return adjustObjectSize(oop(p)->size());
1083   }
1084 }
1085 
1086 // This implementation assumes that the property of "being an object" is
1087 // stable.  But being a free chunk may not be (because of parallel
1088 // promotion.)
1089 bool CompactibleFreeListSpace::block_is_obj(const HeapWord* p) const {
1090   FreeChunk* fc = (FreeChunk*)p;
1091   assert(is_in_reserved(p), "Should be in space");
1092   // When doing a mark-sweep-compact of the CMS generation, this
1093   // assertion may fail because prepare_for_compaction() uses
1094   // space that is garbage to maintain information on ranges of
1095   // live objects so that these live ranges can be moved as a whole.
1096   // Comment out this assertion until that problem can be solved
 1097   // (i.e., the block start calculation may look at objects
 1098   // at addresses below "p" in finding the object that contains "p",
 1099   // and those objects (if garbage) may have been modified to hold
 1100   // live range information).
1101   // assert(CollectedHeap::use_parallel_gc_threads() || _bt.block_start(p) == p,
1102   //        "Should be a block boundary");
1103   if (FreeChunk::indicatesFreeChunk(p)) return false;
1104   Klass* k = oop(p)->klass_or_null();
1105   if (k != NULL) {
1106     // Ignore mark word because it may have been used to
1107     // chain together promoted objects (the last one
1108     // would have a null value).
1109     assert(oop(p)->is_oop(true), "Should be an oop");
1110     return true;
1111   } else {
1112     return false;  // Was not an object at the start of collection.
1113   }
1114 }
1115 
1116 // Check if the object is alive. This fact is checked either by consulting
1117 // the main marking bitmap in the sweeping phase or, if it's a permanent
1118 // generation and we're not in the sweeping phase, by checking the
1119 // perm_gen_verify_bit_map where we store the "deadness" information if
1120 // we did not sweep the perm gen in the most recent previous GC cycle.
1121 bool CompactibleFreeListSpace::obj_is_alive(const HeapWord* p) const {
1122   assert(SafepointSynchronize::is_at_safepoint() || !is_init_completed(),
1123          "Else races are possible");
1124   assert(block_is_obj(p), "The address should point to an object");
1125 
1126   // If we're sweeping, we use object liveness information from the main bit map
1127   // for both perm gen and old gen.
1128   // We don't need to lock the bitmap (live_map or dead_map below), because
1129   // EITHER we are in the middle of the sweeping phase, and the
1130   // main marking bit map (live_map below) is locked,
1131   // OR we're in other phases and perm_gen_verify_bit_map (dead_map below)
1132   // is stable, because it's mutated only in the sweeping phase.
1133   // NOTE: This method is also used by jmap where, if class unloading is
1134   // off, the results can return "false" for legitimate perm objects,
1135   // when we are not in the midst of a sweeping phase, which can result
1136   // in jmap not reporting certain perm gen objects. This will be moot
1137   // if/when the perm gen goes away in the future.
1138   if (_collector->abstract_state() == CMSCollector::Sweeping) {
1139     CMSBitMap* live_map = _collector->markBitMap();
1140     return live_map->par_isMarked((HeapWord*) p);
1141   }
1142   return true;
1143 }
1144 
1145 bool CompactibleFreeListSpace::block_is_obj_nopar(const HeapWord* p) const {
1146   FreeChunk* fc = (FreeChunk*)p;
1147   assert(is_in_reserved(p), "Should be in space");
1148   assert(_bt.block_start(p) == p, "Should be a block boundary");
1149   if (!fc->is_free()) {
1150     // Ignore mark word because it may have been used to
1151     // chain together promoted objects (the last one
1152     // would have a null value).
1153     assert(oop(p)->is_oop(true), "Should be an oop");
1154     return true;
1155   }
1156   return false;
1157 }
1158 
1159 // "MT-safe but not guaranteed MT-precise" (TM); you may get an
 1160 // approximate answer if you don't hold the freelistLock when you call this.
1161 size_t CompactibleFreeListSpace::totalSizeInIndexedFreeLists() const {
1162   size_t size = 0;
1163   for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
1164     debug_only(
1165       // We may be calling here without the lock in which case we
1166       // won't do this modest sanity check.
1167       if (freelistLock()->owned_by_self()) {
1168         size_t total_list_size = 0;
1169         for (FreeChunk* fc = _indexedFreeList[i].head(); fc != NULL;
1170           fc = fc->next()) {
1171           total_list_size += i;
1172         }
1173         assert(total_list_size == i * _indexedFreeList[i].count(),
1174                "Count in list is incorrect");
1175       }
1176     )
1177     size += i * _indexedFreeList[i].count();
1178   }
1179   return size;
1180 }
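
// Illustrative note (hypothetical counts): since list i holds only i-word
// chunks, its contribution to the total is simply i * count.  For example,
// if _indexedFreeList[8] held 3 chunks and _indexedFreeList[16] held 2, the
// two lists would contribute 8*3 + 16*2 = 56 words to the sum.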
1181 
1182 HeapWord* CompactibleFreeListSpace::par_allocate(size_t size) {
1183   MutexLockerEx x(freelistLock(), Mutex::_no_safepoint_check_flag);
1184   return allocate(size);
1185 }
1186 
1187 HeapWord*
1188 CompactibleFreeListSpace::getChunkFromSmallLinearAllocBlockRemainder(size_t size) {
1189   return getChunkFromLinearAllocBlockRemainder(&_smallLinearAllocBlock, size);
1190 }
1191 
1192 HeapWord* CompactibleFreeListSpace::allocate(size_t size) {
1193   assert_lock_strong(freelistLock());
1194   HeapWord* res = NULL;
1195   assert(size == adjustObjectSize(size),
1196          "use adjustObjectSize() before calling into allocate()");
1197 
1198   if (_adaptive_freelists) {
1199     res = allocate_adaptive_freelists(size);
1200   } else {  // non-adaptive free lists
1201     res = allocate_non_adaptive_freelists(size);
1202   }
1203 
1204   if (res != NULL) {
1205     // check that res does lie in this space!
1206     assert(is_in_reserved(res), "Not in this space!");
1207     assert(is_aligned((void*)res), "alignment check");
1208 
1209     FreeChunk* fc = (FreeChunk*)res;
1210     fc->markNotFree();
1211     assert(!fc->is_free(), "shouldn't be marked free");
1212     assert(oop(fc)->klass_or_null() == NULL, "should look uninitialized");
1213     // Verify that the block offset table shows this to
1214     // be a single block, but not one which is unallocated.
1215     _bt.verify_single_block(res, size);
1216     _bt.verify_not_unallocated(res, size);
1217     // mangle a just allocated object with a distinct pattern.
1218     debug_only(fc->mangleAllocated(size));
1219   }
1220 
1221   return res;
1222 }
1223 
1224 HeapWord* CompactibleFreeListSpace::allocate_non_adaptive_freelists(size_t size) {
1225   HeapWord* res = NULL;
1226   // try to use linear allocation for smaller blocks
1227   if (size < _smallLinearAllocBlock._allocation_size_limit) {
1228     // if successful, the following also adjusts block offset table
1229     res = getChunkFromSmallLinearAllocBlock(size);
1230   }
1231   // Else triage to indexed lists for smaller sizes
1232   if (res == NULL) {
1233     if (size < SmallForDictionary) {
1234       res = (HeapWord*) getChunkFromIndexedFreeList(size);
1235     } else {
1236       // else get it from the big dictionary; if even this doesn't
1237       // work we are out of luck.
1238       res = (HeapWord*)getChunkFromDictionaryExact(size);
1239     }
1240   }
1241 
1242   return res;
1243 }
1244 
1245 HeapWord* CompactibleFreeListSpace::allocate_adaptive_freelists(size_t size) {
1246   assert_lock_strong(freelistLock());
1247   HeapWord* res = NULL;
1248   assert(size == adjustObjectSize(size),
1249          "use adjustObjectSize() before calling into allocate()");
1250 
1251   // Strategy
1252   //   if small
1253   //     exact size from small object indexed list if small
1254   //     small or large linear allocation block (linAB) as appropriate
1255   //     take from lists of greater sized chunks
1256   //   else
1257   //     dictionary
1258   //     small or large linear allocation block if it has the space
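       // Illustration (assuming the usual IndexSetSize of 257, i.e. indexed
       // lists for sizes below 257 words): a 24-word request first tries
       // _indexedFreeList[24], then the small linAB, then larger indexed lists
       // via getChunkFromGreater(); a request of, say, 1000 words goes straight
       // to the dictionary, falling back to the small linAB remainder if that
       // fails.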
1259   // Try allocating exact size from indexTable first
1260   if (size < IndexSetSize) {
1261     res = (HeapWord*) getChunkFromIndexedFreeList(size);
1262     if (res != NULL) {
1263       assert(res != (HeapWord*)_indexedFreeList[size].head(),
1264         "Not removed from free list");
1265       // no block offset table adjustment is necessary on blocks in
1266       // the indexed lists.
1267 
1268     // Try allocating from the small LinAB
1269     } else if (size < _smallLinearAllocBlock._allocation_size_limit &&
1270         (res = getChunkFromSmallLinearAllocBlock(size)) != NULL) {
1271         // if successful, the above also adjusts block offset table
1272         // Note that this call will refill the LinAB to
1273         // satisfy the request.  This is different from
1274         // evm.
1275         // Don't record chunk off a LinAB?  smallSplitBirth(size);
1276     } else {
1277       // Raid the exact free lists larger than size, even if they are not
1278       // overpopulated.
1279       res = (HeapWord*) getChunkFromGreater(size);
1280     }
1281   } else {
1282     // Big objects get allocated directly from the dictionary.
1283     res = (HeapWord*) getChunkFromDictionaryExact(size);
1284     if (res == NULL) {
1285       // Try hard not to fail since an allocation failure will likely
1286       // trigger a synchronous GC.  Try to get the space from the
1287       // allocation blocks.
1288       res = getChunkFromSmallLinearAllocBlockRemainder(size);
1289     }
1290   }
1291 
1292   return res;
1293 }
1294 
1295 // A worst-case estimate of the space required (in HeapWords) to expand the heap
1296 // when promoting obj.
1297 size_t CompactibleFreeListSpace::expansionSpaceRequired(size_t obj_size) const {
1298   // Depending on the object size, expansion may require refilling either a
1299   // bigLAB or a smallLAB plus refilling a PromotionInfo object.  MinChunkSize
1300   // is added because the dictionary may over-allocate to avoid fragmentation.
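       // Worked example with hypothetical values: a 100-word object, a
       // 1000-word linAB refill size (non-adaptive freelists), a 50-word
       // promotion-info refill and a 4-word MinChunkSize yield
       // MAX2(100, 1000) + 50 + 2*4 = 1058 words.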
1301   size_t space = obj_size;
1302   if (!_adaptive_freelists) {
1303     space = MAX2(space, _smallLinearAllocBlock._refillSize);
1304   }
1305   space += _promoInfo.refillSize() + 2 * MinChunkSize;
1306   return space;
1307 }
1308 
1309 FreeChunk* CompactibleFreeListSpace::getChunkFromGreater(size_t numWords) {
1310   FreeChunk* ret;
1311 
1312   assert(numWords >= MinChunkSize, "Size is less than minimum");
1313   assert(linearAllocationWouldFail() || bestFitFirst(),
1314     "Should not be here");
1315 
1316   size_t i;
1317   size_t currSize = numWords + MinChunkSize;
1318   assert(currSize % MinObjAlignment == 0, "currSize should be aligned");
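       // Search only the indexed lists of size >= numWords + MinChunkSize, so
       // that splitting any chunk found there leaves a remainder of at least
       // MinChunkSize.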
1319   for (i = currSize; i < IndexSetSize; i += IndexSetStride) {
1320     AdaptiveFreeList<FreeChunk>* fl = &_indexedFreeList[i];
1321     if (fl->head()) {
1322       ret = getFromListGreater(fl, numWords);
1323       assert(ret == NULL || ret->is_free(), "Should be returning a free chunk");
1324       return ret;
1325     }
1326   }
1327 
1328   currSize = MAX2((size_t)SmallForDictionary,
1329                   (size_t)(numWords + MinChunkSize));
1330 
1331   /* Try to get a chunk that satisfies request, while avoiding
1332      fragmentation that can't be handled. */
1333   {
1334     ret =  dictionary()->get_chunk(currSize);
1335     if (ret != NULL) {
1336       assert(ret->size() - numWords >= MinChunkSize,
1337              "Chunk is too small");
1338       _bt.allocated((HeapWord*)ret, ret->size());
1339       /* Carve returned chunk. */
1340       (void) splitChunkAndReturnRemainder(ret, numWords);
1341       /* Label this as no longer a free chunk. */
1342       assert(ret->is_free(), "This chunk should be free");
1343       ret->link_prev(NULL);
1344     }
1345     assert(ret == NULL || ret->is_free(), "Should be returning a free chunk");
1346     return ret;
1347   }
1348   ShouldNotReachHere();
1349 }
1350 
1351 bool CompactibleFreeListSpace::verifyChunkInIndexedFreeLists(FreeChunk* fc) const {
1352   assert(fc->size() < IndexSetSize, "Size of chunk is too large");
1353   return _indexedFreeList[fc->size()].verify_chunk_in_free_list(fc);
1354 }
1355 
1356 bool CompactibleFreeListSpace::verify_chunk_is_linear_alloc_block(FreeChunk* fc) const {
1357   assert((_smallLinearAllocBlock._ptr != (HeapWord*)fc) ||
1358          (_smallLinearAllocBlock._word_size == fc->size()),
1359          "Linear allocation block shows incorrect size");
1360   return ((_smallLinearAllocBlock._ptr == (HeapWord*)fc) &&
1361           (_smallLinearAllocBlock._word_size == fc->size()));
1362 }
1363 
1364 // Check if the purported free chunk is present either as a linear
1365 // allocation block, the size-indexed table of (smaller) free blocks,
1366 // or the larger free blocks kept in the binary tree dictionary.
1367 bool CompactibleFreeListSpace::verify_chunk_in_free_list(FreeChunk* fc) const {
1368   if (verify_chunk_is_linear_alloc_block(fc)) {
1369     return true;
1370   } else if (fc->size() < IndexSetSize) {
1371     return verifyChunkInIndexedFreeLists(fc);
1372   } else {
1373     return dictionary()->verify_chunk_in_free_list(fc);
1374   }
1375 }
1376 
1377 #ifndef PRODUCT
1378 void CompactibleFreeListSpace::assert_locked() const {
1379   CMSLockVerifier::assert_locked(freelistLock(), parDictionaryAllocLock());
1380 }
1381 
1382 void CompactibleFreeListSpace::assert_locked(const Mutex* lock) const {
1383   CMSLockVerifier::assert_locked(lock);
1384 }
1385 #endif
1386 
1387 FreeChunk* CompactibleFreeListSpace::allocateScratch(size_t size) {
1388   // In the parallel case, the main thread holds the free list lock
1389   // on behalf of the parallel threads.
1390   FreeChunk* fc;
1391   {
1392     // If GC is parallel, this might be called by several threads.
1393     // This should be rare enough that the locking overhead won't affect
1394     // the sequential code.
1395     MutexLockerEx x(parDictionaryAllocLock(),
1396                     Mutex::_no_safepoint_check_flag);
1397     fc = getChunkFromDictionary(size);
1398   }
1399   if (fc != NULL) {
1400     fc->dontCoalesce();
1401     assert(fc->is_free(), "Should be free, but not coalescable");
1402     // Verify that the block offset table shows this to
1403     // be a single block, but not one which is unallocated.
1404     _bt.verify_single_block((HeapWord*)fc, fc->size());
1405     _bt.verify_not_unallocated((HeapWord*)fc, fc->size());
1406   }
1407   return fc;
1408 }
1409 
1410 oop CompactibleFreeListSpace::promote(oop obj, size_t obj_size) {
1411   assert(obj_size == (size_t)obj->size(), "bad obj_size passed in");
1412   assert_locked();
1413 
1414   // if we are tracking promotions, then first ensure space for
1415   // promotion (including spooling space for saving header if necessary).
1416   // then allocate and copy, then track promoted info if needed.
1417   // When tracking (see PromotionInfo::track()), the mark word may
1418   // be displaced and in this case restoration of the mark word
1419   // occurs in the (oop_since_save_marks_)iterate phase.
1420   if (_promoInfo.tracking() && !_promoInfo.ensure_spooling_space()) {
1421     return NULL;
1422   }
1423   // Call the allocate(size_t, bool) form directly to avoid the
1424   // additional call through the allocate(size_t) form.  Having
1425   // the compiler inline the call is problematic because allocate(size_t)
1426   // is a virtual method.
1427   HeapWord* res = allocate(adjustObjectSize(obj_size));
1428   if (res != NULL) {
1429     Copy::aligned_disjoint_words((HeapWord*)obj, res, obj_size);
1430     // if we should be tracking promotions, do so.
1431     if (_promoInfo.tracking()) {
1432         _promoInfo.track((PromotedObject*)res);
1433     }
1434   }
1435   return oop(res);
1436 }
1437 
1438 HeapWord*
1439 CompactibleFreeListSpace::getChunkFromSmallLinearAllocBlock(size_t size) {
1440   assert_locked();
1441   assert(size >= MinChunkSize, "minimum chunk size");
1442   assert(size <  _smallLinearAllocBlock._allocation_size_limit,
1443     "maximum from smallLinearAllocBlock");
1444   return getChunkFromLinearAllocBlock(&_smallLinearAllocBlock, size);
1445 }
1446 
1447 HeapWord*
1448 CompactibleFreeListSpace::getChunkFromLinearAllocBlock(LinearAllocBlock *blk,
1449                                                        size_t size) {
1450   assert_locked();
1451   assert(size >= MinChunkSize, "too small");
1452   HeapWord* res = NULL;
1453   // Try to do linear allocation from blk, making sure the block is non-empty.
1454   if (blk->_word_size == 0) {
1455     // We have probably been unable to fill this either in the prologue or
1456     // when it was exhausted at the last linear allocation. Bail out until
1457     // next time.
1458     assert(blk->_ptr == NULL, "consistency check");
1459     return NULL;
1460   }
1461   assert(blk->_word_size != 0 && blk->_ptr != NULL, "consistency check");
1462   res = getChunkFromLinearAllocBlockRemainder(blk, size);
1463   if (res != NULL) return res;
1464 
1465   // about to exhaust this linear allocation block
1466   if (blk->_word_size == size) { // exactly satisfied
1467     res = blk->_ptr;
1468     _bt.allocated(res, blk->_word_size);
1469   } else if (size + MinChunkSize <= blk->_refillSize) {
1470     size_t sz = blk->_word_size;
1471     // Update _unallocated_block if the size is such that the chunk would be
1472     // returned to the indexed free list.  All other chunks in the indexed
1473     // free lists are allocated from the dictionary, so _unallocated_block
1474     // has already been adjusted for them.  Do the adjustment here so that
1475     // the cost is incurred for all chunks added back to the indexed free lists.
1476     if (sz < SmallForDictionary) {
1477       _bt.allocated(blk->_ptr, sz);
1478     }
1479     // Return the chunk that isn't big enough, and then refill below.
1480     addChunkToFreeLists(blk->_ptr, sz);
1481     split_birth(sz);
1482     // Don't keep statistics on adding back chunk from a LinAB.
1483   } else {
1484     // A refilled block would not satisfy the request.
1485     return NULL;
1486   }
1487 
1488   blk->_ptr = NULL; blk->_word_size = 0;
1489   refillLinearAllocBlock(blk);
1490   assert(blk->_ptr == NULL || blk->_word_size >= size + MinChunkSize,
1491          "block was replenished");
1492   if (res != NULL) {
1493     split_birth(size);
1494     repairLinearAllocBlock(blk);
1495   } else if (blk->_ptr != NULL) {
1496     res = blk->_ptr;
1497     size_t blk_size = blk->_word_size;
1498     blk->_word_size -= size;
1499     blk->_ptr  += size;
1500     split_birth(size);
1501     repairLinearAllocBlock(blk);
1502     // Update BOT last so that other (parallel) GC threads see a consistent
1503     // view of the BOT and free blocks.
1504     // Above must occur before BOT is updated below.
1505     OrderAccess::storestore();
1506     _bt.split_block(res, blk_size, size);  // adjust block offset table
1507   }
1508   return res;
1509 }
1510 
1511 HeapWord*  CompactibleFreeListSpace::getChunkFromLinearAllocBlockRemainder(
1512                                         LinearAllocBlock* blk,
1513                                         size_t size) {
1514   assert_locked();
1515   assert(size >= MinChunkSize, "too small");
1516 
1517   HeapWord* res = NULL;
1518   // This is the common case.  Keep it simple.
1519   if (blk->_word_size >= size + MinChunkSize) {
1520     assert(blk->_ptr != NULL, "consistency check");
1521     res = blk->_ptr;
1522     // Note that the BOT is up-to-date for the linAB before allocation.  It
1523     // indicates the start of the linAB.  The split_block() updates the
1524     // BOT for the linAB after the allocation (indicates the start of the
1525     // next chunk to be allocated).
1526     size_t blk_size = blk->_word_size;
1527     blk->_word_size -= size;
1528     blk->_ptr  += size;
1529     split_birth(size);
1530     repairLinearAllocBlock(blk);
1531     // Update BOT last so that other (parallel) GC threads see a consistent
1532     // view of the BOT and free blocks.
1533     // Above must occur before BOT is updated below.
1534     OrderAccess::storestore();
1535     _bt.split_block(res, blk_size, size);  // adjust block offset table
1536     _bt.allocated(res, size);
1537   }
1538   return res;
1539 }
1540 
1541 FreeChunk*
1542 CompactibleFreeListSpace::getChunkFromIndexedFreeList(size_t size) {
1543   assert_locked();
1544   assert(size < SmallForDictionary, "just checking");
1545   FreeChunk* res;
1546   res = _indexedFreeList[size].get_chunk_at_head();
1547   if (res == NULL) {
1548     res = getChunkFromIndexedFreeListHelper(size);
1549   }
1550   _bt.verify_not_unallocated((HeapWord*) res, size);
1551   assert(res == NULL || res->size() == size, "Incorrect block size");
1552   return res;
1553 }
1554 
1555 FreeChunk*
1556 CompactibleFreeListSpace::getChunkFromIndexedFreeListHelper(size_t size,
1557   bool replenish) {
1558   assert_locked();
1559   FreeChunk* fc = NULL;
1560   if (size < SmallForDictionary) {
1561     assert(_indexedFreeList[size].head() == NULL ||
1562       _indexedFreeList[size].surplus() <= 0,
1563       "List for this size should be empty or under populated");
1564     // Try best fit in exact lists before replenishing the list
1565     if (!bestFitFirst() || (fc = bestFitSmall(size)) == NULL) {
1566       // Replenish list.
1567       //
1568       // Things tried that failed.
1569       //   Tried allocating out of the two LinAB's first before
1570       // replenishing lists.
1571       //   Tried small linAB of size 256 (size in indexed list)
1572       // and replenishing indexed lists from the small linAB.
1573       //
1574       FreeChunk* newFc = NULL;
1575       const size_t replenish_size = CMSIndexedFreeListReplenish * size;
1576       if (replenish_size < SmallForDictionary) {
1577         // Do not replenish from an underpopulated size.
1578         if (_indexedFreeList[replenish_size].surplus() > 0 &&
1579             _indexedFreeList[replenish_size].head() != NULL) {
1580           newFc = _indexedFreeList[replenish_size].get_chunk_at_head();
1581         } else if (bestFitFirst()) {
1582           newFc = bestFitSmall(replenish_size);
1583         }
1584       }
1585       if (newFc == NULL && replenish_size > size) {
1586         assert(CMSIndexedFreeListReplenish > 1, "ctl pt invariant");
1587         newFc = getChunkFromIndexedFreeListHelper(replenish_size, false);
1588       }
1589       // Note: The split-death stats for the block obtained above are
1590       // recorded below, precisely when we know we are actually going
1591       // to split it into more than one piece.
1592       if (newFc != NULL) {
1593         if  (replenish || CMSReplenishIntermediate) {
1594           // Replenish this list and return one block to caller.
1595           size_t i;
1596           FreeChunk *curFc, *nextFc;
1597           size_t num_blk = newFc->size() / size;
1598           assert(num_blk >= 1, "Smaller than requested?");
1599           assert(newFc->size() % size == 0, "Should be integral multiple of request");
1600           if (num_blk > 1) {
1601             // we are sure we will be splitting the block just obtained
1602             // into multiple pieces; record the split-death of the original
1603             splitDeath(replenish_size);
1604           }
1605           // carve up and link blocks 0, ..., num_blk - 2
1606           // The last chunk is not added to the lists but is returned as the
1607           // free chunk.
1608           for (curFc = newFc, nextFc = (FreeChunk*)((HeapWord*)curFc + size),
1609                i = 0;
1610                i < (num_blk - 1);
1611                curFc = nextFc, nextFc = (FreeChunk*)((HeapWord*)nextFc + size),
1612                i++) {
1613             curFc->set_size(size);
1614             // Don't record this as a return in order to try to
1615             // determine the "returns" from a GC.
1616             _bt.verify_not_unallocated((HeapWord*) curFc, size);
1617             _indexedFreeList[size].return_chunk_at_tail(curFc, false);
1618             _bt.mark_block((HeapWord*)curFc, size);
1619             split_birth(size);
1620             // Don't record the initial population of the indexed list
1621             // as a split birth.
1622           }
1623 
1624           // check that the arithmetic was OK above
1625           assert((HeapWord*)nextFc == (HeapWord*)newFc + num_blk*size,
1626             "inconsistency in carving newFc");
1627           curFc->set_size(size);
1628           _bt.mark_block((HeapWord*)curFc, size);
1629           split_birth(size);
1630           fc = curFc;
1631         } else {
1632           // Return entire block to caller
1633           fc = newFc;
1634         }
1635       }
1636     }
1637   } else {
1638     // Get a free chunk from the free chunk dictionary to be returned to
1639     // replenish the indexed free list.
1640     fc = getChunkFromDictionaryExact(size);
1641   }
1642   // assert(fc == NULL || fc->is_free(), "Should be returning a free chunk");
1643   return fc;
1644 }
1645 
1646 FreeChunk*
1647 CompactibleFreeListSpace::getChunkFromDictionary(size_t size) {
1648   assert_locked();
1649   FreeChunk* fc = _dictionary->get_chunk(size,
1650                                          FreeBlockDictionary<FreeChunk>::atLeast);
1651   if (fc == NULL) {
1652     return NULL;
1653   }
1654   _bt.allocated((HeapWord*)fc, fc->size());
1655   if (fc->size() >= size + MinChunkSize) {
1656     fc = splitChunkAndReturnRemainder(fc, size);
1657   }
1658   assert(fc->size() >= size, "chunk too small");
1659   assert(fc->size() < size + MinChunkSize, "chunk too big");
1660   _bt.verify_single_block((HeapWord*)fc, fc->size());
1661   return fc;
1662 }
1663 
1664 FreeChunk*
1665 CompactibleFreeListSpace::getChunkFromDictionaryExact(size_t size) {
1666   assert_locked();
1667   FreeChunk* fc = _dictionary->get_chunk(size,
1668                                          FreeBlockDictionary<FreeChunk>::atLeast);
1669   if (fc == NULL) {
1670     return fc;
1671   }
1672   _bt.allocated((HeapWord*)fc, fc->size());
1673   if (fc->size() == size) {
1674     _bt.verify_single_block((HeapWord*)fc, size);
1675     return fc;
1676   }
1677   assert(fc->size() > size, "get_chunk() guarantee");
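       // If the remainder left after a split would be smaller than
       // MinChunkSize, the chunk cannot be split (the remainder could not
       // stand alone as a free chunk); return it and request one that is
       // large enough to leave a viable remainder.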
1678   if (fc->size() < size + MinChunkSize) {
1679     // Return the chunk to the dictionary and go get a bigger one.
1680     returnChunkToDictionary(fc);
1681     fc = _dictionary->get_chunk(size + MinChunkSize,
1682                                 FreeBlockDictionary<FreeChunk>::atLeast);
1683     if (fc == NULL) {
1684       return NULL;
1685     }
1686     _bt.allocated((HeapWord*)fc, fc->size());
1687   }
1688   assert(fc->size() >= size + MinChunkSize, "tautology");
1689   fc = splitChunkAndReturnRemainder(fc, size);
1690   assert(fc->size() == size, "chunk is wrong size");
1691   _bt.verify_single_block((HeapWord*)fc, size);
1692   return fc;
1693 }
1694 
1695 void
1696 CompactibleFreeListSpace::returnChunkToDictionary(FreeChunk* chunk) {
1697   assert_locked();
1698 
1699   size_t size = chunk->size();
1700   _bt.verify_single_block((HeapWord*)chunk, size);
1701   // adjust _unallocated_block downward, as necessary
1702   _bt.freed((HeapWord*)chunk, size);
1703   _dictionary->return_chunk(chunk);
1704 #ifndef PRODUCT
1705   if (CMSCollector::abstract_state() != CMSCollector::Sweeping) {
1706     TreeChunk<FreeChunk, AdaptiveFreeList>* tc = TreeChunk<FreeChunk, AdaptiveFreeList>::as_TreeChunk(chunk);
1707     TreeList<FreeChunk, AdaptiveFreeList>* tl = tc->list();
1708     tl->verify_stats();
1709   }
1710 #endif // PRODUCT
1711 }
1712 
1713 void
1714 CompactibleFreeListSpace::returnChunkToFreeList(FreeChunk* fc) {
1715   assert_locked();
1716   size_t size = fc->size();
1717   _bt.verify_single_block((HeapWord*) fc, size);
1718   _bt.verify_not_unallocated((HeapWord*) fc, size);
1719   if (_adaptive_freelists) {
1720     _indexedFreeList[size].return_chunk_at_tail(fc);
1721   } else {
1722     _indexedFreeList[size].return_chunk_at_head(fc);
1723   }
1724 #ifndef PRODUCT
1725   if (CMSCollector::abstract_state() != CMSCollector::Sweeping) {
1726      _indexedFreeList[size].verify_stats();
1727   }
1728 #endif // PRODUCT
1729 }
1730 
1731 // Add the chunk to the free lists, coalescing it with the largest
1732 // dictionary block if that block ends exactly where the chunk begins,
1733 // and update BOT and census data. We would of course have preferred
1734 // to coalesce it with the last block in the space, but it's currently
1735 // less expensive to find the largest block than it is to find the last.
1736 void
1737 CompactibleFreeListSpace::addChunkToFreeListsAtEndRecordingStats(
1738   HeapWord* chunk, size_t     size) {
1739   // check that the chunk does lie in this space!
1740   assert(chunk != NULL && is_in_reserved(chunk), "Not in this space!");
1741   // One of the parallel gc task threads may be here
1742   // whilst others are allocating.
1743   Mutex* lock = NULL;
1744   if (ParallelGCThreads != 0) {
1745     lock = &_parDictionaryAllocLock;
1746   }
1747   FreeChunk* ec;
1748   {
1749     MutexLockerEx x(lock, Mutex::_no_safepoint_check_flag);
1750     ec = dictionary()->find_largest_dict();  // get largest block
1751     if (ec != NULL && ec->end() == (uintptr_t*) chunk) {
1752       // It's a coterminal block - we can coalesce.
1753       size_t old_size = ec->size();
1754       coalDeath(old_size);
1755       removeChunkFromDictionary(ec);
1756       size += old_size;
1757     } else {
1758       ec = (FreeChunk*)chunk;
1759     }
1760   }
1761   ec->set_size(size);
1762   debug_only(ec->mangleFreed(size));
1763   if (size < SmallForDictionary && ParallelGCThreads != 0) {
1764     lock = _indexedFreeListParLocks[size];
1765   }
1766   MutexLockerEx x(lock, Mutex::_no_safepoint_check_flag);
1767   addChunkAndRepairOffsetTable((HeapWord*)ec, size, true);
1768   // record the birth under the lock since the recording involves
1769   // manipulation of the list on which the chunk lives and
1770   // if the chunk is allocated and is the last on the list,
1771   // the list can go away.
1772   coalBirth(size);
1773 }
1774 
1775 void
1776 CompactibleFreeListSpace::addChunkToFreeLists(HeapWord* chunk,
1777                                               size_t     size) {
1778   // check that the chunk does lie in this space!
1779   assert(chunk != NULL && is_in_reserved(chunk), "Not in this space!");
1780   assert_locked();
1781   _bt.verify_single_block(chunk, size);
1782 
1783   FreeChunk* fc = (FreeChunk*) chunk;
1784   fc->set_size(size);
1785   debug_only(fc->mangleFreed(size));
1786   if (size < SmallForDictionary) {
1787     returnChunkToFreeList(fc);
1788   } else {
1789     returnChunkToDictionary(fc);
1790   }
1791 }
1792 
1793 void
1794 CompactibleFreeListSpace::addChunkAndRepairOffsetTable(HeapWord* chunk,
1795   size_t size, bool coalesced) {
1796   assert_locked();
1797   assert(chunk != NULL, "null chunk");
1798   if (coalesced) {
1799     // repair BOT
1800     _bt.single_block(chunk, size);
1801   }
1802   addChunkToFreeLists(chunk, size);
1803 }
1804 
1805 // We _must_ find the purported chunk on our free lists;
1806 // we assert if we don't.
1807 void
1808 CompactibleFreeListSpace::removeFreeChunkFromFreeLists(FreeChunk* fc) {
1809   size_t size = fc->size();
1810   assert_locked();
1811   debug_only(verifyFreeLists());
1812   if (size < SmallForDictionary) {
1813     removeChunkFromIndexedFreeList(fc);
1814   } else {
1815     removeChunkFromDictionary(fc);
1816   }
1817   _bt.verify_single_block((HeapWord*)fc, size);
1818   debug_only(verifyFreeLists());
1819 }
1820 
1821 void
1822 CompactibleFreeListSpace::removeChunkFromDictionary(FreeChunk* fc) {
1823   size_t size = fc->size();
1824   assert_locked();
1825   assert(fc != NULL, "null chunk");
1826   _bt.verify_single_block((HeapWord*)fc, size);
1827   _dictionary->remove_chunk(fc);
1828   // adjust _unallocated_block upward, as necessary
1829   _bt.allocated((HeapWord*)fc, size);
1830 }
1831 
1832 void
1833 CompactibleFreeListSpace::removeChunkFromIndexedFreeList(FreeChunk* fc) {
1834   assert_locked();
1835   size_t size = fc->size();
1836   _bt.verify_single_block((HeapWord*)fc, size);
1837   NOT_PRODUCT(
1838     if (FLSVerifyIndexTable) {
1839       verifyIndexedFreeList(size);
1840     }
1841   )
1842   _indexedFreeList[size].remove_chunk(fc);
1843   NOT_PRODUCT(
1844     if (FLSVerifyIndexTable) {
1845       verifyIndexedFreeList(size);
1846     }
1847   )
1848 }
1849 
1850 FreeChunk* CompactibleFreeListSpace::bestFitSmall(size_t numWords) {
1851   /* A hint is the next larger size that has a surplus.
1852      Start search at a size large enough to guarantee that
1853      the excess is >= MinChunkSize. */
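       /* Example with hypothetical sizes: for an aligned start of 24 words the
          walk follows _indexedFreeList[24].hint(), say 40; if the 40-word list
          has a surplus chunk, the hint for 24 is reset to 40 and the chunk is
          split by getFromListGreater(); otherwise the walk continues from that
          list's hint. */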
1854   size_t start = align_object_size(numWords + MinChunkSize);
1855   if (start < IndexSetSize) {
1856     AdaptiveFreeList<FreeChunk>* it   = _indexedFreeList;
1857     size_t    hint = _indexedFreeList[start].hint();
1858     while (hint < IndexSetSize) {
1859       assert(hint % MinObjAlignment == 0, "hint should be aligned");
1860       AdaptiveFreeList<FreeChunk> *fl = &_indexedFreeList[hint];
1861       if (fl->surplus() > 0 && fl->head() != NULL) {
1862         // Found a list with surplus, reset original hint
1863         // and split out a free chunk which is returned.
1864         _indexedFreeList[start].set_hint(hint);
1865         FreeChunk* res = getFromListGreater(fl, numWords);
1866         assert(res == NULL || res->is_free(),
1867           "Should be returning a free chunk");
1868         return res;
1869       }
1870       hint = fl->hint(); /* keep looking */
1871     }
1872     /* None found. */
1873     it[start].set_hint(IndexSetSize);
1874   }
1875   return NULL;
1876 }
1877 
1878 /* Requires fl->size >= numWords + MinChunkSize */
1879 FreeChunk* CompactibleFreeListSpace::getFromListGreater(AdaptiveFreeList<FreeChunk>* fl,
1880   size_t numWords) {
1881   FreeChunk *curr = fl->head();
1882   assert(curr != NULL, "List is empty");
1883   assert(numWords >= MinChunkSize, "Word size is too small");
1884   size_t oldNumWords = curr->size();
1885   assert(oldNumWords >= numWords + MinChunkSize,
1886          "Size of chunks in the list is too small");
1887 
1888   fl->remove_chunk(curr);
1889   // recorded indirectly by splitChunkAndReturnRemainder -
1890   // smallSplit(oldNumWords, numWords);
1891   FreeChunk* new_chunk = splitChunkAndReturnRemainder(curr, numWords);
1892   // Does anything have to be done for the remainder in terms of
1893   // fixing the card table?
1894   assert(new_chunk == NULL || new_chunk->is_free(),
1895     "Should be returning a free chunk");
1896   return new_chunk;
1897 }
1898 
1899 FreeChunk*
1900 CompactibleFreeListSpace::splitChunkAndReturnRemainder(FreeChunk* chunk,
1901   size_t new_size) {
1902   assert_locked();
1903   size_t size = chunk->size();
1904   assert(size > new_size, "Split from a smaller block?");
1905   assert(is_aligned(chunk), "alignment problem");
1906   assert(size == adjustObjectSize(size), "alignment problem");
1907   size_t rem_size = size - new_size;
1908   assert(rem_size == adjustObjectSize(rem_size), "alignment problem");
1909   assert(rem_size >= MinChunkSize, "Free chunk smaller than minimum");
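       // Layout after the split: [chunk, chunk + new_size) stays with the
       // caller; [chunk + new_size, chunk + size) becomes the free remainder
       // "ffc" below.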
1910   FreeChunk* ffc = (FreeChunk*)((HeapWord*)chunk + new_size);
1911   assert(is_aligned(ffc), "alignment problem");
1912   ffc->set_size(rem_size);
1913   ffc->link_next(NULL);
1914   ffc->link_prev(NULL); // Mark as a free block for other (parallel) GC threads.
1915   // Above must occur before BOT is updated below.
1916   // adjust block offset table
1917   OrderAccess::storestore();
1918   assert(chunk->is_free() && ffc->is_free(), "Error");
1919   _bt.split_block((HeapWord*)chunk, chunk->size(), new_size);
1920   if (rem_size < SmallForDictionary) {
1921     bool is_par = (SharedHeap::heap()->n_par_threads() > 0);
1922     if (is_par) _indexedFreeListParLocks[rem_size]->lock();
1923     assert(!is_par ||
1924            (SharedHeap::heap()->n_par_threads() ==
1925             SharedHeap::heap()->workers()->active_workers()), "Mismatch");
1926     returnChunkToFreeList(ffc);
1927     split(size, rem_size);
1928     if (is_par) _indexedFreeListParLocks[rem_size]->unlock();
1929   } else {
1930     returnChunkToDictionary(ffc);
1931     split(size, rem_size);
1932   }
1933   chunk->set_size(new_size);
1934   return chunk;
1935 }
1936 
1937 void
1938 CompactibleFreeListSpace::sweep_completed() {
1939   // Now that space is probably plentiful, refill linear
1940   // allocation blocks as needed.
1941   refillLinearAllocBlocksIfNeeded();
1942 }
1943 
1944 void
1945 CompactibleFreeListSpace::gc_prologue() {
1946   assert_locked();
1947   if (PrintFLSStatistics != 0) {
1948     gclog_or_tty->print("Before GC:\n");
1949     reportFreeListStatistics();
1950   }
1951   refillLinearAllocBlocksIfNeeded();
1952 }
1953 
1954 void
1955 CompactibleFreeListSpace::gc_epilogue() {
1956   assert_locked();
1957   if (PrintGCDetails && Verbose && !_adaptive_freelists) {
1958     if (_smallLinearAllocBlock._word_size == 0)
1959       warning("CompactibleFreeListSpace(epilogue):: Linear allocation failure");
1960   }
1961   assert(_promoInfo.noPromotions(), "_promoInfo inconsistency");
1962   _promoInfo.stopTrackingPromotions();
1963   repairLinearAllocationBlocks();
1964   // Print Space's stats
1965   if (PrintFLSStatistics != 0) {
1966     gclog_or_tty->print("After GC:\n");
1967     reportFreeListStatistics();
1968   }
1969 }
1970 
1971 // Iteration support, mostly delegated from a CMS generation
1972 
1973 void CompactibleFreeListSpace::save_marks() {
1974   assert(Thread::current()->is_VM_thread(),
1975          "Global variable should only be set when single-threaded");
1976   // Mark the "end" of the used space at the time of this call;
1977   // note, however, that promoted objects from this point
1978   // on are tracked in the _promoInfo below.
1979   set_saved_mark_word(unallocated_block());
1980 #ifdef ASSERT
1981   // Check the sanity of save_marks() etc.
1982   MemRegion ur    = used_region();
1983   MemRegion urasm = used_region_at_save_marks();
1984   assert(ur.contains(urasm),
1985          err_msg(" Error at save_marks(): [" PTR_FORMAT "," PTR_FORMAT ")"
1986                  " should contain [" PTR_FORMAT "," PTR_FORMAT ")",
1987                  ur.start(), ur.end(), urasm.start(), urasm.end()));
1988 #endif
1989   // inform allocator that promotions should be tracked.
1990   assert(_promoInfo.noPromotions(), "_promoInfo inconsistency");
1991   _promoInfo.startTrackingPromotions();
1992 }
1993 
1994 bool CompactibleFreeListSpace::no_allocs_since_save_marks() {
1995   assert(_promoInfo.tracking(), "No preceding save_marks?");
1996   assert(SharedHeap::heap()->n_par_threads() == 0,
1997          "Shouldn't be called if using parallel gc.");
1998   return _promoInfo.noPromotions();
1999 }
2000 
2001 #define CFLS_OOP_SINCE_SAVE_MARKS_DEFN(OopClosureType, nv_suffix)           \
2002                                                                             \
2003 void CompactibleFreeListSpace::                                             \
2004 oop_since_save_marks_iterate##nv_suffix(OopClosureType* blk) {              \
2005   assert(SharedHeap::heap()->n_par_threads() == 0,                          \
2006          "Shouldn't be called (yet) during parallel part of gc.");          \
2007   _promoInfo.promoted_oops_iterate##nv_suffix(blk);                         \
2008   /*                                                                        \
2009    * This also restores any displaced headers and removes the elements from \
2010    * the iteration set as they are processed, so that we have a clean slate \
2011    * at the end of the iteration. Note, thus, that if new objects are       \
2012    * promoted as a result of the iteration they are iterated over as well.  \
2013    */                                                                       \
2014   assert(_promoInfo.noPromotions(), "_promoInfo inconsistency");            \
2015 }
2016 
2017 ALL_SINCE_SAVE_MARKS_CLOSURES(CFLS_OOP_SINCE_SAVE_MARKS_DEFN)
2018 
2019 
2020 void CompactibleFreeListSpace::object_iterate_since_last_GC(ObjectClosure* cl) {
2021   // ugghh... how would one do this efficiently for a non-contiguous space?
2022   guarantee(false, "NYI");
2023 }
2024 
2025 bool CompactibleFreeListSpace::linearAllocationWouldFail() const {
2026   return _smallLinearAllocBlock._word_size == 0;
2027 }
2028 
2029 void CompactibleFreeListSpace::repairLinearAllocationBlocks() {
2030   // Fix up linear allocation blocks to look like free blocks
2031   repairLinearAllocBlock(&_smallLinearAllocBlock);
2032 }
2033 
2034 void CompactibleFreeListSpace::repairLinearAllocBlock(LinearAllocBlock* blk) {
2035   assert_locked();
2036   if (blk->_ptr != NULL) {
2037     assert(blk->_word_size != 0 && blk->_word_size >= MinChunkSize,
2038            "Minimum block size requirement");
2039     FreeChunk* fc = (FreeChunk*)(blk->_ptr);
2040     fc->set_size(blk->_word_size);
2041     fc->link_prev(NULL);   // mark as free
2042     fc->dontCoalesce();
2043     assert(fc->is_free(), "just marked it free");
2044     assert(fc->cantCoalesce(), "just marked it uncoalescable");
2045   }
2046 }
2047 
2048 void CompactibleFreeListSpace::refillLinearAllocBlocksIfNeeded() {
2049   assert_locked();
2050   if (_smallLinearAllocBlock._ptr == NULL) {
2051     assert(_smallLinearAllocBlock._word_size == 0,
2052       "Size of linAB should be zero if the ptr is NULL");
2053     // Reset the linAB refill and allocation size limit.
2054     _smallLinearAllocBlock.set(0, 0, 1024*SmallForLinearAlloc, SmallForLinearAlloc);
2055   }
2056   refillLinearAllocBlockIfNeeded(&_smallLinearAllocBlock);
2057 }
2058 
2059 void
2060 CompactibleFreeListSpace::refillLinearAllocBlockIfNeeded(LinearAllocBlock* blk) {
2061   assert_locked();
2062   assert((blk->_ptr == NULL && blk->_word_size == 0) ||
2063          (blk->_ptr != NULL && blk->_word_size >= MinChunkSize),
2064          "blk invariant");
2065   if (blk->_ptr == NULL) {
2066     refillLinearAllocBlock(blk);
2067   }
2068   if (PrintMiscellaneous && Verbose) {
2069     if (blk->_word_size == 0) {
2070       warning("CompactibleFreeListSpace(prologue):: Linear allocation failure");
2071     }
2072   }
2073 }
2074 
2075 void
2076 CompactibleFreeListSpace::refillLinearAllocBlock(LinearAllocBlock* blk) {
2077   assert_locked();
2078   assert(blk->_word_size == 0 && blk->_ptr == NULL,
2079          "linear allocation block should be empty");
2080   FreeChunk* fc;
2081   if (blk->_refillSize < SmallForDictionary &&
2082       (fc = getChunkFromIndexedFreeList(blk->_refillSize)) != NULL) {
2083     // A linAB's strategy might be to use small sizes to reduce
2084     // fragmentation but still get the benefits of allocation from a
2085     // linAB.
2086   } else {
2087     fc = getChunkFromDictionary(blk->_refillSize);
2088   }
2089   if (fc != NULL) {
2090     blk->_ptr  = (HeapWord*)fc;
2091     blk->_word_size = fc->size();
2092     fc->dontCoalesce();   // to prevent sweeper from sweeping us up
2093   }
2094 }
2095 
2096 // Support for concurrent collection policy decisions.
2097 bool CompactibleFreeListSpace::should_concurrent_collect() const {
2098   // In the future we might want to add in fragmentation stats --
2099   // including erosion of the "mountain" into this decision as well.
2100   return !adaptive_freelists() && linearAllocationWouldFail();
2101 }
2102 
2103 // Support for compaction
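     // The phases below are driven by the shared SCAN_AND_FORWARD,
     // SCAN_AND_ADJUST_POINTERS and SCAN_AND_COMPACT macros:
     // prepare_for_compaction() installs forwarding addresses,
     // adjust_pointers() rewrites references to those addresses, and
     // compact() copies objects to their new locations.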
2104 
2105 void CompactibleFreeListSpace::prepare_for_compaction(CompactPoint* cp) {
2106   SCAN_AND_FORWARD(cp,end,block_is_obj,block_size);
2107   // prepare_for_compaction() uses the space between live objects
2108   // so that later phases can skip dead space quickly.  So verification
2109   // of the free lists doesn't work after this point.
2110 }
2111 
2112 #define obj_size(q) adjustObjectSize(oop(q)->size())
2113 #define adjust_obj_size(s) adjustObjectSize(s)
2114 
2115 void CompactibleFreeListSpace::adjust_pointers() {
2116   // In other versions of adjust_pointers(), a bail out
2117   // based on the amount of live data in the generation
2118   // (i.e., if 0, bail out) may be used.
2119   // Cannot test used() == 0 here because the free lists have already
2120   // been mangled by the compaction.
2121 
2122   SCAN_AND_ADJUST_POINTERS(adjust_obj_size);
2123   // See note about verification in prepare_for_compaction().
2124 }
2125 
2126 void CompactibleFreeListSpace::compact() {
2127   SCAN_AND_COMPACT(obj_size);
2128 }
2129 
2130 // fragmentation_metric = 1 - [sum of (fbs**2) / (sum of fbs)**2]
2131 // where fbs is free block sizes
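     // For example, with hypothetical free lists holding two 10-word blocks the
     // metric is 1 - (10*10 + 10*10)/(10 + 10)^2 = 0.5, whereas a single
     // 20-word block gives 1 - (20*20)/(20*20) = 0.0; many small blocks push
     // the metric toward 1.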
2132 double CompactibleFreeListSpace::flsFrag() const {
2133   size_t itabFree = totalSizeInIndexedFreeLists();
2134   double frag = 0.0;
2135   size_t i;
2136 
2137   for (i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
2138     double sz  = i;
2139     frag      += _indexedFreeList[i].count() * (sz * sz);
2140   }
2141 
2142   double totFree = itabFree +
2143                    _dictionary->total_chunk_size(DEBUG_ONLY(freelistLock()));
2144   if (totFree > 0) {
2145     frag = ((frag + _dictionary->sum_of_squared_block_sizes()) /
2146             (totFree * totFree));
2147     frag = (double)1.0  - frag;
2148   } else {
2149     assert(frag == 0.0, "Follows from totFree == 0");
2150   }
2151   return frag;
2152 }
2153 
2154 void CompactibleFreeListSpace::beginSweepFLCensus(
2155   float inter_sweep_current,
2156   float inter_sweep_estimate,
2157   float intra_sweep_estimate) {
2158   assert_locked();
2159   size_t i;
2160   for (i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
2161     AdaptiveFreeList<FreeChunk>* fl    = &_indexedFreeList[i];
2162     if (PrintFLSStatistics > 1) {
2163       gclog_or_tty->print("size[" SIZE_FORMAT "] : ", i);
2164     }
2165     fl->compute_desired(inter_sweep_current, inter_sweep_estimate, intra_sweep_estimate);
2166     fl->set_coal_desired((ssize_t)((double)fl->desired() * CMSSmallCoalSurplusPercent));
2167     fl->set_before_sweep(fl->count());
2168     fl->set_bfr_surp(fl->surplus());
2169   }
2170   _dictionary->begin_sweep_dict_census(CMSLargeCoalSurplusPercent,
2171                                     inter_sweep_current,
2172                                     inter_sweep_estimate,
2173                                     intra_sweep_estimate);
2174 }
2175 
2176 void CompactibleFreeListSpace::setFLSurplus() {
2177   assert_locked();
2178   size_t i;
2179   for (i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
2180     AdaptiveFreeList<FreeChunk> *fl = &_indexedFreeList[i];
2181     fl->set_surplus(fl->count() -
2182                     (ssize_t)((double)fl->desired() * CMSSmallSplitSurplusPercent));
2183   }
2184 }
2185 
2186 void CompactibleFreeListSpace::setFLHints() {
2187   assert_locked();
2188   size_t i;
2189   size_t h = IndexSetSize;
2190   for (i = IndexSetSize - 1; i != 0; i -= IndexSetStride) {
2191     AdaptiveFreeList<FreeChunk> *fl = &_indexedFreeList[i];
2192     fl->set_hint(h);
2193     if (fl->surplus() > 0) {
2194       h = i;
2195     }
2196   }
2197 }
2198 
2199 void CompactibleFreeListSpace::clearFLCensus() {
2200   assert_locked();
2201   size_t i;
2202   for (i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
2203     AdaptiveFreeList<FreeChunk> *fl = &_indexedFreeList[i];
2204     fl->set_prev_sweep(fl->count());
2205     fl->set_coal_births(0);
2206     fl->set_coal_deaths(0);
2207     fl->set_split_births(0);
2208     fl->set_split_deaths(0);
2209   }
2210 }
2211 
2212 void CompactibleFreeListSpace::endSweepFLCensus(size_t sweep_count) {
2213   if (PrintFLSStatistics > 0) {
2214     HeapWord* largestAddr = (HeapWord*) dictionary()->find_largest_dict();
2215     gclog_or_tty->print_cr("CMS: Large block " PTR_FORMAT,
2216                            largestAddr);
2217   }
2218   setFLSurplus();
2219   setFLHints();
2220   if (PrintGC && PrintFLSCensus > 0) {
2221     printFLCensus(sweep_count);
2222   }
2223   clearFLCensus();
2224   assert_locked();
2225   _dictionary->end_sweep_dict_census(CMSLargeSplitSurplusPercent);
2226 }
2227 
2228 bool CompactibleFreeListSpace::coalOverPopulated(size_t size) {
2229   if (size < SmallForDictionary) {
2230     AdaptiveFreeList<FreeChunk> *fl = &_indexedFreeList[size];
2231     return (fl->coal_desired() < 0) ||
2232            ((int)fl->count() > fl->coal_desired());
2233   } else {
2234     return dictionary()->coal_dict_over_populated(size);
2235   }
2236 }
2237 
2238 void CompactibleFreeListSpace::smallCoalBirth(size_t size) {
2239   assert(size < SmallForDictionary, "Size too large for indexed list");
2240   AdaptiveFreeList<FreeChunk> *fl = &_indexedFreeList[size];
2241   fl->increment_coal_births();
2242   fl->increment_surplus();
2243 }
2244 
2245 void CompactibleFreeListSpace::smallCoalDeath(size_t size) {
2246   assert(size < SmallForDictionary, "Size too large for indexed list");
2247   AdaptiveFreeList<FreeChunk> *fl = &_indexedFreeList[size];
2248   fl->increment_coal_deaths();
2249   fl->decrement_surplus();
2250 }
2251 
2252 void CompactibleFreeListSpace::coalBirth(size_t size) {
2253   if (size  < SmallForDictionary) {
2254     smallCoalBirth(size);
2255   } else {
2256     dictionary()->dict_census_update(size,
2257                                    false /* split */,
2258                                    true /* birth */);
2259   }
2260 }
2261 
2262 void CompactibleFreeListSpace::coalDeath(size_t size) {
2263   if (size < SmallForDictionary) {
2264     smallCoalDeath(size);
2265   } else {
2266     dictionary()->dict_census_update(size,
2267                                    false /* split */,
2268                                    false /* birth */);
2269   }
2270 }
2271 
2272 void CompactibleFreeListSpace::smallSplitBirth(size_t size) {
2273   assert(size < SmallForDictionary, "Size too large for indexed list");
2274   AdaptiveFreeList<FreeChunk> *fl = &_indexedFreeList[size];
2275   fl->increment_split_births();
2276   fl->increment_surplus();
2277 }
2278 
2279 void CompactibleFreeListSpace::smallSplitDeath(size_t size) {
2280   assert(size < SmallForDictionary, "Size too large for indexed list");
2281   AdaptiveFreeList<FreeChunk> *fl = &_indexedFreeList[size];
2282   fl->increment_split_deaths();
2283   fl->decrement_surplus();
2284 }
2285 
2286 void CompactibleFreeListSpace::split_birth(size_t size) {
2287   if (size  < SmallForDictionary) {
2288     smallSplitBirth(size);
2289   } else {
2290     dictionary()->dict_census_update(size,
2291                                    true /* split */,
2292                                    true /* birth */);
2293   }
2294 }
2295 
2296 void CompactibleFreeListSpace::splitDeath(size_t size) {
2297   if (size  < SmallForDictionary) {
2298     smallSplitDeath(size);
2299   } else {
2300     dictionary()->dict_census_update(size,
2301                                    true /* split */,
2302                                    false /* birth */);
2303   }
2304 }
2305 
2306 void CompactibleFreeListSpace::split(size_t from, size_t to1) {
2307   size_t to2 = from - to1;
2308   splitDeath(from);
2309   split_birth(to1);
2310   split_birth(to2);
2311 }
2312 
2313 void CompactibleFreeListSpace::print() const {
2314   print_on(tty);
2315 }
2316 
2317 void CompactibleFreeListSpace::prepare_for_verify() {
2318   assert_locked();
2319   repairLinearAllocationBlocks();
2320   // Verify that the SpoolBlocks look like free blocks of
2321   // appropriate sizes... To be done ...
2322 }
2323 
2324 class VerifyAllBlksClosure: public BlkClosure {
2325  private:
2326   const CompactibleFreeListSpace* _sp;
2327   const MemRegion                 _span;
2328   HeapWord*                       _last_addr;
2329   size_t                          _last_size;
2330   bool                            _last_was_obj;
2331   bool                            _last_was_live;
2332 
2333  public:
2334   VerifyAllBlksClosure(const CompactibleFreeListSpace* sp,
2335     MemRegion span) :  _sp(sp), _span(span),
2336                        _last_addr(NULL), _last_size(0),
2337                        _last_was_obj(false), _last_was_live(false) { }
2338 
2339   virtual size_t do_blk(HeapWord* addr) {
2340     size_t res;
2341     bool   was_obj  = false;
2342     bool   was_live = false;
2343     if (_sp->block_is_obj(addr)) {
2344       was_obj = true;
2345       oop p = oop(addr);
2346       guarantee(p->is_oop(), "Should be an oop");
2347       res = _sp->adjustObjectSize(p->size());
2348       if (_sp->obj_is_alive(addr)) {
2349         was_live = true;
2350         p->verify();
2351       }
2352     } else {
2353       FreeChunk* fc = (FreeChunk*)addr;
2354       res = fc->size();
2355       if (FLSVerifyLists && !fc->cantCoalesce()) {
2356         guarantee(_sp->verify_chunk_in_free_list(fc),
2357                   "Chunk should be on a free list");
2358       }
2359     }
2360     if (res == 0) {
2361       gclog_or_tty->print_cr("Livelock: no rank reduction!");
2362       gclog_or_tty->print_cr(
2363         " Current:  addr = " PTR_FORMAT ", size = " SIZE_FORMAT ", obj = %s, live = %s \n"
2364         " Previous: addr = " PTR_FORMAT ", size = " SIZE_FORMAT ", obj = %s, live = %s \n",
2365         addr,       res,        was_obj      ?"true":"false", was_live      ?"true":"false",
2366         _last_addr, _last_size, _last_was_obj?"true":"false", _last_was_live?"true":"false");
2367       _sp->print_on(gclog_or_tty);
2368       guarantee(false, "Seppuku!");
2369     }
2370     _last_addr = addr;
2371     _last_size = res;
2372     _last_was_obj  = was_obj;
2373     _last_was_live = was_live;
2374     return res;
2375   }
2376 };
2377 
2378 class VerifyAllOopsClosure: public OopClosure {
2379  private:
2380   const CMSCollector*             _collector;
2381   const CompactibleFreeListSpace* _sp;
2382   const MemRegion                 _span;
2383   const bool                      _past_remark;
2384   const CMSBitMap*                _bit_map;
2385 
2386  protected:
2387   void do_oop(void* p, oop obj) {
2388     if (_span.contains(obj)) { // the interior oop points into CMS heap
2389       if (!_span.contains(p)) { // reference from outside CMS heap
2390         // Should be a valid object; the first disjunct below allows
2391         // us to sidestep an assertion in block_is_obj() that insists
2392         // that p be in _sp. Note that several generations (and spaces)
2393         // are spanned by _span (CMS heap) above.
2394         guarantee(!_sp->is_in_reserved(obj) ||
2395                   _sp->block_is_obj((HeapWord*)obj),
2396                   "Should be an object");
2397         guarantee(obj->is_oop(), "Should be an oop");
2398         obj->verify();
2399         if (_past_remark) {
2400           // Remark has been completed, the object should be marked
2401           _bit_map->isMarked((HeapWord*)obj);
2402         }
2403       } else { // reference within CMS heap
2404         if (_past_remark) {
2405           // Remark has been completed -- so the referent should have
2406           // been marked, if referring object is.
2407           if (_bit_map->isMarked(_collector->block_start(p))) {
2408             guarantee(_bit_map->isMarked((HeapWord*)obj), "Marking error?");
2409           }
2410         }
2411       }
2412     } else if (_sp->is_in_reserved(p)) {
2413       // the reference is from FLS, and points out of FLS
2414       guarantee(obj->is_oop(), "Should be an oop");
2415       obj->verify();
2416     }
2417   }
2418 
2419   template <class T> void do_oop_work(T* p) {
2420     T heap_oop = oopDesc::load_heap_oop(p);
2421     if (!oopDesc::is_null(heap_oop)) {
2422       oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
2423       do_oop(p, obj);
2424     }
2425   }
2426 
2427  public:
2428   VerifyAllOopsClosure(const CMSCollector* collector,
2429     const CompactibleFreeListSpace* sp, MemRegion span,
2430     bool past_remark, CMSBitMap* bit_map) :
2431     _collector(collector), _sp(sp), _span(span),
2432     _past_remark(past_remark), _bit_map(bit_map) { }
2433 
2434   virtual void do_oop(oop* p)       { VerifyAllOopsClosure::do_oop_work(p); }
2435   virtual void do_oop(narrowOop* p) { VerifyAllOopsClosure::do_oop_work(p); }
2436 };
2437 
2438 void CompactibleFreeListSpace::verify() const {
2439   assert_lock_strong(&_freelistLock);
2440   verify_objects_initialized();
2441   MemRegion span = _collector->_span;
2442   bool past_remark = (_collector->abstract_state() ==
2443                       CMSCollector::Sweeping);
2444 
2445   ResourceMark rm;
2446   HandleMark  hm;
2447 
2448   // Check integrity of CFL data structures
2449   _promoInfo.verify();
2450   _dictionary->verify();
2451   if (FLSVerifyIndexTable) {
2452     verifyIndexedFreeLists();
2453   }
2454   // Check integrity of all objects and free blocks in space
2455   {
2456     VerifyAllBlksClosure cl(this, span);
2457     ((CompactibleFreeListSpace*)this)->blk_iterate(&cl);  // cast off const
2458   }
2459   // Check that all references in the heap to FLS
2460   // are to valid objects in FLS or that references in
2461   // FLS are to valid objects elsewhere in the heap
2462   if (FLSVerifyAllHeapReferences)
2463   {
2464     VerifyAllOopsClosure cl(_collector, this, span, past_remark,
2465       _collector->markBitMap());
2466     CollectedHeap* ch = Universe::heap();
2467 
2468     // Iterate over all oops in the heap. Uses the _no_header version
2469     // since we are not interested in following the klass pointers.
2470     ch->oop_iterate_no_header(&cl);
2471   }
2472 
2473   if (VerifyObjectStartArray) {
2474     // Verify the block offset table
2475     _bt.verify();
2476   }
2477 }
2478 
2479 #ifndef PRODUCT
2480 void CompactibleFreeListSpace::verifyFreeLists() const {
2481   if (FLSVerifyLists) {
2482     _dictionary->verify();
2483     verifyIndexedFreeLists();
2484   } else {
2485     if (FLSVerifyDictionary) {
2486       _dictionary->verify();
2487     }
2488     if (FLSVerifyIndexTable) {
2489       verifyIndexedFreeLists();
2490     }
2491   }
2492 }
2493 #endif
2494 
2495 void CompactibleFreeListSpace::verifyIndexedFreeLists() const {
2496   size_t i = 0;
2497   for (; i < IndexSetStart; i++) {
2498     guarantee(_indexedFreeList[i].head() == NULL, "should be NULL");
2499   }
2500   for (; i < IndexSetSize; i++) {
2501     verifyIndexedFreeList(i);
2502   }
2503 }
2504 
2505 void CompactibleFreeListSpace::verifyIndexedFreeList(size_t size) const {
2506   FreeChunk* fc   =  _indexedFreeList[size].head();
2507   FreeChunk* tail =  _indexedFreeList[size].tail();
2508   size_t    num = _indexedFreeList[size].count();
2509   size_t      n = 0;
2510   guarantee(((size >= IndexSetStart) && (size % IndexSetStride == 0)) || fc == NULL,
2511             "Slot should have been empty");
2512   for (; fc != NULL; fc = fc->next(), n++) {
2513     guarantee(fc->size() == size, "Size inconsistency");
2514     guarantee(fc->is_free(), "!free?");
2515     guarantee(fc->next() == NULL || fc->next()->prev() == fc, "Broken list");
2516     guarantee((fc->next() == NULL) == (fc == tail), "Incorrect tail");
2517   }
2518   guarantee(n == num, "Incorrect count");
2519 }
2520 
2521 #ifndef PRODUCT
2522 void CompactibleFreeListSpace::check_free_list_consistency() const {
2523   assert((TreeChunk<FreeChunk, AdaptiveFreeList>::min_size() <= IndexSetSize),
2524     "Some sizes can't be allocated without recourse to"
2525     " linear allocation buffers");
2526   assert((TreeChunk<FreeChunk, AdaptiveFreeList>::min_size()*HeapWordSize == sizeof(TreeChunk<FreeChunk, AdaptiveFreeList>)),
2527     "else MIN_TREE_CHUNK_SIZE is wrong");
2528   assert(IndexSetStart != 0, "IndexSetStart not initialized");
2529   assert(IndexSetStride != 0, "IndexSetStride not initialized");
2530 }
2531 #endif
2532 
2533 void CompactibleFreeListSpace::printFLCensus(size_t sweep_count) const {
2534   assert_lock_strong(&_freelistLock);
2535   AdaptiveFreeList<FreeChunk> total;
2536   gclog_or_tty->print("end sweep# " SIZE_FORMAT "\n", sweep_count);
2537   AdaptiveFreeList<FreeChunk>::print_labels_on(gclog_or_tty, "size");
2538   size_t total_free = 0;
2539   for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
2540     const AdaptiveFreeList<FreeChunk> *fl = &_indexedFreeList[i];
2541     total_free += fl->count() * fl->size();
2542     if (i % (40*IndexSetStride) == 0) {
2543       AdaptiveFreeList<FreeChunk>::print_labels_on(gclog_or_tty, "size");
2544     }
2545     fl->print_on(gclog_or_tty);
2546     total.set_bfr_surp(    total.bfr_surp()     + fl->bfr_surp()    );
2547     total.set_surplus(    total.surplus()     + fl->surplus()    );
2548     total.set_desired(    total.desired()     + fl->desired()    );
2549     total.set_prev_sweep(  total.prev_sweep()   + fl->prev_sweep()  );
2550     total.set_before_sweep(total.before_sweep() + fl->before_sweep());
2551     total.set_count(      total.count()       + fl->count()      );
2552     total.set_coal_births( total.coal_births()  + fl->coal_births() );
2553     total.set_coal_deaths( total.coal_deaths()  + fl->coal_deaths() );
2554     total.set_split_births(total.split_births() + fl->split_births());
2555     total.set_split_deaths(total.split_deaths() + fl->split_deaths());
2556   }
2557   total.print_on(gclog_or_tty, "TOTAL");
2558   gclog_or_tty->print_cr("Total free in indexed lists "
2559                          SIZE_FORMAT " words", total_free);
2560   gclog_or_tty->print("growth: %8.5f  deficit: %8.5f\n",
2561     (double)(total.split_births()+total.coal_births()-total.split_deaths()-total.coal_deaths())/
2562             (total.prev_sweep() != 0 ? (double)total.prev_sweep() : 1.0),
2563     (double)(total.desired() - total.count())/(total.desired() != 0 ? (double)total.desired() : 1.0));
2564   _dictionary->print_dict_census();
2565 }
2566 
2567 ///////////////////////////////////////////////////////////////////////////
2568 // CFLS_LAB
2569 ///////////////////////////////////////////////////////////////////////////
2570 
2571 #define VECTOR_257(x)                                                                                  \
2572   /* 1  2  3  4  5  6  7  8  9 1x 11 12 13 14 15 16 17 18 19 2x 21 22 23 24 25 26 27 28 29 3x 31 32 */ \
2573   {  x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,   \
2574      x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,   \
2575      x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,   \
2576      x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,   \
2577      x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,   \
2578      x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,   \
2579      x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,   \
2580      x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,   \
2581      x }
2582 
2583 // Initialize with default setting of CMSParPromoteBlocksToClaim, _not_
2584 // OldPLABSize, whose static default is different; if overridden at the
2585 // command-line, this will get reinitialized via a call to
2586 // modify_initialization() below.
2587 AdaptiveWeightedAverage CFLS_LAB::_blocks_to_claim[]    =
2588   VECTOR_257(AdaptiveWeightedAverage(OldPLABWeight, (float)CMSParPromoteBlocksToClaim));
2589 size_t CFLS_LAB::_global_num_blocks[]  = VECTOR_257(0);
2590 uint   CFLS_LAB::_global_num_workers[] = VECTOR_257(0);
2591 
2592 CFLS_LAB::CFLS_LAB(CompactibleFreeListSpace* cfls) :
2593   _cfls(cfls)
2594 {
2595   assert(CompactibleFreeListSpace::IndexSetSize == 257, "Modify VECTOR_257() macro above");
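       // Give each local free list its chunk size; only sizes from IndexSetStart
       // upward, in steps of IndexSetStride, are ever used.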
2596   for (size_t i = CompactibleFreeListSpace::IndexSetStart;
2597        i < CompactibleFreeListSpace::IndexSetSize;
2598        i += CompactibleFreeListSpace::IndexSetStride) {
2599     _indexedFreeList[i].set_size(i);
2600     _num_blocks[i] = 0;
2601   }
2602 }
2603 
2604 static bool _CFLS_LAB_modified = false;
2605 
2606 void CFLS_LAB::modify_initialization(size_t n, unsigned wt) {
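       // Reseed every per-size-class average with the value supplied on the
       // command line (see the comment above _blocks_to_claim); this may be
       // called at most once.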
2607   assert(!_CFLS_LAB_modified, "Call only once");
2608   _CFLS_LAB_modified = true;
2609   for (size_t i = CompactibleFreeListSpace::IndexSetStart;
2610        i < CompactibleFreeListSpace::IndexSetSize;
2611        i += CompactibleFreeListSpace::IndexSetStride) {
2612     _blocks_to_claim[i].modify(n, wt, true /* force */);
2613   }
2614 }
2615 
2616 HeapWord* CFLS_LAB::alloc(size_t word_sz) {
2617   FreeChunk* res;
2618   assert(word_sz == _cfls->adjustObjectSize(word_sz), "Error");
2619   if (word_sz >=  CompactibleFreeListSpace::IndexSetSize) {
2620     // This locking manages sync with other large object allocations.
2621     MutexLockerEx x(_cfls->parDictionaryAllocLock(),
2622                     Mutex::_no_safepoint_check_flag);
2623     res = _cfls->getChunkFromDictionaryExact(word_sz);
2624     if (res == NULL) return NULL;
2625   } else {
2626     AdaptiveFreeList<FreeChunk>* fl = &_indexedFreeList[word_sz];
2627     if (fl->count() == 0) {
2628       // Attempt to refill this local free list.
2629       get_from_global_pool(word_sz, fl);
2630       // If it didn't work, give up.
2631       if (fl->count() == 0) return NULL;
2632     }
2633     res = fl->get_chunk_at_head();
2634     assert(res != NULL, "Why was count non-zero?");
2635   }
2636   res->markNotFree();
2637   assert(!res->is_free(), "shouldn't be marked free");
2638   assert(oop(res)->klass_or_null() == NULL, "should look uninitialized");
2639   // mangle a just-allocated object with a distinct pattern.
2640   debug_only(res->mangleAllocated(word_sz));
2641   return (HeapWord*)res;
2642 }
2643 
2644 // Get a chunk of blocks of the right size and update related
2645 // book-keeping stats
2646 void CFLS_LAB::get_from_global_pool(size_t word_sz, AdaptiveFreeList<FreeChunk>* fl) {
2647   // Get the #blocks we want to claim
2648   size_t n_blks = (size_t)_blocks_to_claim[word_sz].average();
2649   assert(n_blks > 0, "Error");
2650   assert(ResizeOldPLAB || n_blks == OldPLABSize, "Error");
2651   // In some cases, when the application has a phase change,
2652   // there may be a sudden and sharp shift in the object survival
2653   // profile, and updating the counts at the end of a scavenge
2654   // may not be quick enough, giving rise to large scavenge pauses
2655   // during these phase changes. It is beneficial to detect such
2656   // changes on-the-fly during a scavenge and avoid such a phase-change
2657   // pothole. The following code is a heuristic attempt to do that.
2658   // It is protected by a product flag until we have gained
2659   // enough experience with this heuristic and fine-tuned its behaviour.
2660   // WARNING: This might increase fragmentation if we overreact to
2661   // small spikes, so some kind of historical smoothing based on
2662   // previous experience with the greater reactivity might be useful.
2663   // Lacking sufficient experience, CMSOldPLABResizeQuicker is disabled by
2664   // default.
2665   if (ResizeOldPLAB && CMSOldPLABResizeQuicker) {
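         // Grow n_blks by a factor of CMSOldPLABReactivityFactor for each whole
         // multiple of CMSOldPLABToleranceFactor * CMSOldPLABNumRefills refills
         // (of n_blks blocks each) already claimed this scavenge, capping the
         // result at CMSOldPLABMax.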
2666     size_t multiple = _num_blocks[word_sz]/(CMSOldPLABToleranceFactor*CMSOldPLABNumRefills*n_blks);
2667     n_blks +=  CMSOldPLABReactivityFactor*multiple*n_blks;
2668     n_blks = MIN2(n_blks, CMSOldPLABMax);
2669   }
2670   assert(n_blks > 0, "Error");
2671   _cfls->par_get_chunk_of_blocks(word_sz, n_blks, fl);
2672   // Update stats table entry for this block size
2673   _num_blocks[word_sz] += fl->count();
2674 }
2675 
2676 void CFLS_LAB::compute_desired_plab_size() {
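       // Fold the per-size-class usage gathered by retire() into the smoothed
       // _blocks_to_claim averages (when ResizeOldPLAB), then reset the global
       // counters for the next round.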
2677   for (size_t i =  CompactibleFreeListSpace::IndexSetStart;
2678        i < CompactibleFreeListSpace::IndexSetSize;
2679        i += CompactibleFreeListSpace::IndexSetStride) {
2680     assert((_global_num_workers[i] == 0) == (_global_num_blocks[i] == 0),
2681            "Counter inconsistency");
2682     if (_global_num_workers[i] > 0) {
2683       // Need to smooth wrt historical average
2684       if (ResizeOldPLAB) {
2685         _blocks_to_claim[i].sample(
2686           MAX2((size_t)CMSOldPLABMin,
2687           MIN2((size_t)CMSOldPLABMax,
2688                _global_num_blocks[i]/(_global_num_workers[i]*CMSOldPLABNumRefills))));
2689       }
2690       // Reset counters for next round
2691       _global_num_workers[i] = 0;
2692       _global_num_blocks[i] = 0;
2693       if (PrintOldPLAB) {
2694         gclog_or_tty->print_cr("[" SIZE_FORMAT "]: " SIZE_FORMAT, i, (size_t)_blocks_to_claim[i].average());
2695       }
2696     }
2697   }
2698 }
2699 
2700 // If this is changed in the future to allow parallel
2701 // access, one would need to take the FL locks and,
2702 // depending on how it is used, stagger access from
2703 // parallel threads to reduce contention.
2704 void CFLS_LAB::retire(int tid) {
2705   // We run this single threaded with the world stopped;
2706   // so no need for locks and such.
2707   NOT_PRODUCT(Thread* t = Thread::current();)
2708   assert(Thread::current()->is_VM_thread(), "Error");
2709   for (size_t i =  CompactibleFreeListSpace::IndexSetStart;
2710        i < CompactibleFreeListSpace::IndexSetSize;
2711        i += CompactibleFreeListSpace::IndexSetStride) {
2712     assert(_num_blocks[i] >= (size_t)_indexedFreeList[i].count(),
2713            "Can't retire more than what we obtained");
2714     if (_num_blocks[i] > 0) {
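           // _num_blocks[i] counts everything claimed from the shared pool this
           // cycle; whatever is still sitting on the local list was not used and
           // is handed back below.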
2715       size_t num_retire =  _indexedFreeList[i].count();
2716       assert(_num_blocks[i] > num_retire, "Should have used at least one");
2717       {
2718         // MutexLockerEx x(_cfls->_indexedFreeListParLocks[i],
2719         //                Mutex::_no_safepoint_check_flag);
2720 
2721         // Update globals stats for num_blocks used
2722         _global_num_blocks[i] += (_num_blocks[i] - num_retire);
2723         _global_num_workers[i]++;
2724         assert(_global_num_workers[i] <= ParallelGCThreads, "Too big");
2725         if (num_retire > 0) {
2726           _cfls->_indexedFreeList[i].prepend(&_indexedFreeList[i]);
2727           // Reset this list.
2728           _indexedFreeList[i] = AdaptiveFreeList<FreeChunk>();
2729           _indexedFreeList[i].set_size(i);
2730         }
2731       }
2732       if (PrintOldPLAB) {
2733         gclog_or_tty->print_cr("%d[" SIZE_FORMAT "]: " SIZE_FORMAT "/" SIZE_FORMAT "/" SIZE_FORMAT,
2734                                tid, i, num_retire, _num_blocks[i], (size_t)_blocks_to_claim[i].average());
2735       }
2736       // Reset stats for next round
2737       _num_blocks[i]         = 0;
2738     }
2739   }
2740 }
2741 
2742 void CompactibleFreeListSpace::par_get_chunk_of_blocks(size_t word_sz, size_t n, AdaptiveFreeList<FreeChunk>* fl) {
2743   assert(fl->count() == 0, "Precondition.");
2744   assert(word_sz < CompactibleFreeListSpace::IndexSetSize,
2745          "Precondition");
2746 
2747   // We'll try all multiples of word_sz in the indexed set: word_sz itself
2748   // first and, if CMSSplitIndexedFreeListBlocks, larger multiples as well.
2749   // Failing that, we get a big chunk from the dictionary and split it up.
2750   {
2751     bool found;
2752     int  k;
2753     size_t cur_sz;
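         // k is the split factor: one chunk from the list of size cur_sz == k * word_sz
         // can be split into k blocks of word_sz each.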
2754     for (k = 1, cur_sz = k * word_sz, found = false;
2755          (cur_sz < CompactibleFreeListSpace::IndexSetSize) &&
2756          (CMSSplitIndexedFreeListBlocks || k <= 1);
2757          k++, cur_sz = k * word_sz) {
2758       AdaptiveFreeList<FreeChunk> fl_for_cur_sz;  // Empty.
2759       fl_for_cur_sz.set_size(cur_sz);
2760       {
2761         MutexLockerEx x(_indexedFreeListParLocks[cur_sz],
2762                         Mutex::_no_safepoint_check_flag);
2763         AdaptiveFreeList<FreeChunk>* gfl = &_indexedFreeList[cur_sz];
2764         if (gfl->count() != 0) {
2765           // nn is the number of chunks of size cur_sz that
2766           // we'd need to split k-ways each, in order to create
2767           // "n" chunks of size word_sz each.
2768           const size_t nn = MAX2(n/k, (size_t)1);
2769           gfl->getFirstNChunksFromList(nn, &fl_for_cur_sz);
2770           found = true;
2771           if (k > 1) {
2772             // Update split death stats for the cur_sz-size blocks list:
2773             // we increment the split death count by the number of blocks
2774             // we just took from the cur_sz-size blocks list and which
2775             // we will be splitting below.
2776             ssize_t deaths = gfl->split_deaths() +
2777                              fl_for_cur_sz.count();
2778             gfl->set_split_deaths(deaths);
2779           }
2780         }
2781       }
2782       // Now transfer fl_for_cur_sz to fl.  Common case, we hope, is k = 1.
2783       if (found) {
2784         if (k == 1) {
2785           fl->prepend(&fl_for_cur_sz);
2786         } else {
2787           // Divide each block on fl_for_cur_sz up k ways.
2788           FreeChunk* fc;
2789           while ((fc = fl_for_cur_sz.get_chunk_at_head()) != NULL) {
2790             // Must do this in reverse order, so that anybody attempting to
2791             // access the main chunk sees it as a single free block until we
2792             // change it.
2793             size_t fc_size = fc->size();
2794             assert(fc->is_free(), "Error");
2795             for (int i = k-1; i >= 0; i--) {
2796               FreeChunk* ffc = (FreeChunk*)((HeapWord*)fc + i * word_sz);
2797               assert((i != 0) ||
2798                         ((fc == ffc) && ffc->is_free() &&
2799                          (ffc->size() == k*word_sz) && (fc_size == word_sz)),
2800                         "Counting error");
2801               ffc->set_size(word_sz);
2802               ffc->link_prev(NULL); // Mark as a free block for other (parallel) GC threads.
2803               ffc->link_next(NULL);
2804               // Above must occur before BOT is updated below.
2805               OrderAccess::storestore();
2806               // splitting from the right; after the decrement below, fc_size == i * word_sz
2807               _bt.mark_block((HeapWord*)ffc, word_sz, true /* reducing */);
2808               fc_size -= word_sz;
2809               assert(fc_size == i*word_sz, "Error");
2810               _bt.verify_not_unallocated((HeapWord*)ffc, word_sz);
2811               _bt.verify_single_block((HeapWord*)fc, fc_size);
2812               _bt.verify_single_block((HeapWord*)ffc, word_sz);
2813               // Push this on "fl".
2814               fl->return_chunk_at_head(ffc);
2815             }
2816             // TRAP
2817             assert(fl->tail()->next() == NULL, "List invariant.");
2818           }
2819         }
2820         // Update birth stats for this block size.
2821         size_t num = fl->count();
2822         MutexLockerEx x(_indexedFreeListParLocks[word_sz],
2823                         Mutex::_no_safepoint_check_flag);
2824         ssize_t births = _indexedFreeList[word_sz].split_births() + num;
2825         _indexedFreeList[word_sz].set_split_births(births);
2826         return;
2827       }
2828     }
2829   }
2830   // Otherwise, we'll split a block from the dictionary.
2831   FreeChunk* fc = NULL;
2832   FreeChunk* rem_fc = NULL;
2833   size_t rem;
2834   {
2835     MutexLockerEx x(parDictionaryAllocLock(),
2836                     Mutex::_no_safepoint_check_flag);
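         // Ask the dictionary for a chunk large enough to split into n blocks;
         // if none is available, progressively shrink the request.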
2837     while (n > 0) {
2838       fc = dictionary()->get_chunk(MAX2(n * word_sz, _dictionary->min_size()),
2839                                   FreeBlockDictionary<FreeChunk>::atLeast);
2840       if (fc != NULL) {
2841         _bt.allocated((HeapWord*)fc, fc->size(), true /* reducing */);  // update _unallocated_blk
2842         dictionary()->dict_census_update(fc->size(),
2843                                        true /*split*/,
2844                                        false /*birth*/);
2845         break;
2846       } else {
2847         n--;
2848       }
2849     }
2850     if (fc == NULL) return;
2851     // Otherwise, split up that block.
2852     assert((ssize_t)n >= 1, "Control point invariant");
2853     assert(fc->is_free(), "Error: should be a free block");
2854     _bt.verify_single_block((HeapWord*)fc, fc->size());
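         // nn is the number of word_sz blocks this chunk can supply; cap n at that.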
2855     const size_t nn = fc->size() / word_sz;
2856     n = MIN2(nn, n);
2857     assert((ssize_t)n >= 1, "Control point invariant");
2858     rem = fc->size() - n * word_sz;
2859     // If there is a remainder, and it's too small, allocate one fewer.
2860     if (rem > 0 && rem < MinChunkSize) {
2861       n--; rem += word_sz;
2862     }
2863     // Note that at this point we may have n == 0.
2864     assert((ssize_t)n >= 0, "Control point invariant");
2865 
2866     // If n is 0, the chunk fc that was found is not large
2867     // enough to leave a viable remainder.  We are unable to
2868     // allocate even one block.  Return fc to the
2869     // dictionary and return, leaving "fl" empty.
2870     if (n == 0) {
2871       returnChunkToDictionary(fc);
2872       assert(fl->count() == 0, "We never allocated any blocks");
2873       return;
2874     }
2875 
2876     // First return the remainder, if any.
2877     // Note that we hold the lock until we decide if we're going to give
2878     // back the remainder to the dictionary, since a concurrent allocation
2879     // may otherwise see the heap as empty.  (We're willing to take that
2880     // hit if the block is a small block.)
2881     if (rem > 0) {
2882       size_t prefix_size = n * word_sz;
2883       rem_fc = (FreeChunk*)((HeapWord*)fc + prefix_size);
2884       rem_fc->set_size(rem);
2885       rem_fc->link_prev(NULL); // Mark as a free block for other (parallel) GC threads.
2886       rem_fc->link_next(NULL);
2887       // Above must occur before BOT is updated below.
2888       assert((ssize_t)n > 0 && prefix_size > 0 && rem_fc > fc, "Error");
2889       OrderAccess::storestore();
2890       _bt.split_block((HeapWord*)fc, fc->size(), prefix_size);
2891       assert(fc->is_free(), "Error");
2892       fc->set_size(prefix_size);
2893       if (rem >= IndexSetSize) {
2894         returnChunkToDictionary(rem_fc);
2895         dictionary()->dict_census_update(rem, true /*split*/, true /*birth*/);
2896         rem_fc = NULL;
2897       }
2898       // Otherwise, return it to the small list below.
2899     }
2900   }
2901   if (rem_fc != NULL) {
2902     MutexLockerEx x(_indexedFreeListParLocks[rem],
2903                     Mutex::_no_safepoint_check_flag);
2904     _bt.verify_not_unallocated((HeapWord*)rem_fc, rem_fc->size());
2905     _indexedFreeList[rem].return_chunk_at_head(rem_fc);
2906     smallSplitBirth(rem);
2907   }
2908   assert((ssize_t)n > 0 && fc != NULL, "Consistency");
2909   // Now do the splitting up.
2910   // Must do this in reverse order, so that anybody attempting to
2911   // access the main chunk sees it as a single free block until we
2912   // change it.
2913   size_t fc_size = n * word_sz;
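       // fc_size tracks the shrinking size of the unsplit prefix of fc as blocks
       // are carved off its right end; it is used only by the verification calls below.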
2914   // All but first chunk in this loop
2915   for (ssize_t i = n-1; i > 0; i--) {
2916     FreeChunk* ffc = (FreeChunk*)((HeapWord*)fc + i * word_sz);
2917     ffc->set_size(word_sz);
2918     ffc->link_prev(NULL); // Mark as a free block for other (parallel) GC threads.
2919     ffc->link_next(NULL);
2920     // Above must occur before BOT is updated below.
2921     OrderAccess::storestore();
2922     // splitting from the right; after the decrement below, fc_size == i * word_sz
2923     _bt.mark_block((HeapWord*)ffc, word_sz, true /* reducing */);
2924     fc_size -= word_sz;
2925     _bt.verify_not_unallocated((HeapWord*)ffc, ffc->size());
2926     _bt.verify_single_block((HeapWord*)ffc, ffc->size());
2927     _bt.verify_single_block((HeapWord*)fc, fc_size);
2928     // Push this on "fl".
2929     fl->return_chunk_at_head(ffc);
2930   }
2931   // First chunk
2932   assert(fc->is_free() && fc->size() == n*word_sz, "Error: should still be a free block");
2933   // The blocks above should show their new sizes before the first block below
2934   fc->set_size(word_sz);
2935   fc->link_prev(NULL);    // idempotent wrt free-ness, see assert above
2936   fc->link_next(NULL);
2937   _bt.verify_not_unallocated((HeapWord*)fc, fc->size());
2938   _bt.verify_single_block((HeapWord*)fc, fc->size());
2939   fl->return_chunk_at_head(fc);
2940 
2941   assert((ssize_t)n > 0 && (ssize_t)n == fl->count(), "Incorrect number of blocks");
2942   {
2943     // Update the stats for this block size.
2944     MutexLockerEx x(_indexedFreeListParLocks[word_sz],
2945                     Mutex::_no_safepoint_check_flag);
2946     const ssize_t births = _indexedFreeList[word_sz].split_births() + n;
2947     _indexedFreeList[word_sz].set_split_births(births);
2948     // ssize_t new_surplus = _indexedFreeList[word_sz].surplus() + n;
2949     // _indexedFreeList[word_sz].set_surplus(new_surplus);
2950   }
2951 
2952   // TRAP
2953   assert(fl->tail()->next() == NULL, "List invariant.");
2954 }
2955 
2956 // Set up the space's par_seq_tasks structure for work claiming
2957 // for parallel rescan. See CMSParRemarkTask where this is currently used.
2958 // XXX Need to suitably abstract and generalize this and the next
2959 // method into one.
2960 void
2961 CompactibleFreeListSpace::
2962 initialize_sequential_subtasks_for_rescan(int n_threads) {
2963   // The "size" of each task is fixed according to rescan_task_size.
2964   assert(n_threads > 0, "Unexpected n_threads argument");
2965   const size_t task_size = rescan_task_size();
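       // Round up so that a final, partial stretch of the used region still gets a task.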
2966   size_t n_tasks = (used_region().word_size() + task_size - 1)/task_size;
2967   assert((n_tasks == 0) == used_region().is_empty(), "n_tasks incorrect");
2968   assert(n_tasks == 0 ||
2969          ((used_region().start() + (n_tasks - 1)*task_size < used_region().end()) &&
2970           (used_region().start() + n_tasks*task_size >= used_region().end())),
2971          "n_tasks calculation incorrect");
2972   SequentialSubTasksDone* pst = conc_par_seq_tasks();
2973   assert(!pst->valid(), "Clobbering existing data?");
2974   // Sets the condition for completion of the subtask (how many threads
2975   // need to finish in order to be done).
2976   pst->set_n_threads(n_threads);
2977   pst->set_n_tasks((int)n_tasks);
2978 }
2979 
2980 // Set up the space's par_seq_tasks structure for work claiming
2981 // for parallel concurrent marking. See CMSConcMarkTask where this is currently used.
2982 void
2983 CompactibleFreeListSpace::
2984 initialize_sequential_subtasks_for_marking(int n_threads,
2985                                            HeapWord* low) {
2986   // The "size" of each task is fixed according to rescan_task_size.
2987   assert(n_threads > 0, "Unexpected n_threads argument");
2988   const size_t task_size = marking_task_size();
2989   assert(task_size > CardTableModRefBS::card_size_in_words &&
2990          (task_size %  CardTableModRefBS::card_size_in_words == 0),
2991          "Otherwise arithmetic below would be incorrect");
2992   MemRegion span = _gen->reserved();
2993   if (low != NULL) {
2994     if (span.contains(low)) {
2995       // Align low down to  a card boundary so that
2996       // we can use block_offset_careful() on span boundaries.
2997       HeapWord* aligned_low = (HeapWord*)align_size_down((uintptr_t)low,
2998                                  CardTableModRefBS::card_size);
2999       // Clip span prefix at aligned_low
3000       span = span.intersection(MemRegion(aligned_low, span.end()));
3001     } else if (low > span.end()) {
3002       span = MemRegion(low, low);  // Null region
3003     } // else use entire span
3004   }
3005   assert(span.is_empty() ||
3006          ((uintptr_t)span.start() %  CardTableModRefBS::card_size == 0),
3007         "span should start at a card boundary");
3008   size_t n_tasks = (span.word_size() + task_size - 1)/task_size;
3009   assert((n_tasks == 0) == span.is_empty(), "Inconsistency");
3010   assert(n_tasks == 0 ||
3011          ((span.start() + (n_tasks - 1)*task_size < span.end()) &&
3012           (span.start() + n_tasks*task_size >= span.end())),
3013          "n_tasks calculation incorrect");
3014   SequentialSubTasksDone* pst = conc_par_seq_tasks();
3015   assert(!pst->valid(), "Clobbering existing data?");
3016   // Sets the condition for completion of the subtask (how many threads
3017   // need to finish in order to be done).
3018   pst->set_n_threads(n_threads);
3019   pst->set_n_tasks((int)n_tasks);
3020 }