1 /*
   2  * Copyright (c) 2001, 2015, Oracle and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 #include "classfile/classLoaderData.hpp"
  27 #include "classfile/stringTable.hpp"
  28 #include "classfile/systemDictionary.hpp"
  29 #include "code/codeCache.hpp"
  30 #include "gc/cms/cmsCollectorPolicy.hpp"
  31 #include "gc/cms/cmsOopClosures.inline.hpp"
  32 #include "gc/cms/compactibleFreeListSpace.hpp"
  33 #include "gc/cms/concurrentMarkSweepGeneration.inline.hpp"
  34 #include "gc/cms/concurrentMarkSweepThread.hpp"
  35 #include "gc/cms/parNewGeneration.hpp"
  36 #include "gc/cms/vmCMSOperations.hpp"
  37 #include "gc/serial/genMarkSweep.hpp"
  38 #include "gc/serial/tenuredGeneration.hpp"
  39 #include "gc/shared/adaptiveSizePolicy.hpp"
  40 #include "gc/shared/cardGeneration.inline.hpp"
  41 #include "gc/shared/cardTableRS.hpp"
  42 #include "gc/shared/collectedHeap.inline.hpp"
  43 #include "gc/shared/collectorCounters.hpp"
  44 #include "gc/shared/collectorPolicy.hpp"
  45 #include "gc/shared/gcLocker.inline.hpp"
  46 #include "gc/shared/gcPolicyCounters.hpp"
  47 #include "gc/shared/gcTimer.hpp"
  48 #include "gc/shared/gcTrace.hpp"
  49 #include "gc/shared/gcTraceTime.hpp"
  50 #include "gc/shared/genCollectedHeap.hpp"
  51 #include "gc/shared/genOopClosures.inline.hpp"
  52 #include "gc/shared/isGCActiveMark.hpp"
  53 #include "gc/shared/referencePolicy.hpp"
  54 #include "gc/shared/strongRootsScope.hpp"
  55 #include "gc/shared/taskqueue.inline.hpp"
  56 #include "memory/allocation.hpp"
  57 #include "memory/iterator.inline.hpp"
  58 #include "memory/padded.hpp"
  59 #include "memory/resourceArea.hpp"
  60 #include "oops/oop.inline.hpp"
  61 #include "prims/jvmtiExport.hpp"
  62 #include "runtime/atomic.inline.hpp"
  63 #include "runtime/globals_extension.hpp"
  64 #include "runtime/handles.inline.hpp"
  65 #include "runtime/java.hpp"
  66 #include "runtime/orderAccess.inline.hpp"
  67 #include "runtime/vmThread.hpp"
  68 #include "services/memoryService.hpp"
  69 #include "services/runtimeService.hpp"
  70 #include "utilities/stack.inline.hpp"
  71 
  72 // statics
  73 CMSCollector* ConcurrentMarkSweepGeneration::_collector = NULL;
  74 bool CMSCollector::_full_gc_requested = false;
  75 GCCause::Cause CMSCollector::_full_gc_cause = GCCause::_no_gc;
  76 
  77 //////////////////////////////////////////////////////////////////
  78 // In support of CMS/VM thread synchronization
  79 //////////////////////////////////////////////////////////////////
  80 // We split use of the CGC_lock into 2 "levels".
  81 // The low-level locking is of the usual CGC_lock monitor. We introduce
  82 // a higher level "token" (hereafter "CMS token") built on top of the
  83 // low level monitor (hereafter "CGC lock").
  84 // The token-passing protocol gives priority to the VM thread. The
  85 // CMS-lock doesn't provide any fairness guarantees, but clients
  86 // should ensure that it is only held for very short, bounded
  87 // durations.
  88 //
  89 // When either of the CMS thread or the VM thread is involved in
  90 // collection operations during which it does not want the other
  91 // thread to interfere, it obtains the CMS token.
  92 //
  93 // If either thread tries to get the token while the other has
  94 // it, that thread waits. However, if the VM thread and CMS thread
  95 // both want the token, then the VM thread gets priority while the
  96 // CMS thread waits. This ensures, for instance, that the "concurrent"
  97 // phases of the CMS thread's work do not block out the VM thread
  98 // for long periods of time as the CMS thread continues to hog
  99 // the token. (See bug 4616232).
 100 //
 101 // The baton-passing functions are, however, controlled by the
 102 // flags _foregroundGCShouldWait and _foregroundGCIsActive,
 103 // and here the low-level CMS lock, not the high level token,
 104 // ensures mutual exclusion.
 105 //
 106 // Two important conditions that we have to satisfy:
 107 // 1. if a thread does a low-level wait on the CMS lock, then it
 108 //    relinquishes the CMS token if it were holding that token
 109 //    when it acquired the low-level CMS lock.
 110 // 2. any low-level notifications on the low-level lock
 111 //    should only be sent when a thread has relinquished the token.
 112 //
 113 // In the absence of either property, we'd have potential deadlock.
 114 //
 115 // We protect each of the CMS (concurrent and sequential) phases
 116 // with the CMS _token_, not the CMS _lock_.
 117 //
 118 // The only code protected by CMS lock is the token acquisition code
 119 // itself, see ConcurrentMarkSweepThread::[de]synchronize(), and the
 120 // baton-passing code.
 121 //
 122 // Unfortunately, I couldn't come up with a good abstraction to factor and
 123 // hide the naked CGC_lock manipulation in the baton-passing code
 124 // further below. That's something we should try to do. Also, the proof
 125 // of correctness of this 2-level locking scheme is far from obvious,
 126 // and potentially quite slippery. We have an uneasy suspicion, for instance,
 127 // that there may be a theoretical possibility of delay/starvation in the
 128 // low-level lock/wait/notify scheme used for the baton-passing because of
 129 // potential interference with the priority scheme embodied in the
 130 // CMS-token-passing protocol. See related comments at a CGC_lock->wait()
 131 // invocation further below and marked with "XXX 20011219YSR".
 132 // Indeed, as we note elsewhere, this may become yet more slippery
 133 // in the presence of multiple CMS and/or multiple VM threads. XXX
 134 
 135 class CMSTokenSync: public StackObj {
 136  private:
 137   bool _is_cms_thread;
 138  public:
 139   CMSTokenSync(bool is_cms_thread):
 140     _is_cms_thread(is_cms_thread) {
 141     assert(is_cms_thread == Thread::current()->is_ConcurrentGC_thread(),
 142            "Incorrect argument to constructor");
 143     ConcurrentMarkSweepThread::synchronize(_is_cms_thread);
 144   }
 145 
 146   ~CMSTokenSync() {
 147     assert(_is_cms_thread ?
 148              ConcurrentMarkSweepThread::cms_thread_has_cms_token() :
 149              ConcurrentMarkSweepThread::vm_thread_has_cms_token(),
 150           "Incorrect state");
 151     ConcurrentMarkSweepThread::desynchronize(_is_cms_thread);
 152   }
 153 };
 154 
 155 // Convenience class that does a CMSTokenSync, and then acquires
 156 // up to three locks.
 157 class CMSTokenSyncWithLocks: public CMSTokenSync {
 158  private:
 159   // Note: locks are acquired in textual declaration order
 160   // and released in the opposite order
 161   MutexLockerEx _locker1, _locker2, _locker3;
 162  public:
 163   CMSTokenSyncWithLocks(bool is_cms_thread, Mutex* mutex1,
 164                         Mutex* mutex2 = NULL, Mutex* mutex3 = NULL):
 165     CMSTokenSync(is_cms_thread),
 166     _locker1(mutex1, Mutex::_no_safepoint_check_flag),
 167     _locker2(mutex2, Mutex::_no_safepoint_check_flag),
 168     _locker3(mutex3, Mutex::_no_safepoint_check_flag)
 169   { }
 170 };
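
     // Usage sketch (illustrative only, not code taken from this file): a
     // stop-world helper running on the VM thread might bracket its work as
     //
     //   {
     //     CMSTokenSyncWithLocks ts(false /* not the CMS thread */, bitMapLock());
     //     ... work that must exclude the CMS thread and hold the bit map lock ...
     //   }  // locks released in reverse order, then the CMS token is relinquished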
 171 
 172 
 173 //////////////////////////////////////////////////////////////////
 174 //  Concurrent Mark-Sweep Generation /////////////////////////////
 175 //////////////////////////////////////////////////////////////////
 176 
 177 NOT_PRODUCT(CompactibleFreeListSpace* debug_cms_space;)
 178 
 179 // This struct contains per-thread things necessary to support parallel
 180 // young-gen collection.
 181 class CMSParGCThreadState: public CHeapObj<mtGC> {
 182  public:
 183   CFLS_LAB lab;
 184   PromotionInfo promo;
 185 
 186   // Constructor.
 187   CMSParGCThreadState(CompactibleFreeListSpace* cfls) : lab(cfls) {
 188     promo.setSpace(cfls);
 189   }
 190 };
 191 
 192 ConcurrentMarkSweepGeneration::ConcurrentMarkSweepGeneration(
 193      ReservedSpace rs, size_t initial_byte_size, int level,
 194      CardTableRS* ct, bool use_adaptive_freelists,
 195      FreeBlockDictionary<FreeChunk>::DictionaryChoice dictionaryChoice) :
 196   CardGeneration(rs, initial_byte_size, level, ct),
 197   _dilatation_factor(((double)MinChunkSize)/((double)(CollectedHeap::min_fill_size()))),
 198   _did_compact(false)
 199 {
 200   HeapWord* bottom = (HeapWord*) _virtual_space.low();
 201   HeapWord* end    = (HeapWord*) _virtual_space.high();
 202 
 203   _direct_allocated_words = 0;
 204   NOT_PRODUCT(
 205     _numObjectsPromoted = 0;
 206     _numWordsPromoted = 0;
 207     _numObjectsAllocated = 0;
 208     _numWordsAllocated = 0;
 209   )
 210 
 211   _cmsSpace = new CompactibleFreeListSpace(_bts, MemRegion(bottom, end),
 212                                            use_adaptive_freelists,
 213                                            dictionaryChoice);
 214   NOT_PRODUCT(debug_cms_space = _cmsSpace;)
 215   _cmsSpace->_gen = this;
 216 
 217   _gc_stats = new CMSGCStats();
 218 
 219   // Verify the assumption that FreeChunk::_prev and OopDesc::_klass
 220   // offsets match. The ability to tell free chunks from objects
 221   // depends on this property.
 222   debug_only(
 223     FreeChunk* junk = NULL;
 224     assert(UseCompressedClassPointers ||
 225            junk->prev_addr() == (void*)(oop(junk)->klass_addr()),
 226            "Offset of FreeChunk::_prev within FreeChunk must match"
 227            "  that of OopDesc::_klass within OopDesc");
 228   )
 229 
 230   _par_gc_thread_states = NEW_C_HEAP_ARRAY(CMSParGCThreadState*, ParallelGCThreads, mtGC);
 231   for (uint i = 0; i < ParallelGCThreads; i++) {
 232     _par_gc_thread_states[i] = new CMSParGCThreadState(cmsSpace());
 233   }
 234 
 235   _incremental_collection_failed = false;
 236   // The "dilatation_factor" is the expansion that can occur on
 237   // account of the fact that the minimum object size in the CMS
 238   // generation may be larger than that in, say, a contiguous young
 239   //  generation.
 240   // Ideally, in the calculation below, we'd compute the dilatation
 241   // factor as: MinChunkSize/(promoting_gen's min object size)
 242   // Since we do not have such a general query interface for the
 243   // promoting generation, we'll instead just use the minimum
 244   // object size (which today is a header's worth of space);
 245   // note that all arithmetic is in units of HeapWords.
 246   assert(MinChunkSize >= CollectedHeap::min_fill_size(), "just checking");
 247   assert(_dilatation_factor >= 1.0, "from previous assert");
 248 }
 249 
 250 
 251 // The field "_initiating_occupancy" represents the occupancy percentage
 252 // at which we trigger a new collection cycle.  Unless explicitly specified
 253 // via CMSInitiatingOccupancyFraction (argument "io" below), it
 254 // is calculated by:
 255 //
 256 //   Let "f" be MinHeapFreeRatio in
 257 //
 258 //    _initiating_occupancy = 100-f +
 259 //                           f * (CMSTriggerRatio/100)
 260 //   where CMSTriggerRatio is the argument "tr" below.
 261 //
 262 // That is, if we assume the heap is at its desired maximum occupancy at the
 263 // end of a collection, we let CMSTriggerRatio of the (purported) free
 264 // space be allocated before initiating a new collection cycle.
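     //
     // For example, assuming the usual defaults MinHeapFreeRatio == 40 and
     // CMSTriggerRatio == 80 (and no explicit CMSInitiatingOccupancyFraction):
     //
     //    _initiating_occupancy = ((100 - 40) + (80 * 40) / 100) / 100
     //                          = (60 + 32) / 100
     //                          = 0.92
     //
     // i.e. a new concurrent cycle is initiated once the generation is about
     // 92% full.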
 265 //
 266 void ConcurrentMarkSweepGeneration::init_initiating_occupancy(intx io, uintx tr) {
 267   assert(io <= 100 && tr <= 100, "Check the arguments");
 268   if (io >= 0) {
 269     _initiating_occupancy = (double)io / 100.0;
 270   } else {
 271     _initiating_occupancy = ((100 - MinHeapFreeRatio) +
 272                              (double)(tr * MinHeapFreeRatio) / 100.0)
 273                             / 100.0;
 274   }
 275 }
 276 
 277 void ConcurrentMarkSweepGeneration::ref_processor_init() {
 278   assert(collector() != NULL, "no collector");
 279   collector()->ref_processor_init();
 280 }
 281 
 282 void CMSCollector::ref_processor_init() {
 283   if (_ref_processor == NULL) {
 284     // Allocate and initialize a reference processor
 285     _ref_processor =
 286       new ReferenceProcessor(_span,                               // span
 287                              (ParallelGCThreads > 1) && ParallelRefProcEnabled, // mt processing
 288                              (int) ParallelGCThreads,             // mt processing degree
 289                              _cmsGen->refs_discovery_is_mt(),     // mt discovery
 290                              (int) MAX2(ConcGCThreads, ParallelGCThreads), // mt discovery degree
 291                              _cmsGen->refs_discovery_is_atomic(), // discovery is not atomic
 292                              &_is_alive_closure);                 // closure for liveness info
 293     // Initialize the _ref_processor field of CMSGen
 294     _cmsGen->set_ref_processor(_ref_processor);
 295 
 296   }
 297 }
 298 
 299 AdaptiveSizePolicy* CMSCollector::size_policy() {
 300   GenCollectedHeap* gch = GenCollectedHeap::heap();
 301   return gch->gen_policy()->size_policy();
 302 }
 303 
 304 void ConcurrentMarkSweepGeneration::initialize_performance_counters() {
 305 
 306   const char* gen_name = "old";
 307   GenCollectorPolicy* gcp = (GenCollectorPolicy*) GenCollectedHeap::heap()->collector_policy();
 308 
 309   // Generation Counters - generation 1, 1 subspace
 310   _gen_counters = new GenerationCounters(gen_name, 1, 1,
 311       gcp->min_old_size(), gcp->max_old_size(), &_virtual_space);
 312 
 313   _space_counters = new GSpaceCounters(gen_name, 0,
 314                                        _virtual_space.reserved_size(),
 315                                        this, _gen_counters);
 316 }
 317 
 318 CMSStats::CMSStats(ConcurrentMarkSweepGeneration* cms_gen, unsigned int alpha):
 319   _cms_gen(cms_gen)
 320 {
 321   assert(alpha <= 100, "bad value");
 322   _saved_alpha = alpha;
 323 
 324   // Initialize the alphas to the bootstrap value of 100.
 325   _gc0_alpha = _cms_alpha = 100;
 326 
 327   _cms_begin_time.update();
 328   _cms_end_time.update();
 329 
 330   _gc0_duration = 0.0;
 331   _gc0_period = 0.0;
 332   _gc0_promoted = 0;
 333 
 334   _cms_duration = 0.0;
 335   _cms_period = 0.0;
 336   _cms_allocated = 0;
 337 
 338   _cms_used_at_gc0_begin = 0;
 339   _cms_used_at_gc0_end = 0;
 340   _allow_duty_cycle_reduction = false;
 341   _valid_bits = 0;
 342 }
 343 
 344 double CMSStats::cms_free_adjustment_factor(size_t free) const {
 345   // TBD: CR 6909490
 346   return 1.0;
 347 }
 348 
 349 void CMSStats::adjust_cms_free_adjustment_factor(bool fail, size_t free) {
 350 }
 351 
 352 // If promotion failure handling is on, use
 353 // the padded average size of the promotion for each
 354 // young generation collection.
 355 double CMSStats::time_until_cms_gen_full() const {
 356   size_t cms_free = _cms_gen->cmsSpace()->free();
 357   GenCollectedHeap* gch = GenCollectedHeap::heap();
 358   size_t expected_promotion = MIN2(gch->young_gen()->capacity(),
 359                                    (size_t) _cms_gen->gc_stats()->avg_promoted()->padded_average());
 360   if (cms_free > expected_promotion) {
 361     // Start a cms collection if there isn't enough space to promote
 362     // for the next minor collection.  Use the padded average as
 363     // a safety factor.
 364     cms_free -= expected_promotion;
 365 
 366     // Adjust by the safety factor.
 367     double cms_free_dbl = (double)cms_free;
 368     double cms_adjustment = (100.0 - CMSIncrementalSafetyFactor)/100.0;
 369     // Apply a further correction factor which tries to adjust
 370     // for recent occurrence of concurrent mode failures.
 371     cms_adjustment = cms_adjustment * cms_free_adjustment_factor(cms_free);
 372     cms_free_dbl = cms_free_dbl * cms_adjustment;
 373 
 374     if (PrintGCDetails && Verbose) {
 375       gclog_or_tty->print_cr("CMSStats::time_until_cms_gen_full: cms_free "
 376         SIZE_FORMAT " expected_promotion " SIZE_FORMAT,
 377         cms_free, expected_promotion);
 378       gclog_or_tty->print_cr("  cms_free_dbl %f cms_consumption_rate %f",
 379         cms_free_dbl, cms_consumption_rate() + 1.0);
 380     }
 381     // Add 1 in case the consumption rate goes to zero.
 382     return cms_free_dbl / (cms_consumption_rate() + 1.0);
 383   }
 384   return 0.0;
 385 }
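
     // Illustrative arithmetic for the calculation above (assumed example values,
     // not measurements): with cms_free == 600M, expected_promotion == 100M and a
     // safety factor of 10% (the default CMSIncrementalSafetyFactor), the adjusted
     // free space is (600M - 100M) * 0.9 == 450M; at a consumption rate of 50M/s
     // the estimate returned is 450 / (50 + 1) ~= 8.8 seconds until the generation
     // is full.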
 386 
 387 // Compare the duration of the cms collection to the
 388 // time remaining before the cms generation is full.
 389 // Note that the time from the start of the cms collection
 390 // to the start of the cms sweep (which is less than the total
 391 // duration of the cms collection) could be used instead.  This
 392 // has been tried, and some applications experienced
 393 // promotion failures early in execution, possibly
 394 // because the averages were not accurate
 395 // enough at the beginning of the run.
 396 double CMSStats::time_until_cms_start() const {
 397   // We add "gc0_period" to the "work" calculation
 398   // below because this query is done (mostly) at the
 399   // end of a scavenge, so we need to conservatively
 400   // account for that much possible delay
 401   // in the query so as to avoid concurrent mode failures
 402   // due to starting the collection just a wee bit too
 403   // late.
 404   double work = cms_duration() + gc0_period();
 405   double deadline = time_until_cms_gen_full();
 406   // If a concurrent mode failure occurred recently, we want to be
 407   // more conservative and halve our expected time_until_cms_gen_full()
 408   if (work > deadline) {
 409     if (Verbose && PrintGCDetails) {
 410       gclog_or_tty->print(
 411         " CMSCollector: collect because of anticipated promotion "
 412         "before full %3.7f + %3.7f > %3.7f ", cms_duration(),
 413         gc0_period(), time_until_cms_gen_full());
 414     }
 415     return 0.0;
 416   }
 417   return deadline - work;
 418 }
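
     // Continuing the illustrative numbers above: if a concurrent cycle takes
     // about 6 s (cms_duration) and scavenges occur every ~2 s (gc0_period),
     // work == 8 s, which is still below the ~8.8 s deadline, so the cycle is
     // not started yet; once work exceeds the deadline the query returns 0.0
     // and shouldConcurrentCollect() starts a cycle.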
 419 
 420 #ifndef PRODUCT
 421 void CMSStats::print_on(outputStream *st) const {
 422   st->print(" gc0_alpha=%d,cms_alpha=%d", _gc0_alpha, _cms_alpha);
 423   st->print(",gc0_dur=%g,gc0_per=%g,gc0_promo=" SIZE_FORMAT,
 424                gc0_duration(), gc0_period(), gc0_promoted());
 425   st->print(",cms_dur=%g,cms_per=%g,cms_alloc=" SIZE_FORMAT,
 426             cms_duration(), cms_period(), cms_allocated());
 427   st->print(",cms_since_beg=%g,cms_since_end=%g",
 428             cms_time_since_begin(), cms_time_since_end());
 429   st->print(",cms_used_beg=" SIZE_FORMAT ",cms_used_end=" SIZE_FORMAT,
 430             _cms_used_at_gc0_begin, _cms_used_at_gc0_end);
 431 
 432   if (valid()) {
 433     st->print(",promo_rate=%g,cms_alloc_rate=%g",
 434               promotion_rate(), cms_allocation_rate());
 435     st->print(",cms_consumption_rate=%g,time_until_full=%g",
 436               cms_consumption_rate(), time_until_cms_gen_full());
 437   }
 438   st->print(" ");
 439 }
 440 #endif // #ifndef PRODUCT
 441 
 442 CMSCollector::CollectorState CMSCollector::_collectorState =
 443                              CMSCollector::Idling;
 444 bool CMSCollector::_foregroundGCIsActive = false;
 445 bool CMSCollector::_foregroundGCShouldWait = false;
 446 
 447 CMSCollector::CMSCollector(ConcurrentMarkSweepGeneration* cmsGen,
 448                            CardTableRS*                   ct,
 449                            ConcurrentMarkSweepPolicy*     cp):
 450   _cmsGen(cmsGen),
 451   _ct(ct),
 452   _ref_processor(NULL),    // will be set later
 453   _conc_workers(NULL),     // may be set later
 454   _abort_preclean(false),
 455   _start_sampling(false),
 456   _between_prologue_and_epilogue(false),
 457   _markBitMap(0, Mutex::leaf + 1, "CMS_markBitMap_lock"),
 458   _modUnionTable((CardTableModRefBS::card_shift - LogHeapWordSize),
 459                  -1 /* lock-free */, "No_lock" /* dummy */),
 460   _modUnionClosurePar(&_modUnionTable),
 461   // Adjust my span to cover old (cms) gen
 462   _span(cmsGen->reserved()),
 463   // Construct the is_alive_closure with _span & markBitMap
 464   _is_alive_closure(_span, &_markBitMap),
 465   _restart_addr(NULL),
 466   _overflow_list(NULL),
 467   _stats(cmsGen),
 468   _eden_chunk_lock(new Mutex(Mutex::leaf + 1, "CMS_eden_chunk_lock", true,
 469                              //verify that this lock should be acquired with safepoint check.
 470                              Monitor::_safepoint_check_sometimes)),
 471   _eden_chunk_array(NULL),     // may be set in ctor body
 472   _eden_chunk_capacity(0),     // -- ditto --
 473   _eden_chunk_index(0),        // -- ditto --
 474   _survivor_plab_array(NULL),  // -- ditto --
 475   _survivor_chunk_array(NULL), // -- ditto --
 476   _survivor_chunk_capacity(0), // -- ditto --
 477   _survivor_chunk_index(0),    // -- ditto --
 478   _ser_pmc_preclean_ovflw(0),
 479   _ser_kac_preclean_ovflw(0),
 480   _ser_pmc_remark_ovflw(0),
 481   _par_pmc_remark_ovflw(0),
 482   _ser_kac_ovflw(0),
 483   _par_kac_ovflw(0),
 484 #ifndef PRODUCT
 485   _num_par_pushes(0),
 486 #endif
 487   _collection_count_start(0),
 488   _verifying(false),
 489   _verification_mark_bm(0, Mutex::leaf + 1, "CMS_verification_mark_bm_lock"),
 490   _completed_initialization(false),
 491   _collector_policy(cp),
 492   _should_unload_classes(CMSClassUnloadingEnabled),
 493   _concurrent_cycles_since_last_unload(0),
 494   _roots_scanning_options(GenCollectedHeap::SO_None),
 495   _inter_sweep_estimate(CMS_SweepWeight, CMS_SweepPadding),
 496   _intra_sweep_estimate(CMS_SweepWeight, CMS_SweepPadding),
 497   _gc_tracer_cm(new (ResourceObj::C_HEAP, mtGC) CMSTracer()),
 498   _gc_timer_cm(new (ResourceObj::C_HEAP, mtGC) ConcurrentGCTimer()),
 499   _cms_start_registered(false)
 500 {
 501   if (ExplicitGCInvokesConcurrentAndUnloadsClasses) {
 502     ExplicitGCInvokesConcurrent = true;
 503   }
 504   // Now expand the span and allocate the collection support structures
 505   // (MUT, marking bit map etc.) to cover both generations subject to
 506   // collection.
 507 
 508   // For use by dirty card to oop closures.
 509   _cmsGen->cmsSpace()->set_collector(this);
 510 
 511   // Allocate MUT and marking bit map
 512   {
 513     MutexLockerEx x(_markBitMap.lock(), Mutex::_no_safepoint_check_flag);
 514     if (!_markBitMap.allocate(_span)) {
 515       warning("Failed to allocate CMS Bit Map");
 516       return;
 517     }
 518     assert(_markBitMap.covers(_span), "_markBitMap inconsistency?");
 519   }
 520   {
 521     _modUnionTable.allocate(_span);
 522     assert(_modUnionTable.covers(_span), "_modUnionTable inconsistency?");
 523   }
 524 
 525   if (!_markStack.allocate(MarkStackSize)) {
 526     warning("Failed to allocate CMS Marking Stack");
 527     return;
 528   }
 529 
 530   // Support for multi-threaded concurrent phases
 531   if (CMSConcurrentMTEnabled) {
 532     if (FLAG_IS_DEFAULT(ConcGCThreads)) {
 533       // just for now
 534       FLAG_SET_DEFAULT(ConcGCThreads, (ParallelGCThreads + 3)/4);
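           // e.g. with ParallelGCThreads == 8 this default works out to
           // (8 + 3) / 4 == 2 concurrent workers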
 535     }
 536     if (ConcGCThreads > 1) {
 537       _conc_workers = new YieldingFlexibleWorkGang("CMS Thread",
 538                                  ConcGCThreads, true);
 539       if (_conc_workers == NULL) {
 540         warning("GC/CMS: _conc_workers allocation failure: "
 541               "forcing -CMSConcurrentMTEnabled");
 542         CMSConcurrentMTEnabled = false;
 543       } else {
 544         _conc_workers->initialize_workers();
 545       }
 546     } else {
 547       CMSConcurrentMTEnabled = false;
 548     }
 549   }
 550   if (!CMSConcurrentMTEnabled) {
 551     ConcGCThreads = 0;
 552   } else {
 553     // Turn off CMSCleanOnEnter optimization temporarily for
 554     // the MT case where it's not fixed yet; see 6178663.
 555     CMSCleanOnEnter = false;
 556   }
 557   assert((_conc_workers != NULL) == (ConcGCThreads > 1),
 558          "Inconsistency");
 559 
 560   // Parallel task queues; these are shared for the
 561   // concurrent and stop-world phases of CMS, but
 562   // are not shared with parallel scavenge (ParNew).
 563   {
 564     uint i;
 565     uint num_queues = (uint) MAX2(ParallelGCThreads, ConcGCThreads);
 566 
 567     if ((CMSParallelRemarkEnabled || CMSConcurrentMTEnabled
 568          || ParallelRefProcEnabled)
 569         && num_queues > 0) {
 570       _task_queues = new OopTaskQueueSet(num_queues);
 571       if (_task_queues == NULL) {
 572         warning("task_queues allocation failure.");
 573         return;
 574       }
 575       _hash_seed = NEW_C_HEAP_ARRAY(int, num_queues, mtGC);
 576       typedef Padded<OopTaskQueue> PaddedOopTaskQueue;
 577       for (i = 0; i < num_queues; i++) {
 578         PaddedOopTaskQueue *q = new PaddedOopTaskQueue();
 579         if (q == NULL) {
 580           warning("work_queue allocation failure.");
 581           return;
 582         }
 583         _task_queues->register_queue(i, q);
 584       }
 585       for (i = 0; i < num_queues; i++) {
 586         _task_queues->queue(i)->initialize();
 587         _hash_seed[i] = 17;  // copied from ParNew
 588       }
 589     }
 590   }
 591 
 592   _cmsGen->init_initiating_occupancy(CMSInitiatingOccupancyFraction, CMSTriggerRatio);
 593 
 594   // CMSBootstrapOccupancy is a percentage (0..100); convert it to a fraction.
 595   _bootstrap_occupancy = ((double)CMSBootstrapOccupancy)/(double)100;
 596 
 597   // Now tell CMS generations the identity of their collector
 598   ConcurrentMarkSweepGeneration::set_collector(this);
 599 
 600   // Create & start a CMS thread for this CMS collector
 601   _cmsThread = ConcurrentMarkSweepThread::start(this);
 602   assert(cmsThread() != NULL, "CMS Thread should have been created");
 603   assert(cmsThread()->collector() == this,
 604          "CMS Thread should refer to this gen");
 605   assert(CGC_lock != NULL, "Where's the CGC_lock?");
 606 
 607   // Support for parallelizing young gen rescan
 608   GenCollectedHeap* gch = GenCollectedHeap::heap();
 609   assert(gch->young_gen()->kind() == Generation::ParNew, "CMS can only be used with ParNew");
 610   _young_gen = (ParNewGeneration*)gch->young_gen();
 611   if (gch->supports_inline_contig_alloc()) {
 612     _top_addr = gch->top_addr();
 613     _end_addr = gch->end_addr();
 614     assert(_young_gen != NULL, "no _young_gen");
 615     _eden_chunk_index = 0;
 616     _eden_chunk_capacity = (_young_gen->max_capacity()+CMSSamplingGrain)/CMSSamplingGrain;
 617     _eden_chunk_array = NEW_C_HEAP_ARRAY(HeapWord*, _eden_chunk_capacity, mtGC);
 618   }
 619 
 620   // Support for parallelizing survivor space rescan
 621   if ((CMSParallelRemarkEnabled && CMSParallelSurvivorRemarkEnabled) || CMSParallelInitialMarkEnabled) {
 622     // The 2*K (default MinTLABSize) is large enough to allow smooth striping of work
 623     // and avoids being linked to unusual MinTLABSize set on the command line
 624     const size_t max_plab_samples = ((DefNewGeneration*)_young_gen)->max_survivor_size() / (2 * K);
 625 
 626     _survivor_plab_array  = NEW_C_HEAP_ARRAY(ChunkArray, ParallelGCThreads, mtGC);
 627     _survivor_chunk_array = NEW_C_HEAP_ARRAY(HeapWord*, 2*max_plab_samples, mtGC);
 628     _cursor               = NEW_C_HEAP_ARRAY(size_t, ParallelGCThreads, mtGC);
 629     _survivor_chunk_capacity = 2*max_plab_samples;
 630     for (uint i = 0; i < ParallelGCThreads; i++) {
 631       HeapWord** vec = NEW_C_HEAP_ARRAY(HeapWord*, max_plab_samples, mtGC);
 632       ChunkArray* cur = ::new (&_survivor_plab_array[i]) ChunkArray(vec, max_plab_samples);
 633       assert(cur->end() == 0, "Should be 0");
 634       assert(cur->array() == vec, "Should be vec");
 635       assert(cur->capacity() == max_plab_samples, "Error");
 636     }
 637   }
 638 
 639   NOT_PRODUCT(_overflow_counter = CMSMarkStackOverflowInterval;)
 640   _gc_counters = new CollectorCounters("CMS", 1);
 641   _completed_initialization = true;
 642   _inter_sweep_timer.start();  // start of time
 643 }
 644 
 645 const char* ConcurrentMarkSweepGeneration::name() const {
 646   return "concurrent mark-sweep generation";
 647 }
 648 void ConcurrentMarkSweepGeneration::update_counters() {
 649   if (UsePerfData) {
 650     _space_counters->update_all();
 651     _gen_counters->update_all();
 652   }
 653 }
 654 
 655 // This is an optimized version of update_counters(). It takes the
 656 // used value as a parameter rather than computing it.
 657 //
 658 void ConcurrentMarkSweepGeneration::update_counters(size_t used) {
 659   if (UsePerfData) {
 660     _space_counters->update_used(used);
 661     _space_counters->update_capacity();
 662     _gen_counters->update_all();
 663   }
 664 }
 665 
 666 void ConcurrentMarkSweepGeneration::print() const {
 667   Generation::print();
 668   cmsSpace()->print();
 669 }
 670 
 671 #ifndef PRODUCT
 672 void ConcurrentMarkSweepGeneration::print_statistics() {
 673   cmsSpace()->printFLCensus(0);
 674 }
 675 #endif
 676 
 677 void ConcurrentMarkSweepGeneration::printOccupancy(const char *s) {
 678   GenCollectedHeap* gch = GenCollectedHeap::heap();
 679   if (PrintGCDetails) {
 680     if (Verbose) {
 681       gclog_or_tty->print("[%d %s-%s: "SIZE_FORMAT"("SIZE_FORMAT")]",
 682         level(), short_name(), s, used(), capacity());
 683     } else {
 684       gclog_or_tty->print("[%d %s-%s: "SIZE_FORMAT"K("SIZE_FORMAT"K)]",
 685         level(), short_name(), s, used() / K, capacity() / K);
 686     }
 687   }
 688   if (Verbose) {
 689     gclog_or_tty->print(" "SIZE_FORMAT"("SIZE_FORMAT")",
 690               gch->used(), gch->capacity());
 691   } else {
 692     gclog_or_tty->print(" "SIZE_FORMAT"K("SIZE_FORMAT"K)",
 693               gch->used() / K, gch->capacity() / K);
 694   }
 695 }
 696 
 697 size_t
 698 ConcurrentMarkSweepGeneration::contiguous_available() const {
 699   // dld proposes an improvement in precision here. If the committed
 700   // part of the space ends in a free block we should add that to
 701   // uncommitted size in the calculation below. Will make this
 702   // change later, staying with the approximation below for the
 703   // time being. -- ysr.
 704   return MAX2(_virtual_space.uncommitted_size(), unsafe_max_alloc_nogc());
 705 }
 706 
 707 size_t
 708 ConcurrentMarkSweepGeneration::unsafe_max_alloc_nogc() const {
 709   return _cmsSpace->max_alloc_in_words() * HeapWordSize;
 710 }
 711 
 712 size_t ConcurrentMarkSweepGeneration::max_available() const {
 713   return free() + _virtual_space.uncommitted_size();
 714 }
 715 
 716 bool ConcurrentMarkSweepGeneration::promotion_attempt_is_safe(size_t max_promotion_in_bytes) const {
 717   size_t available = max_available();
 718   size_t av_promo  = (size_t)gc_stats()->avg_promoted()->padded_average();
 719   bool   res = (available >= av_promo) || (available >= max_promotion_in_bytes);
 720   if (Verbose && PrintGCDetails) {
 721     gclog_or_tty->print_cr(
 722       "CMS: promo attempt is%s safe: available("SIZE_FORMAT") %s av_promo("SIZE_FORMAT"),"
 723       "max_promo("SIZE_FORMAT")",
 724       res? "":" not", available, res? ">=":"<",
 725       av_promo, max_promotion_in_bytes);
 726   }
 727   return res;
 728 }
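
     // Illustrative numbers for the check above (assumed, not from any particular
     // run): with 300M available (free plus uncommitted), a padded promotion
     // average of 80M and a worst-case young-gen promotion of 512M, the attempt
     // is considered safe because 300M >= 80M, even though 300M < 512M.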
 729 
 730 // At a promotion failure, dump information on block layout in the heap
 731 // (CMS old generation).
 732 void ConcurrentMarkSweepGeneration::promotion_failure_occurred() {
 733   if (CMSDumpAtPromotionFailure) {
 734     cmsSpace()->dump_at_safepoint_with_locks(collector(), gclog_or_tty);
 735   }
 736 }
 737 
 738 void ConcurrentMarkSweepGeneration::reset_after_compaction() {
 739   // Clear the promotion information.  These pointers can be adjusted
 740   // along with all the other pointers into the heap but
 741   // compaction is expected to be a rare event with
 742   // a heap using cms so don't do it without seeing the need.
 743   for (uint i = 0; i < ParallelGCThreads; i++) {
 744     _par_gc_thread_states[i]->promo.reset();
 745   }
 746 }
 747 
 748 void ConcurrentMarkSweepGeneration::compute_new_size() {
 749   assert_locked_or_safepoint(Heap_lock);
 750 
 751   // If incremental collection failed, we just want to expand
 752   // to the limit.
 753   if (incremental_collection_failed()) {
 754     clear_incremental_collection_failed();
 755     grow_to_reserved();
 756     return;
 757   }
 758 
 759   // The heap has been compacted but not reset yet.
 760   // Any metric such as free() or used() will be incorrect.
 761 
 762   CardGeneration::compute_new_size();
 763 
 764   // Reset again after a possible resizing
 765   if (did_compact()) {
 766     cmsSpace()->reset_after_compaction();
 767   }
 768 }
 769 
 770 void ConcurrentMarkSweepGeneration::compute_new_size_free_list() {
 771   assert_locked_or_safepoint(Heap_lock);
 772 
 773   // If incremental collection failed, we just want to expand
 774   // to the limit.
 775   if (incremental_collection_failed()) {
 776     clear_incremental_collection_failed();
 777     grow_to_reserved();
 778     return;
 779   }
 780 
 781   double free_percentage = ((double) free()) / capacity();
 782   double desired_free_percentage = (double) MinHeapFreeRatio / 100;
 783   double maximum_free_percentage = (double) MaxHeapFreeRatio / 100;
 784 
 785   // compute expansion delta needed for reaching desired free percentage
 786   if (free_percentage < desired_free_percentage) {
 787     size_t desired_capacity = (size_t)(used() / ((double) 1 - desired_free_percentage));
 788     assert(desired_capacity >= capacity(), "invalid expansion size");
 789     size_t expand_bytes = MAX2(desired_capacity - capacity(), MinHeapDeltaBytes);
 790     if (PrintGCDetails && Verbose) {
 791       size_t desired_capacity = (size_t)(used() / ((double) 1 - desired_free_percentage));
 792       gclog_or_tty->print_cr("\nFrom compute_new_size: ");
 793       gclog_or_tty->print_cr("  Free fraction %f", free_percentage);
 794       gclog_or_tty->print_cr("  Desired free fraction %f",
 795         desired_free_percentage);
 796       gclog_or_tty->print_cr("  Maximum free fraction %f",
 797         maximum_free_percentage);
 798       gclog_or_tty->print_cr("  Capacity "SIZE_FORMAT, capacity()/1000);
 799       gclog_or_tty->print_cr("  Desired capacity "SIZE_FORMAT,
 800         desired_capacity/1000);
 801       int prev_level = level() - 1;
 802       if (prev_level >= 0) {
 803         size_t prev_size = 0;
 804         GenCollectedHeap* gch = GenCollectedHeap::heap();
 805         Generation* prev_gen = gch->young_gen();
 806         prev_size = prev_gen->capacity();
 807         gclog_or_tty->print_cr("  Younger gen size "SIZE_FORMAT,
 808           prev_size/1000);
 809       }
 810       gclog_or_tty->print_cr("  unsafe_max_alloc_nogc "SIZE_FORMAT,
 811         unsafe_max_alloc_nogc()/1000);
 812       gclog_or_tty->print_cr("  contiguous available "SIZE_FORMAT,
 813         contiguous_available()/1000);
 814       gclog_or_tty->print_cr("  Expand by "SIZE_FORMAT" (bytes)",
 815         expand_bytes);
 816     }
 817     // safe if expansion fails
 818     expand_for_gc_cause(expand_bytes, 0, CMSExpansionCause::_satisfy_free_ratio);
 819     if (PrintGCDetails && Verbose) {
 820       gclog_or_tty->print_cr("  Expanded free fraction %f",
 821         ((double) free()) / capacity());
 822     }
 823   } else {
 824     size_t desired_capacity = (size_t)(used() / ((double) 1 - desired_free_percentage));
 825     assert(desired_capacity <= capacity(), "invalid expansion size");
 826     size_t shrink_bytes = capacity() - desired_capacity;
 827     // Don't shrink unless the delta is greater than the minimum shrink we want
 828     if (shrink_bytes >= MinHeapDeltaBytes) {
 829       shrink_free_list_by(shrink_bytes);
 830     }
 831   }
 832 }
 833 
 834 Mutex* ConcurrentMarkSweepGeneration::freelistLock() const {
 835   return cmsSpace()->freelistLock();
 836 }
 837 
 838 HeapWord* ConcurrentMarkSweepGeneration::allocate(size_t size,
 839                                                   bool   tlab) {
 840   CMSSynchronousYieldRequest yr;
 841   MutexLockerEx x(freelistLock(),
 842                   Mutex::_no_safepoint_check_flag);
 843   return have_lock_and_allocate(size, tlab);
 844 }
 845 
 846 HeapWord* ConcurrentMarkSweepGeneration::have_lock_and_allocate(size_t size,
 847                                                   bool   tlab /* ignored */) {
 848   assert_lock_strong(freelistLock());
 849   size_t adjustedSize = CompactibleFreeListSpace::adjustObjectSize(size);
 850   HeapWord* res = cmsSpace()->allocate(adjustedSize);
 851   // Allocate the object live (grey) if the background collector has
 852   // started marking. This is necessary because the marker may
 853   // have passed this address and consequently this object will
 854   // not otherwise be greyed and would be incorrectly swept up.
 855   // Note that if this object contains references, the writing
 856   // of those references will dirty the card containing this object
 857   // allowing the object to be blackened (and its references scanned)
 858   // either during a preclean phase or at the final checkpoint.
 859   if (res != NULL) {
 860     // We may block here with an uninitialized object with
 861     // its mark-bit or P-bits not yet set. Such objects need
 862     // to be safely navigable by block_start().
 863     assert(oop(res)->klass_or_null() == NULL, "Object should be uninitialized here.");
 864     assert(!((FreeChunk*)res)->is_free(), "Error, block will look free but show wrong size");
 865     collector()->direct_allocated(res, adjustedSize);
 866     _direct_allocated_words += adjustedSize;
 867     // allocation counters
 868     NOT_PRODUCT(
 869       _numObjectsAllocated++;
 870       _numWordsAllocated += (int)adjustedSize;
 871     )
 872   }
 873   return res;
 874 }
 875 
 876 // In the case of direct allocation by mutators in a generation that
 877 // is being concurrently collected, the object must be allocated
 878 // live (grey) if the background collector has started marking.
 879 // This is necessary because the marker may
 880 // have passed this address and consequently this object will
 881 // not otherwise be greyed and would be incorrectly swept up.
 882 // Note that if this object contains references, the writing
 883 // of those references will dirty the card containing this object
 884 // allowing the object to be blackened (and its references scanned)
 885 // either during a preclean phase or at the final checkpoint.
 886 void CMSCollector::direct_allocated(HeapWord* start, size_t size) {
 887   assert(_markBitMap.covers(start, size), "Out of bounds");
 888   if (_collectorState >= Marking) {
 889     MutexLockerEx y(_markBitMap.lock(),
 890                     Mutex::_no_safepoint_check_flag);
 891     // [see comments preceding SweepClosure::do_blk() below for details]
 892     //
 893     // Can the P-bits be deleted now?  JJJ
 894     //
 895     // 1. need to mark the object as live so it isn't collected
 896     // 2. need to mark the 2nd bit to indicate the object may be uninitialized
 897     // 3. need to mark the end of the object so marking, precleaning or sweeping
 898     //    can skip over uninitialized or unparsable objects. An allocated
 899     //    object is considered uninitialized for our purposes as long as
 900     //    its klass word is NULL.  All old gen objects are parsable
 901     //    as soon as they are initialized.
 902     _markBitMap.mark(start);          // object is live
 903     _markBitMap.mark(start + 1);      // object is potentially uninitialized?
 904     _markBitMap.mark(start + size - 1);
 905                                       // mark end of object
 906   }
 907   // check that oop looks uninitialized
 908   assert(oop(start)->klass_or_null() == NULL, "_klass should be NULL");
 909 }
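
     // The marks at 'start' and 'start + 1' set above are the so-called
     // Printezis bits (P-bits): a scanner that finds a block with a NULL klass
     // word but with the bit at 'start + 1' marked can recover the block size
     // from the end-of-object mark, roughly
     //   size = pointer_delta(_markBitMap.getNextMarkedWordAddress(start + 2) + 1, start)
     // (see block_size_using_printezis_bits() further below in this file).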
 910 
 911 void CMSCollector::promoted(bool par, HeapWord* start,
 912                             bool is_obj_array, size_t obj_size) {
 913   assert(_markBitMap.covers(start), "Out of bounds");
 914   // See comment in direct_allocated() about when objects should
 915   // be allocated live.
 916   if (_collectorState >= Marking) {
 917     // we already hold the marking bit map lock, taken in
 918     // the prologue
 919     if (par) {
 920       _markBitMap.par_mark(start);
 921     } else {
 922       _markBitMap.mark(start);
 923     }
 924     // We don't need to mark the object as uninitialized (as
 925     // in direct_allocated above) because this is being done with the
 926     // world stopped and the object will be initialized by the
 927     // time the marking, precleaning or sweeping get to look at it.
 928     // But see the code for copying objects into the CMS generation,
 929     // where we need to ensure that concurrent readers of the
 930     // block offset table are able to safely navigate a block that
 931     // is in flux from being free to being allocated (and in
 932     // transition while being copied into) and subsequently
 933     // becoming a bona-fide object when the copy/promotion is complete.
 934     assert(SafepointSynchronize::is_at_safepoint(),
 935            "expect promotion only at safepoints");
 936 
 937     if (_collectorState < Sweeping) {
 938       // Mark the appropriate cards in the modUnionTable, so that
 939       // this object gets scanned before the sweep. If this is
 940       // not done, CMS generation references in the object might
 941       // not get marked.
 942       // For the case of arrays, which are otherwise precisely
 943       // marked, we need to dirty the entire array, not just its head.
 944       if (is_obj_array) {
 945         // The [par_]mark_range() method expects mr.end() below to
 946         // be aligned to the granularity of a bit's representation
 947         // in the heap. In the case of the MUT below, that's a
 948         // card size.
 949         MemRegion mr(start,
 950                      (HeapWord*)round_to((intptr_t)(start + obj_size),
 951                         CardTableModRefBS::card_size /* bytes */));
 952         if (par) {
 953           _modUnionTable.par_mark_range(mr);
 954         } else {
 955           _modUnionTable.mark_range(mr);
 956         }
 957       } else {  // not an obj array; we can just mark the head
 958         if (par) {
 959           _modUnionTable.par_mark(start);
 960         } else {
 961           _modUnionTable.mark(start);
 962         }
 963       }
 964     }
 965   }
 966 }
 967 
 968 oop ConcurrentMarkSweepGeneration::promote(oop obj, size_t obj_size) {
 969   assert(obj_size == (size_t)obj->size(), "bad obj_size passed in");
 970   // allocate, copy and if necessary update promoinfo --
 971   // delegate to underlying space.
 972   assert_lock_strong(freelistLock());
 973 
 974 #ifndef PRODUCT
 975   if (GenCollectedHeap::heap()->promotion_should_fail()) {
 976     return NULL;
 977   }
 978 #endif  // #ifndef PRODUCT
 979 
 980   oop res = _cmsSpace->promote(obj, obj_size);
 981   if (res == NULL) {
 982     // expand and retry
 983     size_t s = _cmsSpace->expansionSpaceRequired(obj_size);  // HeapWords
 984     expand_for_gc_cause(s*HeapWordSize, MinHeapDeltaBytes, CMSExpansionCause::_satisfy_promotion);
 985     // Since this is the old generation, we don't try to promote
 986     // into a more senior generation.
 987     res = _cmsSpace->promote(obj, obj_size);
 988   }
 989   if (res != NULL) {
 990     // See comment in allocate() about when objects should
 991     // be allocated live.
 992     assert(obj->is_oop(), "Will dereference klass pointer below");
 993     collector()->promoted(false,           // Not parallel
 994                           (HeapWord*)res, obj->is_objArray(), obj_size);
 995     // promotion counters
 996     NOT_PRODUCT(
 997       _numObjectsPromoted++;
 998       _numWordsPromoted +=
 999         (int)(CompactibleFreeListSpace::adjustObjectSize(obj->size()));
1000     )
1001   }
1002   return res;
1003 }
1004 
1005 
1006 // IMPORTANT: Notes on object size recognition in CMS.
1007 // ---------------------------------------------------
1008 // A block of storage in the CMS generation is always in
1009 // one of three states. A free block (FREE), an allocated
1010 // object (OBJECT) whose size() method reports the correct size,
1011 // and an intermediate state (TRANSIENT) in which its size cannot
1012 // be accurately determined.
1013 // STATE IDENTIFICATION:   (32 bit and 64 bit w/o COOPS)
1014 // -----------------------------------------------------
1015 // FREE:      klass_word & 1 == 1; mark_word holds block size
1016 //
1017 // OBJECT:    klass_word installed; klass_word != 0 && klass_word & 1 == 0;
1018 //            obj->size() computes correct size
1019 //
1020 // TRANSIENT: klass_word == 0; size is indeterminate until we become an OBJECT
1021 //
1022 // STATE IDENTIFICATION: (64 bit+COOPS)
1023 // ------------------------------------
1024 // FREE:      mark_word & CMS_FREE_BIT == 1; mark_word & ~CMS_FREE_BIT gives block_size
1025 //
1026 // OBJECT:    klass_word installed; klass_word != 0;
1027 //            obj->size() computes correct size
1028 //
1029 // TRANSIENT: klass_word == 0; size is indeterminate until we become an OBJECT
1030 //
1031 //
1032 // STATE TRANSITION DIAGRAM
1033 //
1034 //        mut / parnew                     mut  /  parnew
1035 // FREE --------------------> TRANSIENT ---------------------> OBJECT --|
1036 //  ^                                                                   |
1037 //  |------------------------ DEAD <------------------------------------|
1038 //         sweep                            mut
1039 //
1040 // While a block is in TRANSIENT state its size cannot be determined
1041 // so readers will either need to come back later or stall until
1042 // the size can be determined. Note that for the case of direct
1043 // allocation, P-bits, when available, may be used to determine the
1044 // size of an object that may not yet have been initialized.
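     //
     // A reader probing an address p therefore does roughly the following (a
     // sketch only, not code from this file):
     //
     //   if (FreeChunk::indicatesFreeChunk(p))        -> FREE: size from the chunk
     //   else if (oop(p)->klass_or_null() != NULL)    -> OBJECT: size = oop(p)->size()
     //   else                                         -> TRANSIENT: retry later, or
     //                                                  use the P-bits when available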
1045 
1046 // Things to support parallel young-gen collection.
1047 oop
1048 ConcurrentMarkSweepGeneration::par_promote(int thread_num,
1049                                            oop old, markOop m,
1050                                            size_t word_sz) {
1051 #ifndef PRODUCT
1052   if (GenCollectedHeap::heap()->promotion_should_fail()) {
1053     return NULL;
1054   }
1055 #endif  // #ifndef PRODUCT
1056 
1057   CMSParGCThreadState* ps = _par_gc_thread_states[thread_num];
1058   PromotionInfo* promoInfo = &ps->promo;
1059   // if we are tracking promotions, then first ensure space for
1060   // promotion (including spooling space for saving header if necessary).
1061   // then allocate and copy, then track promoted info if needed.
1062   // When tracking (see PromotionInfo::track()), the mark word may
1063   // be displaced and in this case restoration of the mark word
1064   // occurs in the (oop_since_save_marks_)iterate phase.
1065   if (promoInfo->tracking() && !promoInfo->ensure_spooling_space()) {
1066     // Out of space for allocating spooling buffers;
1067     // try expanding and allocating spooling buffers.
1068     if (!expand_and_ensure_spooling_space(promoInfo)) {
1069       return NULL;
1070     }
1071   }
1072   assert(promoInfo->has_spooling_space(), "Control point invariant");
1073   const size_t alloc_sz = CompactibleFreeListSpace::adjustObjectSize(word_sz);
1074   HeapWord* obj_ptr = ps->lab.alloc(alloc_sz);
1075   if (obj_ptr == NULL) {
1076      obj_ptr = expand_and_par_lab_allocate(ps, alloc_sz);
1077      if (obj_ptr == NULL) {
1078        return NULL;
1079      }
1080   }
1081   oop obj = oop(obj_ptr);
1082   OrderAccess::storestore();
1083   assert(obj->klass_or_null() == NULL, "Object should be uninitialized here.");
1084   assert(!((FreeChunk*)obj_ptr)->is_free(), "Error, block will look free but show wrong size");
1085   // IMPORTANT: See note on object initialization for CMS above.
1086   // Otherwise, copy the object.  Here we must be careful to insert the
1087   // klass pointer last, since this marks the block as an allocated object.
1088   // Except with compressed oops it's the mark word.
1089   HeapWord* old_ptr = (HeapWord*)old;
1090   // Restore the mark word copied above.
1091   obj->set_mark(m);
1092   assert(obj->klass_or_null() == NULL, "Object should be uninitialized here.");
1093   assert(!((FreeChunk*)obj_ptr)->is_free(), "Error, block will look free but show wrong size");
1094   OrderAccess::storestore();
1095 
1096   if (UseCompressedClassPointers) {
1097     // Copy gap missed by (aligned) header size calculation below
1098     obj->set_klass_gap(old->klass_gap());
1099   }
1100   if (word_sz > (size_t)oopDesc::header_size()) {
1101     Copy::aligned_disjoint_words(old_ptr + oopDesc::header_size(),
1102                                  obj_ptr + oopDesc::header_size(),
1103                                  word_sz - oopDesc::header_size());
1104   }
1105 
1106   // Now we can track the promoted object, if necessary.  We take care
1107   // to delay the transition from uninitialized to full object
1108   // (i.e., insertion of klass pointer) until after, so that it
1109   // atomically becomes a promoted object.
1110   if (promoInfo->tracking()) {
1111     promoInfo->track((PromotedObject*)obj, old->klass());
1112   }
1113   assert(obj->klass_or_null() == NULL, "Object should be uninitialized here.");
1114   assert(!((FreeChunk*)obj_ptr)->is_free(), "Error, block will look free but show wrong size");
1115   assert(old->is_oop(), "Will use and dereference old klass ptr below");
1116 
1117   // Finally, install the klass pointer (this should be volatile).
1118   OrderAccess::storestore();
1119   obj->set_klass(old->klass());
1120   // We should now be able to calculate the right size for this object
1121   assert(obj->is_oop() && obj->size() == (int)word_sz, "Error, incorrect size computed for promoted object");
1122 
1123   collector()->promoted(true,          // parallel
1124                         obj_ptr, old->is_objArray(), word_sz);
1125 
1126   NOT_PRODUCT(
1127     Atomic::inc_ptr(&_numObjectsPromoted);
1128     Atomic::add_ptr(alloc_sz, &_numWordsPromoted);
1129   )
1130 
1131   return obj;
1132 }
1133 
1134 void
1135 ConcurrentMarkSweepGeneration::
1136 par_promote_alloc_done(int thread_num) {
1137   CMSParGCThreadState* ps = _par_gc_thread_states[thread_num];
1138   ps->lab.retire(thread_num);
1139 }
1140 
1141 void
1142 ConcurrentMarkSweepGeneration::
1143 par_oop_since_save_marks_iterate_done(int thread_num) {
1144   CMSParGCThreadState* ps = _par_gc_thread_states[thread_num];
1145   ParScanWithoutBarrierClosure* dummy_cl = NULL;
1146   ps->promo.promoted_oops_iterate_nv(dummy_cl);
1147 }
1148 
1149 bool ConcurrentMarkSweepGeneration::should_collect(bool   full,
1150                                                    size_t size,
1151                                                    bool   tlab)
1152 {
1153   // We allow a STW collection only if a full
1154   // collection was requested.
1155   return full || should_allocate(size, tlab); // FIX ME !!!
1156   // This and promotion failure handling are connected at the
1157   // hip and should be fixed by untying them.
1158 }
1159 
1160 bool CMSCollector::shouldConcurrentCollect() {
1161   if (_full_gc_requested) {
1162     if (Verbose && PrintGCDetails) {
1163       gclog_or_tty->print_cr("CMSCollector: collect because of explicit "
1164                              "gc request (or gc_locker)");
1165     }
1166     return true;
1167   }
1168 
1169   FreelistLocker x(this);
1170   // ------------------------------------------------------------------
1171   // Print out lots of information which affects the initiation of
1172   // a collection.
1173   if (PrintCMSInitiationStatistics && stats().valid()) {
1174     gclog_or_tty->print("CMSCollector shouldConcurrentCollect: ");
1175     gclog_or_tty->stamp();
1176     gclog_or_tty->cr();
1177     stats().print_on(gclog_or_tty);
1178     gclog_or_tty->print_cr("time_until_cms_gen_full %3.7f",
1179       stats().time_until_cms_gen_full());
1180     gclog_or_tty->print_cr("free="SIZE_FORMAT, _cmsGen->free());
1181     gclog_or_tty->print_cr("contiguous_available="SIZE_FORMAT,
1182                            _cmsGen->contiguous_available());
1183     gclog_or_tty->print_cr("promotion_rate=%g", stats().promotion_rate());
1184     gclog_or_tty->print_cr("cms_allocation_rate=%g", stats().cms_allocation_rate());
1185     gclog_or_tty->print_cr("occupancy=%3.7f", _cmsGen->occupancy());
1186     gclog_or_tty->print_cr("initiatingOccupancy=%3.7f", _cmsGen->initiating_occupancy());
1187     gclog_or_tty->print_cr("cms_time_since_begin=%3.7f", stats().cms_time_since_begin());
1188     gclog_or_tty->print_cr("cms_time_since_end=%3.7f", stats().cms_time_since_end());
1189     gclog_or_tty->print_cr("metadata initialized %d",
1190       MetaspaceGC::should_concurrent_collect());
1191   }
1192   // ------------------------------------------------------------------
1193 
1194   // If the estimated time to complete a cms collection (cms_duration())
1195   // is less than the estimated time remaining until the cms generation
1196   // is full, start a collection.
1197   if (!UseCMSInitiatingOccupancyOnly) {
1198     if (stats().valid()) {
1199       if (stats().time_until_cms_start() == 0.0) {
1200         return true;
1201       }
1202     } else {
1203       // We want to conservatively collect somewhat early in order
1204       // to try and "bootstrap" our CMS/promotion statistics;
1205       // this branch will not fire after the first successful CMS
1206       // collection because the stats should then be valid.
1207       if (_cmsGen->occupancy() >= _bootstrap_occupancy) {
1208         if (Verbose && PrintGCDetails) {
1209           gclog_or_tty->print_cr(
1210             " CMSCollector: collect for bootstrapping statistics:"
1211             " occupancy = %f, boot occupancy = %f", _cmsGen->occupancy(),
1212             _bootstrap_occupancy);
1213         }
1214         return true;
1215       }
1216     }
1217   }
1218 
1219   // Otherwise, we start a collection cycle if
1220   // the old gen wants a collection cycle started, using
1221   // an appropriate criterion for making this decision.
1222   // XXX We need to make sure that the gen expansion
1223   // criterion dovetails well with this. XXX NEED TO FIX THIS
1224   if (_cmsGen->should_concurrent_collect()) {
1225     if (Verbose && PrintGCDetails) {
1226       gclog_or_tty->print_cr("CMS old gen initiated");
1227     }
1228     return true;
1229   }
1230 
1231   // We start a collection if we believe an incremental collection may fail;
1232   // this is not likely to be productive in practice because it's probably too
1233   // late anyway.
1234   GenCollectedHeap* gch = GenCollectedHeap::heap();
1235   assert(gch->collector_policy()->is_generation_policy(),
1236          "You may want to check the correctness of the following");
1237   if (gch->incremental_collection_will_fail(true /* consult_young */)) {
1238     if (Verbose && PrintGCDetails) {
1239       gclog_or_tty->print("CMSCollector: collect because incremental collection will fail ");
1240     }
1241     return true;
1242   }
1243 
1244   if (MetaspaceGC::should_concurrent_collect()) {
1245     if (Verbose && PrintGCDetails) {
1246       gclog_or_tty->print("CMSCollector: collect for metadata allocation ");
1247     }
1248     return true;
1249   }
1250 
1251   // CMSTriggerInterval starts a CMS cycle if enough time has passed.
1252   if (CMSTriggerInterval >= 0) {
1253     if (CMSTriggerInterval == 0) {
1254       // Trigger always
1255       return true;
1256     }
1257 
1258     // Check the CMS time since begin (we do not check the stats validity
1259     // as we want to be able to trigger the first CMS cycle as well)
1260     if (stats().cms_time_since_begin() >= (CMSTriggerInterval / ((double) MILLIUNITS))) {
1261       if (Verbose && PrintGCDetails) {
1262         if (stats().valid()) {
1263           gclog_or_tty->print_cr("CMSCollector: collect because of trigger interval (time since last begin %3.7f secs)",
1264                                  stats().cms_time_since_begin());
1265         } else {
1266           gclog_or_tty->print_cr("CMSCollector: collect because of trigger interval (first collection)");
1267         }
1268       }
1269       return true;
1270     }
1271   }
1272 
1273   return false;
1274 }
1275 
1276 void CMSCollector::set_did_compact(bool v) { _cmsGen->set_did_compact(v); }
1277 
1278 // Clear _expansion_cause fields of constituent generations
1279 void CMSCollector::clear_expansion_cause() {
1280   _cmsGen->clear_expansion_cause();
1281 }
1282 
// We should be conservative in starting a collection cycle.  Starting
// too eagerly runs the risk of collecting too often in the
// extreme.  Collecting too rarely falls back on full collections,
// which works, even if not optimal in terms of concurrent work.
// As a workaround for collecting too eagerly, use the flag
// UseCMSInitiatingOccupancyOnly.  This also has the advantage of
// giving the user an easily understandable way of controlling the
// collections.
1291 // We want to start a new collection cycle if any of the following
1292 // conditions hold:
1293 // . our current occupancy exceeds the configured initiating occupancy
1294 //   for this generation, or
1295 // . we recently needed to expand this space and have not, since that
1296 //   expansion, done a collection of this generation, or
1297 // . the underlying space believes that it may be a good idea to initiate
1298 //   a concurrent collection (this may be based on criteria such as the
1299 //   following: the space uses linear allocation and linear allocation is
1300 //   going to fail, or there is believed to be excessive fragmentation in
1301 //   the generation, etc... or ...
1302 // [.(currently done by CMSCollector::shouldConcurrentCollect() only for
1303 //   the case of the old generation; see CR 6543076):
1304 //   we may be approaching a point at which allocation requests may fail because
1305 //   we will be out of sufficient free space given allocation rate estimates.]
1306 bool ConcurrentMarkSweepGeneration::should_concurrent_collect() const {
1307 
1308   assert_lock_strong(freelistLock());
1309   if (occupancy() > initiating_occupancy()) {
1310     if (PrintGCDetails && Verbose) {
1311       gclog_or_tty->print(" %s: collect because of occupancy %f / %f  ",
1312         short_name(), occupancy(), initiating_occupancy());
1313     }
1314     return true;
1315   }
1316   if (UseCMSInitiatingOccupancyOnly) {
1317     return false;
1318   }
1319   if (expansion_cause() == CMSExpansionCause::_satisfy_allocation) {
1320     if (PrintGCDetails && Verbose) {
1321       gclog_or_tty->print(" %s: collect because expanded for allocation ",
1322         short_name());
1323     }
1324     return true;
1325   }
1326   if (_cmsSpace->should_concurrent_collect()) {
1327     if (PrintGCDetails && Verbose) {
1328       gclog_or_tty->print(" %s: collect because cmsSpace says so ",
1329         short_name());
1330     }
1331     return true;
1332   }
1333   return false;
1334 }
1335 
1336 void ConcurrentMarkSweepGeneration::collect(bool   full,
1337                                             bool   clear_all_soft_refs,
1338                                             size_t size,
1339                                             bool   tlab)
1340 {
1341   collector()->collect(full, clear_all_soft_refs, size, tlab);
1342 }
1343 
1344 void CMSCollector::collect(bool   full,
1345                            bool   clear_all_soft_refs,
1346                            size_t size,
1347                            bool   tlab)
1348 {
1349   // The following "if" branch is present for defensive reasons.
1350   // In the current uses of this interface, it can be replaced with:
  // assert(!GC_locker::is_active(), "Can't be called otherwise");
1352   // But I am not placing that assert here to allow future
1353   // generality in invoking this interface.
1354   if (GC_locker::is_active()) {
1355     // A consistency test for GC_locker
1356     assert(GC_locker::needs_gc(), "Should have been set already");
1357     // Skip this foreground collection, instead
1358     // expanding the heap if necessary.
1359     // Need the free list locks for the call to free() in compute_new_size()
1360     compute_new_size();
1361     return;
1362   }
1363   acquire_control_and_collect(full, clear_all_soft_refs);
1364 }
1365 
1366 void CMSCollector::request_full_gc(unsigned int full_gc_count, GCCause::Cause cause) {
1367   GenCollectedHeap* gch = GenCollectedHeap::heap();
1368   unsigned int gc_count = gch->total_full_collections();
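  // If a full collection has already started since the caller sampled
  // full_gc_count, the request is stale and no new request is needed.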
1369   if (gc_count == full_gc_count) {
1370     MutexLockerEx y(CGC_lock, Mutex::_no_safepoint_check_flag);
1371     _full_gc_requested = true;
1372     _full_gc_cause = cause;
1373     CGC_lock->notify();   // nudge CMS thread
1374   } else {
1375     assert(gc_count > full_gc_count, "Error: causal loop");
1376   }
1377 }
1378 
1379 bool CMSCollector::is_external_interruption() {
1380   GCCause::Cause cause = GenCollectedHeap::heap()->gc_cause();
1381   return GCCause::is_user_requested_gc(cause) ||
1382          GCCause::is_serviceability_requested_gc(cause);
1383 }
1384 
1385 void CMSCollector::report_concurrent_mode_interruption() {
1386   if (is_external_interruption()) {
1387     if (PrintGCDetails) {
1388       gclog_or_tty->print(" (concurrent mode interrupted)");
1389     }
1390   } else {
1391     if (PrintGCDetails) {
1392       gclog_or_tty->print(" (concurrent mode failure)");
1393     }
1394     _gc_tracer_cm->report_concurrent_mode_failure();
1395   }
1396 }
1397 
1398 
1399 // The foreground and background collectors need to coordinate in order
1400 // to make sure that they do not mutually interfere with CMS collections.
1401 // When a background collection is active,
1402 // the foreground collector may need to take over (preempt) and
1403 // synchronously complete an ongoing collection. Depending on the
1404 // frequency of the background collections and the heap usage
// of the application, this preemption can be rare or frequent.
1406 // There are only certain
// points in the background collection at which the "collection-baton"
1408 // can be passed to the foreground collector.
1409 //
1410 // The foreground collector will wait for the baton before
1411 // starting any part of the collection.  The foreground collector
1412 // will only wait at one location.
1413 //
1414 // The background collector will yield the baton before starting a new
1415 // phase of the collection (e.g., before initial marking, marking from roots,
1416 // precleaning, final re-mark, sweep etc.)  This is normally done at the head
1417 // of the loop which switches the phases. The background collector does some
1418 // of the phases (initial mark, final re-mark) with the world stopped.
1419 // Because of locking involved in stopping the world,
1420 // the foreground collector should not block waiting for the background
1421 // collector when it is doing a stop-the-world phase.  The background
1422 // collector will yield the baton at an additional point just before
1423 // it enters a stop-the-world phase.  Once the world is stopped, the
1424 // background collector checks the phase of the collection.  If the
1425 // phase has not changed, it proceeds with the collection.  If the
1426 // phase has changed, it skips that phase of the collection.  See
1427 // the comments on the use of the Heap_lock in collect_in_background().
1428 //
// Variables used in baton passing.
1430 //   _foregroundGCIsActive - Set to true by the foreground collector when
1431 //      it wants the baton.  The foreground clears it when it has finished
1432 //      the collection.
1433 //   _foregroundGCShouldWait - Set to true by the background collector
//      when it is running.  The foreground collector waits while
1435 //      _foregroundGCShouldWait is true.
1436 //  CGC_lock - monitor used to protect access to the above variables
1437 //      and to notify the foreground and background collectors.
1438 //  _collectorState - current state of the CMS collection.
1439 //
1440 // The foreground collector
1441 //   acquires the CGC_lock
1442 //   sets _foregroundGCIsActive
1443 //   waits on the CGC_lock for _foregroundGCShouldWait to be false
1444 //     various locks acquired in preparation for the collection
1445 //     are released so as not to block the background collector
1446 //     that is in the midst of a collection
1447 //   proceeds with the collection
1448 //   clears _foregroundGCIsActive
1449 //   returns
1450 //
1451 // The background collector in a loop iterating on the phases of the
1452 //      collection
1453 //   acquires the CGC_lock
1454 //   sets _foregroundGCShouldWait
1455 //   if _foregroundGCIsActive is set
//     clears _foregroundGCShouldWait, notifies CGC_lock
//     waits on CGC_lock for _foregroundGCIsActive to become false
1458 //     and exits the loop.
1459 //   otherwise
1460 //     proceed with that phase of the collection
1461 //     if the phase is a stop-the-world phase,
1462 //       yield the baton once more just before enqueueing
1463 //       the stop-world CMS operation (executed by the VM thread).
1464 //   returns after all phases of the collection are done
1465 //
1466 
1467 void CMSCollector::acquire_control_and_collect(bool full,
1468         bool clear_all_soft_refs) {
1469   assert(SafepointSynchronize::is_at_safepoint(), "should be at safepoint");
1470   assert(!Thread::current()->is_ConcurrentGC_thread(),
1471          "shouldn't try to acquire control from self!");
1472 
1473   // Start the protocol for acquiring control of the
1474   // collection from the background collector (aka CMS thread).
1475   assert(ConcurrentMarkSweepThread::vm_thread_has_cms_token(),
1476          "VM thread should have CMS token");
1477   // Remember the possibly interrupted state of an ongoing
1478   // concurrent collection
1479   CollectorState first_state = _collectorState;
1480 
1481   // Signal to a possibly ongoing concurrent collection that
1482   // we want to do a foreground collection.
1483   _foregroundGCIsActive = true;
1484 
  // Release locks and wait for a notify from the background collector;
  // releasing the locks is only necessary for phases which
  // do yields to improve the granularity of the collection.
1488   assert_lock_strong(bitMapLock());
1489   // We need to lock the Free list lock for the space that we are
1490   // currently collecting.
1491   assert(haveFreelistLocks(), "Must be holding free list locks");
1492   bitMapLock()->unlock();
1493   releaseFreelistLocks();
1494   {
1495     MutexLockerEx x(CGC_lock, Mutex::_no_safepoint_check_flag);
1496     if (_foregroundGCShouldWait) {
1497       // We are going to be waiting for action for the CMS thread;
1498       // it had better not be gone (for instance at shutdown)!
1499       assert(ConcurrentMarkSweepThread::cmst() != NULL,
1500              "CMS thread must be running");
1501       // Wait here until the background collector gives us the go-ahead
1502       ConcurrentMarkSweepThread::clear_CMS_flag(
1503         ConcurrentMarkSweepThread::CMS_vm_has_token);  // release token
1504       // Get a possibly blocked CMS thread going:
1505       //   Note that we set _foregroundGCIsActive true above,
1506       //   without protection of the CGC_lock.
1507       CGC_lock->notify();
1508       assert(!ConcurrentMarkSweepThread::vm_thread_wants_cms_token(),
1509              "Possible deadlock");
1510       while (_foregroundGCShouldWait) {
1511         // wait for notification
1512         CGC_lock->wait(Mutex::_no_safepoint_check_flag);
1513         // Possibility of delay/starvation here, since CMS token does
        // not know to give priority to VM thread? Actually, I think
1515         // there wouldn't be any delay/starvation, but the proof of
1516         // that "fact" (?) appears non-trivial. XXX 20011219YSR
1517       }
1518       ConcurrentMarkSweepThread::set_CMS_flag(
1519         ConcurrentMarkSweepThread::CMS_vm_has_token);
1520     }
1521   }
1522   // The CMS_token is already held.  Get back the other locks.
1523   assert(ConcurrentMarkSweepThread::vm_thread_has_cms_token(),
1524          "VM thread should have CMS token");
1525   getFreelistLocks();
1526   bitMapLock()->lock_without_safepoint_check();
1527   if (TraceCMSState) {
1528     gclog_or_tty->print_cr("CMS foreground collector has asked for control "
1529       INTPTR_FORMAT " with first state %d", p2i(Thread::current()), first_state);
1530     gclog_or_tty->print_cr("    gets control with state %d", _collectorState);
1531   }
1532 
1533   // Inform cms gen if this was due to partial collection failing.
1534   // The CMS gen may use this fact to determine its expansion policy.
1535   GenCollectedHeap* gch = GenCollectedHeap::heap();
1536   if (gch->incremental_collection_will_fail(false /* don't consult_young */)) {
1537     assert(!_cmsGen->incremental_collection_failed(),
1538            "Should have been noticed, reacted to and cleared");
1539     _cmsGen->set_incremental_collection_failed();
1540   }
1541 
1542   if (first_state > Idling) {
1543     report_concurrent_mode_interruption();
1544   }
1545 
1546   set_did_compact(true);
1547 
1548   // If the collection is being acquired from the background
1549   // collector, there may be references on the discovered
1550   // references lists.  Abandon those references, since some
1551   // of them may have become unreachable after concurrent
1552   // discovery; the STW compacting collector will redo discovery
1553   // more precisely, without being subject to floating garbage.
1554   // Leaving otherwise unreachable references in the discovered
1555   // lists would require special handling.
1556   ref_processor()->disable_discovery();
1557   ref_processor()->abandon_partial_discovery();
1558   ref_processor()->verify_no_references_recorded();
1559 
1560   if (first_state > Idling) {
1561     save_heap_summary();
1562   }
1563 
1564   do_compaction_work(clear_all_soft_refs);
1565 
1566   // Has the GC time limit been exceeded?
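  // The maximum eden size is the young gen's maximum capacity less the
  // capacities of the two survivor spaces.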
1567   size_t max_eden_size = _young_gen->max_capacity() -
1568                          _young_gen->to()->capacity() -
1569                          _young_gen->from()->capacity();
1570   GCCause::Cause gc_cause = gch->gc_cause();
1571   size_policy()->check_gc_overhead_limit(_young_gen->used(),
1572                                          _young_gen->eden()->used(),
1573                                          _cmsGen->max_capacity(),
1574                                          max_eden_size,
1575                                          full,
1576                                          gc_cause,
1577                                          gch->collector_policy());
1578 
1579   // Reset the expansion cause, now that we just completed
1580   // a collection cycle.
1581   clear_expansion_cause();
1582   _foregroundGCIsActive = false;
1583   return;
1584 }
1585 
1586 // Resize the tenured generation
1587 // after obtaining the free list locks for the
1588 // two generations.
1589 void CMSCollector::compute_new_size() {
1590   assert_locked_or_safepoint(Heap_lock);
1591   FreelistLocker z(this);
1592   MetaspaceGC::compute_new_size();
1593   _cmsGen->compute_new_size_free_list();
1594 }
1595 
1596 // A work method used by the foreground collector to do
1597 // a mark-sweep-compact.
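// The actual compaction is performed by GenMarkSweep::invoke_at_safepoint(),
// called from within this method after reference processing has been set up.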
1598 void CMSCollector::do_compaction_work(bool clear_all_soft_refs) {
1599   GenCollectedHeap* gch = GenCollectedHeap::heap();
1600 
1601   STWGCTimer* gc_timer = GenMarkSweep::gc_timer();
1602   gc_timer->register_gc_start();
1603 
1604   SerialOldTracer* gc_tracer = GenMarkSweep::gc_tracer();
1605   gc_tracer->report_gc_start(gch->gc_cause(), gc_timer->gc_start());
1606 
1607   GCTraceTime t("CMS:MSC ", PrintGCDetails && Verbose, true, NULL, gc_tracer->gc_id());
1608 
1609   // Temporarily widen the span of the weak reference processing to
1610   // the entire heap.
1611   MemRegion new_span(GenCollectedHeap::heap()->reserved_region());
1612   ReferenceProcessorSpanMutator rp_mut_span(ref_processor(), new_span);
1613   // Temporarily, clear the "is_alive_non_header" field of the
1614   // reference processor.
1615   ReferenceProcessorIsAliveMutator rp_mut_closure(ref_processor(), NULL);
1616   // Temporarily make reference _processing_ single threaded (non-MT).
1617   ReferenceProcessorMTProcMutator rp_mut_mt_processing(ref_processor(), false);
1618   // Temporarily make refs discovery atomic
1619   ReferenceProcessorAtomicMutator rp_mut_atomic(ref_processor(), true);
1620   // Temporarily make reference _discovery_ single threaded (non-MT)
1621   ReferenceProcessorMTDiscoveryMutator rp_mut_discovery(ref_processor(), false);
1622 
1623   ref_processor()->set_enqueuing_is_done(false);
1624   ref_processor()->enable_discovery();
1625   ref_processor()->setup_policy(clear_all_soft_refs);
1626   // If an asynchronous collection finishes, the _modUnionTable is
1627   // all clear.  If we are assuming the collection from an asynchronous
1628   // collection, clear the _modUnionTable.
1629   assert(_collectorState != Idling || _modUnionTable.isAllClear(),
1630     "_modUnionTable should be clear if the baton was not passed");
1631   _modUnionTable.clear_all();
1632   assert(_collectorState != Idling || _ct->klass_rem_set()->mod_union_is_clear(),
1633     "mod union for klasses should be clear if the baton was passed");
1634   _ct->klass_rem_set()->clear_mod_union();
1635 
1636   // We must adjust the allocation statistics being maintained
1637   // in the free list space. We do so by reading and clearing
1638   // the sweep timer and updating the block flux rate estimates below.
1639   assert(!_intra_sweep_timer.is_active(), "_intra_sweep_timer should be inactive");
1640   if (_inter_sweep_timer.is_active()) {
1641     _inter_sweep_timer.stop();
1642     // Note that we do not use this sample to update the _inter_sweep_estimate.
1643     _cmsGen->cmsSpace()->beginSweepFLCensus((float)(_inter_sweep_timer.seconds()),
1644                                             _inter_sweep_estimate.padded_average(),
1645                                             _intra_sweep_estimate.padded_average());
1646   }
1647 
1648   GenMarkSweep::invoke_at_safepoint(_cmsGen->level(),
1649     ref_processor(), clear_all_soft_refs);
1650   #ifdef ASSERT
1651     CompactibleFreeListSpace* cms_space = _cmsGen->cmsSpace();
1652     size_t free_size = cms_space->free();
1653     assert(free_size ==
1654            pointer_delta(cms_space->end(), cms_space->compaction_top())
1655            * HeapWordSize,
1656       "All the free space should be compacted into one chunk at top");
1657     assert(cms_space->dictionary()->total_chunk_size(
1658                                       debug_only(cms_space->freelistLock())) == 0 ||
1659            cms_space->totalSizeInIndexedFreeLists() == 0,
1660       "All the free space should be in a single chunk");
1661     size_t num = cms_space->totalCount();
1662     assert((free_size == 0 && num == 0) ||
1663            (free_size > 0  && (num == 1 || num == 2)),
1664          "There should be at most 2 free chunks after compaction");
1665   #endif // ASSERT
1666   _collectorState = Resetting;
1667   assert(_restart_addr == NULL,
1668          "Should have been NULL'd before baton was passed");
1669   reset(false /* == !concurrent */);
1670   _cmsGen->reset_after_compaction();
1671   _concurrent_cycles_since_last_unload = 0;
1672 
1673   // Clear any data recorded in the PLAB chunk arrays.
1674   if (_survivor_plab_array != NULL) {
1675     reset_survivor_plab_arrays();
1676   }
1677 
1678   // Adjust the per-size allocation stats for the next epoch.
1679   _cmsGen->cmsSpace()->endSweepFLCensus(sweep_count() /* fake */);
1680   // Restart the "inter sweep timer" for the next epoch.
1681   _inter_sweep_timer.reset();
1682   _inter_sweep_timer.start();
1683 
1684   gc_timer->register_gc_end();
1685 
1686   gc_tracer->report_gc_end(gc_timer->gc_end(), gc_timer->time_partitions());
1687 
1688   // For a mark-sweep-compact, compute_new_size() will be called
1689   // in the heap's do_collection() method.
1690 }
1691 
1692 void CMSCollector::print_eden_and_survivor_chunk_arrays() {
1693   ContiguousSpace* eden_space = _young_gen->eden();
1694   ContiguousSpace* from_space = _young_gen->from();
1695   ContiguousSpace* to_space   = _young_gen->to();
1696   // Eden
1697   if (_eden_chunk_array != NULL) {
1698     gclog_or_tty->print_cr("eden " PTR_FORMAT "-" PTR_FORMAT "-" PTR_FORMAT "(" SIZE_FORMAT ")",
1699                            p2i(eden_space->bottom()), p2i(eden_space->top()),
1700                            p2i(eden_space->end()), eden_space->capacity());
1701     gclog_or_tty->print_cr("_eden_chunk_index=" SIZE_FORMAT ", "
1702                            "_eden_chunk_capacity=" SIZE_FORMAT,
1703                            _eden_chunk_index, _eden_chunk_capacity);
1704     for (size_t i = 0; i < _eden_chunk_index; i++) {
1705       gclog_or_tty->print_cr("_eden_chunk_array[" SIZE_FORMAT "]=" PTR_FORMAT,
1706                              i, p2i(_eden_chunk_array[i]));
1707     }
1708   }
1709   // Survivor
1710   if (_survivor_chunk_array != NULL) {
1711     gclog_or_tty->print_cr("survivor " PTR_FORMAT "-" PTR_FORMAT "-" PTR_FORMAT "(" SIZE_FORMAT ")",
1712                            p2i(from_space->bottom()), p2i(from_space->top()),
1713                            p2i(from_space->end()), from_space->capacity());
1714     gclog_or_tty->print_cr("_survivor_chunk_index=" SIZE_FORMAT ", "
1715                            "_survivor_chunk_capacity=" SIZE_FORMAT,
1716                            _survivor_chunk_index, _survivor_chunk_capacity);
1717     for (size_t i = 0; i < _survivor_chunk_index; i++) {
1718       gclog_or_tty->print_cr("_survivor_chunk_array[" SIZE_FORMAT "]=" PTR_FORMAT,
1719                              i, p2i(_survivor_chunk_array[i]));
1720     }
1721   }
1722 }
1723 
1724 void CMSCollector::getFreelistLocks() const {
1725   // Get locks for all free lists in all generations that this
1726   // collector is responsible for
1727   _cmsGen->freelistLock()->lock_without_safepoint_check();
1728 }
1729 
1730 void CMSCollector::releaseFreelistLocks() const {
1731   // Release locks for all free lists in all generations that this
1732   // collector is responsible for
1733   _cmsGen->freelistLock()->unlock();
1734 }
1735 
1736 bool CMSCollector::haveFreelistLocks() const {
1737   // Check locks for all free lists in all generations that this
1738   // collector is responsible for
1739   assert_lock_strong(_cmsGen->freelistLock());
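  // Note: assert_lock_strong() is a no-op in product builds, so this check
  // is debug-only; the PRODUCT_ONLY guard below keeps this method from
  // being reached in product code.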
1740   PRODUCT_ONLY(ShouldNotReachHere());
1741   return true;
1742 }
1743 
1744 // A utility class that is used by the CMS collector to
1745 // temporarily "release" the foreground collector from its
1746 // usual obligation to wait for the background collector to
1747 // complete an ongoing phase before proceeding.
1748 class ReleaseForegroundGC: public StackObj {
1749  private:
1750   CMSCollector* _c;
1751  public:
1752   ReleaseForegroundGC(CMSCollector* c) : _c(c) {
1753     assert(_c->_foregroundGCShouldWait, "Else should not need to call");
1754     MutexLockerEx x(CGC_lock, Mutex::_no_safepoint_check_flag);
1755     // allow a potentially blocked foreground collector to proceed
1756     _c->_foregroundGCShouldWait = false;
1757     if (_c->_foregroundGCIsActive) {
1758       CGC_lock->notify();
1759     }
1760     assert(!ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
1761            "Possible deadlock");
1762   }
1763 
1764   ~ReleaseForegroundGC() {
1765     assert(!_c->_foregroundGCShouldWait, "Usage protocol violation?");
1766     MutexLockerEx x(CGC_lock, Mutex::_no_safepoint_check_flag);
1767     _c->_foregroundGCShouldWait = true;
1768   }
1769 };
1770 
1771 void CMSCollector::collect_in_background(GCCause::Cause cause) {
1772   assert(Thread::current()->is_ConcurrentGC_thread(),
1773     "A CMS asynchronous collection is only allowed on a CMS thread.");
1774 
1775   GenCollectedHeap* gch = GenCollectedHeap::heap();
1776   {
1777     bool safepoint_check = Mutex::_no_safepoint_check_flag;
1778     MutexLockerEx hl(Heap_lock, safepoint_check);
1779     FreelistLocker fll(this);
1780     MutexLockerEx x(CGC_lock, safepoint_check);
1781     if (_foregroundGCIsActive || !UseAsyncConcMarkSweepGC) {
1782       // The foreground collector is active or we're
1783       // not using asynchronous collections.  Skip this
1784       // background collection.
1785       assert(!_foregroundGCShouldWait, "Should be clear");
1786       return;
1787     } else {
1788       assert(_collectorState == Idling, "Should be idling before start.");
1789       _collectorState = InitialMarking;
1790       register_gc_start(cause);
1791       // Reset the expansion cause, now that we are about to begin
1792       // a new cycle.
1793       clear_expansion_cause();
1794 
1795       // Clear the MetaspaceGC flag since a concurrent collection
1796       // is starting but also clear it after the collection.
1797       MetaspaceGC::set_should_concurrent_collect(false);
1798     }
1799     // Decide if we want to enable class unloading as part of the
1800     // ensuing concurrent GC cycle.
1801     update_should_unload_classes();
1802     _full_gc_requested = false;           // acks all outstanding full gc requests
1803     _full_gc_cause = GCCause::_no_gc;
1804     // Signal that we are about to start a collection
1805     gch->increment_total_full_collections();  // ... starting a collection cycle
1806     _collection_count_start = gch->total_full_collections();
1807   }
1808 
1809   // Used for PrintGC
1810   size_t prev_used;
1811   if (PrintGC && Verbose) {
1812     prev_used = _cmsGen->used();
1813   }
1814 
1815   // The change of the collection state is normally done at this level;
1816   // the exceptions are phases that are executed while the world is
1817   // stopped.  For those phases the change of state is done while the
1818   // world is stopped.  For baton passing purposes this allows the
1819   // background collector to finish the phase and change state atomically.
1820   // The foreground collector cannot wait on a phase that is done
1821   // while the world is stopped because the foreground collector already
1822   // has the world stopped and would deadlock.
1823   while (_collectorState != Idling) {
1824     if (TraceCMSState) {
1825       gclog_or_tty->print_cr("Thread " INTPTR_FORMAT " in CMS state %d",
1826         p2i(Thread::current()), _collectorState);
1827     }
1828     // The foreground collector
1829     //   holds the Heap_lock throughout its collection.
1830     //   holds the CMS token (but not the lock)
1831     //     except while it is waiting for the background collector to yield.
1832     //
1833     // The foreground collector should be blocked (not for long)
1834     //   if the background collector is about to start a phase
1835     //   executed with world stopped.  If the background
1836     //   collector has already started such a phase, the
1837     //   foreground collector is blocked waiting for the
1838     //   Heap_lock.  The stop-world phases (InitialMarking and FinalMarking)
1839     //   are executed in the VM thread.
1840     //
1841     // The locking order is
1842     //   PendingListLock (PLL)  -- if applicable (FinalMarking)
1843     //   Heap_lock  (both this & PLL locked in VM_CMS_Operation::prologue())
1844     //   CMS token  (claimed in
1845     //                stop_world_and_do() -->
1846     //                  safepoint_synchronize() -->
1847     //                    CMSThread::synchronize())
1848 
1849     {
1850       // Check if the FG collector wants us to yield.
1851       CMSTokenSync x(true); // is cms thread
1852       if (waitForForegroundGC()) {
1853         // We yielded to a foreground GC, nothing more to be
1854         // done this round.
1855         assert(_foregroundGCShouldWait == false, "We set it to false in "
1856                "waitForForegroundGC()");
1857         if (TraceCMSState) {
1858           gclog_or_tty->print_cr("CMS Thread " INTPTR_FORMAT
1859             " exiting collection CMS state %d",
1860             p2i(Thread::current()), _collectorState);
1861         }
1862         return;
1863       } else {
1864         // The background collector can run but check to see if the
1865         // foreground collector has done a collection while the
1866         // background collector was waiting to get the CGC_lock
1867         // above.  If yes, break so that _foregroundGCShouldWait
1868         // is cleared before returning.
1869         if (_collectorState == Idling) {
1870           break;
1871         }
1872       }
1873     }
1874 
1875     assert(_foregroundGCShouldWait, "Foreground collector, if active, "
1876       "should be waiting");
1877 
1878     switch (_collectorState) {
1879       case InitialMarking:
1880         {
1881           ReleaseForegroundGC x(this);
1882           stats().record_cms_begin();
1883           VM_CMS_Initial_Mark initial_mark_op(this);
1884           VMThread::execute(&initial_mark_op);
1885         }
1886         // The collector state may be any legal state at this point
1887         // since the background collector may have yielded to the
1888         // foreground collector.
1889         break;
1890       case Marking:
1891         // initial marking in checkpointRootsInitialWork has been completed
1892         if (markFromRoots()) { // we were successful
1893           assert(_collectorState == Precleaning, "Collector state should "
1894             "have changed");
1895         } else {
1896           assert(_foregroundGCIsActive, "Internal state inconsistency");
1897         }
1898         break;
1899       case Precleaning:
1900         // marking from roots in markFromRoots has been completed
1901         preclean();
1902         assert(_collectorState == AbortablePreclean ||
1903                _collectorState == FinalMarking,
1904                "Collector state should have changed");
1905         break;
1906       case AbortablePreclean:
1907         abortable_preclean();
1908         assert(_collectorState == FinalMarking, "Collector state should "
1909           "have changed");
1910         break;
1911       case FinalMarking:
1912         {
1913           ReleaseForegroundGC x(this);
1914 
1915           VM_CMS_Final_Remark final_remark_op(this);
1916           VMThread::execute(&final_remark_op);
1917         }
1918         assert(_foregroundGCShouldWait, "block post-condition");
1919         break;
1920       case Sweeping:
1921         // final marking in checkpointRootsFinal has been completed
1922         sweep();
1923         assert(_collectorState == Resizing, "Collector state change "
1924           "to Resizing must be done under the free_list_lock");
1925 
1926       case Resizing: {
1927         // Sweeping has been completed...
1928         // At this point the background collection has completed.
1929         // Don't move the call to compute_new_size() down
1930         // into code that might be executed if the background
1931         // collection was preempted.
1932         {
1933           ReleaseForegroundGC x(this);   // unblock FG collection
1934           MutexLockerEx       y(Heap_lock, Mutex::_no_safepoint_check_flag);
1935           CMSTokenSync        z(true);   // not strictly needed.
1936           if (_collectorState == Resizing) {
1937             compute_new_size();
1938             save_heap_summary();
1939             _collectorState = Resetting;
1940           } else {
1941             assert(_collectorState == Idling, "The state should only change"
1942                    " because the foreground collector has finished the collection");
1943           }
1944         }
1945         break;
1946       }
1947       case Resetting:
1948         // CMS heap resizing has been completed
1949         reset(true);
1950         assert(_collectorState == Idling, "Collector state should "
1951           "have changed");
1952 
1953         MetaspaceGC::set_should_concurrent_collect(false);
1954 
1955         stats().record_cms_end();
1956         // Don't move the concurrent_phases_end() and compute_new_size()
1957         // calls to here because a preempted background collection
        // has its state set to "Resetting".
1959         break;
1960       case Idling:
1961       default:
1962         ShouldNotReachHere();
1963         break;
1964     }
1965     if (TraceCMSState) {
1966       gclog_or_tty->print_cr("  Thread " INTPTR_FORMAT " done - next CMS state %d",
1967         p2i(Thread::current()), _collectorState);
1968     }
1969     assert(_foregroundGCShouldWait, "block post-condition");
1970   }
1971 
1972   // Should this be in gc_epilogue?
1973   collector_policy()->counters()->update_counters();
1974 
1975   {
1976     // Clear _foregroundGCShouldWait and, in the event that the
1977     // foreground collector is waiting, notify it, before
1978     // returning.
1979     MutexLockerEx x(CGC_lock, Mutex::_no_safepoint_check_flag);
1980     _foregroundGCShouldWait = false;
1981     if (_foregroundGCIsActive) {
1982       CGC_lock->notify();
1983     }
1984     assert(!ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
1985            "Possible deadlock");
1986   }
1987   if (TraceCMSState) {
1988     gclog_or_tty->print_cr("CMS Thread " INTPTR_FORMAT
1989       " exiting collection CMS state %d",
1990       p2i(Thread::current()), _collectorState);
1991   }
1992   if (PrintGC && Verbose) {
1993     _cmsGen->print_heap_change(prev_used);
1994   }
1995 }
1996 
1997 void CMSCollector::register_gc_start(GCCause::Cause cause) {
1998   _cms_start_registered = true;
1999   _gc_timer_cm->register_gc_start();
2000   _gc_tracer_cm->report_gc_start(cause, _gc_timer_cm->gc_start());
2001 }
2002 
2003 void CMSCollector::register_gc_end() {
2004   if (_cms_start_registered) {
2005     report_heap_summary(GCWhen::AfterGC);
2006 
2007     _gc_timer_cm->register_gc_end();
2008     _gc_tracer_cm->report_gc_end(_gc_timer_cm->gc_end(), _gc_timer_cm->time_partitions());
2009     _cms_start_registered = false;
2010   }
2011 }
2012 
2013 void CMSCollector::save_heap_summary() {
2014   GenCollectedHeap* gch = GenCollectedHeap::heap();
2015   _last_heap_summary = gch->create_heap_summary();
2016   _last_metaspace_summary = gch->create_metaspace_summary();
2017 }
2018 
2019 void CMSCollector::report_heap_summary(GCWhen::Type when) {
2020   _gc_tracer_cm->report_gc_heap_summary(when, _last_heap_summary);
2021   _gc_tracer_cm->report_metaspace_summary(when, _last_metaspace_summary);
2022 }
2023 
2024 bool CMSCollector::waitForForegroundGC() {
2025   bool res = false;
2026   assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
2027          "CMS thread should have CMS token");
2028   // Block the foreground collector until the
  // background collector decides whether to
2030   // yield.
2031   MutexLockerEx x(CGC_lock, Mutex::_no_safepoint_check_flag);
2032   _foregroundGCShouldWait = true;
2033   if (_foregroundGCIsActive) {
2034     // The background collector yields to the
2035     // foreground collector and returns a value
2036     // indicating that it has yielded.  The foreground
2037     // collector can proceed.
2038     res = true;
2039     _foregroundGCShouldWait = false;
2040     ConcurrentMarkSweepThread::clear_CMS_flag(
2041       ConcurrentMarkSweepThread::CMS_cms_has_token);
2042     ConcurrentMarkSweepThread::set_CMS_flag(
2043       ConcurrentMarkSweepThread::CMS_cms_wants_token);
2044     // Get a possibly blocked foreground thread going
2045     CGC_lock->notify();
2046     if (TraceCMSState) {
2047       gclog_or_tty->print_cr("CMS Thread " INTPTR_FORMAT " waiting at CMS state %d",
2048         p2i(Thread::current()), _collectorState);
2049     }
2050     while (_foregroundGCIsActive) {
2051       CGC_lock->wait(Mutex::_no_safepoint_check_flag);
2052     }
2053     ConcurrentMarkSweepThread::set_CMS_flag(
2054       ConcurrentMarkSweepThread::CMS_cms_has_token);
2055     ConcurrentMarkSweepThread::clear_CMS_flag(
2056       ConcurrentMarkSweepThread::CMS_cms_wants_token);
2057   }
2058   if (TraceCMSState) {
2059     gclog_or_tty->print_cr("CMS Thread " INTPTR_FORMAT " continuing at CMS state %d",
2060       p2i(Thread::current()), _collectorState);
2061   }
2062   return res;
2063 }
2064 
2065 // Because of the need to lock the free lists and other structures in
2066 // the collector, common to all the generations that the collector is
2067 // collecting, we need the gc_prologues of individual CMS generations
// to delegate to their collector. It may have been simpler had the
2069 // current infrastructure allowed one to call a prologue on a
2070 // collector. In the absence of that we have the generation's
2071 // prologue delegate to the collector, which delegates back
2072 // some "local" work to a worker method in the individual generations
2073 // that it's responsible for collecting, while itself doing any
2074 // work common to all generations it's responsible for. A similar
// comment applies to the gc_epilogue()s.
2076 // The role of the variable _between_prologue_and_epilogue is to
2077 // enforce the invocation protocol.
2078 void CMSCollector::gc_prologue(bool full) {
2079   // Call gc_prologue_work() for the CMSGen
2080   // we are responsible for.
2081 
2082   // The following locking discipline assumes that we are only called
2083   // when the world is stopped.
2084   assert(SafepointSynchronize::is_at_safepoint(), "world is stopped assumption");
2085 
2086   // The CMSCollector prologue must call the gc_prologues for the
2087   // "generations" that it's responsible
2088   // for.
2089 
2090   assert(   Thread::current()->is_VM_thread()
2091          || (   CMSScavengeBeforeRemark
2092              && Thread::current()->is_ConcurrentGC_thread()),
2093          "Incorrect thread type for prologue execution");
2094 
2095   if (_between_prologue_and_epilogue) {
2096     // We have already been invoked; this is a gc_prologue delegation
2097     // from yet another CMS generation that we are responsible for, just
2098     // ignore it since all relevant work has already been done.
2099     return;
2100   }
2101 
2102   // set a bit saying prologue has been called; cleared in epilogue
2103   _between_prologue_and_epilogue = true;
2104   // Claim locks for common data structures, then call gc_prologue_work()
2105   // for each CMSGen.
2106 
2107   getFreelistLocks();   // gets free list locks on constituent spaces
2108   bitMapLock()->lock_without_safepoint_check();
2109 
2110   // Should call gc_prologue_work() for all cms gens we are responsible for
2111   bool duringMarking =    _collectorState >= Marking
2112                          && _collectorState < Sweeping;
2113 
2114   // The young collections clear the modified oops state, which tells if
2115   // there are any modified oops in the class. The remark phase also needs
2116   // that information. Tell the young collection to save the union of all
2117   // modified klasses.
2118   if (duringMarking) {
2119     _ct->klass_rem_set()->set_accumulate_modified_oops(true);
2120   }
2121 
2122   bool registerClosure = duringMarking;
2123 
2124   _cmsGen->gc_prologue_work(full, registerClosure, &_modUnionClosurePar);
2125 
2126   if (!full) {
2127     stats().record_gc0_begin();
2128   }
2129 }
2130 
2131 void ConcurrentMarkSweepGeneration::gc_prologue(bool full) {
2132 
2133   _capacity_at_prologue = capacity();
2134   _used_at_prologue = used();
2135 
2136   // Delegate to CMScollector which knows how to coordinate between
2137   // this and any other CMS generations that it is responsible for
2138   // collecting.
2139   collector()->gc_prologue(full);
2140 }
2141 
2142 // This is a "private" interface for use by this generation's CMSCollector.
2143 // Not to be called directly by any other entity (for instance,
2144 // GenCollectedHeap, which calls the "public" gc_prologue method above).
2145 void ConcurrentMarkSweepGeneration::gc_prologue_work(bool full,
2146   bool registerClosure, ModUnionClosure* modUnionClosure) {
2147   assert(!incremental_collection_failed(), "Shouldn't be set yet");
2148   assert(cmsSpace()->preconsumptionDirtyCardClosure() == NULL,
2149     "Should be NULL");
2150   if (registerClosure) {
2151     cmsSpace()->setPreconsumptionDirtyCardClosure(modUnionClosure);
2152   }
2153   cmsSpace()->gc_prologue();
2154   // Clear stat counters
2155   NOT_PRODUCT(
2156     assert(_numObjectsPromoted == 0, "check");
2157     assert(_numWordsPromoted   == 0, "check");
2158     if (Verbose && PrintGC) {
2159       gclog_or_tty->print("Allocated "SIZE_FORMAT" objects, "
2160                           SIZE_FORMAT" bytes concurrently",
2161       _numObjectsAllocated, _numWordsAllocated*sizeof(HeapWord));
2162     }
2163     _numObjectsAllocated = 0;
2164     _numWordsAllocated   = 0;
2165   )
2166 }
2167 
2168 void CMSCollector::gc_epilogue(bool full) {
2169   // The following locking discipline assumes that we are only called
2170   // when the world is stopped.
2171   assert(SafepointSynchronize::is_at_safepoint(),
2172          "world is stopped assumption");
2173 
2174   // Currently the CMS epilogue (see CompactibleFreeListSpace) merely checks
  // if linear allocation blocks need to be appropriately marked to allow
  // the blocks to be parsable. We also check here whether we need to nudge the
2177   // CMS collector thread to start a new cycle (if it's not already active).
2178   assert(   Thread::current()->is_VM_thread()
2179          || (   CMSScavengeBeforeRemark
2180              && Thread::current()->is_ConcurrentGC_thread()),
2181          "Incorrect thread type for epilogue execution");
2182 
2183   if (!_between_prologue_and_epilogue) {
2184     // We have already been invoked; this is a gc_epilogue delegation
2185     // from yet another CMS generation that we are responsible for, just
2186     // ignore it since all relevant work has already been done.
2187     return;
2188   }
2189   assert(haveFreelistLocks(), "must have freelist locks");
2190   assert_lock_strong(bitMapLock());
2191 
2192   _ct->klass_rem_set()->set_accumulate_modified_oops(false);
2193 
2194   _cmsGen->gc_epilogue_work(full);
2195 
2196   if (_collectorState == AbortablePreclean || _collectorState == Precleaning) {
2197     // in case sampling was not already enabled, enable it
2198     _start_sampling = true;
2199   }
2200   // reset _eden_chunk_array so sampling starts afresh
2201   _eden_chunk_index = 0;
2202 
2203   size_t cms_used   = _cmsGen->cmsSpace()->used();
2204 
2205   // update performance counters - this uses a special version of
2206   // update_counters() that allows the utilization to be passed as a
2207   // parameter, avoiding multiple calls to used().
2208   //
2209   _cmsGen->update_counters(cms_used);
2210 
2211   bitMapLock()->unlock();
2212   releaseFreelistLocks();
2213 
2214   if (!CleanChunkPoolAsync) {
2215     Chunk::clean_chunk_pool();
2216   }
2217 
2218   set_did_compact(false);
2219   _between_prologue_and_epilogue = false;  // ready for next cycle
2220 }
2221 
2222 void ConcurrentMarkSweepGeneration::gc_epilogue(bool full) {
2223   collector()->gc_epilogue(full);
2224 
2225   // Also reset promotion tracking in par gc thread states.
2226   for (uint i = 0; i < ParallelGCThreads; i++) {
2227     _par_gc_thread_states[i]->promo.stopTrackingPromotions(i);
2228   }
2229 }
2230 
2231 void ConcurrentMarkSweepGeneration::gc_epilogue_work(bool full) {
2232   assert(!incremental_collection_failed(), "Should have been cleared");
2233   cmsSpace()->setPreconsumptionDirtyCardClosure(NULL);
2234   cmsSpace()->gc_epilogue();
  // Print stat counters
2236   NOT_PRODUCT(
2237     assert(_numObjectsAllocated == 0, "check");
2238     assert(_numWordsAllocated == 0, "check");
2239     if (Verbose && PrintGC) {
2240       gclog_or_tty->print("Promoted "SIZE_FORMAT" objects, "
2241                           SIZE_FORMAT" bytes",
2242                  _numObjectsPromoted, _numWordsPromoted*sizeof(HeapWord));
2243     }
2244     _numObjectsPromoted = 0;
2245     _numWordsPromoted   = 0;
2246   )
2247 
2248   if (PrintGC && Verbose) {
    // The call down the chain in contiguous_available() needs the freelistLock,
    // so print this out before releasing the freelistLock.
    gclog_or_tty->print(" Contiguous available " SIZE_FORMAT " bytes ",
2252                         contiguous_available());
2253   }
2254 }
2255 
2256 #ifndef PRODUCT
2257 bool CMSCollector::have_cms_token() {
2258   Thread* thr = Thread::current();
2259   if (thr->is_VM_thread()) {
2260     return ConcurrentMarkSweepThread::vm_thread_has_cms_token();
2261   } else if (thr->is_ConcurrentGC_thread()) {
2262     return ConcurrentMarkSweepThread::cms_thread_has_cms_token();
2263   } else if (thr->is_GC_task_thread()) {
2264     return ConcurrentMarkSweepThread::vm_thread_has_cms_token() &&
2265            ParGCRareEvent_lock->owned_by_self();
2266   }
2267   return false;
2268 }
2269 #endif
2270 
2271 // Check reachability of the given heap address in CMS generation,
2272 // treating all other generations as roots.
2273 bool CMSCollector::is_cms_reachable(HeapWord* addr) {
2274   // We could "guarantee" below, rather than assert, but I'll
2275   // leave these as "asserts" so that an adventurous debugger
2276   // could try this in the product build provided some subset of
2277   // the conditions were met, provided they were interested in the
2278   // results and knew that the computation below wouldn't interfere
2279   // with other concurrent computations mutating the structures
2280   // being read or written.
2281   assert(SafepointSynchronize::is_at_safepoint(),
2282          "Else mutations in object graph will make answer suspect");
2283   assert(have_cms_token(), "Should hold cms token");
2284   assert(haveFreelistLocks(), "must hold free list locks");
2285   assert_lock_strong(bitMapLock());
2286 
2287   // Clear the marking bit map array before starting, but, just
2288   // for kicks, first report if the given address is already marked
2289   gclog_or_tty->print_cr("Start: Address " PTR_FORMAT " is%s marked", p2i(addr),
2290                 _markBitMap.isMarked(addr) ? "" : " not");
2291 
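  // verify_after_remark() marks transitively from the roots into the
  // verification bit map; that map is then consulted to answer the
  // reachability query for 'addr'.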
2292   if (verify_after_remark()) {
2293     MutexLockerEx x(verification_mark_bm()->lock(), Mutex::_no_safepoint_check_flag);
2294     bool result = verification_mark_bm()->isMarked(addr);
2295     gclog_or_tty->print_cr("TransitiveMark: Address " PTR_FORMAT " %s marked", p2i(addr),
2296                            result ? "IS" : "is NOT");
2297     return result;
2298   } else {
2299     gclog_or_tty->print_cr("Could not compute result");
2300     return false;
2301   }
2302 }
2303 
2304 
2305 void
2306 CMSCollector::print_on_error(outputStream* st) {
2307   CMSCollector* collector = ConcurrentMarkSweepGeneration::_collector;
2308   if (collector != NULL) {
2309     CMSBitMap* bitmap = &collector->_markBitMap;
2310     st->print_cr("Marking Bits: (CMSBitMap*) " PTR_FORMAT, p2i(bitmap));
2311     bitmap->print_on_error(st, " Bits: ");
2312 
2313     st->cr();
2314 
2315     CMSBitMap* mut_bitmap = &collector->_modUnionTable;
2316     st->print_cr("Mod Union Table: (CMSBitMap*) " PTR_FORMAT, p2i(mut_bitmap));
2317     mut_bitmap->print_on_error(st, " Bits: ");
2318   }
2319 }
2320 
2321 ////////////////////////////////////////////////////////
2322 // CMS Verification Support
2323 ////////////////////////////////////////////////////////
2324 // Following the remark phase, the following invariant
2325 // should hold -- each object in the CMS heap which is
2326 // marked in markBitMap() should be marked in the verification_mark_bm().
2327 
2328 class VerifyMarkedClosure: public BitMapClosure {
2329   CMSBitMap* _marks;
2330   bool       _failed;
2331 
2332  public:
2333   VerifyMarkedClosure(CMSBitMap* bm): _marks(bm), _failed(false) {}
2334 
2335   bool do_bit(size_t offset) {
2336     HeapWord* addr = _marks->offsetToHeapWord(offset);
2337     if (!_marks->isMarked(addr)) {
2338       oop(addr)->print_on(gclog_or_tty);
2339       gclog_or_tty->print_cr(" ("INTPTR_FORMAT" should have been marked)", p2i(addr));
2340       _failed = true;
2341     }
2342     return true;
2343   }
2344 
2345   bool failed() { return _failed; }
2346 };
2347 
2348 bool CMSCollector::verify_after_remark(bool silent) {
2349   if (!silent) gclog_or_tty->print(" [Verifying CMS Marking... ");
2350   MutexLockerEx ml(verification_mark_bm()->lock(), Mutex::_no_safepoint_check_flag);
2351   static bool init = false;
2352 
2353   assert(SafepointSynchronize::is_at_safepoint(),
2354          "Else mutations in object graph will make answer suspect");
2355   assert(have_cms_token(),
2356          "Else there may be mutual interference in use of "
2357          " verification data structures");
2358   assert(_collectorState > Marking && _collectorState <= Sweeping,
2359          "Else marking info checked here may be obsolete");
2360   assert(haveFreelistLocks(), "must hold free list locks");
2361   assert_lock_strong(bitMapLock());
2362 
2363 
2364   // Allocate marking bit map if not already allocated
2365   if (!init) { // first time
2366     if (!verification_mark_bm()->allocate(_span)) {
2367       return false;
2368     }
2369     init = true;
2370   }
2371 
2372   assert(verification_mark_stack()->isEmpty(), "Should be empty");
2373 
2374   // Turn off refs discovery -- so we will be tracing through refs.
2375   // This is as intended, because by this time
2376   // GC must already have cleared any refs that need to be cleared,
2377   // and traced those that need to be marked; moreover,
2378   // the marking done here is not going to interfere in any
2379   // way with the marking information used by GC.
2380   NoRefDiscovery no_discovery(ref_processor());
2381 
2382   COMPILER2_PRESENT(DerivedPointerTableDeactivate dpt_deact;)
2383 
2384   // Clear any marks from a previous round
2385   verification_mark_bm()->clear_all();
2386   assert(verification_mark_stack()->isEmpty(), "markStack should be empty");
2387   verify_work_stacks_empty();
2388 
2389   GenCollectedHeap* gch = GenCollectedHeap::heap();
2390   gch->ensure_parsability(false);  // fill TLABs, but no need to retire them
2391   // Update the saved marks which may affect the root scans.
2392   gch->save_marks();
2393 
2394   if (CMSRemarkVerifyVariant == 1) {
2395     // In this first variant of verification, we complete
2396     // all marking, then check if the new marks-vector is
2397     // a subset of the CMS marks-vector.
2398     verify_after_remark_work_1();
2399   } else if (CMSRemarkVerifyVariant == 2) {
2400     // In this second variant of verification, we flag an error
2401     // (i.e. an object reachable in the new marks-vector not reachable
2402     // in the CMS marks-vector) immediately, also indicating the
    // identity of an object (A) that references the unmarked object (B) --
2404     // presumably, a mutation to A failed to be picked up by preclean/remark?
2405     verify_after_remark_work_2();
2406   } else {
2407     warning("Unrecognized value " UINTX_FORMAT " for CMSRemarkVerifyVariant",
2408             CMSRemarkVerifyVariant);
2409   }
2410   if (!silent) gclog_or_tty->print(" done] ");
2411   return true;
2412 }
2413 
2414 void CMSCollector::verify_after_remark_work_1() {
2415   ResourceMark rm;
2416   HandleMark  hm;
2417   GenCollectedHeap* gch = GenCollectedHeap::heap();
2418 
2419   // Get a clear set of claim bits for the roots processing to work with.
2420   ClassLoaderDataGraph::clear_claimed_marks();
2421 
2422   // Mark from roots one level into CMS
2423   MarkRefsIntoClosure notOlder(_span, verification_mark_bm());
2424   gch->rem_set()->prepare_for_younger_refs_iterate(false); // Not parallel.
2425 
2426   {
2427     StrongRootsScope srs(1);
2428 
2429     gch->gen_process_roots(&srs,
2430                            _cmsGen->level(),
2431                            true,   // younger gens are roots
2432                            GenCollectedHeap::ScanningOption(roots_scanning_options()),
2433                            should_unload_classes(),
2434                            &notOlder,
2435                            NULL,
2436                            NULL);
2437   }
2438 
2439   // Now mark from the roots
2440   MarkFromRootsClosure markFromRootsClosure(this, _span,
2441     verification_mark_bm(), verification_mark_stack(),
2442     false /* don't yield */, true /* verifying */);
2443   assert(_restart_addr == NULL, "Expected pre-condition");
2444   verification_mark_bm()->iterate(&markFromRootsClosure);
2445   while (_restart_addr != NULL) {
2446     // Deal with stack overflow: by restarting at the indicated
2447     // address.
2448     HeapWord* ra = _restart_addr;
2449     markFromRootsClosure.reset(ra);
2450     _restart_addr = NULL;
2451     verification_mark_bm()->iterate(&markFromRootsClosure, ra, _span.end());
2452   }
2453   assert(verification_mark_stack()->isEmpty(), "Should have been drained");
2454   verify_work_stacks_empty();
2455 
2456   // Marking completed -- now verify that each bit marked in
2457   // verification_mark_bm() is also marked in markBitMap(); flag all
2458   // errors by printing corresponding objects.
2459   VerifyMarkedClosure vcl(markBitMap());
2460   verification_mark_bm()->iterate(&vcl);
2461   if (vcl.failed()) {
2462     gclog_or_tty->print("Verification failed");
2463     gch->print_on(gclog_or_tty);
2464     fatal("CMS: failed marking verification after remark");
2465   }
2466 }
2467 
2468 class VerifyKlassOopsKlassClosure : public KlassClosure {
2469   class VerifyKlassOopsClosure : public OopClosure {
2470     CMSBitMap* _bitmap;
2471    public:
2472     VerifyKlassOopsClosure(CMSBitMap* bitmap) : _bitmap(bitmap) { }
2473     void do_oop(oop* p)       { guarantee(*p == NULL || _bitmap->isMarked((HeapWord*) *p), "Should be marked"); }
2474     void do_oop(narrowOop* p) { ShouldNotReachHere(); }
2475   } _oop_closure;
2476  public:
2477   VerifyKlassOopsKlassClosure(CMSBitMap* bitmap) : _oop_closure(bitmap) {}
2478   void do_klass(Klass* k) {
2479     k->oops_do(&_oop_closure);
2480   }
2481 };
2482 
2483 void CMSCollector::verify_after_remark_work_2() {
2484   ResourceMark rm;
2485   HandleMark  hm;
2486   GenCollectedHeap* gch = GenCollectedHeap::heap();
2487 
2488   // Get a clear set of claim bits for the roots processing to work with.
2489   ClassLoaderDataGraph::clear_claimed_marks();
2490 
2491   // Mark from roots one level into CMS
2492   MarkRefsIntoVerifyClosure notOlder(_span, verification_mark_bm(),
2493                                      markBitMap());
2494   CLDToOopClosure cld_closure(&notOlder, true);
2495 
2496   gch->rem_set()->prepare_for_younger_refs_iterate(false); // Not parallel.
2497 
2498   {
2499     StrongRootsScope srs(1);
2500 
2501     gch->gen_process_roots(&srs,
2502                            _cmsGen->level(),
2503                            true,   // younger gens are roots
2504                            GenCollectedHeap::ScanningOption(roots_scanning_options()),
2505                            should_unload_classes(),
2506                            &notOlder,
2507                            NULL,
2508                            &cld_closure);
2509   }
2510 
2511   // Now mark from the roots
2512   MarkFromRootsVerifyClosure markFromRootsClosure(this, _span,
2513     verification_mark_bm(), markBitMap(), verification_mark_stack());
2514   assert(_restart_addr == NULL, "Expected pre-condition");
2515   verification_mark_bm()->iterate(&markFromRootsClosure);
2516   while (_restart_addr != NULL) {
2517     // Deal with stack overflow: by restarting at the indicated
2518     // address.
2519     HeapWord* ra = _restart_addr;
2520     markFromRootsClosure.reset(ra);
2521     _restart_addr = NULL;
2522     verification_mark_bm()->iterate(&markFromRootsClosure, ra, _span.end());
2523   }
2524   assert(verification_mark_stack()->isEmpty(), "Should have been drained");
2525   verify_work_stacks_empty();
2526 
2527   VerifyKlassOopsKlassClosure verify_klass_oops(verification_mark_bm());
2528   ClassLoaderDataGraph::classes_do(&verify_klass_oops);
2529 
2530   // Marking completed -- now verify that each bit marked in
2531   // verification_mark_bm() is also marked in markBitMap(); flag all
2532   // errors by printing corresponding objects.
2533   VerifyMarkedClosure vcl(markBitMap());
2534   verification_mark_bm()->iterate(&vcl);
2535   assert(!vcl.failed(), "Else verification above should not have succeeded");
2536 }
2537 
2538 void ConcurrentMarkSweepGeneration::save_marks() {
2539   // delegate to CMS space
2540   cmsSpace()->save_marks();
2541   for (uint i = 0; i < ParallelGCThreads; i++) {
2542     _par_gc_thread_states[i]->promo.startTrackingPromotions();
2543   }
2544 }
2545 
2546 bool ConcurrentMarkSweepGeneration::no_allocs_since_save_marks() {
2547   return cmsSpace()->no_allocs_since_save_marks();
2548 }
2549 
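// The macro below generates, for each (OopClosureType, nv_suffix) pair supplied by
// ALL_SINCE_SAVE_MARKS_CLOSURES, a method that applies the closure to oops in objects
// allocated in this generation since the last save_marks(), and then re-records the
// marks. Illustrative sketch only -- assuming a pair such as (ScanClosure, _nv) is
// among those supplied, the corresponding expansion would read:
//
//   void ConcurrentMarkSweepGeneration::oop_since_save_marks_iterate_nv(ScanClosure* cl) {
//     cl->set_generation(this);
//     cmsSpace()->oop_since_save_marks_iterate_nv(cl);
//     cl->reset_generation();
//     save_marks();
//   }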
2550 #define CMS_SINCE_SAVE_MARKS_DEFN(OopClosureType, nv_suffix)    \
2551                                                                 \
2552 void ConcurrentMarkSweepGeneration::                            \
2553 oop_since_save_marks_iterate##nv_suffix(OopClosureType* cl) {   \
2554   cl->set_generation(this);                                     \
2555   cmsSpace()->oop_since_save_marks_iterate##nv_suffix(cl);      \
2556   cl->reset_generation();                                       \
2557   save_marks();                                                 \
2558 }
2559 
2560 ALL_SINCE_SAVE_MARKS_CLOSURES(CMS_SINCE_SAVE_MARKS_DEFN)
2561 
2562 void
2563 ConcurrentMarkSweepGeneration::oop_iterate(ExtendedOopClosure* cl) {
2564   if (freelistLock()->owned_by_self()) {
2565     Generation::oop_iterate(cl);
2566   } else {
2567     MutexLockerEx x(freelistLock(), Mutex::_no_safepoint_check_flag);
2568     Generation::oop_iterate(cl);
2569   }
2570 }
2571 
2572 void
2573 ConcurrentMarkSweepGeneration::object_iterate(ObjectClosure* cl) {
2574   if (freelistLock()->owned_by_self()) {
2575     Generation::object_iterate(cl);
2576   } else {
2577     MutexLockerEx x(freelistLock(), Mutex::_no_safepoint_check_flag);
2578     Generation::object_iterate(cl);
2579   }
2580 }
2581 
2582 void
2583 ConcurrentMarkSweepGeneration::safe_object_iterate(ObjectClosure* cl) {
2584   if (freelistLock()->owned_by_self()) {
2585     Generation::safe_object_iterate(cl);
2586   } else {
2587     MutexLockerEx x(freelistLock(), Mutex::_no_safepoint_check_flag);
2588     Generation::safe_object_iterate(cl);
2589   }
2590 }
2591 
2592 void
2593 ConcurrentMarkSweepGeneration::post_compact() {
2594 }
2595 
2596 void
2597 ConcurrentMarkSweepGeneration::prepare_for_verify() {
2598   // Fix the linear allocation blocks to look like free blocks.
2599 
2600   // Locks are normally acquired/released in gc_prologue/gc_epilogue, but those
2601   // are not called when the heap is verified during universe initialization and
2602   // at vm shutdown.
2603   if (freelistLock()->owned_by_self()) {
2604     cmsSpace()->prepare_for_verify();
2605   } else {
2606     MutexLockerEx fll(freelistLock(), Mutex::_no_safepoint_check_flag);
2607     cmsSpace()->prepare_for_verify();
2608   }
2609 }
2610 
2611 void
2612 ConcurrentMarkSweepGeneration::verify() {
2613   // Locks are normally acquired/released in gc_prologue/gc_epilogue, but those
2614   // are not called when the heap is verified during universe initialization and
2615   // at vm shutdown.
2616   if (freelistLock()->owned_by_self()) {
2617     cmsSpace()->verify();
2618   } else {
2619     MutexLockerEx fll(freelistLock(), Mutex::_no_safepoint_check_flag);
2620     cmsSpace()->verify();
2621   }
2622 }
2623 
2624 void CMSCollector::verify() {
2625   _cmsGen->verify();
2626 }
2627 
2628 #ifndef PRODUCT
2629 bool CMSCollector::overflow_list_is_empty() const {
2630   assert(_num_par_pushes >= 0, "Inconsistency");
2631   if (_overflow_list == NULL) {
2632     assert(_num_par_pushes == 0, "Inconsistency");
2633   }
2634   return _overflow_list == NULL;
2635 }
2636 
2637 // The methods verify_work_stacks_empty() and verify_overflow_empty()
2638 // merely consolidate assertion checks that appear to occur together frequently.
2639 void CMSCollector::verify_work_stacks_empty() const {
2640   assert(_markStack.isEmpty(), "Marking stack should be empty");
2641   assert(overflow_list_is_empty(), "Overflow list should be empty");
2642 }
2643 
2644 void CMSCollector::verify_overflow_empty() const {
2645   assert(overflow_list_is_empty(), "Overflow list should be empty");
2646   assert(no_preserved_marks(), "No preserved marks");
2647 }
2648 #endif // PRODUCT
2649 
2650 // Decide if we want to enable class unloading as part of the
2651 // ensuing concurrent GC cycle. We will collect and
2652 // unload classes if it's the case that:
2653 // (1) an explicit gc request has been made and the flag
2654 //     ExplicitGCInvokesConcurrentAndUnloadsClasses is set, OR
2655 // (2) (a) class unloading is enabled at the command line, and
2656 //     (b) (i) enough cycles have elapsed since the last class unload, or (ii) the old gen is getting really full
2657 // NOTE: Provided there is no change in the state of the heap between
2658 // calls to this method, it should have idempotent results. Moreover,
2659 // its results should be monotonically increasing (i.e. going from 0 to 1,
2660 // but not 1 to 0) between successive calls between which the heap was
2661 // not collected. For the implementation below, it must thus rely on
2662 // the property that concurrent_cycles_since_last_unload()
2663 // will not decrease unless a collection cycle happened, and that
2664 // _cmsGen->is_too_full() is
2665 // itself also monotonic in that sense. See check_monotonicity()
2666 // below.
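// Note (assuming the usual default of CMSClassUnloadingMaxInterval == 0): disjunct
// 2.b.(i) is then trivially satisfied, so classes are unloaded on every cycle
// whenever CMSClassUnloadingEnabled is set.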
2667 void CMSCollector::update_should_unload_classes() {
2668   _should_unload_classes = false;
2669   // Condition 1 above
2670   if (_full_gc_requested && ExplicitGCInvokesConcurrentAndUnloadsClasses) {
2671     _should_unload_classes = true;
2672   } else if (CMSClassUnloadingEnabled) { // Condition 2.a above
2673     // Disjuncts 2.b.(i,ii) above
2674     _should_unload_classes = (concurrent_cycles_since_last_unload() >=
2675                               CMSClassUnloadingMaxInterval)
2676                            || _cmsGen->is_too_full();
2677   }
2678 }
2679 
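// The generation is considered "too full" only if it already wants a concurrent
// collection and its occupancy exceeds CMSIsTooFullPercentage; e.g. with a flag
// value of 98 the cutoff is 98% occupancy.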
2680 bool ConcurrentMarkSweepGeneration::is_too_full() const {
2681   bool res = should_concurrent_collect();
2682   res = res && (occupancy() > (double)CMSIsTooFullPercentage/100.0);
2683   return res;
2684 }
2685 
2686 void CMSCollector::setup_cms_unloading_and_verification_state() {
2687   const  bool should_verify =   VerifyBeforeGC || VerifyAfterGC || VerifyDuringGC
2688                              || VerifyBeforeExit;
2689   const  int  rso           =   GenCollectedHeap::SO_AllCodeCache;
2690 
2691   // We set the proper root for this CMS cycle here.
2692   if (should_unload_classes()) {   // Should unload classes this cycle
2693     remove_root_scanning_option(rso);  // Shrink the root set appropriately
2694     set_verifying(should_verify);    // Set verification state for this cycle
2695     return;                            // Nothing else needs to be done at this time
2696   }
2697 
2698   // Not unloading classes this cycle
2699   assert(!should_unload_classes(), "Inconsistency!");
2700 
2701   if ((!verifying() || unloaded_classes_last_cycle()) && should_verify) {
2702     // Include symbols, strings and code cache elements to prevent their resurrection.
2703     add_root_scanning_option(rso);
2704     set_verifying(true);
2705   } else if (verifying() && !should_verify) {
2706     // We were verifying, but some verification flags got disabled.
2707     set_verifying(false);
2708     // Exclude symbols, strings and code cache elements from root scanning to
2709     // reduce IM and RM pauses.
2710     remove_root_scanning_option(rso);
2711   }
2712 }
2713 
2714 
2715 #ifndef PRODUCT
2716 HeapWord* CMSCollector::block_start(const void* p) const {
2717   const HeapWord* addr = (HeapWord*)p;
2718   if (_span.contains(p)) {
2719     if (_cmsGen->cmsSpace()->is_in_reserved(addr)) {
2720       return _cmsGen->cmsSpace()->block_start(p);
2721     }
2722   }
2723   return NULL;
2724 }
2725 #endif
2726 
2727 HeapWord*
2728 ConcurrentMarkSweepGeneration::expand_and_allocate(size_t word_size,
2729                                                    bool   tlab,
2730                                                    bool   parallel) {
2731   CMSSynchronousYieldRequest yr;
2732   assert(!tlab, "Can't deal with TLAB allocation");
2733   MutexLockerEx x(freelistLock(), Mutex::_no_safepoint_check_flag);
2734   expand_for_gc_cause(word_size*HeapWordSize, MinHeapDeltaBytes, CMSExpansionCause::_satisfy_allocation);
2735   if (GCExpandToAllocateDelayMillis > 0) {
2736     os::sleep(Thread::current(), GCExpandToAllocateDelayMillis, false);
2737   }
2738   return have_lock_and_allocate(word_size, tlab);
2739 }
2740 
2741 void ConcurrentMarkSweepGeneration::expand_for_gc_cause(
2742     size_t bytes,
2743     size_t expand_bytes,
2744     CMSExpansionCause::Cause cause)
2745 {
2746 
2747   bool success = expand(bytes, expand_bytes);
2748 
2749   // remember why we expanded; this information is used
2750   // by shouldConcurrentCollect() when making decisions on whether to start
2751   // a new CMS cycle.
2752   if (success) {
2753     set_expansion_cause(cause);
2754     if (PrintGCDetails && Verbose) {
2755       gclog_or_tty->print_cr("Expanded CMS gen for %s",
2756         CMSExpansionCause::to_string(cause));
2757     }
2758   }
2759 }
2760 
2761 HeapWord* ConcurrentMarkSweepGeneration::expand_and_par_lab_allocate(CMSParGCThreadState* ps, size_t word_sz) {
2762   HeapWord* res = NULL;
2763   MutexLocker x(ParGCRareEvent_lock);
2764   while (true) {
2765     // Expansion by some other thread might make alloc OK now:
2766     res = ps->lab.alloc(word_sz);
2767     if (res != NULL) return res;
2768     // If there's not enough expansion space available, give up.
2769     if (_virtual_space.uncommitted_size() < (word_sz * HeapWordSize)) {
2770       return NULL;
2771     }
2772     // Otherwise, we try expansion.
2773     expand_for_gc_cause(word_sz*HeapWordSize, MinHeapDeltaBytes, CMSExpansionCause::_allocate_par_lab);
2774     // Now go around the loop and try alloc again;
2775     // A competing par_promote might beat us to the expansion space,
2776     // so we may go around the loop again if promotion fails again.
2777     if (GCExpandToAllocateDelayMillis > 0) {
2778       os::sleep(Thread::current(), GCExpandToAllocateDelayMillis, false);
2779     }
2780   }
2781 }
2782 
2783 
2784 bool ConcurrentMarkSweepGeneration::expand_and_ensure_spooling_space(
2785   PromotionInfo* promo) {
2786   MutexLocker x(ParGCRareEvent_lock);
2787   size_t refill_size_bytes = promo->refillSize() * HeapWordSize;
2788   while (true) {
2789     // Expansion by some other thread might make alloc OK now:
2790     if (promo->ensure_spooling_space()) {
2791       assert(promo->has_spooling_space(),
2792              "Post-condition of successful ensure_spooling_space()");
2793       return true;
2794     }
2795     // If there's not enough expansion space available, give up.
2796     if (_virtual_space.uncommitted_size() < refill_size_bytes) {
2797       return false;
2798     }
2799     // Otherwise, we try expansion.
2800     expand_for_gc_cause(refill_size_bytes, MinHeapDeltaBytes, CMSExpansionCause::_allocate_par_spooling_space);
2801     // Now go around the loop and try alloc again;
2802     // A competing allocation might beat us to the expansion space,
2803     // so we may go around the loop again if allocation fails again.
2804     if (GCExpandToAllocateDelayMillis > 0) {
2805       os::sleep(Thread::current(), GCExpandToAllocateDelayMillis, false);
2806     }
2807   }
2808 }
2809 
2810 void ConcurrentMarkSweepGeneration::shrink(size_t bytes) {
2811   // Only shrink if a compaction was done so that all the free space
2812   // in the generation is in a contiguous block at the end.
2813   if (did_compact()) {
2814     CardGeneration::shrink(bytes);
2815   }
2816 }
2817 
2818 void ConcurrentMarkSweepGeneration::assert_correct_size_change_locking() {
2819   assert_locked_or_safepoint(Heap_lock);
2820 }
2821 
2822 void ConcurrentMarkSweepGeneration::shrink_free_list_by(size_t bytes) {
2823   assert_locked_or_safepoint(Heap_lock);
2824   assert_lock_strong(freelistLock());
2825   if (PrintGCDetails && Verbose) {
2826     warning("Shrinking of CMS not yet implemented");
2827   }
2828   return;
2829 }
2830 
2831 
2832 // Simple ctor/dtor wrapper for accounting & timer chores around concurrent
2833 // phases.
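// Typical use (see e.g. markFromRoots() below): declare an instance on the stack
// for the duration of a concurrent phase --
//
//   TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
//   CMSPhaseAccounting pa(this, "mark", _gc_tracer_cm->gc_id(), !PrintGCDetails);
//
// The constructor logs the phase-start message (under PrintGCDetails) and starts the
// collector and wall-clock timers; the destructor stops them and logs the elapsed times.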
2834 class CMSPhaseAccounting: public StackObj {
2835  public:
2836   CMSPhaseAccounting(CMSCollector *collector,
2837                      const char *phase,
2838                      const GCId gc_id,
2839                      bool print_cr = true);
2840   ~CMSPhaseAccounting();
2841 
2842  private:
2843   CMSCollector *_collector;
2844   const char *_phase;
2845   elapsedTimer _wallclock;
2846   bool _print_cr;
2847   const GCId _gc_id;
2848 
2849  public:
2850   // Not MT-safe; so do not pass around these StackObj's
2851   // where they may be accessed by other threads.
2852   jlong wallclock_millis() {
2853     assert(_wallclock.is_active(), "Wall clock should not stop");
2854     _wallclock.stop();  // to record time
2855     jlong ret = _wallclock.milliseconds();
2856     _wallclock.start(); // restart
2857     return ret;
2858   }
2859 };
2860 
2861 CMSPhaseAccounting::CMSPhaseAccounting(CMSCollector *collector,
2862                                        const char *phase,
2863                                        const GCId gc_id,
2864                                        bool print_cr) :
2865   _collector(collector), _phase(phase), _print_cr(print_cr), _gc_id(gc_id) {
2866 
2867   if (PrintCMSStatistics != 0) {
2868     _collector->resetYields();
2869   }
2870   if (PrintGCDetails) {
2871     gclog_or_tty->gclog_stamp(_gc_id);
2872     gclog_or_tty->print_cr("[%s-concurrent-%s-start]",
2873       _collector->cmsGen()->short_name(), _phase);
2874   }
2875   _collector->resetTimer();
2876   _wallclock.start();
2877   _collector->startTimer();
2878 }
2879 
2880 CMSPhaseAccounting::~CMSPhaseAccounting() {
2881   assert(_wallclock.is_active(), "Wall clock should not have stopped");
2882   _collector->stopTimer();
2883   _wallclock.stop();
2884   if (PrintGCDetails) {
2885     gclog_or_tty->gclog_stamp(_gc_id);
2886     gclog_or_tty->print("[%s-concurrent-%s: %3.3f/%3.3f secs]",
2887                  _collector->cmsGen()->short_name(),
2888                  _phase, _collector->timerValue(), _wallclock.seconds());
2889     if (_print_cr) {
2890       gclog_or_tty->cr();
2891     }
2892     if (PrintCMSStatistics != 0) {
2893       gclog_or_tty->print_cr(" (CMS-concurrent-%s yielded %d times)", _phase,
2894                     _collector->yields());
2895     }
2896   }
2897 }
2898 
2899 // CMS work
2900 
2901 // The common parts of CMSParInitialMarkTask and CMSParRemarkTask.
2902 class CMSParMarkTask : public AbstractGangTask {
2903  protected:
2904   CMSCollector*     _collector;
2905   uint              _n_workers;
2906   CMSParMarkTask(const char* name, CMSCollector* collector, uint n_workers) :
2907       AbstractGangTask(name),
2908       _collector(collector),
2909       _n_workers(n_workers) {}
2910   // Work method in support of parallel rescan ... of young gen spaces
2911   void do_young_space_rescan(uint worker_id, OopsInGenClosure* cl,
2912                              ContiguousSpace* space,
2913                              HeapWord** chunk_array, size_t chunk_top);
2914   void work_on_young_gen_roots(uint worker_id, OopsInGenClosure* cl);
2915 };
2916 
2917 // Parallel initial mark task
2918 class CMSParInitialMarkTask: public CMSParMarkTask {
2919   StrongRootsScope* _strong_roots_scope;
2920  public:
2921   CMSParInitialMarkTask(CMSCollector* collector, StrongRootsScope* strong_roots_scope, uint n_workers) :
2922       CMSParMarkTask("Scan roots and young gen for initial mark in parallel", collector, n_workers),
2923       _strong_roots_scope(strong_roots_scope) {}
2924   void work(uint worker_id);
2925 };
2926 
2927 // Checkpoint the roots into this generation from outside
2928 // this generation. [Note this initial checkpoint need only
2929 // be approximate -- we'll do a catch up phase subsequently.]
2930 void CMSCollector::checkpointRootsInitial() {
2931   assert(_collectorState == InitialMarking, "Wrong collector state");
2932   check_correct_thread_executing();
2933   TraceCMSMemoryManagerStats tms(_collectorState, GenCollectedHeap::heap()->gc_cause());
2934 
2935   save_heap_summary();
2936   report_heap_summary(GCWhen::BeforeGC);
2937 
2938   ReferenceProcessor* rp = ref_processor();
2939   assert(_restart_addr == NULL, "Control point invariant");
2940   {
2941     // acquire locks for subsequent manipulations
2942     MutexLockerEx x(bitMapLock(),
2943                     Mutex::_no_safepoint_check_flag);
2944     checkpointRootsInitialWork();
2945     // enable ("weak") refs discovery
2946     rp->enable_discovery();
2947     _collectorState = Marking;
2948   }
2949 }
2950 
2951 void CMSCollector::checkpointRootsInitialWork() {
2952   assert(SafepointSynchronize::is_at_safepoint(), "world should be stopped");
2953   assert(_collectorState == InitialMarking, "just checking");
2954 
2955   // If there has not been a GC[n-1] since last GC[n] cycle completed,
2956   // precede our marking with a collection of all
2957   // younger generations to keep floating garbage to a minimum.
2958   // XXX: we won't do this for now -- it's an optimization to be done later.
2959 
2960   // already have locks
2961   assert_lock_strong(bitMapLock());
2962   assert(_markBitMap.isAllClear(), "was reset at end of previous cycle");
2963 
2964   // Setup the verification and class unloading state for this
2965   // CMS collection cycle.
2966   setup_cms_unloading_and_verification_state();
2967 
2968   NOT_PRODUCT(GCTraceTime t("\ncheckpointRootsInitialWork",
2969     PrintGCDetails && Verbose, true, _gc_timer_cm, _gc_tracer_cm->gc_id());)
2970 
2971   // Reset all the PLAB chunk arrays if necessary.
2972   if (_survivor_plab_array != NULL && !CMSPLABRecordAlways) {
2973     reset_survivor_plab_arrays();
2974   }
2975 
2976   ResourceMark rm;
2977   HandleMark  hm;
2978 
2979   MarkRefsIntoClosure notOlder(_span, &_markBitMap);
2980   GenCollectedHeap* gch = GenCollectedHeap::heap();
2981 
2982   verify_work_stacks_empty();
2983   verify_overflow_empty();
2984 
2985   gch->ensure_parsability(false);  // fill TLABs, but no need to retire them
2986   // Update the saved marks which may affect the root scans.
2987   gch->save_marks();
2988 
2989   // weak reference processing has not started yet.
2990   ref_processor()->set_enqueuing_is_done(false);
2991 
2992   // Need to remember all newly created CLDs,
2993   // so that we can guarantee that the remark finds them.
2994   ClassLoaderDataGraph::remember_new_clds(true);
2995 
2996   // Whenever a CLD is found, it will be claimed before proceeding to mark
2997   // the klasses. The claimed marks need to be cleared before marking starts.
2998   ClassLoaderDataGraph::clear_claimed_marks();
2999 
3000   if (CMSPrintEdenSurvivorChunks) {
3001     print_eden_and_survivor_chunk_arrays();
3002   }
3003 
3004   {
3005     COMPILER2_PRESENT(DerivedPointerTableDeactivate dpt_deact;)
3006     if (CMSParallelInitialMarkEnabled) {
3007       // The parallel version.
3008       FlexibleWorkGang* workers = gch->workers();
3009       assert(workers != NULL, "Need parallel worker threads.");
3010       uint n_workers = workers->active_workers();
3011 
3012       StrongRootsScope srs(n_workers);
3013 
3014       CMSParInitialMarkTask tsk(this, &srs, n_workers);
3015       initialize_sequential_subtasks_for_young_gen_rescan(n_workers);
3016       if (n_workers > 1) {
3017         workers->run_task(&tsk);
3018       } else {
3019         tsk.work(0);
3020       }
3021     } else {
3022       // The serial version.
3023       CLDToOopClosure cld_closure(&notOlder, true);
3024       gch->rem_set()->prepare_for_younger_refs_iterate(false); // Not parallel.
3025 
3026       StrongRootsScope srs(1);
3027 
3028       gch->gen_process_roots(&srs,
3029                              _cmsGen->level(),
3030                              true,   // younger gens are roots
3031                              GenCollectedHeap::ScanningOption(roots_scanning_options()),
3032                              should_unload_classes(),
3033                              &notOlder,
3034                              NULL,
3035                              &cld_closure);
3036     }
3037   }
3038 
3039   // The mod-union table should already be clear; it will be dirtied in the
3040   // CMS generation's gc_prologue on each younger generation collection.
3041 
3042   assert(_modUnionTable.isAllClear(),
3043        "Was cleared in most recent final checkpoint phase"
3044        " or no bits are set in the gc_prologue before the start of the next "
3045        "subsequent marking phase.");
3046 
3047   assert(_ct->klass_rem_set()->mod_union_is_clear(), "Must be");
3048 
3049   // Save the end of the used_region of the constituent generations
3050   // to be used to limit the extent of sweep in each generation.
3051   save_sweep_limits();
3052   verify_overflow_empty();
3053 }
3054 
3055 bool CMSCollector::markFromRoots() {
3056   // we might be tempted to assert that:
3057   // assert(!SafepointSynchronize::is_at_safepoint(),
3058   //        "inconsistent argument?");
3059   // However that wouldn't be right, because it's possible that
3060   // a safepoint is indeed in progress as a younger generation
3061   // stop-the-world GC happens even as we mark in this generation.
3062   assert(_collectorState == Marking, "inconsistent state?");
3063   check_correct_thread_executing();
3064   verify_overflow_empty();
3065 
3066   // Weak ref discovery note: We may be discovering weak
3067   // refs in this generation concurrent (but interleaved) with
3068   // weak ref discovery by a younger generation collector.
3069 
3070   CMSTokenSyncWithLocks ts(true, bitMapLock());
3071   TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
3072   CMSPhaseAccounting pa(this, "mark", _gc_tracer_cm->gc_id(), !PrintGCDetails);
3073   bool res = markFromRootsWork();
3074   if (res) {
3075     _collectorState = Precleaning;
3076   } else { // We failed and a foreground collection wants to take over
3077     assert(_foregroundGCIsActive, "internal state inconsistency");
3078     assert(_restart_addr == NULL,  "foreground will restart from scratch");
3079     if (PrintGCDetails) {
3080       gclog_or_tty->print_cr("bailing out to foreground collection");
3081     }
3082   }
3083   verify_overflow_empty();
3084   return res;
3085 }
3086 
3087 bool CMSCollector::markFromRootsWork() {
3088   // iterate over marked bits in bit map, doing a full scan and mark
3089   // from these roots using the following algorithm:
3090   // . if oop is to the right of the current scan pointer,
3091   //   mark corresponding bit (we'll process it later)
3092   // . else (oop is to left of current scan pointer)
3093   //   push oop on marking stack
3094   // . drain the marking stack
3095 
3096   // Note that when we do a marking step we need to hold the
3097   // bit map lock -- recall that direct allocation (by mutators)
3098   // and promotion (by younger generation collectors) is also
3099   // marking the bit map. [the so-called allocate live policy.]
3100   // Because the implementation of bit map marking is not
3101   // robust wrt simultaneous marking of bits in the same word,
3102   // we need to make sure that there is no such interference
3103   // between concurrent such updates.
3104 
3105   // already have locks
3106   assert_lock_strong(bitMapLock());
3107 
3108   verify_work_stacks_empty();
3109   verify_overflow_empty();
3110   bool result = false;
3111   if (CMSConcurrentMTEnabled && ConcGCThreads > 0) {
3112     result = do_marking_mt();
3113   } else {
3114     result = do_marking_st();
3115   }
3116   return result;
3117 }
3118 
3119 // Forward decl
3120 class CMSConcMarkingTask;
3121 
3122 class CMSConcMarkingTerminator: public ParallelTaskTerminator {
3123   CMSCollector*       _collector;
3124   CMSConcMarkingTask* _task;
3125  public:
3126   virtual void yield();
3127 
3128   // "n_threads" is the number of threads to be terminated.
3129   // "queue_set" is a set of work queues of other threads.
3130   // "collector" is the CMS collector associated with this task terminator.
3131   // The yield() override asks the task itself to yield when the gang as a whole needs to yield.
3132   CMSConcMarkingTerminator(int n_threads, TaskQueueSetSuper* queue_set, CMSCollector* collector) :
3133     ParallelTaskTerminator(n_threads, queue_set),
3134     _collector(collector) { }
3135 
3136   void set_task(CMSConcMarkingTask* task) {
3137     _task = task;
3138   }
3139 };
3140 
3141 class CMSConcMarkingTerminatorTerminator: public TerminatorTerminator {
3142   CMSConcMarkingTask* _task;
3143  public:
3144   bool should_exit_termination();
3145   void set_task(CMSConcMarkingTask* task) {
3146     _task = task;
3147   }
3148 };
3149 
3150 // MT Concurrent Marking Task
3151 class CMSConcMarkingTask: public YieldingFlexibleGangTask {
3152   CMSCollector* _collector;
3153   uint          _n_workers;       // requested/desired # workers
3154   bool          _result;
3155   CompactibleFreeListSpace*  _cms_space;
3156   char          _pad_front[64];   // padding to ...
3157   HeapWord*     _global_finger;   // ... avoid sharing cache line
3158   char          _pad_back[64];
3159   HeapWord*     _restart_addr;
3160 
3161   //  Exposed here for yielding support
3162   Mutex* const _bit_map_lock;
3163 
3164   // The per thread work queues, available here for stealing
3165   OopTaskQueueSet*  _task_queues;
3166 
3167   // Termination (and yielding) support
3168   CMSConcMarkingTerminator _term;
3169   CMSConcMarkingTerminatorTerminator _term_term;
3170 
3171  public:
3172   CMSConcMarkingTask(CMSCollector* collector,
3173                  CompactibleFreeListSpace* cms_space,
3174                  YieldingFlexibleWorkGang* workers,
3175                  OopTaskQueueSet* task_queues):
3176     YieldingFlexibleGangTask("Concurrent marking done multi-threaded"),
3177     _collector(collector),
3178     _cms_space(cms_space),
3179     _n_workers(0), _result(true),
3180     _task_queues(task_queues),
3181     _term(_n_workers, task_queues, _collector),
3182     _bit_map_lock(collector->bitMapLock())
3183   {
3184     _requested_size = _n_workers;
3185     _term.set_task(this);
3186     _term_term.set_task(this);
3187     _restart_addr = _global_finger = _cms_space->bottom();
3188   }
3189 
3190 
3191   OopTaskQueueSet* task_queues()  { return _task_queues; }
3192 
3193   OopTaskQueue* work_queue(int i) { return task_queues()->queue(i); }
3194 
3195   HeapWord** global_finger_addr() { return &_global_finger; }
3196 
3197   CMSConcMarkingTerminator* terminator() { return &_term; }
3198 
3199   virtual void set_for_termination(uint active_workers) {
3200     terminator()->reset_for_reuse(active_workers);
3201   }
3202 
3203   void work(uint worker_id);
3204   bool should_yield() {
3205     return    ConcurrentMarkSweepThread::should_yield()
3206            && !_collector->foregroundGCIsActive();
3207   }
3208 
3209   virtual void coordinator_yield();  // stuff done by coordinator
3210   bool result() { return _result; }
3211 
3212   void reset(HeapWord* ra) {
3213     assert(_global_finger >= _cms_space->end(),  "Postcondition of ::work(i)");
3214     _restart_addr = _global_finger = ra;
3215     _term.reset_for_reuse();
3216   }
3217 
3218   static bool get_work_from_overflow_stack(CMSMarkStack* ovflw_stk,
3219                                            OopTaskQueue* work_q);
3220 
3221  private:
3222   void do_scan_and_mark(int i, CompactibleFreeListSpace* sp);
3223   void do_work_steal(int i);
3224   void bump_global_finger(HeapWord* f);
3225 };
3226 
3227 bool CMSConcMarkingTerminatorTerminator::should_exit_termination() {
3228   assert(_task != NULL, "Error");
3229   return _task->yielding();
3230   // Note that we do not need the disjunct || _task->should_yield() above
3231   // because we want terminating threads to yield only if the task
3232   // is already in the midst of yielding, which happens only after at least one
3233   // thread has yielded.
3234 }
3235 
3236 void CMSConcMarkingTerminator::yield() {
3237   if (_task->should_yield()) {
3238     _task->yield();
3239   } else {
3240     ParallelTaskTerminator::yield();
3241   }
3242 }
3243 
3244 ////////////////////////////////////////////////////////////////
3245 // Concurrent Marking Algorithm Sketch
3246 ////////////////////////////////////////////////////////////////
3247 // Until all tasks exhausted (both spaces):
3248 // -- claim next available chunk
3249 // -- bump global finger via CAS
3250 // -- find first object that starts in this chunk
3251 //    and start scanning bitmap from that position
3252 // -- scan marked objects for oops
3253 // -- CAS-mark target, and if successful:
3254 //    . if target oop is above global finger (volatile read)
3255 //      nothing to do
3256 //    . if target oop is in chunk and above local finger
3257 //        then nothing to do
3258 //    . else push on work-queue
3259 // -- Deal with possible overflow issues:
3260 //    . local work-queue overflow causes stuff to be pushed on
3261 //      global (common) overflow queue
3262 //    . always first empty local work queue
3263 //    . then get a batch of oops from global work queue if any
3264 //    . then do work stealing
3265 // -- When all tasks claimed (both spaces)
3266 //    and local work queue empty,
3267 //    then in a loop do:
3268 //    . check global overflow stack; steal a batch of oops and trace
3269 //    . try to steal from other threads if the global overflow stack is empty
3270 //    . if neither is available, offer termination
3271 // -- Terminate and return result
3272 //
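// The sketch above is realized by work() below: do_scan_and_mark() performs the
// chunk claiming and bitmap scan, while do_work_steal() performs the overflow-stack
// draining, work stealing and termination protocol.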
3273 void CMSConcMarkingTask::work(uint worker_id) {
3274   elapsedTimer _timer;
3275   ResourceMark rm;
3276   HandleMark hm;
3277 
3278   DEBUG_ONLY(_collector->verify_overflow_empty();)
3279 
3280   // Before we begin work, our work queue should be empty
3281   assert(work_queue(worker_id)->size() == 0, "Expected to be empty");
3282   // Scan the bitmap covering _cms_space, tracing through grey objects.
3283   _timer.start();
3284   do_scan_and_mark(worker_id, _cms_space);
3285   _timer.stop();
3286   if (PrintCMSStatistics != 0) {
3287     gclog_or_tty->print_cr("Finished cms space scanning in %dth thread: %3.3f sec",
3288       worker_id, _timer.seconds());
3289       // XXX: need xxx/xxx type of notation, two timers
3290   }
3291 
3292   // ... do work stealing
3293   _timer.reset();
3294   _timer.start();
3295   do_work_steal(worker_id);
3296   _timer.stop();
3297   if (PrintCMSStatistics != 0) {
3298     gclog_or_tty->print_cr("Finished work stealing in %dth thread: %3.3f sec",
3299       worker_id, _timer.seconds());
3300       // XXX: need xxx/xxx type of notation, two timers
3301   }
3302   assert(_collector->_markStack.isEmpty(), "Should have been emptied");
3303   assert(work_queue(worker_id)->size() == 0, "Should have been emptied");
3304   // Note that under the current task protocol, the
3305   // following assertion is true even if the spaces
3306   // expanded since the completion of the concurrent
3307   // marking. XXX This will likely change under a strict
3308   // ABORT semantics.
3309   // After perm removal the comparison was changed to
3310   // greater than or equal to from strictly greater than.
3311   // Before perm removal the highest address sweep would
3312   // have been at the end of perm gen but now is at the
3313   // end of the tenured gen.
3314   assert(_global_finger >=  _cms_space->end(),
3315          "All tasks have been completed");
3316   DEBUG_ONLY(_collector->verify_overflow_empty();)
3317 }
3318 
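// Lock-free, monotonic advance of the shared global finger: retry the CAS until
// either our own update succeeds or another worker has already published a value
// at or beyond f.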
3319 void CMSConcMarkingTask::bump_global_finger(HeapWord* f) {
3320   HeapWord* read = _global_finger;
3321   HeapWord* cur  = read;
3322   while (f > read) {
3323     cur = read;
3324     read = (HeapWord*) Atomic::cmpxchg_ptr(f, &_global_finger, cur);
3325     if (cur == read) {
3326       // our cas succeeded
3327       assert(_global_finger >= f, "protocol consistency");
3328       break;
3329     }
3330   }
3331 }
3332 
3333 // This is really inefficient, and should be redone by
3334 // using (not yet available) block-read and -write interfaces to the
3335 // stack and the work_queue. XXX FIX ME !!!
3336 bool CMSConcMarkingTask::get_work_from_overflow_stack(CMSMarkStack* ovflw_stk,
3337                                                       OopTaskQueue* work_q) {
3338   // Fast lock-free check
3339   if (ovflw_stk->length() == 0) {
3340     return false;
3341   }
3342   assert(work_q->size() == 0, "Shouldn't steal");
3343   MutexLockerEx ml(ovflw_stk->par_lock(),
3344                    Mutex::_no_safepoint_check_flag);
3345   // Grab up to 1/4 the size of the work queue
3346   size_t num = MIN2((size_t)(work_q->max_elems() - work_q->size())/4,
3347                     (size_t)ParGCDesiredObjsFromOverflowList);
3348   num = MIN2(num, ovflw_stk->length());
3349   for (int i = (int) num; i > 0; i--) {
3350     oop cur = ovflw_stk->pop();
3351     assert(cur != NULL, "Counted wrong?");
3352     work_q->push(cur);
3353   }
3354   return num > 0;
3355 }
3356 
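// Scan-and-mark for a single worker: claim chunks of the CMS space one at a time,
// bump the shared global finger past each claimed chunk before scanning it, and
// then mark and trace from every object that starts within the chunk.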
3357 void CMSConcMarkingTask::do_scan_and_mark(int i, CompactibleFreeListSpace* sp) {
3358   SequentialSubTasksDone* pst = sp->conc_par_seq_tasks();
3359   int n_tasks = pst->n_tasks();
3360   // We allow that there may be no tasks to do here because
3361   // we are restarting after a stack overflow.
3362   assert(pst->valid() || n_tasks == 0, "Uninitialized use?");
3363   uint nth_task = 0;
3364 
3365   HeapWord* aligned_start = sp->bottom();
3366   if (sp->used_region().contains(_restart_addr)) {
3367     // Align down to a card boundary for the start of 0th task
3368     // for this space.
3369     aligned_start =
3370       (HeapWord*)align_size_down((uintptr_t)_restart_addr,
3371                                  CardTableModRefBS::card_size);
3372   }
3373 
3374   size_t chunk_size = sp->marking_task_size();
3375   while (!pst->is_task_claimed(/* reference */ nth_task)) {
3376     // Having claimed the nth task in this space,
3377     // compute the chunk that it corresponds to:
3378     MemRegion span = MemRegion(aligned_start + nth_task*chunk_size,
3379                                aligned_start + (nth_task+1)*chunk_size);
3380     // Try and bump the global finger via a CAS;
3381     // note that we need to do the global finger bump
3382     // _before_ taking the intersection below, because
3383     // the task corresponding to that region will be
3384     // deemed done even if the used_region() expands
3385     // because of allocation -- as it almost certainly will
3386     // during start-up while the threads yield in the
3387     // closure below.
3388     HeapWord* finger = span.end();
3389     bump_global_finger(finger);   // atomically
3390     // There are null tasks here corresponding to chunks
3391     // beyond the "top" address of the space.
3392     span = span.intersection(sp->used_region());
3393     if (!span.is_empty()) {  // Non-null task
3394       HeapWord* prev_obj;
3395       assert(!span.contains(_restart_addr) || nth_task == 0,
3396              "Inconsistency");
3397       if (nth_task == 0) {
3398         // For the 0th task, we'll not need to compute a block_start.
3399         if (span.contains(_restart_addr)) {
3400           // In the case of a restart because of stack overflow,
3401           // we might additionally skip a chunk prefix.
3402           prev_obj = _restart_addr;
3403         } else {
3404           prev_obj = span.start();
3405         }
3406       } else {
3407         // We want to skip the first object because
3408         // the protocol is to scan any object in its entirety
3409         // that _starts_ in this span; a fortiori, any
3410         // object starting in an earlier span is scanned
3411         // as part of an earlier claimed task.
3412         // Below we use the "careful" version of block_start
3413         // so we do not try to navigate uninitialized objects.
3414         prev_obj = sp->block_start_careful(span.start());
3415         // Below we use a variant of block_size that uses the
3416         // Printezis bits to avoid waiting for allocated
3417         // objects to become initialized/parsable.
3418         while (prev_obj < span.start()) {
3419           size_t sz = sp->block_size_no_stall(prev_obj, _collector);
3420           if (sz > 0) {
3421             prev_obj += sz;
3422           } else {
3423             // In this case we may end up doing a bit of redundant
3424             // scanning, but that appears unavoidable, short of
3425             // locking the free list locks; see bug 6324141.
3426             break;
3427           }
3428         }
3429       }
3430       if (prev_obj < span.end()) {
3431         MemRegion my_span = MemRegion(prev_obj, span.end());
3432         // Do the marking work within a non-empty span --
3433         // the last argument to the constructor indicates whether the
3434         // iteration should be incremental with periodic yields.
3435         Par_MarkFromRootsClosure cl(this, _collector, my_span,
3436                                     &_collector->_markBitMap,
3437                                     work_queue(i),
3438                                     &_collector->_markStack);
3439         _collector->_markBitMap.iterate(&cl, my_span.start(), my_span.end());
3440       } // else nothing to do for this task
3441     }   // else nothing to do for this task
3442   }
3443   // We'd be tempted to assert here that since there are no
3444   // more tasks left to claim in this space, the global_finger
3445   // must exceed space->top() and a fortiori space->end(). However,
3446   // that would not quite be correct because the bumping of
3447   // global_finger occurs strictly after the claiming of a task,
3448   // so by the time we reach here the global finger may not yet
3449   // have been bumped up by the thread that claimed the last
3450   // task.
3451   pst->all_tasks_completed();
3452 }
3453 
3454 class Par_ConcMarkingClosure: public MetadataAwareOopClosure {
3455  private:
3456   CMSCollector* _collector;
3457   CMSConcMarkingTask* _task;
3458   MemRegion     _span;
3459   CMSBitMap*    _bit_map;
3460   CMSMarkStack* _overflow_stack;
3461   OopTaskQueue* _work_queue;
3462  protected:
3463   DO_OOP_WORK_DEFN
3464  public:
3465   Par_ConcMarkingClosure(CMSCollector* collector, CMSConcMarkingTask* task, OopTaskQueue* work_queue,
3466                          CMSBitMap* bit_map, CMSMarkStack* overflow_stack):
3467     MetadataAwareOopClosure(collector->ref_processor()),
3468     _collector(collector),
3469     _task(task),
3470     _span(collector->_span),
3471     _work_queue(work_queue),
3472     _bit_map(bit_map),
3473     _overflow_stack(overflow_stack)
3474   { }
3475   virtual void do_oop(oop* p);
3476   virtual void do_oop(narrowOop* p);
3477 
3478   void trim_queue(size_t max);
3479   void handle_stack_overflow(HeapWord* lost);
3480   void do_yield_check() {
3481     if (_task->should_yield()) {
3482       _task->yield();
3483     }
3484   }
3485 };
3486 
3487 // Grey object scanning during work stealing phase --
3488 // the salient assumption here is that any references
3489 // that are in these stolen objects being scanned must
3490 // already have been initialized (else they would not have
3491 // been published), so we do not need to check for
3492 // uninitialized objects before pushing here.
3493 void Par_ConcMarkingClosure::do_oop(oop obj) {
3494   assert(obj->is_oop_or_null(true), err_msg("Expected an oop or NULL at " PTR_FORMAT, p2i(obj)));
3495   HeapWord* addr = (HeapWord*)obj;
3496   // Check if oop points into the CMS generation
3497   // and is not marked
3498   if (_span.contains(addr) && !_bit_map->isMarked(addr)) {
3499     // a white object ...
3500     // If we manage to "claim" the object, by being the
3501     // first thread to mark it, then we push it on our
3502     // marking stack
3503     if (_bit_map->par_mark(addr)) {     // ... now grey
3504       // push on work queue (grey set)
3505       bool simulate_overflow = false;
3506       NOT_PRODUCT(
3507         if (CMSMarkStackOverflowALot &&
3508             _collector->simulate_overflow()) {
3509           // simulate a stack overflow
3510           simulate_overflow = true;
3511         }
3512       )
3513       if (simulate_overflow ||
3514           !(_work_queue->push(obj) || _overflow_stack->par_push(obj))) {
3515         // stack overflow
3516         if (PrintCMSStatistics != 0) {
3517           gclog_or_tty->print_cr("CMS marking stack overflow (benign) at "
3518                                  SIZE_FORMAT, _overflow_stack->capacity());
3519         }
3520         // We cannot assert that the overflow stack is full because
3521         // it may have been emptied since.
3522         assert(simulate_overflow ||
3523                _work_queue->size() == _work_queue->max_elems(),
3524               "Else push should have succeeded");
3525         handle_stack_overflow(addr);
3526       }
3527     } // Else, some other thread got there first
3528     do_yield_check();
3529   }
3530 }
3531 
3532 void Par_ConcMarkingClosure::do_oop(oop* p)       { Par_ConcMarkingClosure::do_oop_work(p); }
3533 void Par_ConcMarkingClosure::do_oop(narrowOop* p) { Par_ConcMarkingClosure::do_oop_work(p); }
3534 
3535 void Par_ConcMarkingClosure::trim_queue(size_t max) {
3536   while (_work_queue->size() > max) {
3537     oop new_oop;
3538     if (_work_queue->pop_local(new_oop)) {
3539       assert(new_oop->is_oop(), "Should be an oop");
3540       assert(_bit_map->isMarked((HeapWord*)new_oop), "Grey object");
3541       assert(_span.contains((HeapWord*)new_oop), "Not in span");
3542       new_oop->oop_iterate(this);  // do_oop() above
3543       do_yield_check();
3544     }
3545   }
3546 }
3547 
3548 // Upon stack overflow, we discard (part of) the stack,
3549 // remembering the least address amongst those discarded
3550 // in CMSCollector's _restart_address.
3551 void Par_ConcMarkingClosure::handle_stack_overflow(HeapWord* lost) {
3552   // We need to do this under a mutex to prevent other
3553   // workers from interfering with the work done below.
3554   MutexLockerEx ml(_overflow_stack->par_lock(),
3555                    Mutex::_no_safepoint_check_flag);
3556   // Remember the least grey address discarded
3557   HeapWord* ra = (HeapWord*)_overflow_stack->least_value(lost);
3558   _collector->lower_restart_addr(ra);
3559   _overflow_stack->reset();  // discard stack contents
3560   _overflow_stack->expand(); // expand the stack if possible
3561 }
3562 
3563 
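// Work-stealing loop for a single worker: repeatedly drain the local work queue,
// refill it from the shared overflow stack when possible, otherwise try to steal
// from other workers' queues, and offer termination (yielding when requested)
// once no further work can be found.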
3564 void CMSConcMarkingTask::do_work_steal(int i) {
3565   OopTaskQueue* work_q = work_queue(i);
3566   oop obj_to_scan;
3567   CMSBitMap* bm = &(_collector->_markBitMap);
3568   CMSMarkStack* ovflw = &(_collector->_markStack);
3569   int* seed = _collector->hash_seed(i);
3570   Par_ConcMarkingClosure cl(_collector, this, work_q, bm, ovflw);
3571   while (true) {
3572     cl.trim_queue(0);
3573     assert(work_q->size() == 0, "Should have been emptied above");
3574     if (get_work_from_overflow_stack(ovflw, work_q)) {
3575       // Can't assert below because the work obtained from the
3576       // overflow stack may already have been stolen from us.
3577       // assert(work_q->size() > 0, "Work from overflow stack");
3578       continue;
3579     } else if (task_queues()->steal(i, seed, /* reference */ obj_to_scan)) {
3580       assert(obj_to_scan->is_oop(), "Should be an oop");
3581       assert(bm->isMarked((HeapWord*)obj_to_scan), "Grey object");
3582       obj_to_scan->oop_iterate(&cl);
3583     } else if (terminator()->offer_termination(&_term_term)) {
3584       assert(work_q->size() == 0, "Impossible!");
3585       break;
3586     } else if (yielding() || should_yield()) {
3587       yield();
3588     }
3589   }
3590 }
3591 
3592 // This is run by the CMS (coordinator) thread.
3593 void CMSConcMarkingTask::coordinator_yield() {
3594   assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
3595          "CMS thread should hold CMS token");
3596   // First give up the locks, then yield, then re-lock
3597   // We should probably use a constructor/destructor idiom to
3598   // do this unlock/lock or modify the MutexUnlocker class to
3599   // serve our purpose. XXX
3600   assert_lock_strong(_bit_map_lock);
3601   _bit_map_lock->unlock();
3602   ConcurrentMarkSweepThread::desynchronize(true);
3603   _collector->stopTimer();
3604   if (PrintCMSStatistics != 0) {
3605     _collector->incrementYields();
3606   }
3607 
3608   // It is possible for whichever thread initiated the yield request
3609   // not to get a chance to wake up and take the bitmap lock between
3610   // this thread releasing it and reacquiring it. So, while the
3611   // should_yield() flag is on, let's sleep for a bit to give the
3612   // other thread a chance to wake up. The limit imposed on the number
3613   // of iterations is defensive, to avoid any unforeseen circumstances
3614   // putting us into an infinite loop. Since it's always been this
3615   // (coordinator_yield()) method that was observed to cause the
3616   // problem, we are using a parameter (CMSCoordinatorYieldSleepCount)
3617   // which is by default non-zero. For the other seven methods that
3618   // also perform the yield operation, we are using a different
3619   // parameter (CMSYieldSleepCount) which is by default zero. This way we
3620   // can enable the sleeping for those methods too, if necessary.
3621   // See 6442774.
3622   //
3623   // We really need to reconsider the synchronization between the GC
3624   // thread and the yield-requesting threads in the future and we
3625   // should really use wait/notify, which is the recommended
3626   // way of doing this type of interaction. Additionally, we should
3627   // consolidate the eight methods that do the yield operation and they
3628   // are almost identical into one for better maintainability and
3629   // readability. See 6445193.
3630   //
3631   // Tony 2006.06.29
3632   for (unsigned i = 0; i < CMSCoordinatorYieldSleepCount &&
3633                    ConcurrentMarkSweepThread::should_yield() &&
3634                    !CMSCollector::foregroundGCIsActive(); ++i) {
3635     os::sleep(Thread::current(), 1, false);
3636   }
3637 
3638   ConcurrentMarkSweepThread::synchronize(true);
3639   _bit_map_lock->lock_without_safepoint_check();
3640   _collector->startTimer();
3641 }
3642 
3643 bool CMSCollector::do_marking_mt() {
3644   assert(ConcGCThreads > 0 && conc_workers() != NULL, "precondition");
3645   uint num_workers = AdaptiveSizePolicy::calc_active_conc_workers(conc_workers()->total_workers(),
3646                                                                   conc_workers()->active_workers(),
3647                                                                   Threads::number_of_non_daemon_threads());
3648   conc_workers()->set_active_workers(num_workers);
3649 
3650   CompactibleFreeListSpace* cms_space  = _cmsGen->cmsSpace();
3651 
3652   CMSConcMarkingTask tsk(this,
3653                          cms_space,
3654                          conc_workers(),
3655                          task_queues());
3656 
3657   // Since the actual number of workers we get may be different
3658   // from the number we requested above, do we need to do anything different
3659 // below? In particular, maybe we need to subclass the SequentialSubTasksDone
3660   // class?? XXX
3661   cms_space->initialize_sequential_subtasks_for_marking(num_workers);
3662 
3663   // Refs discovery is already non-atomic.
3664   assert(!ref_processor()->discovery_is_atomic(), "Should be non-atomic");
3665   assert(ref_processor()->discovery_is_mt(), "Discovery should be MT");
3666   conc_workers()->start_task(&tsk);
3667   while (tsk.yielded()) {
3668     tsk.coordinator_yield();
3669     conc_workers()->continue_task(&tsk);
3670   }
3671   // If the task was aborted, _restart_addr will be non-NULL
3672   assert(tsk.completed() || _restart_addr != NULL, "Inconsistency");
3673   while (_restart_addr != NULL) {
3674     // XXX For now we do not make use of ABORTED state and have not
3675     // yet implemented the right abort semantics (even in the original
3676     // single-threaded CMS case). That needs some more investigation
3677     // and is deferred for now; see CR# TBF. 07252005YSR. XXX
3678     assert(!CMSAbortSemantics || tsk.aborted(), "Inconsistency");
3679     // If _restart_addr is non-NULL, a marking stack overflow
3680     // occurred; we need to do a fresh marking iteration from the
3681     // indicated restart address.
3682     if (_foregroundGCIsActive) {
3683       // We may be running into repeated stack overflows, having
3684       // reached the limit of the stack size, while making very
3685       // slow forward progress. It may be best to bail out and
3686       // let the foreground collector do its job.
3687       // Clear _restart_addr, so that foreground GC
3688       // works from scratch. This avoids the headache of
3689       // a "rescan" which would otherwise be needed because
3690       // of the dirty mod union table & card table.
3691       _restart_addr = NULL;
3692       return false;
3693     }
3694     // Adjust the task to restart from _restart_addr
3695     tsk.reset(_restart_addr);
3696     cms_space->initialize_sequential_subtasks_for_marking(num_workers,
3697                   _restart_addr);
3698     _restart_addr = NULL;
3699     // Get the workers going again
3700     conc_workers()->start_task(&tsk);
3701     while (tsk.yielded()) {
3702       tsk.coordinator_yield();
3703       conc_workers()->continue_task(&tsk);
3704     }
3705   }
3706   assert(tsk.completed(), "Inconsistency");
3707   assert(tsk.result() == true, "Inconsistency");
3708   return true;
3709 }
3710 
3711 bool CMSCollector::do_marking_st() {
3712   ResourceMark rm;
3713   HandleMark   hm;
3714 
3715   // Temporarily make refs discovery single threaded (non-MT)
3716   ReferenceProcessorMTDiscoveryMutator rp_mut_discovery(ref_processor(), false);
3717   MarkFromRootsClosure markFromRootsClosure(this, _span, &_markBitMap,
3718     &_markStack, CMSYield);
3719   // the last argument to iterate indicates whether the iteration
3720   // should be incremental with periodic yields.
3721   _markBitMap.iterate(&markFromRootsClosure);
3722   // If _restart_addr is non-NULL, a marking stack overflow
3723   // occurred; we need to do a fresh iteration from the
3724   // indicated restart address.
3725   while (_restart_addr != NULL) {
3726     if (_foregroundGCIsActive) {
3727       // We may be running into repeated stack overflows, having
3728       // reached the limit of the stack size, while making very
3729       // slow forward progress. It may be best to bail out and
3730       // let the foreground collector do its job.
3731       // Clear _restart_addr, so that foreground GC
3732       // works from scratch. This avoids the headache of
3733       // a "rescan" which would otherwise be needed because
3734       // of the dirty mod union table & card table.
3735       _restart_addr = NULL;
3736       return false;  // indicating failure to complete marking
3737     }
3738     // Deal with stack overflow:
3739     // we restart marking from _restart_addr
3740     HeapWord* ra = _restart_addr;
3741     markFromRootsClosure.reset(ra);
3742     _restart_addr = NULL;
3743     _markBitMap.iterate(&markFromRootsClosure, ra, _span.end());
3744   }
3745   return true;
3746 }
3747 
3748 void CMSCollector::preclean() {
3749   check_correct_thread_executing();
3750   assert(Thread::current()->is_ConcurrentGC_thread(), "Wrong thread");
3751   verify_work_stacks_empty();
3752   verify_overflow_empty();
3753   _abort_preclean = false;
3754   if (CMSPrecleaningEnabled) {
3755     if (!CMSEdenChunksRecordAlways) {
3756       _eden_chunk_index = 0;
3757     }
3758     size_t used = get_eden_used();
3759     size_t capacity = get_eden_capacity();
3760     // Don't start sampling unless we will get sufficiently
3761     // many samples.
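    // For instance, assuming default-like values CMSScheduleRemarkSamplingRatio == 5
    // and CMSScheduleRemarkEdenPenetration == 50, sampling starts only while eden is
    // below capacity/(5*100)*50, i.e. 10% full.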
3762     if (used < (capacity/(CMSScheduleRemarkSamplingRatio * 100)
3763                 * CMSScheduleRemarkEdenPenetration)) {
3764       _start_sampling = true;
3765     } else {
3766       _start_sampling = false;
3767     }
3768     TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
3769     CMSPhaseAccounting pa(this, "preclean", _gc_tracer_cm->gc_id(), !PrintGCDetails);
3770     preclean_work(CMSPrecleanRefLists1, CMSPrecleanSurvivors1);
3771   }
3772   CMSTokenSync x(true); // is cms thread
3773   if (CMSPrecleaningEnabled) {
3774     sample_eden();
3775     _collectorState = AbortablePreclean;
3776   } else {
3777     _collectorState = FinalMarking;
3778   }
3779   verify_work_stacks_empty();
3780   verify_overflow_empty();
3781 }
3782 
3783 // Try and schedule the remark such that young gen
3784 // occupancy is CMSScheduleRemarkEdenPenetration %.
3785 void CMSCollector::abortable_preclean() {
3786   check_correct_thread_executing();
3787   assert(CMSPrecleaningEnabled,  "Inconsistent control state");
3788   assert(_collectorState == AbortablePreclean, "Inconsistent control state");
3789 
3790   // If Eden's current occupancy is below this threshold,
3791   // immediately schedule the remark; else preclean
3792   // past the next scavenge in an effort to
3793   // schedule the pause as described above. By choosing
3794   // CMSScheduleRemarkEdenSizeThreshold >= max eden size
3795   // we will never do an actual abortable preclean cycle.
3796   if (get_eden_used() > CMSScheduleRemarkEdenSizeThreshold) {
3797     TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
3798     CMSPhaseAccounting pa(this, "abortable-preclean", _gc_tracer_cm->gc_id(), !PrintGCDetails);
3799     // We need more smarts in the abortable preclean
3800     // loop below to deal with cases where allocation
3801     // in young gen is very very slow, and our precleaning
3802     // is running a losing race against a horde of
3803     // mutators intent on flooding us with CMS updates
3804     // (dirty cards).
3805     // One, admittedly dumb, strategy is to give up
3806     // after a certain number of abortable precleaning loops
3807     // or after a certain maximum time. We want to make
3808     // this smarter in the next iteration.
3809     // XXX FIX ME!!! YSR
3810     size_t loops = 0, workdone = 0, cumworkdone = 0, waited = 0;
3811     while (!(should_abort_preclean() ||
3812              ConcurrentMarkSweepThread::should_terminate())) {
3813       workdone = preclean_work(CMSPrecleanRefLists2, CMSPrecleanSurvivors2);
3814       cumworkdone += workdone;
3815       loops++;
3816       // Voluntarily terminate abortable preclean phase if we have
3817       // been at it for too long.
3818       if ((CMSMaxAbortablePrecleanLoops != 0) &&
3819           loops >= CMSMaxAbortablePrecleanLoops) {
3820         if (PrintGCDetails) {
3821           gclog_or_tty->print(" CMS: abort preclean due to loops ");
3822         }
3823         break;
3824       }
3825       if (pa.wallclock_millis() > CMSMaxAbortablePrecleanTime) {
3826         if (PrintGCDetails) {
3827           gclog_or_tty->print(" CMS: abort preclean due to time ");
3828         }
3829         break;
3830       }
3831       // If we are doing little work each iteration, we should
3832       // take a short break.
3833       if (workdone < CMSAbortablePrecleanMinWorkPerIteration) {
3834         // Sleep for some time, waiting for work to accumulate
3835         stopTimer();
3836         cmsThread()->wait_on_cms_lock(CMSAbortablePrecleanWaitMillis);
3837         startTimer();
3838         waited++;
3839       }
3840     }
3841     if (PrintCMSStatistics > 0) {
3842       gclog_or_tty->print(" [" SIZE_FORMAT " iterations, " SIZE_FORMAT " waits, " SIZE_FORMAT " cards] ",
3843                           loops, waited, cumworkdone);
3844     }
3845   }
3846   CMSTokenSync x(true); // is cms thread
3847   if (_collectorState != Idling) {
3848     assert(_collectorState == AbortablePreclean,
3849            "Spontaneous state transition?");
3850     _collectorState = FinalMarking;
3851   } // Else, a foreground collection completed this CMS cycle.
3852   return;
3853 }
3854 
3855 // Respond to an Eden sampling opportunity
3856 void CMSCollector::sample_eden() {
3857   // Make sure a young gc cannot sneak in between our
3858   // reading and recording of a sample.
3859   assert(Thread::current()->is_ConcurrentGC_thread(),
3860          "Only the cms thread may collect Eden samples");
3861   assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
3862          "Should collect samples while holding CMS token");
3863   if (!_start_sampling) {
3864     return;
3865   }
3866   // When CMSEdenChunksRecordAlways is true, the eden chunk array
3867   // is populated by the young generation.
3868   if (_eden_chunk_array != NULL && !CMSEdenChunksRecordAlways) {
3869     if (_eden_chunk_index < _eden_chunk_capacity) {
3870       _eden_chunk_array[_eden_chunk_index] = *_top_addr;   // take sample
3871       assert(_eden_chunk_array[_eden_chunk_index] <= *_end_addr,
3872              "Unexpected state of Eden");
3873       // We'd like to check that what we just sampled is an oop-start address;
3874       // however, we cannot do that here since the object may not yet have been
3875       // initialized. So we'll instead do the check when we _use_ this sample
3876       // later.
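      // For example, with (say) a 16K-word sampling grain, a new sample
      // is committed only if it lies at least 16K words past the previous
      // boundary; a closer sample stays uncommitted and is simply
      // overwritten by the next one.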
3877       if (_eden_chunk_index == 0 ||
3878           (pointer_delta(_eden_chunk_array[_eden_chunk_index],
3879                          _eden_chunk_array[_eden_chunk_index-1])
3880            >= CMSSamplingGrain)) {
3881         _eden_chunk_index++;  // commit sample
3882       }
3883     }
3884   }
3885   if ((_collectorState == AbortablePreclean) && !_abort_preclean) {
3886     size_t used = get_eden_used();
3887     size_t capacity = get_eden_capacity();
3888     assert(used <= capacity, "Unexpected state of Eden");
3889     if (used >  (capacity/100 * CMSScheduleRemarkEdenPenetration)) {
3890       _abort_preclean = true;
3891     }
3892   }
3893 }
3894 
3895 
3896 size_t CMSCollector::preclean_work(bool clean_refs, bool clean_survivor) {
3897   assert(_collectorState == Precleaning ||
3898          _collectorState == AbortablePreclean, "incorrect state");
3899   ResourceMark rm;
3900   HandleMark   hm;
3901 
3902   // Precleaning is currently not MT but the reference processor
3903   // may be set for MT.  Disable it temporarily here.
3904   ReferenceProcessor* rp = ref_processor();
3905   ReferenceProcessorMTDiscoveryMutator rp_mut_discovery(rp, false);
3906 
3907   // Do one pass of scrubbing the discovered reference lists
3908   // to remove any reference objects with strongly-reachable
3909   // referents.
3910   if (clean_refs) {
3911     CMSPrecleanRefsYieldClosure yield_cl(this);
3912     assert(rp->span().equals(_span), "Spans should be equal");
3913     CMSKeepAliveClosure keep_alive(this, _span, &_markBitMap,
3914                                    &_markStack, true /* preclean */);
3915     CMSDrainMarkingStackClosure complete_trace(this,
3916                                    _span, &_markBitMap, &_markStack,
3917                                    &keep_alive, true /* preclean */);
3918 
3919     // We don't want this step to interfere with a young
3920     // collection because we don't want to take CPU
3921     // or memory bandwidth away from the young GC threads
3922     // (which may be as many as there are CPUs).
3923     // Note that we don't need to protect ourselves from
3924     // interference with mutators because they can't
3925     // manipulate the discovered reference lists nor affect
3926     // the computed reachability of the referents, the
3927     // only properties manipulated by the precleaning
3928     // of these reference lists.
3929     stopTimer();
3930     CMSTokenSyncWithLocks x(true /* is cms thread */,
3931                             bitMapLock());
3932     startTimer();
3933     sample_eden();
3934 
3935     // The following will yield to allow foreground
3936     // collection to proceed promptly. XXX YSR:
3937     // The code in this method may need further
3938     // tweaking for better performance and some restructuring
3939     // for cleaner interfaces.
3940     GCTimer *gc_timer = NULL; // Currently not tracing concurrent phases
3941     rp->preclean_discovered_references(
3942           rp->is_alive_non_header(), &keep_alive, &complete_trace, &yield_cl,
3943           gc_timer, _gc_tracer_cm->gc_id());
3944   }
3945 
3946   if (clean_survivor) {  // preclean the active survivor space(s)
3947     PushAndMarkClosure pam_cl(this, _span, ref_processor(),
3948                              &_markBitMap, &_modUnionTable,
3949                              &_markStack, true /* precleaning phase */);
3950     stopTimer();
3951     CMSTokenSyncWithLocks ts(true /* is cms thread */,
3952                              bitMapLock());
3953     startTimer();
3954     unsigned int before_count =
3955       GenCollectedHeap::heap()->total_collections();
3956     SurvivorSpacePrecleanClosure
3957       sss_cl(this, _span, &_markBitMap, &_markStack,
3958              &pam_cl, before_count, CMSYield);
3959     _young_gen->from()->object_iterate_careful(&sss_cl);
3960     _young_gen->to()->object_iterate_careful(&sss_cl);
3961   }
3962   MarkRefsIntoAndScanClosure
3963     mrias_cl(_span, ref_processor(), &_markBitMap, &_modUnionTable,
3964              &_markStack, this, CMSYield,
3965              true /* precleaning phase */);
3966   // CAUTION: The following closure has persistent state that may need to
3967   // be reset upon a decrease in the sequence of addresses it
3968   // processes.
3969   ScanMarkedObjectsAgainCarefullyClosure
3970     smoac_cl(this, _span,
3971       &_markBitMap, &_markStack, &mrias_cl, CMSYield);
3972 
3973   // Preclean dirty cards in ModUnionTable and CardTable using
3974   // appropriate convergence criterion;
3975   // repeat CMSPrecleanIter times unless we find that
3976   // we are losing.
3977   assert(CMSPrecleanIter < 10, "CMSPrecleanIter is too large");
3978   assert(CMSPrecleanNumerator < CMSPrecleanDenominator,
3979          "Bad convergence multiplier");
3980   assert(CMSPrecleanThreshold >= 100,
3981          "Unreasonably low CMSPrecleanThreshold");
3982 
3983   size_t numIter, cumNumCards, lastNumCards, curNumCards;
3984   for (numIter = 0, cumNumCards = lastNumCards = curNumCards = 0;
3985        numIter < CMSPrecleanIter;
3986        numIter++, lastNumCards = curNumCards, cumNumCards += curNumCards) {
3987     curNumCards  = preclean_mod_union_table(_cmsGen, &smoac_cl);
3988     if (Verbose && PrintGCDetails) {
3989       gclog_or_tty->print(" (modUnionTable: " SIZE_FORMAT " cards)", curNumCards);
3990     }
3991     // Either there are very few dirty cards, so re-mark
3992     // pause will be small anyway, or our pre-cleaning isn't
3993     // that much faster than the rate at which cards are being
3994     // dirtied, so we might as well stop and re-mark since
3995     // precleaning won't improve our re-mark time by much.
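    // For example, with (say) CMSPrecleanNumerator=2 and
    // CMSPrecleanDenominator=3, we keep iterating only while
    // curNumCards <= (2/3)*lastNumCards, i.e. while each pass cleans
    // at least a third fewer cards than the previous one.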
3996     if (curNumCards <= CMSPrecleanThreshold ||
3997         (numIter > 0 &&
3998          (curNumCards * CMSPrecleanDenominator >
3999          lastNumCards * CMSPrecleanNumerator))) {
4000       numIter++;
4001       cumNumCards += curNumCards;
4002       break;
4003     }
4004   }
4005 
4006   preclean_klasses(&mrias_cl, _cmsGen->freelistLock());
4007 
4008   curNumCards = preclean_card_table(_cmsGen, &smoac_cl);
4009   cumNumCards += curNumCards;
4010   if (PrintGCDetails && PrintCMSStatistics != 0) {
4011     gclog_or_tty->print_cr(" (cardTable: " SIZE_FORMAT " cards, re-scanned " SIZE_FORMAT " cards, " SIZE_FORMAT " iterations)",
4012                   curNumCards, cumNumCards, numIter);
4013   }
4014   return cumNumCards;   // as a measure of useful work done
4015 }
4016 
4017 // PRECLEANING NOTES:
4018 // Precleaning involves:
4019 // . reading the bits of the modUnionTable and clearing the set bits.
4020 // . For the cards corresponding to the set bits, we scan the
4021 //   objects on those cards. This means we need the free_list_lock
4022 //   so that we can safely iterate over the CMS space when scanning
4023 //   for oops.
4024 // . When we scan the objects, we'll be both reading and setting
4025 //   marks in the marking bit map, so we'll need the marking bit map.
4026 // . For protecting _collector_state transitions, we take the CGC_lock.
//   Note that any races in the reading of card table entries by the
//   CMS thread on the one hand and the clearing of those entries by the
//   VM thread or the setting of those entries by the mutator threads on the
//   other are quite benign. However, for efficiency it makes sense to keep
//   the VM thread from racing with the CMS thread while the latter is
//   reading dirty card info from the card table and the modUnionTable.
//   We therefore also use the CGC_lock to protect the reading of the card
//   table and the mod union table by the CMS thread.
4035 // . We run concurrently with mutator updates, so scanning
4036 //   needs to be done carefully  -- we should not try to scan
4037 //   potentially uninitialized objects.
4038 //
4039 // Locking strategy: While holding the CGC_lock, we scan over and
4040 // reset a maximal dirty range of the mod union / card tables, then lock
4041 // the free_list_lock and bitmap lock to do a full marking, then
4042 // release these locks; and repeat the cycle. This allows for a
4043 // certain amount of fairness in the sharing of these locks between
4044 // the CMS collector on the one hand, and the VM thread and the
4045 // mutators on the other.
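//
// In terms of the synchronization helpers used below, one iteration of
// the preclean loop is roughly:
//
//   { CMSTokenSync x(true);  // CGC_lock only
//     dirtyRegion = get-and-clear a maximal dirty range; }
//   { CMSTokenSyncWithLocks y(true, gen->freelistLock(), bitMapLock());
//     carefully scan the objects on dirtyRegion; }
//
// repeated until the whole span has been covered or the preclean is
// aborted.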
4046 
4047 // NOTE: preclean_mod_union_table() and preclean_card_table()
4048 // further below are largely identical; if you need to modify
4049 // one of these methods, please check the other method too.
4050 
4051 size_t CMSCollector::preclean_mod_union_table(
4052   ConcurrentMarkSweepGeneration* gen,
4053   ScanMarkedObjectsAgainCarefullyClosure* cl) {
4054   verify_work_stacks_empty();
4055   verify_overflow_empty();
4056 
4057   // strategy: starting with the first card, accumulate contiguous
4058   // ranges of dirty cards; clear these cards, then scan the region
4059   // covered by these cards.
4060 
4061   // Since all of the MUT is committed ahead, we can just use
4062   // that, in case the generations expand while we are precleaning.
4063   // It might also be fine to just use the committed part of the
4064   // generation, but we might potentially miss cards when the
4065   // generation is rapidly expanding while we are in the midst
4066   // of precleaning.
4067   HeapWord* startAddr = gen->reserved().start();
4068   HeapWord* endAddr   = gen->reserved().end();
4069 
4070   cl->setFreelistLock(gen->freelistLock());   // needed for yielding
4071 
4072   size_t numDirtyCards, cumNumDirtyCards;
4073   HeapWord *nextAddr, *lastAddr;
4074   for (cumNumDirtyCards = numDirtyCards = 0,
4075        nextAddr = lastAddr = startAddr;
4076        nextAddr < endAddr;
4077        nextAddr = lastAddr, cumNumDirtyCards += numDirtyCards) {
4078 
4079     ResourceMark rm;
4080     HandleMark   hm;
4081 
4082     MemRegion dirtyRegion;
4083     {
4084       stopTimer();
4085       // Potential yield point
4086       CMSTokenSync ts(true);
4087       startTimer();
4088       sample_eden();
4089       // Get dirty region starting at nextOffset (inclusive),
4090       // simultaneously clearing it.
4091       dirtyRegion =
4092         _modUnionTable.getAndClearMarkedRegion(nextAddr, endAddr);
4093       assert(dirtyRegion.start() >= nextAddr,
4094              "returned region inconsistent?");
4095     }
4096     // Remember where the next search should begin.
4097     // The returned region (if non-empty) is a right open interval,
4098     // so lastOffset is obtained from the right end of that
4099     // interval.
4100     lastAddr = dirtyRegion.end();
4101     // Should do something more transparent and less hacky XXX
4102     numDirtyCards =
4103       _modUnionTable.heapWordDiffToOffsetDiff(dirtyRegion.word_size());
4104 
4105     // We'll scan the cards in the dirty region (with periodic
4106     // yields for foreground GC as needed).
4107     if (!dirtyRegion.is_empty()) {
4108       assert(numDirtyCards > 0, "consistency check");
4109       HeapWord* stop_point = NULL;
4110       stopTimer();
4111       // Potential yield point
4112       CMSTokenSyncWithLocks ts(true, gen->freelistLock(),
4113                                bitMapLock());
4114       startTimer();
4115       {
4116         verify_work_stacks_empty();
4117         verify_overflow_empty();
4118         sample_eden();
4119         stop_point =
4120           gen->cmsSpace()->object_iterate_careful_m(dirtyRegion, cl);
4121       }
4122       if (stop_point != NULL) {
4123         // The careful iteration stopped early either because it found an
4124         // uninitialized object, or because we were in the midst of an
4125         // "abortable preclean", which should now be aborted. Redirty
4126         // the bits corresponding to the partially-scanned or unscanned
4127         // cards. We'll either restart at the next block boundary or
4128         // abort the preclean.
4129         assert((_collectorState == AbortablePreclean && should_abort_preclean()),
4130                "Should only be AbortablePreclean.");
4131         _modUnionTable.mark_range(MemRegion(stop_point, dirtyRegion.end()));
4132         if (should_abort_preclean()) {
4133           break; // out of preclean loop
4134         } else {
4135           // Compute the next address at which preclean should pick up;
4136           // might need bitMapLock in order to read P-bits.
4137           lastAddr = next_card_start_after_block(stop_point);
4138         }
4139       }
4140     } else {
4141       assert(lastAddr == endAddr, "consistency check");
4142       assert(numDirtyCards == 0, "consistency check");
4143       break;
4144     }
4145   }
4146   verify_work_stacks_empty();
4147   verify_overflow_empty();
4148   return cumNumDirtyCards;
4149 }
4150 
4151 // NOTE: preclean_mod_union_table() above and preclean_card_table()
4152 // below are largely identical; if you need to modify
4153 // one of these methods, please check the other method too.
4154 
4155 size_t CMSCollector::preclean_card_table(ConcurrentMarkSweepGeneration* gen,
4156   ScanMarkedObjectsAgainCarefullyClosure* cl) {
  // strategy: it's similar to preclean_mod_union_table() above, in that
4158   // we accumulate contiguous ranges of dirty cards, mark these cards
4159   // precleaned, then scan the region covered by these cards.
4160   HeapWord* endAddr   = (HeapWord*)(gen->_virtual_space.high());
4161   HeapWord* startAddr = (HeapWord*)(gen->_virtual_space.low());
4162 
4163   cl->setFreelistLock(gen->freelistLock());   // needed for yielding
4164 
4165   size_t numDirtyCards, cumNumDirtyCards;
4166   HeapWord *lastAddr, *nextAddr;
4167 
4168   for (cumNumDirtyCards = numDirtyCards = 0,
4169        nextAddr = lastAddr = startAddr;
4170        nextAddr < endAddr;
4171        nextAddr = lastAddr, cumNumDirtyCards += numDirtyCards) {
4172 
4173     ResourceMark rm;
4174     HandleMark   hm;
4175 
4176     MemRegion dirtyRegion;
4177     {
4178       // See comments in "Precleaning notes" above on why we
4179       // do this locking. XXX Could the locking overheads be
4180       // too high when dirty cards are sparse? [I don't think so.]
4181       stopTimer();
4182       CMSTokenSync x(true); // is cms thread
4183       startTimer();
4184       sample_eden();
4185       // Get and clear dirty region from card table
4186       dirtyRegion = _ct->ct_bs()->dirty_card_range_after_reset(
4187                                     MemRegion(nextAddr, endAddr),
4188                                     true,
4189                                     CardTableModRefBS::precleaned_card_val());
4190 
4191       assert(dirtyRegion.start() >= nextAddr,
4192              "returned region inconsistent?");
4193     }
4194     lastAddr = dirtyRegion.end();
4195     numDirtyCards =
4196       dirtyRegion.word_size()/CardTableModRefBS::card_size_in_words;
4197 
4198     if (!dirtyRegion.is_empty()) {
4199       stopTimer();
4200       CMSTokenSyncWithLocks ts(true, gen->freelistLock(), bitMapLock());
4201       startTimer();
4202       sample_eden();
4203       verify_work_stacks_empty();
4204       verify_overflow_empty();
4205       HeapWord* stop_point =
4206         gen->cmsSpace()->object_iterate_careful_m(dirtyRegion, cl);
4207       if (stop_point != NULL) {
4208         assert((_collectorState == AbortablePreclean && should_abort_preclean()),
4209                "Should only be AbortablePreclean.");
4210         _ct->ct_bs()->invalidate(MemRegion(stop_point, dirtyRegion.end()));
4211         if (should_abort_preclean()) {
4212           break; // out of preclean loop
4213         } else {
4214           // Compute the next address at which preclean should pick up.
4215           lastAddr = next_card_start_after_block(stop_point);
4216         }
4217       }
4218     } else {
4219       break;
4220     }
4221   }
4222   verify_work_stacks_empty();
4223   verify_overflow_empty();
4224   return cumNumDirtyCards;
4225 }
4226 
4227 class PrecleanKlassClosure : public KlassClosure {
4228   KlassToOopClosure _cm_klass_closure;
4229  public:
4230   PrecleanKlassClosure(OopClosure* oop_closure) : _cm_klass_closure(oop_closure) {}
4231   void do_klass(Klass* k) {
4232     if (k->has_accumulated_modified_oops()) {
4233       k->clear_accumulated_modified_oops();
4234 
4235       _cm_klass_closure.do_klass(k);
4236     }
4237   }
4238 };
4239 
// The freelist lock is needed to prevent asserts; is it really needed?
4241 void CMSCollector::preclean_klasses(MarkRefsIntoAndScanClosure* cl, Mutex* freelistLock) {
4242 
4243   cl->set_freelistLock(freelistLock);
4244 
4245   CMSTokenSyncWithLocks ts(true, freelistLock, bitMapLock());
4246 
4247   // SSS: Add equivalent to ScanMarkedObjectsAgainCarefullyClosure::do_yield_check and should_abort_preclean?
4248   // SSS: We should probably check if precleaning should be aborted, at suitable intervals?
4249   PrecleanKlassClosure preclean_klass_closure(cl);
4250   ClassLoaderDataGraph::classes_do(&preclean_klass_closure);
4251 
4252   verify_work_stacks_empty();
4253   verify_overflow_empty();
4254 }
4255 
4256 void CMSCollector::checkpointRootsFinal() {
4257   assert(_collectorState == FinalMarking, "incorrect state transition?");
4258   check_correct_thread_executing();
4259   // world is stopped at this checkpoint
4260   assert(SafepointSynchronize::is_at_safepoint(),
4261          "world should be stopped");
  TraceCMSMemoryManagerStats tms(_collectorState, GenCollectedHeap::heap()->gc_cause());
4263 
4264   verify_work_stacks_empty();
4265   verify_overflow_empty();
4266 
4267   if (PrintGCDetails) {
    gclog_or_tty->print("[YG occupancy: " SIZE_FORMAT " K (" SIZE_FORMAT " K)]",
4269                         _young_gen->used() / K,
4270                         _young_gen->capacity() / K);
4271   }
4272   {
4273     if (CMSScavengeBeforeRemark) {
4274       GenCollectedHeap* gch = GenCollectedHeap::heap();
4275       // Temporarily set flag to false, GCH->do_collection will
4276       // expect it to be false and set to true
4277       FlagSetting fl(gch->_is_gc_active, false);
4278       NOT_PRODUCT(GCTraceTime t("Scavenge-Before-Remark",
4279         PrintGCDetails && Verbose, true, _gc_timer_cm, _gc_tracer_cm->gc_id());)
4280       int level = _cmsGen->level() - 1;
4281       if (level >= 0) {
4282         gch->do_collection(true,        // full (i.e. force, see below)
4283                            false,       // !clear_all_soft_refs
4284                            0,           // size
4285                            false,       // is_tlab
4286                            level        // max_level
4287                           );
4288       }
4289     }
4290     FreelistLocker x(this);
4291     MutexLockerEx y(bitMapLock(),
4292                     Mutex::_no_safepoint_check_flag);
4293     checkpointRootsFinalWork();
4294   }
4295   verify_work_stacks_empty();
4296   verify_overflow_empty();
4297 }
4298 
4299 void CMSCollector::checkpointRootsFinalWork() {
4300   NOT_PRODUCT(GCTraceTime tr("checkpointRootsFinalWork", PrintGCDetails, false, _gc_timer_cm, _gc_tracer_cm->gc_id());)
4301 
4302   assert(haveFreelistLocks(), "must have free list locks");
4303   assert_lock_strong(bitMapLock());
4304 
4305   ResourceMark rm;
4306   HandleMark   hm;
4307 
4308   GenCollectedHeap* gch = GenCollectedHeap::heap();
4309 
4310   if (should_unload_classes()) {
4311     CodeCache::gc_prologue();
4312   }
4313   assert(haveFreelistLocks(), "must have free list locks");
4314   assert_lock_strong(bitMapLock());
4315 
4316   // We might assume that we need not fill TLAB's when
4317   // CMSScavengeBeforeRemark is set, because we may have just done
4318   // a scavenge which would have filled all TLAB's -- and besides
4319   // Eden would be empty. This however may not always be the case --
4320   // for instance although we asked for a scavenge, it may not have
4321   // happened because of a JNI critical section. We probably need
4322   // a policy for deciding whether we can in that case wait until
4323   // the critical section releases and then do the remark following
4324   // the scavenge, and skip it here. In the absence of that policy,
4325   // or of an indication of whether the scavenge did indeed occur,
4326   // we cannot rely on TLAB's having been filled and must do
4327   // so here just in case a scavenge did not happen.
4328   gch->ensure_parsability(false);  // fill TLAB's, but no need to retire them
4329   // Update the saved marks which may affect the root scans.
4330   gch->save_marks();
4331 
4332   if (CMSPrintEdenSurvivorChunks) {
4333     print_eden_and_survivor_chunk_arrays();
4334   }
4335 
4336   {
4337     COMPILER2_PRESENT(DerivedPointerTableDeactivate dpt_deact;)
4338 
4339     // Note on the role of the mod union table:
4340     // Since the marker in "markFromRoots" marks concurrently with
4341     // mutators, it is possible for some reachable objects not to have been
4342     // scanned. For instance, an only reference to an object A was
4343     // placed in object B after the marker scanned B. Unless B is rescanned,
4344     // A would be collected. Such updates to references in marked objects
4345     // are detected via the mod union table which is the set of all cards
4346     // dirtied since the first checkpoint in this GC cycle and prior to
4347     // the most recent young generation GC, minus those cleaned up by the
4348     // concurrent precleaning.
4349     if (CMSParallelRemarkEnabled) {
4350       GCTraceTime t("Rescan (parallel) ", PrintGCDetails, false, _gc_timer_cm, _gc_tracer_cm->gc_id());
4351       do_remark_parallel();
4352     } else {
4353       GCTraceTime t("Rescan (non-parallel) ", PrintGCDetails, false,
4354                   _gc_timer_cm, _gc_tracer_cm->gc_id());
4355       do_remark_non_parallel();
4356     }
4357   }
4358   verify_work_stacks_empty();
4359   verify_overflow_empty();
4360 
4361   {
4362     NOT_PRODUCT(GCTraceTime ts("refProcessingWork", PrintGCDetails, false, _gc_timer_cm, _gc_tracer_cm->gc_id());)
4363     refProcessingWork();
4364   }
4365   verify_work_stacks_empty();
4366   verify_overflow_empty();
4367 
4368   if (should_unload_classes()) {
4369     CodeCache::gc_epilogue();
4370   }
4371   JvmtiExport::gc_epilogue();
4372 
4373   // If we encountered any (marking stack / work queue) overflow
4374   // events during the current CMS cycle, take appropriate
4375   // remedial measures, where possible, so as to try and avoid
4376   // recurrence of that condition.
4377   assert(_markStack.isEmpty(), "No grey objects");
4378   size_t ser_ovflw = _ser_pmc_remark_ovflw + _ser_pmc_preclean_ovflw +
4379                      _ser_kac_ovflw        + _ser_kac_preclean_ovflw;
4380   if (ser_ovflw > 0) {
4381     if (PrintCMSStatistics != 0) {
      gclog_or_tty->print_cr("Marking stack overflow (benign) "
        "(pmc_pc=" SIZE_FORMAT ", pmc_rm=" SIZE_FORMAT ", kac=" SIZE_FORMAT
        ", kac_preclean=" SIZE_FORMAT ")",
4385         _ser_pmc_preclean_ovflw, _ser_pmc_remark_ovflw,
4386         _ser_kac_ovflw, _ser_kac_preclean_ovflw);
4387     }
4388     _markStack.expand();
4389     _ser_pmc_remark_ovflw = 0;
4390     _ser_pmc_preclean_ovflw = 0;
4391     _ser_kac_preclean_ovflw = 0;
4392     _ser_kac_ovflw = 0;
4393   }
4394   if (_par_pmc_remark_ovflw > 0 || _par_kac_ovflw > 0) {
4395     if (PrintCMSStatistics != 0) {
4396       gclog_or_tty->print_cr("Work queue overflow (benign) "
4397         "(pmc_rm="SIZE_FORMAT", kac="SIZE_FORMAT")",
4398         _par_pmc_remark_ovflw, _par_kac_ovflw);
4399     }
4400     _par_pmc_remark_ovflw = 0;
4401     _par_kac_ovflw = 0;
4402   }
4403   if (PrintCMSStatistics != 0) {
4404      if (_markStack._hit_limit > 0) {
       gclog_or_tty->print_cr(" (benign) Hit max stack size limit (" SIZE_FORMAT ")",
4406                               _markStack._hit_limit);
4407      }
4408      if (_markStack._failed_double > 0) {
       gclog_or_tty->print_cr(" (benign) Failed stack doubling (" SIZE_FORMAT "),"
                              " current capacity " SIZE_FORMAT,
4411                               _markStack._failed_double,
4412                               _markStack.capacity());
4413      }
4414   }
4415   _markStack._hit_limit = 0;
4416   _markStack._failed_double = 0;
4417 
4418   if ((VerifyAfterGC || VerifyDuringGC) &&
4419       GenCollectedHeap::heap()->total_collections() >= VerifyGCStartAt) {
4420     verify_after_remark();
4421   }
4422 
4423   _gc_tracer_cm->report_object_count_after_gc(&_is_alive_closure);
4424 
4425   // Change under the freelistLocks.
4426   _collectorState = Sweeping;
4427   // Call isAllClear() under bitMapLock
4428   assert(_modUnionTable.isAllClear(),
4429       "Should be clear by end of the final marking");
4430   assert(_ct->klass_rem_set()->mod_union_is_clear(),
4431       "Should be clear by end of the final marking");
4432 }
4433 
4434 void CMSParInitialMarkTask::work(uint worker_id) {
4435   elapsedTimer _timer;
4436   ResourceMark rm;
4437   HandleMark   hm;
4438 
4439   // ---------- scan from roots --------------
4440   _timer.start();
4441   GenCollectedHeap* gch = GenCollectedHeap::heap();
4442   Par_MarkRefsIntoClosure par_mri_cl(_collector->_span, &(_collector->_markBitMap));
4443 
4444   // ---------- young gen roots --------------
4445   {
4446     work_on_young_gen_roots(worker_id, &par_mri_cl);
4447     _timer.stop();
4448     if (PrintCMSStatistics != 0) {
4449       gclog_or_tty->print_cr(
4450         "Finished young gen initial mark scan work in %dth thread: %3.3f sec",
4451         worker_id, _timer.seconds());
4452     }
4453   }
4454 
4455   // ---------- remaining roots --------------
4456   _timer.reset();
4457   _timer.start();
4458 
4459   CLDToOopClosure cld_closure(&par_mri_cl, true);
4460 
4461   gch->gen_process_roots(_strong_roots_scope,
4462                          _collector->_cmsGen->level(),
4463                          false,     // yg was scanned above
4464                          GenCollectedHeap::ScanningOption(_collector->CMSCollector::roots_scanning_options()),
4465                          _collector->should_unload_classes(),
4466                          &par_mri_cl,
4467                          NULL,
4468                          &cld_closure);
4469   assert(_collector->should_unload_classes()
4470          || (_collector->CMSCollector::roots_scanning_options() & GenCollectedHeap::SO_AllCodeCache),
4471          "if we didn't scan the code cache, we have to be ready to drop nmethods with expired weak oops");
4472   _timer.stop();
4473   if (PrintCMSStatistics != 0) {
4474     gclog_or_tty->print_cr(
4475       "Finished remaining root initial mark scan work in %dth thread: %3.3f sec",
4476       worker_id, _timer.seconds());
4477   }
4478 }
4479 
4480 // Parallel remark task
4481 class CMSParRemarkTask: public CMSParMarkTask {
4482   CompactibleFreeListSpace* _cms_space;
4483 
4484   // The per-thread work queues, available here for stealing.
4485   OopTaskQueueSet*       _task_queues;
4486   ParallelTaskTerminator _term;
4487   StrongRootsScope*      _strong_roots_scope;
4488 
4489  public:
4490   // A value of 0 passed to n_workers will cause the number of
4491   // workers to be taken from the active workers in the work gang.
4492   CMSParRemarkTask(CMSCollector* collector,
4493                    CompactibleFreeListSpace* cms_space,
4494                    uint n_workers, FlexibleWorkGang* workers,
4495                    OopTaskQueueSet* task_queues,
4496                    StrongRootsScope* strong_roots_scope):
4497     CMSParMarkTask("Rescan roots and grey objects in parallel",
4498                    collector, n_workers),
4499     _cms_space(cms_space),
4500     _task_queues(task_queues),
4501     _term(n_workers, task_queues),
4502     _strong_roots_scope(strong_roots_scope) { }
4503 
4504   OopTaskQueueSet* task_queues() { return _task_queues; }
4505 
4506   OopTaskQueue* work_queue(int i) { return task_queues()->queue(i); }
4507 
4508   ParallelTaskTerminator* terminator() { return &_term; }
4509   uint n_workers() { return _n_workers; }
4510 
4511   void work(uint worker_id);
4512 
4513  private:
4514   // ... of  dirty cards in old space
4515   void do_dirty_card_rescan_tasks(CompactibleFreeListSpace* sp, int i,
4516                                   Par_MarkRefsIntoAndScanClosure* cl);
4517 
4518   // ... work stealing for the above
4519   void do_work_steal(int i, Par_MarkRefsIntoAndScanClosure* cl, int* seed);
4520 };
4521 
4522 class RemarkKlassClosure : public KlassClosure {
4523   KlassToOopClosure _cm_klass_closure;
4524  public:
4525   RemarkKlassClosure(OopClosure* oop_closure) : _cm_klass_closure(oop_closure) {}
4526   void do_klass(Klass* k) {
4527     // Check if we have modified any oops in the Klass during the concurrent marking.
4528     if (k->has_accumulated_modified_oops()) {
4529       k->clear_accumulated_modified_oops();
4530 
      // We could have transferred the current modified marks to the accumulated marks,
4532       // like we do with the Card Table to Mod Union Table. But it's not really necessary.
4533     } else if (k->has_modified_oops()) {
4534       // Don't clear anything, this info is needed by the next young collection.
4535     } else {
4536       // No modified oops in the Klass.
4537       return;
4538     }
4539 
4540     // The klass has modified fields, need to scan the klass.
4541     _cm_klass_closure.do_klass(k);
4542   }
4543 };
4544 
4545 void CMSParMarkTask::work_on_young_gen_roots(uint worker_id, OopsInGenClosure* cl) {
4546   ParNewGeneration* young_gen = _collector->_young_gen;
4547   ContiguousSpace* eden_space = young_gen->eden();
4548   ContiguousSpace* from_space = young_gen->from();
4549   ContiguousSpace* to_space   = young_gen->to();
4550 
4551   HeapWord** eca = _collector->_eden_chunk_array;
4552   size_t     ect = _collector->_eden_chunk_index;
4553   HeapWord** sca = _collector->_survivor_chunk_array;
4554   size_t     sct = _collector->_survivor_chunk_index;
4555 
4556   assert(ect <= _collector->_eden_chunk_capacity, "out of bounds");
4557   assert(sct <= _collector->_survivor_chunk_capacity, "out of bounds");
4558 
4559   do_young_space_rescan(worker_id, cl, to_space, NULL, 0);
4560   do_young_space_rescan(worker_id, cl, from_space, sca, sct);
4561   do_young_space_rescan(worker_id, cl, eden_space, eca, ect);
4562 }
4563 
4564 // work_queue(i) is passed to the closure
4565 // Par_MarkRefsIntoAndScanClosure.  The "i" parameter
4566 // also is passed to do_dirty_card_rescan_tasks() and to
4567 // do_work_steal() to select the i-th task_queue.
4568 
4569 void CMSParRemarkTask::work(uint worker_id) {
4570   elapsedTimer _timer;
4571   ResourceMark rm;
4572   HandleMark   hm;
4573 
4574   // ---------- rescan from roots --------------
4575   _timer.start();
4576   GenCollectedHeap* gch = GenCollectedHeap::heap();
4577   Par_MarkRefsIntoAndScanClosure par_mrias_cl(_collector,
4578     _collector->_span, _collector->ref_processor(),
4579     &(_collector->_markBitMap),
4580     work_queue(worker_id));
4581 
4582   // Rescan young gen roots first since these are likely
4583   // coarsely partitioned and may, on that account, constitute
4584   // the critical path; thus, it's best to start off that
4585   // work first.
4586   // ---------- young gen roots --------------
4587   {
4588     work_on_young_gen_roots(worker_id, &par_mrias_cl);
4589     _timer.stop();
4590     if (PrintCMSStatistics != 0) {
4591       gclog_or_tty->print_cr(
4592         "Finished young gen rescan work in %dth thread: %3.3f sec",
4593         worker_id, _timer.seconds());
4594     }
4595   }
4596 
4597   // ---------- remaining roots --------------
4598   _timer.reset();
4599   _timer.start();
4600   gch->gen_process_roots(_strong_roots_scope,
4601                          _collector->_cmsGen->level(),
4602                          false,     // yg was scanned above
4603                          GenCollectedHeap::ScanningOption(_collector->CMSCollector::roots_scanning_options()),
4604                          _collector->should_unload_classes(),
4605                          &par_mrias_cl,
4606                          NULL,
4607                          NULL);     // The dirty klasses will be handled below
4608 
4609   assert(_collector->should_unload_classes()
4610          || (_collector->CMSCollector::roots_scanning_options() & GenCollectedHeap::SO_AllCodeCache),
4611          "if we didn't scan the code cache, we have to be ready to drop nmethods with expired weak oops");
4612   _timer.stop();
4613   if (PrintCMSStatistics != 0) {
4614     gclog_or_tty->print_cr(
4615       "Finished remaining root rescan work in %dth thread: %3.3f sec",
4616       worker_id, _timer.seconds());
4617   }
4618 
4619   // ---------- unhandled CLD scanning ----------
4620   if (worker_id == 0) { // Single threaded at the moment.
4621     _timer.reset();
4622     _timer.start();
4623 
4624     // Scan all new class loader data objects and new dependencies that were
4625     // introduced during concurrent marking.
4626     ResourceMark rm;
4627     GrowableArray<ClassLoaderData*>* array = ClassLoaderDataGraph::new_clds();
4628     for (int i = 0; i < array->length(); i++) {
4629       par_mrias_cl.do_class_loader_data(array->at(i));
4630     }
4631 
4632     // We don't need to keep track of new CLDs anymore.
4633     ClassLoaderDataGraph::remember_new_clds(false);
4634 
4635     _timer.stop();
4636     if (PrintCMSStatistics != 0) {
4637       gclog_or_tty->print_cr(
4638           "Finished unhandled CLD scanning work in %dth thread: %3.3f sec",
4639           worker_id, _timer.seconds());
4640     }
4641   }
4642 
4643   // ---------- dirty klass scanning ----------
4644   if (worker_id == 0) { // Single threaded at the moment.
4645     _timer.reset();
4646     _timer.start();
4647 
    // Scan all classes that were dirtied during the concurrent marking phase.
4649     RemarkKlassClosure remark_klass_closure(&par_mrias_cl);
4650     ClassLoaderDataGraph::classes_do(&remark_klass_closure);
4651 
4652     _timer.stop();
4653     if (PrintCMSStatistics != 0) {
4654       gclog_or_tty->print_cr(
4655           "Finished dirty klass scanning work in %dth thread: %3.3f sec",
4656           worker_id, _timer.seconds());
4657     }
4658   }
4659 
4660   // We might have added oops to ClassLoaderData::_handles during the
4661   // concurrent marking phase. These oops point to newly allocated objects
4662   // that are guaranteed to be kept alive. Either by the direct allocation
4663   // code, or when the young collector processes the roots. Hence,
4664   // we don't have to revisit the _handles block during the remark phase.
4665 
4666   // ---------- rescan dirty cards ------------
4667   _timer.reset();
4668   _timer.start();
4669 
4670   // Do the rescan tasks for each of the two spaces
4671   // (cms_space) in turn.
4672   // "worker_id" is passed to select the task_queue for "worker_id"
4673   do_dirty_card_rescan_tasks(_cms_space, worker_id, &par_mrias_cl);
4674   _timer.stop();
4675   if (PrintCMSStatistics != 0) {
4676     gclog_or_tty->print_cr(
4677       "Finished dirty card rescan work in %dth thread: %3.3f sec",
4678       worker_id, _timer.seconds());
4679   }
4680 
4681   // ---------- steal work from other threads ...
4682   // ---------- ... and drain overflow list.
4683   _timer.reset();
4684   _timer.start();
4685   do_work_steal(worker_id, &par_mrias_cl, _collector->hash_seed(worker_id));
4686   _timer.stop();
4687   if (PrintCMSStatistics != 0) {
4688     gclog_or_tty->print_cr(
4689       "Finished work stealing in %dth thread: %3.3f sec",
4690       worker_id, _timer.seconds());
4691   }
4692 }
4693 
// Note that the worker_id parameter is not used below.
4695 void
4696 CMSParMarkTask::do_young_space_rescan(uint worker_id,
4697   OopsInGenClosure* cl, ContiguousSpace* space,
4698   HeapWord** chunk_array, size_t chunk_top) {
4699   // Until all tasks completed:
4700   // . claim an unclaimed task
4701   // . compute region boundaries corresponding to task claimed
4702   //   using chunk_array
4703   // . par_oop_iterate(cl) over that region
4704 
4705   ResourceMark rm;
4706   HandleMark   hm;
4707 
4708   SequentialSubTasksDone* pst = space->par_seq_tasks();
4709 
4710   uint nth_task = 0;
4711   uint n_tasks  = pst->n_tasks();
4712 
4713   if (n_tasks > 0) {
4714     assert(pst->valid(), "Uninitialized use?");
4715     HeapWord *start, *end;
4716     while (!pst->is_task_claimed(/* reference */ nth_task)) {
4717       // We claimed task # nth_task; compute its boundaries.
4718       if (chunk_top == 0) {  // no samples were taken
4719         assert(nth_task == 0 && n_tasks == 1, "Can have only 1 eden task");
4720         start = space->bottom();
4721         end   = space->top();
4722       } else if (nth_task == 0) {
4723         start = space->bottom();
4724         end   = chunk_array[nth_task];
4725       } else if (nth_task < (uint)chunk_top) {
4726         assert(nth_task >= 1, "Control point invariant");
4727         start = chunk_array[nth_task - 1];
4728         end   = chunk_array[nth_task];
4729       } else {
4730         assert(nth_task == (uint)chunk_top, "Control point invariant");
4731         start = chunk_array[chunk_top - 1];
4732         end   = space->top();
4733       }
4734       MemRegion mr(start, end);
4735       // Verify that mr is in space
4736       assert(mr.is_empty() || space->used_region().contains(mr),
4737              "Should be in space");
4738       // Verify that "start" is an object boundary
4739       assert(mr.is_empty() || oop(mr.start())->is_oop(),
4740              "Should be an oop");
4741       space->par_oop_iterate(mr, cl);
4742     }
4743     pst->all_tasks_completed();
4744   }
4745 }
4746 
4747 void
4748 CMSParRemarkTask::do_dirty_card_rescan_tasks(
4749   CompactibleFreeListSpace* sp, int i,
4750   Par_MarkRefsIntoAndScanClosure* cl) {
4751   // Until all tasks completed:
4752   // . claim an unclaimed task
4753   // . compute region boundaries corresponding to task claimed
4754   // . transfer dirty bits ct->mut for that region
4755   // . apply rescanclosure to dirty mut bits for that region
4756 
4757   ResourceMark rm;
4758   HandleMark   hm;
4759 
4760   OopTaskQueue* work_q = work_queue(i);
4761   ModUnionClosure modUnionClosure(&(_collector->_modUnionTable));
4762   // CAUTION! CAUTION! CAUTION! CAUTION! CAUTION! CAUTION! CAUTION!
4763   // CAUTION: This closure has state that persists across calls to
4764   // the work method dirty_range_iterate_clear() in that it has
4765   // embedded in it a (subtype of) UpwardsObjectClosure. The
4766   // use of that state in the embedded UpwardsObjectClosure instance
4767   // assumes that the cards are always iterated (even if in parallel
4768   // by several threads) in monotonically increasing order per each
4769   // thread. This is true of the implementation below which picks
4770   // card ranges (chunks) in monotonically increasing order globally
4771   // and, a-fortiori, in monotonically increasing order per thread
4772   // (the latter order being a subsequence of the former).
4773   // If the work code below is ever reorganized into a more chaotic
4774   // work-partitioning form than the current "sequential tasks"
4775   // paradigm, the use of that persistent state will have to be
4776   // revisited and modified appropriately. See also related
4777   // bug 4756801 work on which should examine this code to make
4778   // sure that the changes there do not run counter to the
4779   // assumptions made here and necessary for correctness and
4780   // efficiency. Note also that this code might yield inefficient
4781   // behavior in the case of very large objects that span one or
4782   // more work chunks. Such objects would potentially be scanned
4783   // several times redundantly. Work on 4756801 should try and
4784   // address that performance anomaly if at all possible. XXX
4785   MemRegion  full_span  = _collector->_span;
4786   CMSBitMap* bm    = &(_collector->_markBitMap);     // shared
4787   MarkFromDirtyCardsClosure
4788     greyRescanClosure(_collector, full_span, // entire span of interest
4789                       sp, bm, work_q, cl);
4790 
4791   SequentialSubTasksDone* pst = sp->conc_par_seq_tasks();
4792   assert(pst->valid(), "Uninitialized use?");
4793   uint nth_task = 0;
4794   const int alignment = CardTableModRefBS::card_size * BitsPerWord;
4795   MemRegion span = sp->used_region();
4796   HeapWord* start_addr = span.start();
4797   HeapWord* end_addr = (HeapWord*)round_to((intptr_t)span.end(),
4798                                            alignment);
4799   const size_t chunk_size = sp->rescan_task_size(); // in HeapWord units
4800   assert((HeapWord*)round_to((intptr_t)start_addr, alignment) ==
4801          start_addr, "Check alignment");
4802   assert((size_t)round_to((intptr_t)chunk_size, alignment) ==
4803          chunk_size, "Check alignment");
4804 
4805   while (!pst->is_task_claimed(/* reference */ nth_task)) {
4806     // Having claimed the nth_task, compute corresponding mem-region,
4807     // which is a-fortiori aligned correctly (i.e. at a MUT boundary).
4808     // The alignment restriction ensures that we do not need any
4809     // synchronization with other gang-workers while setting or
    // clearing bits in this chunk of the MUT.
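    // For example, with (say) 512-byte cards on a 64-bit VM, each MUT
    // word covers 64 cards, i.e. 32K bytes of heap; aligning chunks at
    // 32K-byte boundaries therefore keeps workers on disjoint MUT words.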
4811     MemRegion this_span = MemRegion(start_addr + nth_task*chunk_size,
4812                                     start_addr + (nth_task+1)*chunk_size);
4813     // The last chunk's end might be way beyond end of the
4814     // used region. In that case pull back appropriately.
4815     if (this_span.end() > end_addr) {
4816       this_span.set_end(end_addr);
4817       assert(!this_span.is_empty(), "Program logic (calculation of n_tasks)");
4818     }
4819     // Iterate over the dirty cards covering this chunk, marking them
4820     // precleaned, and setting the corresponding bits in the mod union
4821     // table. Since we have been careful to partition at Card and MUT-word
4822     // boundaries no synchronization is needed between parallel threads.
4823     _collector->_ct->ct_bs()->dirty_card_iterate(this_span,
4824                                                  &modUnionClosure);
4825 
4826     // Having transferred these marks into the modUnionTable,
4827     // rescan the marked objects on the dirty cards in the modUnionTable.
4828     // Even if this is at a synchronous collection, the initial marking
4829     // may have been done during an asynchronous collection so there
4830     // may be dirty bits in the mod-union table.
4831     _collector->_modUnionTable.dirty_range_iterate_clear(
4832                   this_span, &greyRescanClosure);
4833     _collector->_modUnionTable.verifyNoOneBitsInRange(
4834                                  this_span.start(),
4835                                  this_span.end());
4836   }
4837   pst->all_tasks_completed();  // declare that i am done
4838 }
4839 
4840 // . see if we can share work_queues with ParNew? XXX
4841 void
4842 CMSParRemarkTask::do_work_steal(int i, Par_MarkRefsIntoAndScanClosure* cl,
4843                                 int* seed) {
4844   OopTaskQueue* work_q = work_queue(i);
4845   NOT_PRODUCT(int num_steals = 0;)
4846   oop obj_to_scan;
4847   CMSBitMap* bm = &(_collector->_markBitMap);
4848 
4849   while (true) {
4850     // Completely finish any left over work from (an) earlier round(s)
4851     cl->trim_queue(0);
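    // Decide how much to take from the global overflow list: at most a
    // quarter of this queue's remaining capacity, capped by
    // ParGCDesiredObjsFromOverflowList.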
4852     size_t num_from_overflow_list = MIN2((size_t)(work_q->max_elems() - work_q->size())/4,
4853                                          (size_t)ParGCDesiredObjsFromOverflowList);
4854     // Now check if there's any work in the overflow list
4855     // Passing ParallelGCThreads as the third parameter, no_of_gc_threads,
4856     // only affects the number of attempts made to get work from the
4857     // overflow list and does not affect the number of workers.  Just
4858     // pass ParallelGCThreads so this behavior is unchanged.
4859     if (_collector->par_take_from_overflow_list(num_from_overflow_list,
4860                                                 work_q,
4861                                                 ParallelGCThreads)) {
4862       // found something in global overflow list;
4863       // not yet ready to go stealing work from others.
4864       // We'd like to assert(work_q->size() != 0, ...)
4865       // because we just took work from the overflow list,
4866       // but of course we can't since all of that could have
4867       // been already stolen from us.
4868       // "He giveth and He taketh away."
4869       continue;
4870     }
4871     // Verify that we have no work before we resort to stealing
4872     assert(work_q->size() == 0, "Have work, shouldn't steal");
4873     // Try to steal from other queues that have work
4874     if (task_queues()->steal(i, seed, /* reference */ obj_to_scan)) {
4875       NOT_PRODUCT(num_steals++;)
4876       assert(obj_to_scan->is_oop(), "Oops, not an oop!");
4877       assert(bm->isMarked((HeapWord*)obj_to_scan), "Stole an unmarked oop?");
4878       // Do scanning work
4879       obj_to_scan->oop_iterate(cl);
4880       // Loop around, finish this work, and try to steal some more
4881     } else if (terminator()->offer_termination()) {
4882         break;  // nirvana from the infinite cycle
4883     }
4884   }
4885   NOT_PRODUCT(
4886     if (PrintCMSStatistics != 0) {
4887       gclog_or_tty->print("\n\t(%d: stole %d oops)", i, num_steals);
4888     }
4889   )
4890   assert(work_q->size() == 0 && _collector->overflow_list_is_empty(),
4891          "Else our work is not yet done");
4892 }
4893 
// Record object boundaries in _eden_chunk_array by sampling the eden
// top in the slow-path eden object allocation code path, if
// CMSEdenChunksRecordAlways is true. If CMSEdenChunksRecordAlways is
// false, we instead rely on the asynchronous sampling done by
// sample_eden() above during the preclean phase.
4900 void CMSCollector::sample_eden_chunk() {
4901   if (CMSEdenChunksRecordAlways && _eden_chunk_array != NULL) {
4902     if (_eden_chunk_lock->try_lock()) {
4903       // Record a sample. This is the critical section. The contents
4904       // of the _eden_chunk_array have to be non-decreasing in the
4905       // address order.
4906       _eden_chunk_array[_eden_chunk_index] = *_top_addr;
4907       assert(_eden_chunk_array[_eden_chunk_index] <= *_end_addr,
4908              "Unexpected state of Eden");
4909       if (_eden_chunk_index == 0 ||
4910           ((_eden_chunk_array[_eden_chunk_index] > _eden_chunk_array[_eden_chunk_index-1]) &&
4911            (pointer_delta(_eden_chunk_array[_eden_chunk_index],
4912                           _eden_chunk_array[_eden_chunk_index-1]) >= CMSSamplingGrain))) {
4913         _eden_chunk_index++;  // commit sample
4914       }
4915       _eden_chunk_lock->unlock();
4916     }
4917   }
4918 }
4919 
4920 // Return a thread-local PLAB recording array, as appropriate.
4921 void* CMSCollector::get_data_recorder(int thr_num) {
4922   if (_survivor_plab_array != NULL &&
4923       (CMSPLABRecordAlways ||
4924        (_collectorState > Marking && _collectorState < FinalMarking))) {
4925     assert(thr_num < (int)ParallelGCThreads, "thr_num is out of bounds");
4926     ChunkArray* ca = &_survivor_plab_array[thr_num];
4927     ca->reset();   // clear it so that fresh data is recorded
4928     return (void*) ca;
4929   } else {
4930     return NULL;
4931   }
4932 }
4933 
4934 // Reset all the thread-local PLAB recording arrays
4935 void CMSCollector::reset_survivor_plab_arrays() {
4936   for (uint i = 0; i < ParallelGCThreads; i++) {
4937     _survivor_plab_array[i].reset();
4938   }
4939 }
4940 
4941 // Merge the per-thread plab arrays into the global survivor chunk
4942 // array which will provide the partitioning of the survivor space
4943 // for CMS initial scan and rescan.
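// This is a simple k-way merge of the per-thread PLAB arrays (each
// recorded in increasing address order): e.g. merging the two recorded
// boundary sets {a0, a4} and {a2, a6} yields {a0, a2, a4, a6} in
// _survivor_chunk_array.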
4944 void CMSCollector::merge_survivor_plab_arrays(ContiguousSpace* surv,
4945                                               int no_of_gc_threads) {
4946   assert(_survivor_plab_array  != NULL, "Error");
4947   assert(_survivor_chunk_array != NULL, "Error");
4948   assert(_collectorState == FinalMarking ||
4949          (CMSParallelInitialMarkEnabled && _collectorState == InitialMarking), "Error");
4950   for (int j = 0; j < no_of_gc_threads; j++) {
4951     _cursor[j] = 0;
4952   }
4953   HeapWord* top = surv->top();
4954   size_t i;
4955   for (i = 0; i < _survivor_chunk_capacity; i++) {  // all sca entries
4956     HeapWord* min_val = top;          // Higher than any PLAB address
4957     uint      min_tid = 0;            // position of min_val this round
4958     for (int j = 0; j < no_of_gc_threads; j++) {
4959       ChunkArray* cur_sca = &_survivor_plab_array[j];
4960       if (_cursor[j] == cur_sca->end()) {
4961         continue;
4962       }
4963       assert(_cursor[j] < cur_sca->end(), "ctl pt invariant");
4964       HeapWord* cur_val = cur_sca->nth(_cursor[j]);
4965       assert(surv->used_region().contains(cur_val), "Out of bounds value");
4966       if (cur_val < min_val) {
4967         min_tid = j;
4968         min_val = cur_val;
4969       } else {
4970         assert(cur_val < top, "All recorded addresses should be less");
4971       }
4972     }
4973     // At this point min_val and min_tid are respectively
4974     // the least address in _survivor_plab_array[j]->nth(_cursor[j])
4975     // and the thread (j) that witnesses that address.
4976     // We record this address in the _survivor_chunk_array[i]
4977     // and increment _cursor[min_tid] prior to the next round i.
4978     if (min_val == top) {
4979       break;
4980     }
4981     _survivor_chunk_array[i] = min_val;
4982     _cursor[min_tid]++;
4983   }
4984   // We are all done; record the size of the _survivor_chunk_array
4985   _survivor_chunk_index = i; // exclusive: [0, i)
4986   if (PrintCMSStatistics > 0) {
    gclog_or_tty->print(" (Survivor:" SIZE_FORMAT " chunks) ", i);
4988   }
4989   // Verify that we used up all the recorded entries
4990   #ifdef ASSERT
4991     size_t total = 0;
4992     for (int j = 0; j < no_of_gc_threads; j++) {
4993       assert(_cursor[j] == _survivor_plab_array[j].end(), "Ctl pt invariant");
4994       total += _cursor[j];
4995     }
4996     assert(total == _survivor_chunk_index, "Ctl Pt Invariant");
4997     // Check that the merged array is in sorted order
4998     if (total > 0) {
4999       for (size_t i = 0; i < total - 1; i++) {
5000         if (PrintCMSStatistics > 0) {
5001           gclog_or_tty->print(" (chunk" SIZE_FORMAT ":" INTPTR_FORMAT ") ",
5002                               i, p2i(_survivor_chunk_array[i]));
5003         }
5004         assert(_survivor_chunk_array[i] < _survivor_chunk_array[i+1],
5005                "Not sorted");
5006       }
5007     }
5008   #endif // ASSERT
5009 }
5010 
5011 // Set up the space's par_seq_tasks structure for work claiming
5012 // for parallel initial scan and rescan of young gen.
5013 // See ParRescanTask where this is currently used.
5014 void
5015 CMSCollector::
5016 initialize_sequential_subtasks_for_young_gen_rescan(int n_threads) {
5017   assert(n_threads > 0, "Unexpected n_threads argument");
5018 
5019   // Eden space
5020   if (!_young_gen->eden()->is_empty()) {
5021     SequentialSubTasksDone* pst = _young_gen->eden()->par_seq_tasks();
5022     assert(!pst->valid(), "Clobbering existing data?");
    // Each recorded boundary in [0, _eden_chunk_index), plus the final
    // segment up to eden top, yields one task.
5024     size_t n_tasks = _eden_chunk_index + 1;
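    // For example, three recorded eden boundaries b0 < b1 < b2 give four
    // scan regions, [bottom,b0), [b0,b1), [b1,b2) and [b2,top), one task
    // each; see CMSParMarkTask::do_young_space_rescan().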
5025     assert(n_tasks == 1 || _eden_chunk_array != NULL, "Error");
5026     // Sets the condition for completion of the subtask (how many threads
5027     // need to finish in order to be done).
5028     pst->set_n_threads(n_threads);
5029     pst->set_n_tasks((int)n_tasks);
5030   }
5031 
5032   // Merge the survivor plab arrays into _survivor_chunk_array
5033   if (_survivor_plab_array != NULL) {
5034     merge_survivor_plab_arrays(_young_gen->from(), n_threads);
5035   } else {
5036     assert(_survivor_chunk_index == 0, "Error");
5037   }
5038 
5039   // To space
5040   {
5041     SequentialSubTasksDone* pst = _young_gen->to()->par_seq_tasks();
5042     assert(!pst->valid(), "Clobbering existing data?");
5043     // Sets the condition for completion of the subtask (how many threads
5044     // need to finish in order to be done).
5045     pst->set_n_threads(n_threads);
5046     pst->set_n_tasks(1);
5047     assert(pst->valid(), "Error");
5048   }
5049 
5050   // From space
5051   {
5052     SequentialSubTasksDone* pst = _young_gen->from()->par_seq_tasks();
5053     assert(!pst->valid(), "Clobbering existing data?");
5054     size_t n_tasks = _survivor_chunk_index + 1;
5055     assert(n_tasks == 1 || _survivor_chunk_array != NULL, "Error");
5056     // Sets the condition for completion of the subtask (how many threads
5057     // need to finish in order to be done).
5058     pst->set_n_threads(n_threads);
5059     pst->set_n_tasks((int)n_tasks);
5060     assert(pst->valid(), "Error");
5061   }
5062 }
5063 
5064 // Parallel version of remark
5065 void CMSCollector::do_remark_parallel() {
5066   GenCollectedHeap* gch = GenCollectedHeap::heap();
5067   FlexibleWorkGang* workers = gch->workers();
5068   assert(workers != NULL, "Need parallel worker threads.");
5069   // Choose to use the number of GC workers most recently set
5070   // into "active_workers".
5071   uint n_workers = workers->active_workers();
5072 
5073   CompactibleFreeListSpace* cms_space  = _cmsGen->cmsSpace();
5074 
5075   StrongRootsScope srs(n_workers);
5076 
5077   CMSParRemarkTask tsk(this, cms_space, n_workers, workers, task_queues(), &srs);
5078 
5079   // We won't be iterating over the cards in the card table updating
5080   // the younger_gen cards, so we shouldn't call the following else
5081   // the verification code as well as subsequent younger_refs_iterate
5082   // code would get confused. XXX
5083   // gch->rem_set()->prepare_for_younger_refs_iterate(true); // parallel
5084 
5085   // The young gen rescan work will not be done as part of
5086   // process_roots (which currently doesn't know how to
5087   // parallelize such a scan), but rather will be broken up into
5088   // a set of parallel tasks (via the sampling that the [abortable]
5089   // preclean phase did of eden, plus the [two] tasks of
5090   // scanning the [two] survivor spaces. Further fine-grain
5091   // parallelization of the scanning of the survivor spaces
5092   // themselves, and of precleaning of the younger gen itself
5093   // is deferred to the future.
5094   initialize_sequential_subtasks_for_young_gen_rescan(n_workers);
5095 
5096   // The dirty card rescan work is broken up into a "sequence"
5097   // of parallel tasks (per constituent space) that are dynamically
5098   // claimed by the parallel threads.
5099   cms_space->initialize_sequential_subtasks_for_rescan(n_workers);
5100 
5101   // It turns out that even when we're using 1 thread, doing the work in a
5102   // separate thread causes wide variance in run times.  We can't help this
5103   // in the multi-threaded case, but we special-case n=1 here to get
5104   // repeatable measurements of the 1-thread overhead of the parallel code.
5105   if (n_workers > 1) {
5106     // Make refs discovery MT-safe, if it isn't already: it may not
5107     // necessarily be so, since it's possible that we are doing
5108     // ST marking.
5109     ReferenceProcessorMTDiscoveryMutator mt(ref_processor(), true);
5110     workers->run_task(&tsk);
5111   } else {
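    // Single worker: run the task directly in this thread, with
    // single-threaded reference discovery.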
5112     ReferenceProcessorMTDiscoveryMutator mt(ref_processor(), false);
5113     tsk.work(0);
5114   }
5115 
5116   // restore, single-threaded for now, any preserved marks
5117   // as a result of work_q overflow
5118   restore_preserved_marks_if_any();
5119 }
5120 
5121 // Non-parallel version of remark
5122 void CMSCollector::do_remark_non_parallel() {
5123   ResourceMark rm;
5124   HandleMark   hm;
5125   GenCollectedHeap* gch = GenCollectedHeap::heap();
5126   ReferenceProcessorMTDiscoveryMutator mt(ref_processor(), false);
5127 
5128   MarkRefsIntoAndScanClosure
5129     mrias_cl(_span, ref_processor(), &_markBitMap, NULL /* not precleaning */,
5130              &_markStack, this,
5131              false /* should_yield */, false /* not precleaning */);
5132   MarkFromDirtyCardsClosure
5133     markFromDirtyCardsClosure(this, _span,
5134                               NULL,  // space is set further below
5135                               &_markBitMap, &_markStack, &mrias_cl);
5136   {
5137     GCTraceTime t("grey object rescan", PrintGCDetails, false, _gc_timer_cm, _gc_tracer_cm->gc_id());
5138     // Iterate over the dirty cards, setting the corresponding bits in the
5139     // mod union table.
5140     {
5141       ModUnionClosure modUnionClosure(&_modUnionTable);
5142       _ct->ct_bs()->dirty_card_iterate(
5143                       _cmsGen->used_region(),
5144                       &modUnionClosure);
5145     }
5146     // Having transferred these marks into the modUnionTable, we just need
5147     // to rescan the marked objects on the dirty cards in the modUnionTable.
5148     // The initial marking may have been done during an asynchronous
5149     // collection so there may be dirty bits in the mod-union table.
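    // Round the upper bound of the region up to card_size * BitsPerWord so
    // that the mod union table range cleared below stays word-aligned.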
5150     const int alignment =
5151       CardTableModRefBS::card_size * BitsPerWord;
5152     {
5153       // ... First handle dirty cards in CMS gen
5154       markFromDirtyCardsClosure.set_space(_cmsGen->cmsSpace());
5155       MemRegion ur = _cmsGen->used_region();
5156       HeapWord* lb = ur.start();
5157       HeapWord* ub = (HeapWord*)round_to((intptr_t)ur.end(), alignment);
5158       MemRegion cms_span(lb, ub);
5159       _modUnionTable.dirty_range_iterate_clear(cms_span,
5160                                                &markFromDirtyCardsClosure);
5161       verify_work_stacks_empty();
5162       if (PrintCMSStatistics != 0) {
5163         gclog_or_tty->print(" (re-scanned "SIZE_FORMAT" dirty cards in cms gen) ",
5164           markFromDirtyCardsClosure.num_dirty_cards());
5165       }
5166     }
5167   }
5168   if (VerifyDuringGC &&
5169       GenCollectedHeap::heap()->total_collections() >= VerifyGCStartAt) {
5170     HandleMark hm;  // Discard invalid handles created during verification
5171     Universe::verify();
5172   }
5173   {
5174     GCTraceTime t("root rescan", PrintGCDetails, false, _gc_timer_cm, _gc_tracer_cm->gc_id());
5175 
5176     verify_work_stacks_empty();
5177 
5178     gch->rem_set()->prepare_for_younger_refs_iterate(false); // Not parallel.
5179     StrongRootsScope srs(1);
5180 
5181     gch->gen_process_roots(&srs,
5182                            _cmsGen->level(),
5183                            true,  // younger gens as roots
5184                            GenCollectedHeap::ScanningOption(roots_scanning_options()),
5185                            should_unload_classes(),
5186                            &mrias_cl,
5187                            NULL,
5188                            NULL); // The dirty klasses will be handled below
5189 
5190     assert(should_unload_classes()
5191            || (roots_scanning_options() & GenCollectedHeap::SO_AllCodeCache),
5192            "if we didn't scan the code cache, we have to be ready to drop nmethods with expired weak oops");
5193   }
5194 
5195   {
5196     GCTraceTime t("visit unhandled CLDs", PrintGCDetails, false, _gc_timer_cm, _gc_tracer_cm->gc_id());
5197 
5198     verify_work_stacks_empty();
5199 
5200     // Scan all class loader data objects that might have been introduced
5201     // during concurrent marking.
5202     ResourceMark rm;
5203     GrowableArray<ClassLoaderData*>* array = ClassLoaderDataGraph::new_clds();
5204     for (int i = 0; i < array->length(); i++) {
5205       mrias_cl.do_class_loader_data(array->at(i));
5206     }
5207 
5208     // We don't need to keep track of new CLDs anymore.
5209     ClassLoaderDataGraph::remember_new_clds(false);
5210 
5211     verify_work_stacks_empty();
5212   }
5213 
5214   {
5215     GCTraceTime t("dirty klass scan", PrintGCDetails, false, _gc_timer_cm, _gc_tracer_cm->gc_id());
5216 
5217     verify_work_stacks_empty();
5218 
5219     RemarkKlassClosure remark_klass_closure(&mrias_cl);
5220     ClassLoaderDataGraph::classes_do(&remark_klass_closure);
5221 
5222     verify_work_stacks_empty();
5223   }
5224 
5225   // We might have added oops to ClassLoaderData::_handles during the
5226   // concurrent marking phase. These oops point to newly allocated objects
5227   // that are guaranteed to be kept alive. Either by the direct allocation
5228   // code, or when the young collector processes the roots. Hence,
5229   // we don't have to revisit the _handles block during the remark phase.
5230 
5231   verify_work_stacks_empty();
5232   // Restore evacuated mark words, if any, used for overflow list links
5233   if (!CMSOverflowEarlyRestoration) {
5234     restore_preserved_marks_if_any();
5235   }
5236   verify_overflow_empty();
5237 }
5238 
5239 ////////////////////////////////////////////////////////
5240 // Parallel Reference Processing Task Proxy Class
5241 ////////////////////////////////////////////////////////
5242 class CMSRefProcTaskProxy: public AbstractGangTaskWOopQueues {
5243   typedef AbstractRefProcTaskExecutor::ProcessTask ProcessTask;
5244   CMSCollector*          _collector;
5245   CMSBitMap*             _mark_bit_map;
5246   const MemRegion        _span;
5247   ProcessTask&           _task;
5248 
5249 public:
5250   CMSRefProcTaskProxy(ProcessTask&     task,
5251                       CMSCollector*    collector,
5252                       const MemRegion& span,
5253                       CMSBitMap*       mark_bit_map,
5254                       AbstractWorkGang* workers,
5255                       OopTaskQueueSet* task_queues):
5256     AbstractGangTaskWOopQueues("Process referents by policy in parallel",
5257       task_queues,
5258       workers->active_workers()),
5259     _task(task),
5260     _collector(collector), _span(span), _mark_bit_map(mark_bit_map)
5261   {
5262     assert(_collector->_span.equals(_span) && !_span.is_empty(),
5263            "Inconsistency in _span");
5264   }
5265 
5266   OopTaskQueueSet* task_queues() { return queues(); }
5267 
5268   OopTaskQueue* work_queue(int i) { return task_queues()->queue(i); }
5269 
5270   void do_work_steal(int i,
5271                      CMSParDrainMarkingStackClosure* drain,
5272                      CMSParKeepAliveClosure* keep_alive,
5273                      int* seed);
5274 
5275   virtual void work(uint worker_id);
5276 };
5277 
5278 void CMSRefProcTaskProxy::work(uint worker_id) {
5279   ResourceMark rm;
5280   HandleMark hm;
5281   assert(_collector->_span.equals(_span), "Inconsistency in _span");
5282   CMSParKeepAliveClosure par_keep_alive(_collector, _span,
5283                                         _mark_bit_map,
5284                                         work_queue(worker_id));
5285   CMSParDrainMarkingStackClosure par_drain_stack(_collector, _span,
5286                                                  _mark_bit_map,
5287                                                  work_queue(worker_id));
5288   CMSIsAliveClosure is_alive_closure(_span, _mark_bit_map);
5289   _task.work(worker_id, is_alive_closure, par_keep_alive, par_drain_stack);
5290   if (_task.marks_oops_alive()) {
5291     do_work_steal(worker_id, &par_drain_stack, &par_keep_alive,
5292                   _collector->hash_seed(worker_id));
5293   }
5294   assert(work_queue(worker_id)->size() == 0, "work_queue should be empty");
5295   assert(_collector->_overflow_list == NULL, "non-empty _overflow_list");
5296 }
5297 
5298 class CMSRefEnqueueTaskProxy: public AbstractGangTask {
5299   typedef AbstractRefProcTaskExecutor::EnqueueTask EnqueueTask;
5300   EnqueueTask& _task;
5301 
5302 public:
5303   CMSRefEnqueueTaskProxy(EnqueueTask& task)
5304     : AbstractGangTask("Enqueue reference objects in parallel"),
5305       _task(task)
5306   { }
5307 
5308   virtual void work(uint worker_id)
5309   {
5310     _task.work(worker_id);
5311   }
5312 };
5313 
5314 CMSParKeepAliveClosure::CMSParKeepAliveClosure(CMSCollector* collector,
5315   MemRegion span, CMSBitMap* bit_map, OopTaskQueue* work_queue):
5316    _span(span),
5317    _bit_map(bit_map),
5318    _work_queue(work_queue),
5319    _mark_and_push(collector, span, bit_map, work_queue),
5320    _low_water_mark(MIN2((uint)(work_queue->max_elems()/4),
5321                         (uint)(CMSWorkQueueDrainThreshold * ParallelGCThreads)))
5322 { }
5323 
5324 // . see if we can share work_queues with ParNew? XXX
5325 void CMSRefProcTaskProxy::do_work_steal(int i,
5326   CMSParDrainMarkingStackClosure* drain,
5327   CMSParKeepAliveClosure* keep_alive,
5328   int* seed) {
5329   OopTaskQueue* work_q = work_queue(i);
5330   NOT_PRODUCT(int num_steals = 0;)
5331   oop obj_to_scan;
5332 
5333   while (true) {
5334     // Completely finish any left over work from (an) earlier round(s)
5335     drain->trim_queue(0);
5336     size_t num_from_overflow_list = MIN2((size_t)(work_q->max_elems() - work_q->size())/4,
5337                                          (size_t)ParGCDesiredObjsFromOverflowList);
5338     // Now check if there's any work in the overflow list
5339     // Passing ParallelGCThreads as the third parameter, no_of_gc_threads,
5340     // only affects the number of attempts made to get work from the
5341     // overflow list and does not affect the number of workers.  Just
5342     // pass ParallelGCThreads so this behavior is unchanged.
5343     if (_collector->par_take_from_overflow_list(num_from_overflow_list,
5344                                                 work_q,
5345                                                 ParallelGCThreads)) {
5346       // Found something in global overflow list;
5347       // not yet ready to go stealing work from others.
5348       // We'd like to assert(work_q->size() != 0, ...)
5349       // because we just took work from the overflow list,
5350       // but of course we can't, since all of that might have
5351       // been already stolen from us.
5352       continue;
5353     }
5354     // Verify that we have no work before we resort to stealing
5355     assert(work_q->size() == 0, "Have work, shouldn't steal");
5356     // Try to steal from other queues that have work
5357     if (task_queues()->steal(i, seed, /* reference */ obj_to_scan)) {
5358       NOT_PRODUCT(num_steals++;)
5359       assert(obj_to_scan->is_oop(), "Oops, not an oop!");
5360       assert(_mark_bit_map->isMarked((HeapWord*)obj_to_scan), "Stole an unmarked oop?");
5361       // Do scanning work
5362       obj_to_scan->oop_iterate(keep_alive);
5363       // Loop around, finish this work, and try to steal some more
5364     } else if (terminator()->offer_termination()) {
5365       break;  // nirvana from the infinite cycle
5366     }
5367   }
5368   NOT_PRODUCT(
5369     if (PrintCMSStatistics != 0) {
5370       gclog_or_tty->print("\n\t(%d: stole %d oops)", i, num_steals);
5371     }
5372   )
5373 }
5374 
5375 void CMSRefProcTaskExecutor::execute(ProcessTask& task)
5376 {
5377   GenCollectedHeap* gch = GenCollectedHeap::heap();
5378   FlexibleWorkGang* workers = gch->workers();
5379   assert(workers != NULL, "Need parallel worker threads.");
5380   CMSRefProcTaskProxy rp_task(task, &_collector,
5381                               _collector.ref_processor()->span(),
5382                               _collector.markBitMap(),
5383                               workers, _collector.task_queues());
5384   workers->run_task(&rp_task);
5385 }
5386 
5387 void CMSRefProcTaskExecutor::execute(EnqueueTask& task)
5388 {
5389 
5390   GenCollectedHeap* gch = GenCollectedHeap::heap();
5391   FlexibleWorkGang* workers = gch->workers();
5392   assert(workers != NULL, "Need parallel worker threads.");
5393   CMSRefEnqueueTaskProxy enq_task(task);
5394   workers->run_task(&enq_task);
5395 }
5396 
5397 void CMSCollector::refProcessingWork() {
5398   ResourceMark rm;
5399   HandleMark   hm;
5400 
5401   ReferenceProcessor* rp = ref_processor();
5402   assert(rp->span().equals(_span), "Spans should be equal");
5403   assert(!rp->enqueuing_is_done(), "Enqueuing should not be complete");
5404   // Process weak references.
5405   rp->setup_policy(false);
5406   verify_work_stacks_empty();
5407 
5408   CMSKeepAliveClosure cmsKeepAliveClosure(this, _span, &_markBitMap,
5409                                           &_markStack, false /* !preclean */);
5410   CMSDrainMarkingStackClosure cmsDrainMarkingStackClosure(this,
5411                                 _span, &_markBitMap, &_markStack,
5412                                 &cmsKeepAliveClosure, false /* !preclean */);
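  // cmsKeepAliveClosure marks and traces referents that reference processing
  // keeps alive; cmsDrainMarkingStackClosure then drains the resulting
  // entries from the marking stack.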
5413   {
5414     GCTraceTime t("weak refs processing", PrintGCDetails, false, _gc_timer_cm, _gc_tracer_cm->gc_id());
5415 
5416     ReferenceProcessorStats stats;
5417     if (rp->processing_is_mt()) {
5418       // Set the degree of MT here.  If the discovery is done MT, there
5419       // may have been a different number of threads doing the discovery
5420       // and a different number of discovered lists may have Ref objects.
5421       // That is OK as long as the Reference lists are balanced (see
5422       // balance_all_queues() and balance_queues()).
5423       GenCollectedHeap* gch = GenCollectedHeap::heap();
5424       uint active_workers = ParallelGCThreads;
5425       FlexibleWorkGang* workers = gch->workers();
5426       if (workers != NULL) {
5427         active_workers = workers->active_workers();
5428         // The expectation is that active_workers will have already
5429         // been set to a reasonable value.  If it has not been set,
5430         // investigate.
5431         assert(active_workers > 0, "Should have been set during scavenge");
5432       }
5433       rp->set_active_mt_degree(active_workers);
5434       CMSRefProcTaskExecutor task_executor(*this);
5435       stats = rp->process_discovered_references(&_is_alive_closure,
5436                                         &cmsKeepAliveClosure,
5437                                         &cmsDrainMarkingStackClosure,
5438                                         &task_executor,
5439                                         _gc_timer_cm,
5440                                         _gc_tracer_cm->gc_id());
5441     } else {
5442       stats = rp->process_discovered_references(&_is_alive_closure,
5443                                         &cmsKeepAliveClosure,
5444                                         &cmsDrainMarkingStackClosure,
5445                                         NULL,
5446                                         _gc_timer_cm,
5447                                         _gc_tracer_cm->gc_id());
5448     }
5449     _gc_tracer_cm->report_gc_reference_stats(stats);
5450 
5451   }
5452 
5453   // This is the point where the entire marking should have completed.
5454   verify_work_stacks_empty();
5455 
5456   if (should_unload_classes()) {
5457     {
5458       GCTraceTime t("class unloading", PrintGCDetails, false, _gc_timer_cm, _gc_tracer_cm->gc_id());
5459 
5460       // Unload classes and purge the SystemDictionary.
5461       bool purged_class = SystemDictionary::do_unloading(&_is_alive_closure);
5462 
5463       // Unload nmethods.
5464       CodeCache::do_unloading(&_is_alive_closure, purged_class);
5465 
5466       // Prune dead klasses from subklass/sibling/implementor lists.
5467       Klass::clean_weak_klass_links(&_is_alive_closure);
5468     }
5469 
5470     {
5471       GCTraceTime t("scrub symbol table", PrintGCDetails, false, _gc_timer_cm, _gc_tracer_cm->gc_id());
5472       // Clean up unreferenced symbols in symbol table.
5473       SymbolTable::unlink();
5474     }
5475 
5476     {
5477       GCTraceTime t("scrub string table", PrintGCDetails, false, _gc_timer_cm, _gc_tracer_cm->gc_id());
5478       // Delete entries for dead interned strings.
5479       StringTable::unlink(&_is_alive_closure);
5480     }
5481   }
5482 
5483 
5484   // Restore any preserved marks as a result of mark stack or
5485   // work queue overflow
5486   restore_preserved_marks_if_any();  // done single-threaded for now
5487 
5488   rp->set_enqueuing_is_done(true);
5489   if (rp->processing_is_mt()) {
5490     rp->balance_all_queues();
5491     CMSRefProcTaskExecutor task_executor(*this);
5492     rp->enqueue_discovered_references(&task_executor);
5493   } else {
5494     rp->enqueue_discovered_references(NULL);
5495   }
5496   rp->verify_no_references_recorded();
5497   assert(!rp->discovery_enabled(), "should have been disabled");
5498 }
5499 
5500 #ifndef PRODUCT
5501 void CMSCollector::check_correct_thread_executing() {
5502   Thread* t = Thread::current();
5503   // Only the VM thread or the CMS thread should be here.
5504   assert(t->is_ConcurrentGC_thread() || t->is_VM_thread(),
5505          "Unexpected thread type");
5506   // If this is the vm thread, the foreground process
5507   // should not be waiting.  Note that _foregroundGCIsActive is
5508   // true while the foreground collector is waiting.
5509   if (_foregroundGCShouldWait) {
5510     // We cannot be the VM thread
5511     assert(t->is_ConcurrentGC_thread(),
5512            "Should be CMS thread");
5513   } else {
5514     // We can be the CMS thread only if we are in a stop-world
5515     // phase of CMS collection.
5516     if (t->is_ConcurrentGC_thread()) {
5517       assert(_collectorState == InitialMarking ||
5518              _collectorState == FinalMarking,
5519              "Should be a stop-world phase");
5520       // The CMS thread should be holding the CMS_token.
5521       assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
5522              "Potential interference with concurrently "
5523              "executing VM thread");
5524     }
5525   }
5526 }
5527 #endif
5528 
5529 void CMSCollector::sweep() {
5530   assert(_collectorState == Sweeping, "just checking");
5531   check_correct_thread_executing();
5532   verify_work_stacks_empty();
5533   verify_overflow_empty();
5534   increment_sweep_count();
5535   TraceCMSMemoryManagerStats tms(_collectorState,GenCollectedHeap::heap()->gc_cause());
5536 
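  // Stop the inter-sweep timer and sample the interval since the previous
  // sweep; the sample feeds the free list census (see beginSweepFLCensus()
  // in sweepWork() below).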
5537   _inter_sweep_timer.stop();
5538   _inter_sweep_estimate.sample(_inter_sweep_timer.seconds());
5539 
5540   assert(!_intra_sweep_timer.is_active(), "Should not be active");
5541   _intra_sweep_timer.reset();
5542   _intra_sweep_timer.start();
5543   {
5544     TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
5545     CMSPhaseAccounting pa(this, "sweep", _gc_tracer_cm->gc_id(), !PrintGCDetails);
5546     // First sweep the old gen
5547     {
5548       CMSTokenSyncWithLocks ts(true, _cmsGen->freelistLock(),
5549                                bitMapLock());
5550       sweepWork(_cmsGen);
5551     }
5552 
5553     // Update Universe::_heap_*_at_gc figures.
5554     // We need all the free list locks to make the abstract state
5555     // transition from Sweeping to Resizing. See detailed note
5556     // further below.
5557     {
5558       CMSTokenSyncWithLocks ts(true, _cmsGen->freelistLock());
5559       // Update heap occupancy information which is used as
5560       // input to soft ref clearing policy at the next gc.
5561       Universe::update_heap_info_at_gc();
5562       _collectorState = Resizing;
5563     }
5564   }
5565   verify_work_stacks_empty();
5566   verify_overflow_empty();
5567 
5568   if (should_unload_classes()) {
5569     // Delay purge to the beginning of the next safepoint.  Metaspace::contains
5570     // requires that the virtual spaces are stable and not deleted.
5571     ClassLoaderDataGraph::set_should_purge(true);
5572   }
5573 
5574   _intra_sweep_timer.stop();
5575   _intra_sweep_estimate.sample(_intra_sweep_timer.seconds());
5576 
5577   _inter_sweep_timer.reset();
5578   _inter_sweep_timer.start();
5579 
5580   // We need to use a monotonically non-decreasing time in ms
5581   // or we will see time-warp warnings; os::javaTimeMillis()
5582   // does not guarantee monotonicity, so we derive the time from javaTimeNanos().
5583   jlong now = os::javaTimeNanos() / NANOSECS_PER_MILLISEC;
5584   update_time_of_last_gc(now);
5585 
5586   // NOTE on abstract state transitions:
5587   // Mutators allocate-live and/or mark the mod-union table dirty
5588   // based on the state of the collection.  The former is done in
5589   // the interval [Marking, Sweeping] and the latter in the interval
5590   // [Marking, Sweeping).  Thus the transitions into the Marking state
5591   // and out of the Sweeping state must be synchronously visible
5592   // globally to the mutators.
5593   // The transition into the Marking state happens with the world
5594   // stopped so the mutators will globally see it.  Sweeping is
5595   // done asynchronously by the background collector so the transition
5596   // from the Sweeping state to the Resizing state must be done
5597   // under the freelistLock (as is the check for whether to
5598   // allocate-live and whether to dirty the mod-union table).
5599   assert(_collectorState == Resizing, "Change of collector state to"
5600     " Resizing must be done under the freelistLocks (plural)");
5601 
5602   // Now that sweeping has been completed, we clear
5603   // the incremental_collection_failed flag,
5604   // thus inviting a younger gen collection to promote into
5605   // this generation. If such a promotion may still fail,
5606   // the flag will be set again when a young collection is
5607   // attempted.
5608   GenCollectedHeap* gch = GenCollectedHeap::heap();
5609   gch->clear_incremental_collection_failed();  // Worth retrying as fresh space may have been freed up
5610   gch->update_full_collections_completed(_collection_count_start);
5611 }
5612 
5613 // FIX ME!!! Looks like this belongs in CFLSpace, with
5614 // CMSGen merely delegating to it.
5615 void ConcurrentMarkSweepGeneration::setNearLargestChunk() {
5616   double nearLargestPercent = FLSLargestBlockCoalesceProximity;
5617   HeapWord*  minAddr        = _cmsSpace->bottom();
5618   HeapWord*  largestAddr    =
5619     (HeapWord*) _cmsSpace->dictionary()->find_largest_dict();
5620   if (largestAddr == NULL) {
5621     // The dictionary appears to be empty.  In this case
5622     // try to coalesce at the end of the heap.
5623     largestAddr = _cmsSpace->end();
5624   }
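  // Place the threshold at FLSLargestBlockCoalesceProximity of the distance
  // from the bottom of the space to the largest free block, backed off by
  // one minimum chunk size.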
5625   size_t largestOffset     = pointer_delta(largestAddr, minAddr);
5626   size_t nearLargestOffset =
5627     (size_t)((double)largestOffset * nearLargestPercent) - MinChunkSize;
5628   if (PrintFLSStatistics != 0) {
5629     gclog_or_tty->print_cr(
5630       "CMS: Large Block: " PTR_FORMAT ";"
5631       " Proximity: " PTR_FORMAT " -> " PTR_FORMAT,
5632       p2i(largestAddr),
5633       p2i(_cmsSpace->nearLargestChunk()), p2i(minAddr + nearLargestOffset));
5634   }
5635   _cmsSpace->set_nearLargestChunk(minAddr + nearLargestOffset);
5636 }
5637 
5638 bool ConcurrentMarkSweepGeneration::isNearLargestChunk(HeapWord* addr) {
5639   return addr >= _cmsSpace->nearLargestChunk();
5640 }
5641 
5642 FreeChunk* ConcurrentMarkSweepGeneration::find_chunk_at_end() {
5643   return _cmsSpace->find_chunk_at_end();
5644 }
5645 
5646 void ConcurrentMarkSweepGeneration::update_gc_stats(int current_level,
5647                                                     bool full) {
5648   // The next lower level has been collected.  Gather any statistics
5649   // that are of interest at this point.
5650   if (!full && (current_level + 1) == level()) {
5651     // Gather statistics on the young generation collection.
5652     collector()->stats().record_gc0_end(used());
5653   }
5654 }
5655 
5656 void CMSCollector::sweepWork(ConcurrentMarkSweepGeneration* gen) {
5657   // We iterate over the space(s) underlying this generation,
5658   // checking the mark bit map to see if the bits corresponding
5659   // to specific blocks are marked or not. Blocks that are
5660   // marked are live and are not swept up. All remaining blocks
5661   // are swept up, with coalescing on-the-fly as we sweep up
5662   // contiguous free and/or garbage blocks:
5663   // We need to ensure that the sweeper synchronizes with allocators
5664   // and stop-the-world collectors. In particular, the following
5665   // locks are used:
5666   // . CMS token: if this is held, a stop the world collection cannot occur
5667   // . freelistLock: if this is held, no allocation can occur from this
5668   //                 generation by another thread
5669   // . bitMapLock: if this is held, no other thread can access or update
5670   //               the marking bit map
5671 
5672   // Note that we need to hold the freelistLock if we use
5673   // block iterate below; else the iterator might go awry if
5674   // a mutator (or promotion) causes block contents to change
5675   // (for instance if the allocator divvies up a block).
5676   // If we hold the free list lock, for all practical purposes
5677   // young generation GC's can't occur (they'll usually need to
5678   // promote), so we might as well prevent all young generation
5679   // GC's while we do a sweeping step. For the same reason, we might
5680   // as well take the bit map lock for the entire duration.
5681 
5682   // check that we hold the requisite locks
5683   assert(have_cms_token(), "Should hold cms token");
5684   assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(), "Should possess CMS token to sweep");
5685   assert_lock_strong(gen->freelistLock());
5686   assert_lock_strong(bitMapLock());
5687 
5688   assert(!_inter_sweep_timer.is_active(), "Was switched off in an outer context");
5689   assert(_intra_sweep_timer.is_active(),  "Was switched on  in an outer context");
5690   gen->cmsSpace()->beginSweepFLCensus((float)(_inter_sweep_timer.seconds()),
5691                                       _inter_sweep_estimate.padded_average(),
5692                                       _intra_sweep_estimate.padded_average());
5693   gen->setNearLargestChunk();
5694 
5695   {
5696     SweepClosure sweepClosure(this, gen, &_markBitMap, CMSYield);
5697     gen->cmsSpace()->blk_iterate_careful(&sweepClosure);
5698     // We need to free-up/coalesce garbage/blocks from a
5699     // co-terminal free run. This is done in the SweepClosure
5700     // destructor; so, do not remove this scope, else the
5701     // end-of-sweep-census below will be off by a little bit.
5702   }
5703   gen->cmsSpace()->sweep_completed();
5704   gen->cmsSpace()->endSweepFLCensus(sweep_count());
5705   if (should_unload_classes()) {                // unloaded classes this cycle,
5706     _concurrent_cycles_since_last_unload = 0;   // ... reset count
5707   } else {                                      // did not unload classes,
5708     _concurrent_cycles_since_last_unload++;     // ... increment count
5709   }
5710 }
5711 
5712 // Reset CMS data structures (for now just the marking bit map)
5713 // preparatory for the next cycle.
5714 void CMSCollector::reset(bool concurrent) {
5715   if (concurrent) {
5716     CMSTokenSyncWithLocks ts(true, bitMapLock());
5717 
5718     // If the state is not "Resetting", the foreground thread
5719     // has already done a collection and the resetting.
5720     if (_collectorState != Resetting) {
5721       assert(_collectorState == Idling, "The state should only change"
5722         " because the foreground collector has finished the collection");
5723       return;
5724     }
5725 
5726     // Clear the mark bitmap (no grey objects to start with)
5727     // for the next cycle.
5728     TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
5729     CMSPhaseAccounting cmspa(this, "reset", _gc_tracer_cm->gc_id(), !PrintGCDetails);
5730 
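    // Clear the bit map in chunks of (at most) CMSBitMapYieldQuantum words,
    // checking between chunks whether the CMS thread should yield.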
5731     HeapWord* curAddr = _markBitMap.startWord();
5732     while (curAddr < _markBitMap.endWord()) {
5733       size_t remaining  = pointer_delta(_markBitMap.endWord(), curAddr);
5734       MemRegion chunk(curAddr, MIN2(CMSBitMapYieldQuantum, remaining));
5735       _markBitMap.clear_large_range(chunk);
5736       if (ConcurrentMarkSweepThread::should_yield() &&
5737           !foregroundGCIsActive() &&
5738           CMSYield) {
5739         assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
5740                "CMS thread should hold CMS token");
5741         assert_lock_strong(bitMapLock());
5742         bitMapLock()->unlock();
5743         ConcurrentMarkSweepThread::desynchronize(true);
5744         stopTimer();
5745         if (PrintCMSStatistics != 0) {
5746           incrementYields();
5747         }
5748 
5749         // See the comment in coordinator_yield()
5750         for (unsigned i = 0; i < CMSYieldSleepCount &&
5751                          ConcurrentMarkSweepThread::should_yield() &&
5752                          !CMSCollector::foregroundGCIsActive(); ++i) {
5753           os::sleep(Thread::current(), 1, false);
5754         }
5755 
5756         ConcurrentMarkSweepThread::synchronize(true);
5757         bitMapLock()->lock_without_safepoint_check();
5758         startTimer();
5759       }
5760       curAddr = chunk.end();
5761     }
5762     // A successful mostly concurrent collection has been done.
5763     // Because only the full (i.e., concurrent mode failure) collections
5764     // are being measured for gc overhead limits, clean the "near" flag
5765     // and count.
5766     size_policy()->reset_gc_overhead_limit_count();
5767     _collectorState = Idling;
5768   } else {
5769     // already have the lock
5770     assert(_collectorState == Resetting, "just checking");
5771     assert_lock_strong(bitMapLock());
5772     _markBitMap.clear_all();
5773     _collectorState = Idling;
5774   }
5775 
5776   register_gc_end();
5777 }
5778 
5779 void CMSCollector::do_CMS_operation(CMS_op_type op, GCCause::Cause gc_cause) {
5780   TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
5781   GCTraceTime t(GCCauseString("GC", gc_cause), PrintGC, !PrintGCDetails, NULL, _gc_tracer_cm->gc_id());
5782   TraceCollectorStats tcs(counters());
5783 
5784   switch (op) {
5785     case CMS_op_checkpointRootsInitial: {
5786       SvcGCMarker sgcm(SvcGCMarker::OTHER);
5787       checkpointRootsInitial();
5788       if (PrintGC) {
5789         _cmsGen->printOccupancy("initial-mark");
5790       }
5791       break;
5792     }
5793     case CMS_op_checkpointRootsFinal: {
5794       SvcGCMarker sgcm(SvcGCMarker::OTHER);
5795       checkpointRootsFinal();
5796       if (PrintGC) {
5797         _cmsGen->printOccupancy("remark");
5798       }
5799       break;
5800     }
5801     default:
5802       fatal("No such CMS_op");
5803   }
5804 }
5805 
5806 #ifndef PRODUCT
5807 size_t const CMSCollector::skip_header_HeapWords() {
5808   return FreeChunk::header_size();
5809 }
5810 
5811 // Try to collect here the conditions that should hold when
5812 // the CMS thread is exiting. The idea is that the foreground GC
5813 // thread should not be blocked if it wants to terminate
5814 // the CMS thread and yet continue to run the VM for a while
5815 // after that.
5816 void CMSCollector::verify_ok_to_terminate() const {
5817   assert(Thread::current()->is_ConcurrentGC_thread(),
5818          "should be called by CMS thread");
5819   assert(!_foregroundGCShouldWait, "should be false");
5820   // We could check here that all the various low-level locks
5821   // are not held by the CMS thread, but that is overkill; see
5822   // also CMSThread::verify_ok_to_terminate() where the CGC_lock
5823   // is checked.
5824 }
5825 #endif
5826 
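// An object whose klass pointer has not yet been installed carries "Printezis
// marks": the bits for its first two words are set in the mark bit map, as is
// the bit for its last word, so the block size can be read off the bit map.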
5827 size_t CMSCollector::block_size_using_printezis_bits(HeapWord* addr) const {
5828   assert(_markBitMap.isMarked(addr) && _markBitMap.isMarked(addr + 1),
5829          "missing Printezis mark?");
5830   HeapWord* nextOneAddr = _markBitMap.getNextMarkedWordAddress(addr + 2);
5831   size_t size = pointer_delta(nextOneAddr + 1, addr);
5832   assert(size == CompactibleFreeListSpace::adjustObjectSize(size),
5833          "alignment problem");
5834   assert(size >= 3, "Necessary for Printezis marks to work");
5835   return size;
5836 }
5837 
5838 // A variant of the above (block_size_using_printezis_bits()) except
5839 // that we return 0 if the P-bits are not yet set.
5840 size_t CMSCollector::block_size_if_printezis_bits(HeapWord* addr) const {
5841   if (_markBitMap.isMarked(addr + 1)) {
5842     assert(_markBitMap.isMarked(addr), "P-bit can be set only for marked objects");
5843     HeapWord* nextOneAddr = _markBitMap.getNextMarkedWordAddress(addr + 2);
5844     size_t size = pointer_delta(nextOneAddr + 1, addr);
5845     assert(size == CompactibleFreeListSpace::adjustObjectSize(size),
5846            "alignment problem");
5847     assert(size >= 3, "Necessary for Printezis marks to work");
5848     return size;
5849   }
5850   return 0;
5851 }
5852 
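// Return the start of the first card at or after the end of the block that
// begins at addr; the block size comes from the object header when the klass
// pointer is installed, and from the Printezis bits otherwise.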
5853 HeapWord* CMSCollector::next_card_start_after_block(HeapWord* addr) const {
5854   size_t sz = 0;
5855   oop p = (oop)addr;
5856   if (p->klass_or_null() != NULL) {
5857     sz = CompactibleFreeListSpace::adjustObjectSize(p->size());
5858   } else {
5859     sz = block_size_using_printezis_bits(addr);
5860   }
5861   assert(sz > 0, "size must be nonzero");
5862   HeapWord* next_block = addr + sz;
5863   HeapWord* next_card  = (HeapWord*)round_to((uintptr_t)next_block,
5864                                              CardTableModRefBS::card_size);
5865   assert(round_down((uintptr_t)addr,      CardTableModRefBS::card_size) <
5866          round_down((uintptr_t)next_card, CardTableModRefBS::card_size),
5867          "must be different cards");
5868   return next_card;
5869 }
5870 
5871 
5872 // CMS Bit Map Wrapper /////////////////////////////////////////
5873 
5874 // Construct a CMS bit map infrastructure, but don't create the
5875 // bit vector itself. That is done by a separate call to CMSBitMap::allocate()
5876 // further below.
5877 CMSBitMap::CMSBitMap(int shifter, int mutex_rank, const char* mutex_name):
5878   _bm(),
5879   _shifter(shifter),
5880   _lock(mutex_rank >= 0 ? new Mutex(mutex_rank, mutex_name, true,
5881                                     Monitor::_safepoint_check_sometimes) : NULL)
5882 {
5883   _bmStartWord = 0;
5884   _bmWordSize  = 0;
5885 }
5886 
5887 bool CMSBitMap::allocate(MemRegion mr) {
5888   _bmStartWord = mr.start();
5889   _bmWordSize  = mr.word_size();
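  // Each bit covers (1 << _shifter) heap words, so the map needs
  // (_bmWordSize >> _shifter) bits; reserve that many bits rounded up to
  // bytes, plus one byte of slack for the remainder.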
5890   ReservedSpace brs(ReservedSpace::allocation_align_size_up(
5891                      (_bmWordSize >> (_shifter + LogBitsPerByte)) + 1));
5892   if (!brs.is_reserved()) {
5893     warning("CMS bit map allocation failure");
5894     return false;
5895   }
5896   // For now we'll just commit all of the bit map up front.
5897   // Later on we'll try to be more parsimonious with swap.
5898   if (!_virtual_space.initialize(brs, brs.size())) {
5899     warning("CMS bit map backing store failure");
5900     return false;
5901   }
5902   assert(_virtual_space.committed_size() == brs.size(),
5903          "didn't reserve backing store for all of CMS bit map?");
5904   _bm.set_map((BitMap::bm_word_t*)_virtual_space.low());
5905   assert(_virtual_space.committed_size() << (_shifter + LogBitsPerByte) >=
5906          _bmWordSize, "inconsistency in bit map sizing");
5907   _bm.set_size(_bmWordSize >> _shifter);
5908 
5909   // bm.clear(); // can we rely on getting zero'd memory? verify below
5910   assert(isAllClear(),
5911          "Expected zero'd memory from ReservedSpace constructor");
5912   assert(_bm.size() == heapWordDiffToOffsetDiff(sizeInWords()),
5913          "consistency check");
5914   return true;
5915 }
5916 
5917 void CMSBitMap::dirty_range_iterate_clear(MemRegion mr, MemRegionClosure* cl) {
5918   HeapWord *next_addr, *end_addr, *last_addr;
5919   assert_locked();
5920   assert(covers(mr), "out-of-range error");
5921   // XXX assert that start and end are appropriately aligned
5922   for (next_addr = mr.start(), end_addr = mr.end();
5923        next_addr < end_addr; next_addr = last_addr) {
5924     MemRegion dirty_region = getAndClearMarkedRegion(next_addr, end_addr);
5925     last_addr = dirty_region.end();
5926     if (!dirty_region.is_empty()) {
5927       cl->do_MemRegion(dirty_region);
5928     } else {
5929       assert(last_addr == end_addr, "program logic");
5930       return;
5931     }
5932   }
5933 }
5934 
5935 void CMSBitMap::print_on_error(outputStream* st, const char* prefix) const {
5936   _bm.print_on_error(st, prefix);
5937 }
5938 
5939 #ifndef PRODUCT
5940 void CMSBitMap::assert_locked() const {
5941   CMSLockVerifier::assert_locked(lock());
5942 }
5943 
5944 bool CMSBitMap::covers(MemRegion mr) const {
5945   // assert(_bm.map() == _virtual_space.low(), "map inconsistency");
5946   assert((size_t)_bm.size() == (_bmWordSize >> _shifter),
5947          "size inconsistency");
5948   return (mr.start() >= _bmStartWord) &&
5949          (mr.end()   <= endWord());
5950 }
5951 
5952 bool CMSBitMap::covers(HeapWord* start, size_t size) const {
5953   return (start >= _bmStartWord && (start + size) <= endWord());
5954 }
5955 
5956 void CMSBitMap::verifyNoOneBitsInRange(HeapWord* left, HeapWord* right) {
5957   // verify that there are no 1 bits in the interval [left, right)
5958   FalseBitMapClosure falseBitMapClosure;
5959   iterate(&falseBitMapClosure, left, right);
5960 }
5961 
5962 void CMSBitMap::region_invariant(MemRegion mr)
5963 {
5964   assert_locked();
5965   // mr = mr.intersection(MemRegion(_bmStartWord, _bmWordSize));
5966   assert(!mr.is_empty(), "unexpected empty region");
5967   assert(covers(mr), "mr should be covered by bit map");
5968   // convert address range into offset range
5969   size_t start_ofs = heapWordToOffset(mr.start());
5970   // Make sure that end() is appropriately aligned
5971   assert(mr.end() == (HeapWord*)round_to((intptr_t)mr.end(),
5972                         (1 << (_shifter+LogHeapWordSize))),
5973          "Misaligned mr.end()");
5974   size_t end_ofs   = heapWordToOffset(mr.end());
5975   assert(end_ofs > start_ofs, "Should mark at least one bit");
5976 }
5977 
5978 #endif
5979 
5980 bool CMSMarkStack::allocate(size_t size) {
5981   // allocate a stack of the requisite depth
5982   ReservedSpace rs(ReservedSpace::allocation_align_size_up(
5983                    size * sizeof(oop)));
5984   if (!rs.is_reserved()) {
5985     warning("CMSMarkStack allocation failure");
5986     return false;
5987   }
5988   if (!_virtual_space.initialize(rs, rs.size())) {
5989     warning("CMSMarkStack backing store failure");
5990     return false;
5991   }
5992   assert(_virtual_space.committed_size() == rs.size(),
5993          "didn't reserve backing store for all of CMS stack?");
5994   _base = (oop*)(_virtual_space.low());
5995   _index = 0;
5996   _capacity = size;
5997   NOT_PRODUCT(_max_depth = 0);
5998   return true;
5999 }
6000 
6001 // XXX FIX ME !!! In the MT case we come in here holding a
6002 // leaf lock. For printing we need to take a further lock
6003 // which has lower rank. We need to recalibrate the two
6004 // lock-ranks involved in order to be able to print the
6005 // messages below. (Or defer the printing to the caller.
6006 // For now we take the expedient path of just disabling the
6007 // messages for the problematic case.)
6008 void CMSMarkStack::expand() {
6009   assert(_capacity <= MarkStackSizeMax, "stack bigger than permitted");
6010   if (_capacity == MarkStackSizeMax) {
6011     if (_hit_limit++ == 0 && !CMSConcurrentMTEnabled && PrintGCDetails) {
6012       // We print a warning message only once per CMS cycle.
6013       gclog_or_tty->print_cr(" (benign) Hit CMSMarkStack max size limit");
6014     }
6015     return;
6016   }
6017   // Double capacity if possible
6018   size_t new_capacity = MIN2(_capacity*2, MarkStackSizeMax);
6019   // Do not give up existing stack until we have managed to
6020   // get the double capacity that we desired.
6021   ReservedSpace rs(ReservedSpace::allocation_align_size_up(
6022                    new_capacity * sizeof(oop)));
6023   if (rs.is_reserved()) {
6024     // Release the backing store associated with old stack
6025     _virtual_space.release();
6026     // Reinitialize virtual space for new stack
6027     if (!_virtual_space.initialize(rs, rs.size())) {
6028       fatal("Not enough swap for expanded marking stack");
6029     }
6030     _base = (oop*)(_virtual_space.low());
6031     _index = 0;
6032     _capacity = new_capacity;
6033   } else if (_failed_double++ == 0 && !CMSConcurrentMTEnabled && PrintGCDetails) {
6034     // Failed to double capacity, continue;
6035     // we print a detail message only once per CMS cycle.
6036     gclog_or_tty->print(" (benign) Failed to expand marking stack from "SIZE_FORMAT"K to "
6037             SIZE_FORMAT"K",
6038             _capacity / K, new_capacity / K);
6039   }
6040 }
6041 
6042 
6043 // Closures
6044 // XXX: there seems to be a lot of code duplication here;
6045 // should refactor and consolidate common code.
6046 
6047 // This closure is used to mark refs into the CMS generation in
6048 // the CMS bit map. Called at the first checkpoint. This closure
6049 // assumes that we do not need to re-mark dirty cards; if the CMS
6050 // generation on which this is used is not the oldest
6051 // generation, then this will lose younger_gen cards!
6052 
6053 MarkRefsIntoClosure::MarkRefsIntoClosure(
6054   MemRegion span, CMSBitMap* bitMap):
6055     _span(span),
6056     _bitMap(bitMap)
6057 {
6058     assert(_ref_processor == NULL, "deliberately left NULL");
6059     assert(_bitMap->covers(_span), "_bitMap/_span mismatch");
6060 }
6061 
6062 void MarkRefsIntoClosure::do_oop(oop obj) {
6063   // if obj points into _span, then mark the corresponding bit in _bitMap
6064   assert(obj->is_oop(), "expected an oop");
6065   HeapWord* addr = (HeapWord*)obj;
6066   if (_span.contains(addr)) {
6067     // this should be made more efficient
6068     _bitMap->mark(addr);
6069   }
6070 }
6071 
6072 void MarkRefsIntoClosure::do_oop(oop* p)       { MarkRefsIntoClosure::do_oop_work(p); }
6073 void MarkRefsIntoClosure::do_oop(narrowOop* p) { MarkRefsIntoClosure::do_oop_work(p); }
6074 
6075 Par_MarkRefsIntoClosure::Par_MarkRefsIntoClosure(
6076   MemRegion span, CMSBitMap* bitMap):
6077     _span(span),
6078     _bitMap(bitMap)
6079 {
6080     assert(_ref_processor == NULL, "deliberately left NULL");
6081     assert(_bitMap->covers(_span), "_bitMap/_span mismatch");
6082 }
6083 
6084 void Par_MarkRefsIntoClosure::do_oop(oop obj) {
6085   // if obj points into _span, then mark the corresponding bit in _bitMap
6086   assert(obj->is_oop(), "expected an oop");
6087   HeapWord* addr = (HeapWord*)obj;
6088   if (_span.contains(addr)) {
6089     // this should be made more efficient
6090     _bitMap->par_mark(addr);
6091   }
6092 }
6093 
6094 void Par_MarkRefsIntoClosure::do_oop(oop* p)       { Par_MarkRefsIntoClosure::do_oop_work(p); }
6095 void Par_MarkRefsIntoClosure::do_oop(narrowOop* p) { Par_MarkRefsIntoClosure::do_oop_work(p); }
6096 
6097 // A variant of the above, used for CMS marking verification.
6098 MarkRefsIntoVerifyClosure::MarkRefsIntoVerifyClosure(
6099   MemRegion span, CMSBitMap* verification_bm, CMSBitMap* cms_bm):
6100     _span(span),
6101     _verification_bm(verification_bm),
6102     _cms_bm(cms_bm)
6103 {
6104     assert(_ref_processor == NULL, "deliberately left NULL");
6105     assert(_verification_bm->covers(_span), "_verification_bm/_span mismatch");
6106 }
6107 
6108 void MarkRefsIntoVerifyClosure::do_oop(oop obj) {
6109   // if obj points into _span, then mark the corresponding bit in _verification_bm
6110   assert(obj->is_oop(), "expected an oop");
6111   HeapWord* addr = (HeapWord*)obj;
6112   if (_span.contains(addr)) {
6113     _verification_bm->mark(addr);
6114     if (!_cms_bm->isMarked(addr)) {
6115       oop(addr)->print();
6116       gclog_or_tty->print_cr(" (" INTPTR_FORMAT " should have been marked)", p2i(addr));
6117       fatal("... aborting");
6118     }
6119   }
6120 }
6121 
6122 void MarkRefsIntoVerifyClosure::do_oop(oop* p)       { MarkRefsIntoVerifyClosure::do_oop_work(p); }
6123 void MarkRefsIntoVerifyClosure::do_oop(narrowOop* p) { MarkRefsIntoVerifyClosure::do_oop_work(p); }
6124 
6125 //////////////////////////////////////////////////
6126 // MarkRefsIntoAndScanClosure
6127 //////////////////////////////////////////////////
6128 
6129 MarkRefsIntoAndScanClosure::MarkRefsIntoAndScanClosure(MemRegion span,
6130                                                        ReferenceProcessor* rp,
6131                                                        CMSBitMap* bit_map,
6132                                                        CMSBitMap* mod_union_table,
6133                                                        CMSMarkStack*  mark_stack,
6134                                                        CMSCollector* collector,
6135                                                        bool should_yield,
6136                                                        bool concurrent_precleaning):
6137   _collector(collector),
6138   _span(span),
6139   _bit_map(bit_map),
6140   _mark_stack(mark_stack),
6141   _pushAndMarkClosure(collector, span, rp, bit_map, mod_union_table,
6142                       mark_stack, concurrent_precleaning),
6143   _yield(should_yield),
6144   _concurrent_precleaning(concurrent_precleaning),
6145   _freelistLock(NULL)
6146 {
6147   _ref_processor = rp;
6148   assert(_ref_processor != NULL, "_ref_processor shouldn't be NULL");
6149 }
6150 
6151 // This closure is used to mark refs into the CMS generation at the
6152 // second (final) checkpoint, and to scan and transitively follow
6153 // the unmarked oops. It is also used during the concurrent precleaning
6154 // phase while scanning objects on dirty cards in the CMS generation.
6155 // The marks are made in the marking bit map and the marking stack is
6156 // used for keeping the (newly) grey objects during the scan.
6157 // The parallel version (Par_...) appears further below.
6158 void MarkRefsIntoAndScanClosure::do_oop(oop obj) {
6159   if (obj != NULL) {
6160     assert(obj->is_oop(), "expected an oop");
6161     HeapWord* addr = (HeapWord*)obj;
6162     assert(_mark_stack->isEmpty(), "pre-condition (eager drainage)");
6163     assert(_collector->overflow_list_is_empty(),
6164            "overflow list should be empty");
6165     if (_span.contains(addr) &&
6166         !_bit_map->isMarked(addr)) {
6167       // mark bit map (object is now grey)
6168       _bit_map->mark(addr);
6169       // push on marking stack (stack should be empty), and drain the
6170       // stack by applying this closure to the oops in the oops popped
6171       // from the stack (i.e. blacken the grey objects)
6172       bool res = _mark_stack->push(obj);
6173       assert(res, "Should have space to push on empty stack");
6174       do {
6175         oop new_oop = _mark_stack->pop();
6176         assert(new_oop != NULL && new_oop->is_oop(), "Expected an oop");
6177         assert(_bit_map->isMarked((HeapWord*)new_oop),
6178                "only grey objects on this stack");
6179         // iterate over the oops in this oop, marking and pushing
6180         // the ones in CMS heap (i.e. in _span).
6181         new_oop->oop_iterate(&_pushAndMarkClosure);
6182         // check if it's time to yield
6183         do_yield_check();
6184       } while (!_mark_stack->isEmpty() ||
6185                (!_concurrent_precleaning && take_from_overflow_list()));
6186         // if marking stack is empty, and we are not doing this
6187         // during precleaning, then check the overflow list
6188     }
6189     assert(_mark_stack->isEmpty(), "post-condition (eager drainage)");
6190     assert(_collector->overflow_list_is_empty(),
6191            "overflow list was drained above");
6192     // We could restore evacuated mark words, if any, used for
6193     // overflow list links here because the overflow list is
6194     // provably empty here. That would reduce the maximum
6195     // size requirements for preserved_{oop,mark}_stack.
6196     // But we'll just postpone it until we are all done
6197     // so we can just stream through.
6198     if (!_concurrent_precleaning && CMSOverflowEarlyRestoration) {
6199       _collector->restore_preserved_marks_if_any();
6200       assert(_collector->no_preserved_marks(), "No preserved marks");
6201     }
6202     assert(!CMSOverflowEarlyRestoration || _collector->no_preserved_marks(),
6203            "All preserved marks should have been restored above");
6204   }
6205 }
6206 
6207 void MarkRefsIntoAndScanClosure::do_oop(oop* p)       { MarkRefsIntoAndScanClosure::do_oop_work(p); }
6208 void MarkRefsIntoAndScanClosure::do_oop(narrowOop* p) { MarkRefsIntoAndScanClosure::do_oop_work(p); }
6209 
6210 void MarkRefsIntoAndScanClosure::do_yield_work() {
6211   assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
6212          "CMS thread should hold CMS token");
6213   assert_lock_strong(_freelistLock);
6214   assert_lock_strong(_bit_map->lock());
6215   // relinquish the free_list_lock and bitMaplock()
6216   _bit_map->lock()->unlock();
6217   _freelistLock->unlock();
6218   ConcurrentMarkSweepThread::desynchronize(true);
6219   _collector->stopTimer();
6220   if (PrintCMSStatistics != 0) {
6221     _collector->incrementYields();
6222   }
6223 
6224   // See the comment in coordinator_yield()
6225   for (unsigned i = 0;
6226        i < CMSYieldSleepCount &&
6227        ConcurrentMarkSweepThread::should_yield() &&
6228        !CMSCollector::foregroundGCIsActive();
6229        ++i) {
6230     os::sleep(Thread::current(), 1, false);
6231   }
6232 
6233   ConcurrentMarkSweepThread::synchronize(true);
6234   _freelistLock->lock_without_safepoint_check();
6235   _bit_map->lock()->lock_without_safepoint_check();
6236   _collector->startTimer();
6237 }
6238 
6239 ///////////////////////////////////////////////////////////
6240 // Par_MarkRefsIntoAndScanClosure: a parallel version of
6241 //                                 MarkRefsIntoAndScanClosure
6242 ///////////////////////////////////////////////////////////
6243 Par_MarkRefsIntoAndScanClosure::Par_MarkRefsIntoAndScanClosure(
6244   CMSCollector* collector, MemRegion span, ReferenceProcessor* rp,
6245   CMSBitMap* bit_map, OopTaskQueue* work_queue):
6246   _span(span),
6247   _bit_map(bit_map),
6248   _work_queue(work_queue),
6249   _low_water_mark(MIN2((uint)(work_queue->max_elems()/4),
6250                        (uint)(CMSWorkQueueDrainThreshold * ParallelGCThreads))),
6251   _par_pushAndMarkClosure(collector, span, rp, bit_map, work_queue)
6252 {
6253   _ref_processor = rp;
6254   assert(_ref_processor != NULL, "_ref_processor shouldn't be NULL");
6255 }
6256 
6257 // This closure is used to mark refs into the CMS generation at the
6258 // second (final) checkpoint, and to scan and transitively follow
6259 // the unmarked oops. The marks are made in the marking bit map and
6260 // the work_queue is used for keeping the (newly) grey objects during
6261 // the scan phase whence they are also available for stealing by parallel
6262 // threads. Since the marking bit map is shared, updates are
6263 // synchronized (via CAS).
6264 void Par_MarkRefsIntoAndScanClosure::do_oop(oop obj) {
6265   if (obj != NULL) {
6266     // Ignore mark word because this could be an already marked oop
6267     // that may be chained at the end of the overflow list.
6268     assert(obj->is_oop(true), "expected an oop");
6269     HeapWord* addr = (HeapWord*)obj;
6270     if (_span.contains(addr) &&
6271         !_bit_map->isMarked(addr)) {
6272       // mark bit map (object will become grey):
6273       // It is possible for several threads to be
6274       // trying to "claim" this object concurrently;
6275       // the unique thread that succeeds in marking the
6276       // object first will do the subsequent push on
6277       // to the work queue (or overflow list).
6278       if (_bit_map->par_mark(addr)) {
6279         // push on work_queue (which may not be empty), and trim the
6280         // queue to an appropriate length by applying this closure to
6281         // the oops in the oops popped from the stack (i.e. blacken the
6282         // grey objects)
6283         bool res = _work_queue->push(obj);
6284         assert(res, "Low water mark should be less than capacity?");
6285         trim_queue(_low_water_mark);
6286       } // Else, another thread claimed the object
6287     }
6288   }
6289 }
6290 
6291 void Par_MarkRefsIntoAndScanClosure::do_oop(oop* p)       { Par_MarkRefsIntoAndScanClosure::do_oop_work(p); }
6292 void Par_MarkRefsIntoAndScanClosure::do_oop(narrowOop* p) { Par_MarkRefsIntoAndScanClosure::do_oop_work(p); }
6293 
6294 // This closure is used to rescan the marked objects on the dirty cards
6295 // in the mod union table and the card table proper.
6296 size_t ScanMarkedObjectsAgainCarefullyClosure::do_object_careful_m(
6297   oop p, MemRegion mr) {
6298 
6299   size_t size = 0;
6300   HeapWord* addr = (HeapWord*)p;
6301   DEBUG_ONLY(_collector->verify_work_stacks_empty();)
6302   assert(_span.contains(addr), "we are scanning the CMS generation");
6303   // check if it's time to yield
6304   if (do_yield_check()) {
6305     // We yielded for some foreground stop-world work,
6306     // and we have been asked to abort this ongoing preclean cycle.
6307     return 0;
6308   }
6309   if (_bitMap->isMarked(addr)) {
6310     // it's marked; is it potentially uninitialized?
6311     if (p->klass_or_null() != NULL) {
6312         // an initialized object; ignore mark word in verification below
6313         // since we are running concurrent with mutators
6314         assert(p->is_oop(true), "should be an oop");
6315         if (p->is_objArray()) {
6316           // objArrays are precisely marked; restrict scanning
6317           // to dirty cards only.
6318           size = CompactibleFreeListSpace::adjustObjectSize(
6319                    p->oop_iterate(_scanningClosure, mr));
6320         } else {
6321           // A non-array may have been imprecisely marked; we need
6322           // to scan the object in its entirety.
6323           size = CompactibleFreeListSpace::adjustObjectSize(
6324                    p->oop_iterate(_scanningClosure));
6325         }
6326         #ifdef ASSERT
6327           size_t direct_size =
6328             CompactibleFreeListSpace::adjustObjectSize(p->size());
6329           assert(size == direct_size, "Inconsistency in size");
6330           assert(size >= 3, "Necessary for Printezis marks to work");
6331           if (!_bitMap->isMarked(addr+1)) {
6332             _bitMap->verifyNoOneBitsInRange(addr+2, addr+size);
6333           } else {
6334             _bitMap->verifyNoOneBitsInRange(addr+2, addr+size-1);
6335             assert(_bitMap->isMarked(addr+size-1),
6336                    "inconsistent Printezis mark");
6337           }
6338         #endif // ASSERT
6339     } else {
6340       // An uninitialized object.
6341       assert(_bitMap->isMarked(addr+1), "missing Printezis mark?");
6342       HeapWord* nextOneAddr = _bitMap->getNextMarkedWordAddress(addr + 2);
6343       size = pointer_delta(nextOneAddr + 1, addr);
6344       assert(size == CompactibleFreeListSpace::adjustObjectSize(size),
6345              "alignment problem");
6346       // Note that pre-cleaning needn't redirty the card. OopDesc::set_klass()
6347       // will dirty the card when the klass pointer is installed in the
6348       // object (signaling the completion of initialization).
6349     }
6350   } else {
6351     // Either a not yet marked object or an uninitialized object
6352     if (p->klass_or_null() == NULL) {
6353       // An uninitialized object, skip to the next card, since
6354       // we may not be able to read its P-bits yet.
6355       assert(size == 0, "Initial value");
6356     } else {
6357       // An object not (yet) reached by marking: we merely need to
6358       // compute its size so as to go look at the next block.
6359       assert(p->is_oop(true), "should be an oop");
6360       size = CompactibleFreeListSpace::adjustObjectSize(p->size());
6361     }
6362   }
6363   DEBUG_ONLY(_collector->verify_work_stacks_empty();)
6364   return size;
6365 }
6366 
6367 void ScanMarkedObjectsAgainCarefullyClosure::do_yield_work() {
6368   assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
6369          "CMS thread should hold CMS token");
6370   assert_lock_strong(_freelistLock);
6371   assert_lock_strong(_bitMap->lock());
6372   // relinquish the _freelistLock and the bitMap lock
6373   _bitMap->lock()->unlock();
6374   _freelistLock->unlock();
6375   ConcurrentMarkSweepThread::desynchronize(true);
6376   _collector->stopTimer();
6377   if (PrintCMSStatistics != 0) {
6378     _collector->incrementYields();
6379   }
6380 
6381   // See the comment in coordinator_yield()
6382   for (unsigned i = 0; i < CMSYieldSleepCount &&
6383                    ConcurrentMarkSweepThread::should_yield() &&
6384                    !CMSCollector::foregroundGCIsActive(); ++i) {
6385     os::sleep(Thread::current(), 1, false);
6386   }
6387 
6388   ConcurrentMarkSweepThread::synchronize(true);
6389   _freelistLock->lock_without_safepoint_check();
6390   _bitMap->lock()->lock_without_safepoint_check();
6391   _collector->startTimer();
6392 }
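
// Illustrative sketch (not collector code): the yield protocol that the
// do_yield_work() methods in this file follow -- drop the contended lock(s),
// give the requesting thread a bounded number of short naps, then re-acquire
// the lock(s) before resuming. "ExampleLock" and the example_* names are
// hypothetical stand-ins, not HotSpot types.
struct ExampleLock {
  void unlock() {}
  void lock_without_safepoint_check() {}
};

static void example_yield_protocol(ExampleLock* heavy_lock,
                                   unsigned max_naps,
                                   bool (*still_asked_to_yield)()) {
  heavy_lock->unlock();                         // relinquish the contended lock
  for (unsigned i = 0; i < max_naps && still_asked_to_yield(); i++) {
    // nap briefly here (the real code sleeps ~1ms via os::sleep) so the
    // requesting thread can make progress
  }
  heavy_lock->lock_without_safepoint_check();   // re-acquire before resuming work
}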
6393 
6394 
6395 //////////////////////////////////////////////////////////////////
6396 // SurvivorSpacePrecleanClosure
6397 //////////////////////////////////////////////////////////////////
6398 // This (single-threaded) closure is used to preclean the oops in
6399 // the survivor spaces.
6400 size_t SurvivorSpacePrecleanClosure::do_object_careful(oop p) {
6401 
6402   HeapWord* addr = (HeapWord*)p;
6403   DEBUG_ONLY(_collector->verify_work_stacks_empty();)
6404   assert(!_span.contains(addr), "we are scanning the survivor spaces");
6405   assert(p->klass_or_null() != NULL, "object should be initialized");
6406   // an initialized object; ignore mark word in verification below
6407   // since we are running concurrent with mutators
6408   assert(p->is_oop(true), "should be an oop");
6409   // Note that we do not yield while we iterate over
6410   // the interior oops of p, pushing the relevant ones
6411   // on our marking stack.
6412   size_t size = p->oop_iterate(_scanning_closure);
6413   do_yield_check();
6414   // Observe that below, we do not abandon the preclean
6415   // phase as soon as we should; rather we empty the
6416   // marking stack before returning. This is to satisfy
6417   // some existing assertions. In general, it may be a
6418   // good idea to abort immediately and complete the marking
6419   // from the grey objects at a later time.
6420   while (!_mark_stack->isEmpty()) {
6421     oop new_oop = _mark_stack->pop();
6422     assert(new_oop != NULL && new_oop->is_oop(), "Expected an oop");
6423     assert(_bit_map->isMarked((HeapWord*)new_oop),
6424            "only grey objects on this stack");
6425     // iterate over the oops in this oop, marking and pushing
6426     // the ones in CMS heap (i.e. in _span).
6427     new_oop->oop_iterate(_scanning_closure);
6428     // check if it's time to yield
6429     do_yield_check();
6430   }
6431   unsigned int after_count =
6432     GenCollectedHeap::heap()->total_collections();
6433   bool abort = (_before_count != after_count) ||
6434                _collector->should_abort_preclean();
6435   return abort ? 0 : size;
6436 }
6437 
6438 void SurvivorSpacePrecleanClosure::do_yield_work() {
6439   assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
6440          "CMS thread should hold CMS token");
6441   assert_lock_strong(_bit_map->lock());
6442   // Relinquish the bit map lock
6443   _bit_map->lock()->unlock();
6444   ConcurrentMarkSweepThread::desynchronize(true);
6445   _collector->stopTimer();
6446   if (PrintCMSStatistics != 0) {
6447     _collector->incrementYields();
6448   }
6449 
6450   // See the comment in coordinator_yield()
6451   for (unsigned i = 0; i < CMSYieldSleepCount &&
6452                        ConcurrentMarkSweepThread::should_yield() &&
6453                        !CMSCollector::foregroundGCIsActive(); ++i) {
6454     os::sleep(Thread::current(), 1, false);
6455   }
6456 
6457   ConcurrentMarkSweepThread::synchronize(true);
6458   _bit_map->lock()->lock_without_safepoint_check();
6459   _collector->startTimer();
6460 }
6461 
6462 // This closure is used to rescan the marked objects on the dirty cards
6463 // in the mod union table and the card table proper. In the parallel
6464 // case, although the bitMap is shared, we do a single read so the
6465 // isMarked() query is "safe".
6466 bool ScanMarkedObjectsAgainClosure::do_object_bm(oop p, MemRegion mr) {
6467   // Ignore mark word because we are running concurrent with mutators
6468   assert(p->is_oop_or_null(true), err_msg("Expected an oop or NULL at " PTR_FORMAT, p2i(p)));
6469   HeapWord* addr = (HeapWord*)p;
6470   assert(_span.contains(addr), "we are scanning the CMS generation");
6471   bool is_obj_array = false;
6472   #ifdef ASSERT
6473     if (!_parallel) {
6474       assert(_mark_stack->isEmpty(), "pre-condition (eager drainage)");
6475       assert(_collector->overflow_list_is_empty(),
6476              "overflow list should be empty");
6477 
6478     }
6479   #endif // ASSERT
6480   if (_bit_map->isMarked(addr)) {
6481     // Obj arrays are precisely marked, non-arrays are not;
6482     // so we scan objArrays precisely and non-arrays in their
6483     // entirety.
6484     if (p->is_objArray()) {
6485       is_obj_array = true;
6486       if (_parallel) {
6487         p->oop_iterate(_par_scan_closure, mr);
6488       } else {
6489         p->oop_iterate(_scan_closure, mr);
6490       }
6491     } else {
6492       if (_parallel) {
6493         p->oop_iterate(_par_scan_closure);
6494       } else {
6495         p->oop_iterate(_scan_closure);
6496       }
6497     }
6498   }
6499   #ifdef ASSERT
6500     if (!_parallel) {
6501       assert(_mark_stack->isEmpty(), "post-condition (eager drainage)");
6502       assert(_collector->overflow_list_is_empty(),
6503              "overflow list should be empty");
6504 
6505     }
6506   #endif // ASSERT
6507   return is_obj_array;
6508 }
6509 
6510 MarkFromRootsClosure::MarkFromRootsClosure(CMSCollector* collector,
6511                         MemRegion span,
6512                         CMSBitMap* bitMap, CMSMarkStack*  markStack,
6513                         bool should_yield, bool verifying):
6514   _collector(collector),
6515   _span(span),
6516   _bitMap(bitMap),
6517   _mut(&collector->_modUnionTable),
6518   _markStack(markStack),
6519   _yield(should_yield),
6520   _skipBits(0)
6521 {
6522   assert(_markStack->isEmpty(), "stack should be empty");
6523   _finger = _bitMap->startWord();
6524   _threshold = _finger;
6525   assert(_collector->_restart_addr == NULL, "Sanity check");
6526   assert(_span.contains(_finger), "Out of bounds _finger?");
6527   DEBUG_ONLY(_verifying = verifying;)
6528 }
6529 
6530 void MarkFromRootsClosure::reset(HeapWord* addr) {
6531   assert(_markStack->isEmpty(), "would cause duplicates on stack");
6532   assert(_span.contains(addr), "Out of bounds _finger?");
6533   _finger = addr;
6534   _threshold = (HeapWord*)round_to(
6535                  (intptr_t)_finger, CardTableModRefBS::card_size);
6536 }
6537 
6538 // Should revisit to see if this should be restructured for
6539 // greater efficiency.
6540 bool MarkFromRootsClosure::do_bit(size_t offset) {
6541   if (_skipBits > 0) {
6542     _skipBits--;
6543     return true;
6544   }
6545   // convert offset into a HeapWord*
6546   HeapWord* addr = _bitMap->startWord() + offset;
6547   assert(_bitMap->endWord() && addr < _bitMap->endWord(),
6548          "address out of range");
6549   assert(_bitMap->isMarked(addr), "tautology");
6550   if (_bitMap->isMarked(addr+1)) {
6551     // this is an allocated but not yet initialized object
6552     assert(_skipBits == 0, "tautology");
6553     _skipBits = 2;  // skip next two marked bits ("Printezis-marks")
6554     oop p = oop(addr);
6555     if (p->klass_or_null() == NULL) {
6556       DEBUG_ONLY(if (!_verifying) {)
6557         // We re-dirty the cards on which this object lies and increase
6558         // the _threshold so that we'll come back to scan this object
6559         // during the preclean or remark phase. (CMSCleanOnEnter)
6560         if (CMSCleanOnEnter) {
6561           size_t sz = _collector->block_size_using_printezis_bits(addr);
6562           HeapWord* end_card_addr   = (HeapWord*)round_to(
6563                                          (intptr_t)(addr+sz), CardTableModRefBS::card_size);
6564           MemRegion redirty_range = MemRegion(addr, end_card_addr);
6565           assert(!redirty_range.is_empty(), "Arithmetical tautology");
6566           // Bump _threshold to end_card_addr; note that
6567           // _threshold cannot possibly exceed end_card_addr, anyhow.
6568           // This prevents future clearing of the card as the scan proceeds
6569           // to the right.
6570           assert(_threshold <= end_card_addr,
6571                  "Because we are just scanning into this object");
6572           if (_threshold < end_card_addr) {
6573             _threshold = end_card_addr;
6574           }
6575           if (p->klass_or_null() != NULL) {
6576             // Redirty the range of cards...
6577             _mut->mark_range(redirty_range);
6578           } // ...else the setting of klass will dirty the card anyway.
6579         }
6580       DEBUG_ONLY(})
6581       return true;
6582     }
6583   }
6584   scanOopsInOop(addr);
6585   return true;
6586 }
6587 
6588 // We take a break if we've been at this for a while,
6589 // so as to avoid monopolizing the locks involved.
6590 void MarkFromRootsClosure::do_yield_work() {
6591   // First give up the locks, then yield, then re-lock
6592   // We should probably use a constructor/destructor idiom to
6593   // do this unlock/lock or modify the MutexUnlocker class to
6594   // serve our purpose. XXX
6595   assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
6596          "CMS thread should hold CMS token");
6597   assert_lock_strong(_bitMap->lock());
6598   _bitMap->lock()->unlock();
6599   ConcurrentMarkSweepThread::desynchronize(true);
6600   _collector->stopTimer();
6601   if (PrintCMSStatistics != 0) {
6602     _collector->incrementYields();
6603   }
6604 
6605   // See the comment in coordinator_yield()
6606   for (unsigned i = 0; i < CMSYieldSleepCount &&
6607                        ConcurrentMarkSweepThread::should_yield() &&
6608                        !CMSCollector::foregroundGCIsActive(); ++i) {
6609     os::sleep(Thread::current(), 1, false);
6610   }
6611 
6612   ConcurrentMarkSweepThread::synchronize(true);
6613   _bitMap->lock()->lock_without_safepoint_check();
6614   _collector->startTimer();
6615 }
6616 
6617 void MarkFromRootsClosure::scanOopsInOop(HeapWord* ptr) {
6618   assert(_bitMap->isMarked(ptr), "expected bit to be set");
6619   assert(_markStack->isEmpty(),
6620          "should drain stack to limit stack usage");
6621   // convert ptr to an oop preparatory to scanning
6622   oop obj = oop(ptr);
6623   // Ignore mark word in verification below, since we
6624   // may be running concurrent with mutators.
6625   assert(obj->is_oop(true), "should be an oop");
6626   assert(_finger <= ptr, "_finger runneth ahead");
6627   // advance the finger to right end of this object
6628   _finger = ptr + obj->size();
6629   assert(_finger > ptr, "we just incremented it above");
6630   // On large heaps, it may take us some time to get through
6631   // the marking phase. During this time it's possible that
6632   // a lot of mutations have accumulated in the card table
6633   // and the mod union table --
6634   // these mutation records are redundant until we have
6635   // actually traced into the corresponding card.
6636   // Here, we check whether advancing the finger would make
6637   // us cross into a new card, and if so clear corresponding
6638   // cards in the MUT (preclean them in the card-table in the
6639   // future).
6640 
6641   DEBUG_ONLY(if (!_verifying) {)
6642     // The clean-on-enter optimization is disabled by default,
6643     // until we fix 6178663.
6644     if (CMSCleanOnEnter && (_finger > _threshold)) {
6645       // [_threshold, _finger) represents the interval
6646       // of cards to be cleared  in MUT (or precleaned in card table).
6647       // The set of cards to be cleared is all those that overlap
6648       // with the interval [_threshold, _finger); note that
6649       // _threshold is always kept card-aligned but _finger isn't
6650       // always card-aligned.
6651       HeapWord* old_threshold = _threshold;
6652       assert(old_threshold == (HeapWord*)round_to(
6653               (intptr_t)old_threshold, CardTableModRefBS::card_size),
6654              "_threshold should always be card-aligned");
6655       _threshold = (HeapWord*)round_to(
6656                      (intptr_t)_finger, CardTableModRefBS::card_size);
6657       MemRegion mr(old_threshold, _threshold);
6658       assert(!mr.is_empty(), "Control point invariant");
6659       assert(_span.contains(mr), "Should clear within span");
6660       _mut->clear_range(mr);
6661     }
6662   DEBUG_ONLY(})
6663   // Note: the finger doesn't advance while we drain
6664   // the stack below.
6665   PushOrMarkClosure pushOrMarkClosure(_collector,
6666                                       _span, _bitMap, _markStack,
6667                                       _finger, this);
6668   bool res = _markStack->push(obj);
6669   assert(res, "Empty non-zero size stack should have space for single push");
6670   while (!_markStack->isEmpty()) {
6671     oop new_oop = _markStack->pop();
6672     // Skip verifying header mark word below because we are
6673     // running concurrent with mutators.
6674     assert(new_oop->is_oop(true), "Oops! expected to pop an oop");
6675     // now scan this oop's oops
6676     new_oop->oop_iterate(&pushOrMarkClosure);
6677     do_yield_check();
6678   }
6679   assert(_markStack->isEmpty(), "tautology, emphasizing post-condition");
6680 }
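
// Illustrative sketch (not collector code): the card-alignment step used
// above when bumping _threshold past the new _finger. It assumes, as
// CardTableModRefBS::card_size does, that the card size is a power of two.
// The helper name is hypothetical.
static inline uintptr_t example_round_up_to_card(uintptr_t addr, uintptr_t card_size) {
  // next card boundary at or above addr
  return (addr + card_size - 1) & ~(card_size - 1);
}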
6681 
6682 Par_MarkFromRootsClosure::Par_MarkFromRootsClosure(CMSConcMarkingTask* task,
6683                        CMSCollector* collector, MemRegion span,
6684                        CMSBitMap* bit_map,
6685                        OopTaskQueue* work_queue,
6686                        CMSMarkStack*  overflow_stack):
6687   _collector(collector),
6688   _whole_span(collector->_span),
6689   _span(span),
6690   _bit_map(bit_map),
6691   _mut(&collector->_modUnionTable),
6692   _work_queue(work_queue),
6693   _overflow_stack(overflow_stack),
6694   _skip_bits(0),
6695   _task(task)
6696 {
6697   assert(_work_queue->size() == 0, "work_queue should be empty");
6698   _finger = span.start();
6699   _threshold = _finger;     // XXX Defer clear-on-enter optimization for now
6700   assert(_span.contains(_finger), "Out of bounds _finger?");
6701 }
6702 
6703 // Should revisit to see if this should be restructured for
6704 // greater efficiency.
6705 bool Par_MarkFromRootsClosure::do_bit(size_t offset) {
6706   if (_skip_bits > 0) {
6707     _skip_bits--;
6708     return true;
6709   }
6710   // convert offset into a HeapWord*
6711   HeapWord* addr = _bit_map->startWord() + offset;
6712   assert(_bit_map->endWord() && addr < _bit_map->endWord(),
6713          "address out of range");
6714   assert(_bit_map->isMarked(addr), "tautology");
6715   if (_bit_map->isMarked(addr+1)) {
6716     // this is an allocated object that might not yet be initialized
6717     assert(_skip_bits == 0, "tautology");
6718     _skip_bits = 2;  // skip next two marked bits ("Printezis-marks")
6719     oop p = oop(addr);
6720     if (p->klass_or_null() == NULL) {
6721       // in the case of Clean-on-Enter optimization, redirty card
6722       // and avoid clearing the card by increasing the threshold.
6723       return true;
6724     }
6725   }
6726   scan_oops_in_oop(addr);
6727   return true;
6728 }
6729 
6730 void Par_MarkFromRootsClosure::scan_oops_in_oop(HeapWord* ptr) {
6731   assert(_bit_map->isMarked(ptr), "expected bit to be set");
6732   // Should we assert that our work queue is empty or
6733   // below some drain limit?
6734   assert(_work_queue->size() == 0,
6735          "should drain stack to limit stack usage");
6736   // convert ptr to an oop preparatory to scanning
6737   oop obj = oop(ptr);
6738   // Ignore mark word in verification below, since we
6739   // may be running concurrent with mutators.
6740   assert(obj->is_oop(true), "should be an oop");
6741   assert(_finger <= ptr, "_finger runneth ahead");
6742   // advance the finger to right end of this object
6743   _finger = ptr + obj->size();
6744   assert(_finger > ptr, "we just incremented it above");
6745   // On large heaps, it may take us some time to get through
6746   // the marking phase. During this time it's possible that
6747   // a lot of mutations have accumulated in the card table
6748   // and the mod union table --
6749   // these mutation records are redundant until we have
6750   // actually traced into the corresponding card.
6751   // Here, we check whether advancing the finger would make
6752   // us cross into a new card, and if so clear corresponding
6753   // cards in the MUT (preclean them in the card-table in the
6754   // future).
6755 
6756   // The clean-on-enter optimization is disabled by default,
6757   // until we fix 6178663.
6758   if (CMSCleanOnEnter && (_finger > _threshold)) {
6759     // [_threshold, _finger) represents the interval
6760     // of cards to be cleared  in MUT (or precleaned in card table).
6761     // The set of cards to be cleared is all those that overlap
6762     // with the interval [_threshold, _finger); note that
6763     // _threshold is always kept card-aligned but _finger isn't
6764     // always card-aligned.
6765     HeapWord* old_threshold = _threshold;
6766     assert(old_threshold == (HeapWord*)round_to(
6767             (intptr_t)old_threshold, CardTableModRefBS::card_size),
6768            "_threshold should always be card-aligned");
6769     _threshold = (HeapWord*)round_to(
6770                    (intptr_t)_finger, CardTableModRefBS::card_size);
6771     MemRegion mr(old_threshold, _threshold);
6772     assert(!mr.is_empty(), "Control point invariant");
6773     assert(_span.contains(mr), "Should clear within span"); // _whole_span ??
6774     _mut->clear_range(mr);
6775   }
6776 
6777   // Note: the local finger doesn't advance while we drain
6778   // the stack below, but the global finger sure can and will.
6779   HeapWord** gfa = _task->global_finger_addr();
6780   Par_PushOrMarkClosure pushOrMarkClosure(_collector,
6781                                       _span, _bit_map,
6782                                       _work_queue,
6783                                       _overflow_stack,
6784                                       _finger,
6785                                       gfa, this);
6786   bool res = _work_queue->push(obj);   // overflow could occur here
6787   assert(res, "Will hold once we use workqueues");
6788   while (true) {
6789     oop new_oop;
6790     if (!_work_queue->pop_local(new_oop)) {
6791       // We emptied our work_queue; check whether any work can
6792       // be taken from the overflow stack.
6793       if (CMSConcMarkingTask::get_work_from_overflow_stack(
6794             _overflow_stack, _work_queue)) {
6795         do_yield_check();
6796         continue;
6797       } else {  // done
6798         break;
6799       }
6800     }
6801     // Skip verifying header mark word below because we are
6802     // running concurrent with mutators.
6803     assert(new_oop->is_oop(true), "Oops! expected to pop an oop");
6804     // now scan this oop's oops
6805     new_oop->oop_iterate(&pushOrMarkClosure);
6806     do_yield_check();
6807   }
6808   assert(_work_queue->size() == 0, "tautology, emphasizing post-condition");
6809 }
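
// Illustrative sketch (not collector code): the shape of the drain loop used
// above -- work is taken from the thread-local queue first; when that runs
// dry an attempt is made to refill it from the shared overflow stack, and the
// loop terminates only when both sources are exhausted. All names here are
// hypothetical stand-ins for the OopTaskQueue / CMSMarkStack operations.
struct ExampleWorkSource {
  bool pop_local(void** task)  { *task = 0; return false; }  // thread-local queue
  bool refill_from_overflow()  { return false; }              // shared overflow stack
};

static void example_drain_loop(ExampleWorkSource* ws, void (*scan)(void*)) {
  void* task;
  while (true) {
    if (!ws->pop_local(&task)) {          // local queue is empty...
      if (!ws->refill_from_overflow()) {  // ...and so is the overflow stack
        break;                            // done
      }
      continue;                           // refilled: try the local queue again
    }
    scan(task);                           // process one unit of marking work
  }
}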
6810 
6811 // Yield in response to a request from VM Thread or
6812 // from mutators.
6813 void Par_MarkFromRootsClosure::do_yield_work() {
6814   assert(_task != NULL, "sanity");
6815   _task->yield();
6816 }
6817 
6818 // A variant of the above used for verifying CMS marking work.
6819 MarkFromRootsVerifyClosure::MarkFromRootsVerifyClosure(CMSCollector* collector,
6820                         MemRegion span,
6821                         CMSBitMap* verification_bm, CMSBitMap* cms_bm,
6822                         CMSMarkStack*  mark_stack):
6823   _collector(collector),
6824   _span(span),
6825   _verification_bm(verification_bm),
6826   _cms_bm(cms_bm),
6827   _mark_stack(mark_stack),
6828   _pam_verify_closure(collector, span, verification_bm, cms_bm,
6829                       mark_stack)
6830 {
6831   assert(_mark_stack->isEmpty(), "stack should be empty");
6832   _finger = _verification_bm->startWord();
6833   assert(_collector->_restart_addr == NULL, "Sanity check");
6834   assert(_span.contains(_finger), "Out of bounds _finger?");
6835 }
6836 
6837 void MarkFromRootsVerifyClosure::reset(HeapWord* addr) {
6838   assert(_mark_stack->isEmpty(), "would cause duplicates on stack");
6839   assert(_span.contains(addr), "Out of bounds _finger?");
6840   _finger = addr;
6841 }
6842 
6843 // Should revisit to see if this should be restructured for
6844 // greater efficiency.
6845 bool MarkFromRootsVerifyClosure::do_bit(size_t offset) {
6846   // convert offset into a HeapWord*
6847   HeapWord* addr = _verification_bm->startWord() + offset;
6848   assert(_verification_bm->endWord() && addr < _verification_bm->endWord(),
6849          "address out of range");
6850   assert(_verification_bm->isMarked(addr), "tautology");
6851   assert(_cms_bm->isMarked(addr), "tautology");
6852 
6853   assert(_mark_stack->isEmpty(),
6854          "should drain stack to limit stack usage");
6855   // convert addr to an oop preparatory to scanning
6856   oop obj = oop(addr);
6857   assert(obj->is_oop(), "should be an oop");
6858   assert(_finger <= addr, "_finger runneth ahead");
6859   // advance the finger to right end of this object
6860   _finger = addr + obj->size();
6861   assert(_finger > addr, "we just incremented it above");
6862   // Note: the finger doesn't advance while we drain
6863   // the stack below.
6864   bool res = _mark_stack->push(obj);
6865   assert(res, "Empty non-zero size stack should have space for single push");
6866   while (!_mark_stack->isEmpty()) {
6867     oop new_oop = _mark_stack->pop();
6868     assert(new_oop->is_oop(), "Oops! expected to pop an oop");
6869     // now scan this oop's oops
6870     new_oop->oop_iterate(&_pam_verify_closure);
6871   }
6872   assert(_mark_stack->isEmpty(), "tautology, emphasizing post-condition");
6873   return true;
6874 }
6875 
6876 PushAndMarkVerifyClosure::PushAndMarkVerifyClosure(
6877   CMSCollector* collector, MemRegion span,
6878   CMSBitMap* verification_bm, CMSBitMap* cms_bm,
6879   CMSMarkStack*  mark_stack):
6880   MetadataAwareOopClosure(collector->ref_processor()),
6881   _collector(collector),
6882   _span(span),
6883   _verification_bm(verification_bm),
6884   _cms_bm(cms_bm),
6885   _mark_stack(mark_stack)
6886 { }
6887 
6888 void PushAndMarkVerifyClosure::do_oop(oop* p)       { PushAndMarkVerifyClosure::do_oop_work(p); }
6889 void PushAndMarkVerifyClosure::do_oop(narrowOop* p) { PushAndMarkVerifyClosure::do_oop_work(p); }
6890 
6891 // Upon stack overflow, we discard (part of) the stack,
6892 // remembering the least address amongst those discarded
6893 // in CMSCollector's _restart_address.
6894 void PushAndMarkVerifyClosure::handle_stack_overflow(HeapWord* lost) {
6895   // Remember the least grey address discarded
6896   HeapWord* ra = (HeapWord*)_mark_stack->least_value(lost);
6897   _collector->lower_restart_addr(ra);
6898   _mark_stack->reset();  // discard stack contents
6899   _mark_stack->expand(); // expand the stack if possible
6900 }
6901 
6902 void PushAndMarkVerifyClosure::do_oop(oop obj) {
6903   assert(obj->is_oop_or_null(), err_msg("Expected an oop or NULL at " PTR_FORMAT, p2i(obj)));
6904   HeapWord* addr = (HeapWord*)obj;
6905   if (_span.contains(addr) && !_verification_bm->isMarked(addr)) {
6906     // Oop lies in _span and isn't yet grey or black
6907     _verification_bm->mark(addr);            // now grey
6908     if (!_cms_bm->isMarked(addr)) {
6909       oop(addr)->print();
6910       gclog_or_tty->print_cr(" (" INTPTR_FORMAT " should have been marked)",
6911                              p2i(addr));
6912       fatal("... aborting");
6913     }
6914 
6915     if (!_mark_stack->push(obj)) { // stack overflow
6916       if (PrintCMSStatistics != 0) {
6917         gclog_or_tty->print_cr("CMS marking stack overflow (benign) at "
6918                                SIZE_FORMAT, _mark_stack->capacity());
6919       }
6920       assert(_mark_stack->isFull(), "Else push should have succeeded");
6921       handle_stack_overflow(addr);
6922     }
6923     // anything including and to the right of _finger
6924     // will be scanned as we iterate over the remainder of the
6925     // bit map
6926   }
6927 }
6928 
6929 PushOrMarkClosure::PushOrMarkClosure(CMSCollector* collector,
6930                      MemRegion span,
6931                      CMSBitMap* bitMap, CMSMarkStack*  markStack,
6932                      HeapWord* finger, MarkFromRootsClosure* parent) :
6933   MetadataAwareOopClosure(collector->ref_processor()),
6934   _collector(collector),
6935   _span(span),
6936   _bitMap(bitMap),
6937   _markStack(markStack),
6938   _finger(finger),
6939   _parent(parent)
6940 { }
6941 
6942 Par_PushOrMarkClosure::Par_PushOrMarkClosure(CMSCollector* collector,
6943                      MemRegion span,
6944                      CMSBitMap* bit_map,
6945                      OopTaskQueue* work_queue,
6946                      CMSMarkStack*  overflow_stack,
6947                      HeapWord* finger,
6948                      HeapWord** global_finger_addr,
6949                      Par_MarkFromRootsClosure* parent) :
6950   MetadataAwareOopClosure(collector->ref_processor()),
6951   _collector(collector),
6952   _whole_span(collector->_span),
6953   _span(span),
6954   _bit_map(bit_map),
6955   _work_queue(work_queue),
6956   _overflow_stack(overflow_stack),
6957   _finger(finger),
6958   _global_finger_addr(global_finger_addr),
6959   _parent(parent)
6960 { }
6961 
6962 // Assumes thread-safe access by callers, who are
6963 // responsible for mutual exclusion.
6964 void CMSCollector::lower_restart_addr(HeapWord* low) {
6965   assert(_span.contains(low), "Out of bounds addr");
6966   if (_restart_addr == NULL) {
6967     _restart_addr = low;
6968   } else {
6969     _restart_addr = MIN2(_restart_addr, low);
6970   }
6971 }
6972 
6973 // Upon stack overflow, we discard (part of) the stack,
6974 // remembering the least address amongst those discarded
6975 // in CMSCollector's _restart_address.
6976 void PushOrMarkClosure::handle_stack_overflow(HeapWord* lost) {
6977   // Remember the least grey address discarded
6978   HeapWord* ra = (HeapWord*)_markStack->least_value(lost);
6979   _collector->lower_restart_addr(ra);
6980   _markStack->reset();  // discard stack contents
6981   _markStack->expand(); // expand the stack if possible
6982 }
6983 
6984 // Upon stack overflow, we discard (part of) the stack,
6985 // remembering the least address amongst those discarded
6986 // in CMSCollector's _restart_address.
6987 void Par_PushOrMarkClosure::handle_stack_overflow(HeapWord* lost) {
6988   // We need to do this under a mutex to prevent other
6989   // workers from interfering with the work done below.
6990   MutexLockerEx ml(_overflow_stack->par_lock(),
6991                    Mutex::_no_safepoint_check_flag);
6992   // Remember the least grey address discarded
6993   HeapWord* ra = (HeapWord*)_overflow_stack->least_value(lost);
6994   _collector->lower_restart_addr(ra);
6995   _overflow_stack->reset();  // discard stack contents
6996   _overflow_stack->expand(); // expand the stack if possible
6997 }
6998 
6999 void PushOrMarkClosure::do_oop(oop obj) {
7000   // Ignore mark word because we are running concurrent with mutators.
7001   assert(obj->is_oop_or_null(true), err_msg("Expected an oop or NULL at " PTR_FORMAT, p2i(obj)));
7002   HeapWord* addr = (HeapWord*)obj;
7003   if (_span.contains(addr) && !_bitMap->isMarked(addr)) {
7004     // Oop lies in _span and isn't yet grey or black
7005     _bitMap->mark(addr);            // now grey
7006     if (addr < _finger) {
7007       // the bit map iteration has already either passed, or
7008       // sampled, this bit in the bit map; we'll need to
7009       // use the marking stack to scan this oop's oops.
7010       bool simulate_overflow = false;
7011       NOT_PRODUCT(
7012         if (CMSMarkStackOverflowALot &&
7013             _collector->simulate_overflow()) {
7014           // simulate a stack overflow
7015           simulate_overflow = true;
7016         }
7017       )
7018       if (simulate_overflow || !_markStack->push(obj)) { // stack overflow
7019         if (PrintCMSStatistics != 0) {
7020           gclog_or_tty->print_cr("CMS marking stack overflow (benign) at "
7021                                  SIZE_FORMAT, _markStack->capacity());
7022         }
7023         assert(simulate_overflow || _markStack->isFull(), "Else push should have succeeded");
7024         handle_stack_overflow(addr);
7025       }
7026     }
7027     // anything including and to the right of _finger
7028     // will be scanned as we iterate over the remainder of the
7029     // bit map
7030     do_yield_check();
7031   }
7032 }
7033 
7034 void PushOrMarkClosure::do_oop(oop* p)       { PushOrMarkClosure::do_oop_work(p); }
7035 void PushOrMarkClosure::do_oop(narrowOop* p) { PushOrMarkClosure::do_oop_work(p); }
7036 
7037 void Par_PushOrMarkClosure::do_oop(oop obj) {
7038   // Ignore mark word because we are running concurrent with mutators.
7039   assert(obj->is_oop_or_null(true), err_msg("Expected an oop or NULL at " PTR_FORMAT, p2i(obj)));
7040   HeapWord* addr = (HeapWord*)obj;
7041   if (_whole_span.contains(addr) && !_bit_map->isMarked(addr)) {
7042     // Oop lies in _span and isn't yet grey or black
7043     // We read the global_finger (volatile read) strictly after marking oop
7044     bool res = _bit_map->par_mark(addr);    // now grey
7045     volatile HeapWord** gfa = (volatile HeapWord**)_global_finger_addr;
7046     // Should we push this marked oop on our stack?
7047     // -- if someone else marked it, nothing to do
7048     // -- if target oop is above global finger nothing to do
7049     // -- if target oop is in chunk and above local finger
7050     //      then nothing to do
7051     // -- else push on work queue
7052     if (   !res       // someone else marked it, they will deal with it
7053         || (addr >= *gfa)  // will be scanned in a later task
7054         || (_span.contains(addr) && addr >= _finger)) { // later in this chunk
7055       return;
7056     }
7057     // the bit map iteration has already either passed, or
7058     // sampled, this bit in the bit map; we'll need to
7059     // use the marking stack to scan this oop's oops.
7060     bool simulate_overflow = false;
7061     NOT_PRODUCT(
7062       if (CMSMarkStackOverflowALot &&
7063           _collector->simulate_overflow()) {
7064         // simulate a stack overflow
7065         simulate_overflow = true;
7066       }
7067     )
7068     if (simulate_overflow ||
7069         !(_work_queue->push(obj) || _overflow_stack->par_push(obj))) {
7070       // stack overflow
7071       if (PrintCMSStatistics != 0) {
7072         gclog_or_tty->print_cr("CMS marking stack overflow (benign) at "
7073                                SIZE_FORMAT, _overflow_stack->capacity());
7074       }
7075       // We cannot assert that the overflow stack is full because
7076       // it may have been emptied since.
7077       assert(simulate_overflow ||
7078              _work_queue->size() == _work_queue->max_elems(),
7079             "Else push should have succeeded");
7080       handle_stack_overflow(addr);
7081     }
7082     do_yield_check();
7083   }
7084 }
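
// Illustrative sketch (hypothetical helper, not collector code): the decision
// made above about whether a freshly greyed oop must be pushed for later
// scanning. It needs pushing only when this thread did the marking and the
// bitmap iteration has already passed the address, i.e. it lies below the
// global finger and, if inside this worker's chunk, below the local finger.
static bool example_needs_push(bool we_marked_it,
                               uintptr_t addr,
                               uintptr_t global_finger,
                               bool in_this_chunk,
                               uintptr_t local_finger) {
  if (!we_marked_it)                          return false; // another worker owns it
  if (addr >= global_finger)                  return false; // a later task will scan it
  if (in_this_chunk && addr >= local_finger)  return false; // scanned later in this chunk
  return true;                                              // already passed over: push it
}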
7085 
7086 void Par_PushOrMarkClosure::do_oop(oop* p)       { Par_PushOrMarkClosure::do_oop_work(p); }
7087 void Par_PushOrMarkClosure::do_oop(narrowOop* p) { Par_PushOrMarkClosure::do_oop_work(p); }
7088 
7089 PushAndMarkClosure::PushAndMarkClosure(CMSCollector* collector,
7090                                        MemRegion span,
7091                                        ReferenceProcessor* rp,
7092                                        CMSBitMap* bit_map,
7093                                        CMSBitMap* mod_union_table,
7094                                        CMSMarkStack*  mark_stack,
7095                                        bool           concurrent_precleaning):
7096   MetadataAwareOopClosure(rp),
7097   _collector(collector),
7098   _span(span),
7099   _bit_map(bit_map),
7100   _mod_union_table(mod_union_table),
7101   _mark_stack(mark_stack),
7102   _concurrent_precleaning(concurrent_precleaning)
7103 {
7104   assert(_ref_processor != NULL, "_ref_processor shouldn't be NULL");
7105 }
7106 
7107 // Grey object rescan during pre-cleaning and second checkpoint phases --
7108 // the non-parallel version (the parallel version appears further below.)
7109 void PushAndMarkClosure::do_oop(oop obj) {
7110   // Ignore mark word verification. If during concurrent precleaning,
7111   // the object monitor may be locked. If during the checkpoint
7112   // phases, the object may already have been reached by a  different
7113   // path and may be at the end of the global overflow list (so
7114   // the mark word may be NULL).
7115   assert(obj->is_oop_or_null(true /* ignore mark word */),
7116          err_msg("Expected an oop or NULL at " PTR_FORMAT, p2i(obj)));
7117   HeapWord* addr = (HeapWord*)obj;
7118   // Check if oop points into the CMS generation
7119   // and is not marked
7120   if (_span.contains(addr) && !_bit_map->isMarked(addr)) {
7121     // a white object ...
7122     _bit_map->mark(addr);         // ... now grey
7123     // push on the marking stack (grey set)
7124     bool simulate_overflow = false;
7125     NOT_PRODUCT(
7126       if (CMSMarkStackOverflowALot &&
7127           _collector->simulate_overflow()) {
7128         // simulate a stack overflow
7129         simulate_overflow = true;
7130       }
7131     )
7132     if (simulate_overflow || !_mark_stack->push(obj)) {
7133       if (_concurrent_precleaning) {
7134          // During precleaning we can just dirty the appropriate card(s)
7135          // in the mod union table, thus ensuring that the object remains
7136          // in the grey set, and then continue. In the case of object arrays
7137          // we need to dirty all of the cards that the object spans,
7138          // since the rescan of object arrays will be limited to the
7139          // dirty cards.
7140          // Note that no one can be interfering with us in this action
7141          // of dirtying the mod union table, so no locking or atomics
7142          // are required.
7143          if (obj->is_objArray()) {
7144            size_t sz = obj->size();
7145            HeapWord* end_card_addr = (HeapWord*)round_to(
7146                                         (intptr_t)(addr+sz), CardTableModRefBS::card_size);
7147            MemRegion redirty_range = MemRegion(addr, end_card_addr);
7148            assert(!redirty_range.is_empty(), "Arithmetical tautology");
7149            _mod_union_table->mark_range(redirty_range);
7150          } else {
7151            _mod_union_table->mark(addr);
7152          }
7153          _collector->_ser_pmc_preclean_ovflw++;
7154       } else {
7155          // During the remark phase, we need to remember this oop
7156          // in the overflow list.
7157          _collector->push_on_overflow_list(obj);
7158          _collector->_ser_pmc_remark_ovflw++;
7159       }
7160     }
7161   }
7162 }
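
// Illustrative sketch (not collector code): the set of cards dirtied above
// when an objArray cannot be pushed during precleaning -- every card from the
// object's first word through the card containing its last word. The names
// and the callback are hypothetical.
static void example_redirty_spanned_cards(uintptr_t obj_start_byte,
                                          size_t    obj_size_bytes,
                                          uintptr_t card_size_bytes,
                                          void    (*dirty_card)(uintptr_t card_index)) {
  uintptr_t first_card = obj_start_byte / card_size_bytes;
  uintptr_t last_card  = (obj_start_byte + obj_size_bytes - 1) / card_size_bytes;
  for (uintptr_t c = first_card; c <= last_card; c++) {
    dirty_card(c);  // mirror of marking the whole redirty_range in the mod union table
  }
}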
7163 
7164 Par_PushAndMarkClosure::Par_PushAndMarkClosure(CMSCollector* collector,
7165                                                MemRegion span,
7166                                                ReferenceProcessor* rp,
7167                                                CMSBitMap* bit_map,
7168                                                OopTaskQueue* work_queue):
7169   MetadataAwareOopClosure(rp),
7170   _collector(collector),
7171   _span(span),
7172   _bit_map(bit_map),
7173   _work_queue(work_queue)
7174 {
7175   assert(_ref_processor != NULL, "_ref_processor shouldn't be NULL");
7176 }
7177 
7178 void PushAndMarkClosure::do_oop(oop* p)       { PushAndMarkClosure::do_oop_work(p); }
7179 void PushAndMarkClosure::do_oop(narrowOop* p) { PushAndMarkClosure::do_oop_work(p); }
7180 
7181 // Grey object rescan during second checkpoint phase --
7182 // the parallel version.
7183 void Par_PushAndMarkClosure::do_oop(oop obj) {
7184   // In the assert below, we ignore the mark word because
7185   // this oop may point to an already visited object that is
7186   // on the overflow stack (in which case the mark word has
7187   // been hijacked for chaining into the overflow stack --
7188   // if this is the last object in the overflow stack then
7189   // its mark word will be NULL). Because this object may
7190   // have been subsequently popped off the global overflow
7191   // stack, and the mark word possibly restored to the prototypical
7192   // value, by the time we get to examine this failing assert in
7193   // the debugger, is_oop_or_null(false) may subsequently start
7194   // to hold.
7195   assert(obj->is_oop_or_null(true),
7196          err_msg("Expected an oop or NULL at " PTR_FORMAT, p2i(obj)));
7197   HeapWord* addr = (HeapWord*)obj;
7198   // Check if oop points into the CMS generation
7199   // and is not marked
7200   if (_span.contains(addr) && !_bit_map->isMarked(addr)) {
7201     // a white object ...
7202     // If we manage to "claim" the object, by being the
7203     // first thread to mark it, then we push it on our
7204     // marking stack
7205     if (_bit_map->par_mark(addr)) {     // ... now grey
7206       // push on work queue (grey set)
7207       bool simulate_overflow = false;
7208       NOT_PRODUCT(
7209         if (CMSMarkStackOverflowALot &&
7210             _collector->par_simulate_overflow()) {
7211           // simulate a stack overflow
7212           simulate_overflow = true;
7213         }
7214       )
7215       if (simulate_overflow || !_work_queue->push(obj)) {
7216         _collector->par_push_on_overflow_list(obj);
7217         _collector->_par_pmc_remark_ovflw++; //  imprecise OK: no need to CAS
7218       }
7219     } // Else, some other thread got there first
7220   }
7221 }
7222 
7223 void Par_PushAndMarkClosure::do_oop(oop* p)       { Par_PushAndMarkClosure::do_oop_work(p); }
7224 void Par_PushAndMarkClosure::do_oop(narrowOop* p) { Par_PushAndMarkClosure::do_oop_work(p); }
7225 
7226 void CMSPrecleanRefsYieldClosure::do_yield_work() {
7227   Mutex* bml = _collector->bitMapLock();
7228   assert_lock_strong(bml);
7229   assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
7230          "CMS thread should hold CMS token");
7231 
7232   bml->unlock();
7233   ConcurrentMarkSweepThread::desynchronize(true);
7234 
7235   _collector->stopTimer();
7236   if (PrintCMSStatistics != 0) {
7237     _collector->incrementYields();
7238   }
7239 
7240   // See the comment in coordinator_yield()
7241   for (unsigned i = 0; i < CMSYieldSleepCount &&
7242                        ConcurrentMarkSweepThread::should_yield() &&
7243                        !CMSCollector::foregroundGCIsActive(); ++i) {
7244     os::sleep(Thread::current(), 1, false);
7245   }
7246 
7247   ConcurrentMarkSweepThread::synchronize(true);
7248   bml->lock();
7249 
7250   _collector->startTimer();
7251 }
7252 
7253 bool CMSPrecleanRefsYieldClosure::should_return() {
7254   if (ConcurrentMarkSweepThread::should_yield()) {
7255     do_yield_work();
7256   }
7257   return _collector->foregroundGCIsActive();
7258 }
7259 
7260 void MarkFromDirtyCardsClosure::do_MemRegion(MemRegion mr) {
7261   assert(((size_t)mr.start())%CardTableModRefBS::card_size_in_words == 0,
7262          "mr should be aligned to start at a card boundary");
7263   // We'd like to assert:
7264   // assert(mr.word_size()%CardTableModRefBS::card_size_in_words == 0,
7265   //        "mr should be a range of cards");
7266   // However, that would be too strong in one case -- the last
7267   // partition ends at _unallocated_block which, in general, can be
7268   // an arbitrary boundary, not necessarily card aligned.
7269   if (PrintCMSStatistics != 0) {
7270     _num_dirty_cards +=
7271          mr.word_size()/CardTableModRefBS::card_size_in_words;
7272   }
7273   _space->object_iterate_mem(mr, &_scan_cl);
7274 }
7275 
7276 SweepClosure::SweepClosure(CMSCollector* collector,
7277                            ConcurrentMarkSweepGeneration* g,
7278                            CMSBitMap* bitMap, bool should_yield) :
7279   _collector(collector),
7280   _g(g),
7281   _sp(g->cmsSpace()),
7282   _limit(_sp->sweep_limit()),
7283   _freelistLock(_sp->freelistLock()),
7284   _bitMap(bitMap),
7285   _yield(should_yield),
7286   _inFreeRange(false),           // No free range at beginning of sweep
7287   _freeRangeInFreeLists(false),  // No free range at beginning of sweep
7288   _lastFreeRangeCoalesced(false),
7289   _freeFinger(g->used_region().start())
7290 {
7291   NOT_PRODUCT(
7292     _numObjectsFreed = 0;
7293     _numWordsFreed   = 0;
7294     _numObjectsLive = 0;
7295     _numWordsLive = 0;
7296     _numObjectsAlreadyFree = 0;
7297     _numWordsAlreadyFree = 0;
7298     _last_fc = NULL;
7299 
7300     _sp->initializeIndexedFreeListArrayReturnedBytes();
7301     _sp->dictionary()->initialize_dict_returned_bytes();
7302   )
7303   assert(_limit >= _sp->bottom() && _limit <= _sp->end(),
7304          "sweep _limit out of bounds");
7305   if (CMSTraceSweeper) {
7306     gclog_or_tty->print_cr("\n====================\nStarting new sweep with limit " PTR_FORMAT,
7307                         p2i(_limit));
7308   }
7309 }
7310 
7311 void SweepClosure::print_on(outputStream* st) const {
7312   tty->print_cr("_sp = [" PTR_FORMAT "," PTR_FORMAT ")",
7313                 p2i(_sp->bottom()), p2i(_sp->end()));
7314   tty->print_cr("_limit = " PTR_FORMAT, p2i(_limit));
7315   tty->print_cr("_freeFinger = " PTR_FORMAT, p2i(_freeFinger));
7316   NOT_PRODUCT(tty->print_cr("_last_fc = " PTR_FORMAT, p2i(_last_fc));)
7317   tty->print_cr("_inFreeRange = %d, _freeRangeInFreeLists = %d, _lastFreeRangeCoalesced = %d",
7318                 _inFreeRange, _freeRangeInFreeLists, _lastFreeRangeCoalesced);
7319 }
7320 
7321 #ifndef PRODUCT
7322 // Assertion checking only:  no useful work in product mode --
7323 // however, if any of the flags below become product flags,
7324 // you may need to review this code to see if it needs to be
7325 // enabled in product mode.
7326 SweepClosure::~SweepClosure() {
7327   assert_lock_strong(_freelistLock);
7328   assert(_limit >= _sp->bottom() && _limit <= _sp->end(),
7329          "sweep _limit out of bounds");
7330   if (inFreeRange()) {
7331     warning("inFreeRange() should have been reset; dumping state of SweepClosure");
7332     print();
7333     ShouldNotReachHere();
7334   }
7335   if (Verbose && PrintGC) {
7336     gclog_or_tty->print("Collected " SIZE_FORMAT " objects, " SIZE_FORMAT " bytes",
7337                         _numObjectsFreed, _numWordsFreed*sizeof(HeapWord));
7338     gclog_or_tty->print_cr("\nLive " SIZE_FORMAT " objects,  "
7339                            SIZE_FORMAT " bytes  "
7340       "Already free " SIZE_FORMAT " objects, " SIZE_FORMAT " bytes",
7341       _numObjectsLive, _numWordsLive*sizeof(HeapWord),
7342       _numObjectsAlreadyFree, _numWordsAlreadyFree*sizeof(HeapWord));
7343     size_t totalBytes = (_numWordsFreed + _numWordsLive + _numWordsAlreadyFree)
7344                         * sizeof(HeapWord);
7345     gclog_or_tty->print_cr("Total sweep: " SIZE_FORMAT " bytes", totalBytes);
7346 
7347     if (PrintCMSStatistics && CMSVerifyReturnedBytes) {
7348       size_t indexListReturnedBytes = _sp->sumIndexedFreeListArrayReturnedBytes();
7349       size_t dict_returned_bytes = _sp->dictionary()->sum_dict_returned_bytes();
7350       size_t returned_bytes = indexListReturnedBytes + dict_returned_bytes;
7351       gclog_or_tty->print("Returned " SIZE_FORMAT " bytes", returned_bytes);
7352       gclog_or_tty->print("   Indexed List Returned " SIZE_FORMAT " bytes",
7353         indexListReturnedBytes);
7354       gclog_or_tty->print_cr("        Dictionary Returned " SIZE_FORMAT " bytes",
7355         dict_returned_bytes);
7356     }
7357   }
7358   if (CMSTraceSweeper) {
7359     gclog_or_tty->print_cr("end of sweep with _limit = " PTR_FORMAT "\n================",
7360                            p2i(_limit));
7361   }
7362 }
7363 #endif  // PRODUCT
7364 
7365 void SweepClosure::initialize_free_range(HeapWord* freeFinger,
7366     bool freeRangeInFreeLists) {
7367   if (CMSTraceSweeper) {
7368     gclog_or_tty->print("---- Start free range at " PTR_FORMAT " with free block (%d)\n",
7369                p2i(freeFinger), freeRangeInFreeLists);
7370   }
7371   assert(!inFreeRange(), "Trampling existing free range");
7372   set_inFreeRange(true);
7373   set_lastFreeRangeCoalesced(false);
7374 
7375   set_freeFinger(freeFinger);
7376   set_freeRangeInFreeLists(freeRangeInFreeLists);
7377   if (CMSTestInFreeList) {
7378     if (freeRangeInFreeLists) {
7379       FreeChunk* fc = (FreeChunk*) freeFinger;
7380       assert(fc->is_free(), "A chunk on the free list should be free.");
7381       assert(fc->size() > 0, "Free range should have a size");
7382       assert(_sp->verify_chunk_in_free_list(fc), "Chunk is not in free lists");
7383     }
7384   }
7385 }
7386 
7387 // Note that the sweeper runs concurrently with mutators. Thus,
7388 // it is possible for direct allocation in this generation to happen
7389 // in the middle of the sweep. Note that the sweeper also coalesces
7390 // contiguous free blocks. Thus, unless the sweeper and the allocator
7391 // synchronize appropriately, freshly allocated blocks may get swept up.
7392 // This is accomplished by the sweeper locking the free lists while
7393 // it is sweeping. Thus blocks that are determined to be free are
7394 // indeed free. There is however one additional complication:
7395 // blocks that have been allocated since the final checkpoint and
7396 // mark will not have been marked, and so would be treated as
7397 // unreachable and swept up. To prevent this, the allocator marks
7398 // the bit map when allocating during the sweep phase. This leads,
7399 // however, to a further complication -- objects may have been allocated
7400 // but not yet initialized -- in the sense that the header isn't yet
7401 // installed. The sweeper cannot then determine the size of the block
7402 // in order to skip over it. To deal with this case, we use a technique
7403 // (due to Printezis) to encode such uninitialized block sizes in the
7404 // bit map. Since the bit map uses a bit per every HeapWord, but the
7405 // CMS generation has a minimum object size of 3 HeapWords, it follows
7406 // that "normal marks" won't be adjacent in the bit map (there will
7407 // always be at least two 0 bits between successive 1 bits). We make use
7408 // of these "unused" bits to represent uninitialized blocks -- the bit
7409 // corresponding to the start of the uninitialized object and the next
7410 // bit are both set. Finally, a 1 bit marks the end of the object that
7411 // started with the two consecutive 1 bits to indicate its potentially
7412 // uninitialized state.
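
// Illustrative sketch (not collector code) of the encoding described above,
// over a plain array of per-word mark bits: bits [start] and [start+1] are
// both set for an uninitialized block, and the bit for the block's last word
// delimits it. The helper name is hypothetical; the real collector recovers
// the size with CMSBitMap::getNextMarkedWordAddress(), as in
// do_object_careful_m() earlier in this file.
static size_t example_printezis_block_words(const bool* bit_at, size_t start) {
  // precondition: bit_at[start] && bit_at[start + 1] (an uninitialized block)
  size_t last = start + 2;
  while (!bit_at[last]) {   // scan forward to the end-of-block mark
    last++;
  }
  // block size in words; always >= 3, so this pattern cannot be confused
  // with two adjacent "normal" marks
  return last + 1 - start;
}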
7413 
7414 size_t SweepClosure::do_blk_careful(HeapWord* addr) {
7415   FreeChunk* fc = (FreeChunk*)addr;
7416   size_t res;
7417 
7418   // Check if we are done sweeping. Below we check "addr >= _limit" rather
7419   // than "addr == _limit" because although _limit was a block boundary when
7420   // we started the sweep, it may no longer be one because heap expansion
7421   // may have caused us to coalesce the block ending at the address _limit
7422   // with a newly expanded chunk (this happens when _limit was set to the
7423   // previous _end of the space), so we may have stepped past _limit:
7424   // see the following Zeno-like trail of CRs 6977970, 7008136, 7042740.
7425   if (addr >= _limit) { // we have swept up to or past the limit: finish up
7426     assert(_limit >= _sp->bottom() && _limit <= _sp->end(),
7427            "sweep _limit out of bounds");
7428     assert(addr < _sp->end(), "addr out of bounds");
7429     // Flush any free range we might be holding as a single
7430     // coalesced chunk to the appropriate free list.
7431     if (inFreeRange()) {
7432       assert(freeFinger() >= _sp->bottom() && freeFinger() < _limit,
7433              err_msg("freeFinger() " PTR_FORMAT " is out-of-bounds", p2i(freeFinger())));
7434       flush_cur_free_chunk(freeFinger(),
7435                            pointer_delta(addr, freeFinger()));
7436       if (CMSTraceSweeper) {
7437         gclog_or_tty->print("Sweep: last chunk: ");
7438         gclog_or_tty->print("put_free_blk " PTR_FORMAT " (" SIZE_FORMAT ") "
7439                    "[coalesced:%d]\n",
7440                    p2i(freeFinger()), pointer_delta(addr, freeFinger()),
7441                    lastFreeRangeCoalesced() ? 1 : 0);
7442       }
7443     }
7444 
7445     // help the iterator loop finish
7446     return pointer_delta(_sp->end(), addr);
7447   }
7448 
7449   assert(addr < _limit, "sweep invariant");
7450   // check if we should yield
7451   do_yield_check(addr);
7452   if (fc->is_free()) {
7453     // Chunk that is already free
7454     res = fc->size();
7455     do_already_free_chunk(fc);
7456     debug_only(_sp->verifyFreeLists());
7457     // If we flush the chunk at hand in lookahead_and_flush()
7458     // and it's coalesced with a preceding chunk, then the
7459     // process of "mangling" the payload of the coalesced block
7460     // will cause erasure of the size information from the
7461     // (erstwhile) header of all the coalesced blocks but the
7462     // first, so the first disjunct in the assert will not hold
7463     // in that specific case (in which case the second disjunct
7464     // will hold).
7465     assert(res == fc->size() || ((HeapWord*)fc) + res >= _limit,
7466            "Otherwise the size info doesn't change at this step");
7467     NOT_PRODUCT(
7468       _numObjectsAlreadyFree++;
7469       _numWordsAlreadyFree += res;
7470     )
7471     NOT_PRODUCT(_last_fc = fc;)
7472   } else if (!_bitMap->isMarked(addr)) {
7473     // Chunk is fresh garbage
7474     res = do_garbage_chunk(fc);
7475     debug_only(_sp->verifyFreeLists());
7476     NOT_PRODUCT(
7477       _numObjectsFreed++;
7478       _numWordsFreed += res;
7479     )
7480   } else {
7481     // Chunk that is alive.
7482     res = do_live_chunk(fc);
7483     debug_only(_sp->verifyFreeLists());
7484     NOT_PRODUCT(
7485         _numObjectsLive++;
7486         _numWordsLive += res;
7487     )
7488   }
7489   return res;
7490 }
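
// Illustrative sketch (not collector code): the three-way classification that
// do_blk_careful() above applies to each block the sweep visits. The enum and
// helper are hypothetical.
enum ExampleBlockKind { EXAMPLE_ALREADY_FREE, EXAMPLE_GARBAGE, EXAMPLE_LIVE };

static ExampleBlockKind example_classify_block(bool on_a_free_list, bool marked_live) {
  if (on_a_free_list) return EXAMPLE_ALREADY_FREE;  // already free: may coalesce
  if (!marked_live)   return EXAMPLE_GARBAGE;       // unmarked: fresh garbage to reclaim
  return EXAMPLE_LIVE;                              // marked: compute size and step over
}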
7491 
7492 // For the smart allocation, record the following:
7493 //  split deaths - a free chunk is removed from its free list because
7494 //      it is being split into two or more chunks.
7495 //  split birth - a free chunk is being added to its free list because
7496 //      a larger free chunk has been split and resulted in this free chunk.
7497 //  coal death - a free chunk is being removed from its free list because
7498 //      it is being coalesced into a large free chunk.
7499 //  coal birth - a free chunk is being added to its free list because
7500 //      it was created when two or more free chunks were coalesced into
7501 //      this free chunk.
7502 //
7503 // These statistics are used to determine the desired number of free
7504 // chunks of a given size.  The desired number is chosen to be relative
7505 // to the end of a CMS sweep.  The desired number at the end of a sweep
7506 // is the
7507 //      count-at-end-of-previous-sweep (an amount that was enough)
7508 //              - count-at-beginning-of-current-sweep  (the excess)
7509 //              + split-births  (gains in this size during interval)
7510 //              - split-deaths  (demands on this size during interval)
7511 // where the interval is from the end of one sweep to the end of the
7512 // next.
7513 //
7514 // When sweeping the sweeper maintains an accumulated chunk which is
7515 // the chunk that is made up of chunks that have been coalesced.  That
7516 // will be termed the left-hand chunk.  A new chunk of garbage that
7517 // is being considered for coalescing will be referred to as the
7518 // right-hand chunk.
7519 //
7520 // When making a decision on whether to coalesce a right-hand chunk with
7521 // the current left-hand chunk, the current count vs. the desired count
7522 // of the left-hand chunk is considered.  Also if the right-hand chunk
7523 // is near the large chunk at the end of the heap (see
7524 // ConcurrentMarkSweepGeneration::isNearLargestChunk()), then the
7525 // left-hand chunk is coalesced.
7526 //
7527 // When making a decision about whether to split a chunk, the desired count
7528 // vs. the current count of the candidate to be split is also considered.
7529 // If the candidate is underpopulated (currently fewer chunks than desired)
7530 // a chunk of an overpopulated (currently more chunks than desired) size may
7531 // be chosen.  The "hint" associated with a free list, if non-null, points
7532 // to a free list which may be overpopulated.
7533 //
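
// Illustrative sketch (not collector code): the desired-count arithmetic
// described above for one chunk size, evaluated over the interval between
// the end of the previous sweep and the end of the current one. The helper
// name is hypothetical; a negative intermediate result simply means no
// chunks of this size are desired.
static size_t example_desired_free_chunk_count(size_t count_at_end_of_prev_sweep,
                                               size_t count_at_start_of_this_sweep,
                                               size_t split_births,
                                               size_t split_deaths) {
  long desired = (long)count_at_end_of_prev_sweep   // what was enough last time
               - (long)count_at_start_of_this_sweep // minus the surplus we started with
               + (long)split_births                 // plus gains during the interval
               - (long)split_deaths;                // minus demands during the interval
  return desired > 0 ? (size_t)desired : 0;
}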
7534 
7535 void SweepClosure::do_already_free_chunk(FreeChunk* fc) {
7536   const size_t size = fc->size();
7537   // Chunks that cannot be coalesced are not in the
7538   // free lists.
7539   if (CMSTestInFreeList && !fc->cantCoalesce()) {
7540     assert(_sp->verify_chunk_in_free_list(fc),
7541       "free chunk should be in free lists");
7542   }
7543   // a chunk that is already free, should not have been
7544   // marked in the bit map
7545   HeapWord* const addr = (HeapWord*) fc;
7546   assert(!_bitMap->isMarked(addr), "free chunk should be unmarked");
7547   // Verify that the bit map has no bits marked between
7548   // addr and purported end of this block.
7549   _bitMap->verifyNoOneBitsInRange(addr + 1, addr + size);
7550 
7551   // Some chunks cannot be coalesced under any circumstances.
7552   // See the definition of cantCoalesce().
7553   if (!fc->cantCoalesce()) {
7554     // This chunk can potentially be coalesced.
7555     if (_sp->adaptive_freelists()) {
7556       // All the work is done in
7557       do_post_free_or_garbage_chunk(fc, size);
7558     } else {  // Not adaptive free lists
7559       // this is a free chunk that can potentially be coalesced by the sweeper;
7560       if (!inFreeRange()) {
7561         // if the next chunk is a free block that can't be coalesced
7562         // it doesn't make sense to remove this chunk from the free lists
7563         FreeChunk* nextChunk = (FreeChunk*)(addr + size);
7564         assert((HeapWord*)nextChunk <= _sp->end(), "Chunk size out of bounds?");
7565         if ((HeapWord*)nextChunk < _sp->end() &&     // There is another free chunk to the right ...
7566             nextChunk->is_free()               &&     // ... which is free...
7567             nextChunk->cantCoalesce()) {             // ... but can't be coalesced
7568           // nothing to do
7569         } else {
7570           // Potentially the start of a new free range:
7571           // Don't eagerly remove it from the free lists.
7572           // No need to remove it if it will just be put
7573           // back again.  (Also from a pragmatic point of view
7574           // if it is a free block in a region that is beyond
7575           // any allocated blocks, an assertion will fail)
7576           // Remember the start of a free run.
7577           initialize_free_range(addr, true);
7578           // end - can coalesce with next chunk
7579         }
7580       } else {
7581         // the midst of a free range, we are coalescing
7582         print_free_block_coalesced(fc);
7583         if (CMSTraceSweeper) {
7584           gclog_or_tty->print("  -- pick up free block " PTR_FORMAT " (" SIZE_FORMAT ")\n", p2i(fc), size);
7585         }
7586         // remove it from the free lists
7587         _sp->removeFreeChunkFromFreeLists(fc);
7588         set_lastFreeRangeCoalesced(true);
7589         // If the chunk is being coalesced and the current free range is
7590         // in the free lists, remove the current free range so that it
7591         // will be returned to the free lists in its entirety - all
7592         // the coalesced pieces included.
7593         if (freeRangeInFreeLists()) {
7594           FreeChunk* ffc = (FreeChunk*) freeFinger();
7595           assert(ffc->size() == pointer_delta(addr, freeFinger()),
7596             "Size of free range is inconsistent with chunk size.");
7597           if (CMSTestInFreeList) {
7598             assert(_sp->verify_chunk_in_free_list(ffc),
7599               "free range is not in free lists");
7600           }
7601           _sp->removeFreeChunkFromFreeLists(ffc);
7602           set_freeRangeInFreeLists(false);
7603         }
7604       }
7605     }
7606     // Note that if the chunk is not coalescable (the else arm
7607     // below), we unconditionally flush, without needing to do
7608     // a "lookahead," as we do below.
7609     if (inFreeRange()) lookahead_and_flush(fc, size);
7610   } else {
7611     // Code path common to both original and adaptive free lists.
7612 
7613     // Can't coalesce with the previous block; this should be treated
7614     // as the end of a free run, if any.
7615     if (inFreeRange()) {
7616       // we kicked some butt; time to pick up the garbage
7617       assert(freeFinger() < addr, "freeFinger points too high");
7618       flush_cur_free_chunk(freeFinger(), pointer_delta(addr, freeFinger()));
7619     }
7620     // else, nothing to do, just continue
7621   }
7622 }
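
// For reference, the per-sweep state manipulated above (and in the garbage
// and live cases below) is:
//   inFreeRange()            -- are we currently accumulating a free run?
//   freeFinger()             -- left end (start address) of that run
//   freeRangeInFreeLists()   -- is the leading piece still on a free list?
//   lastFreeRangeCoalesced() -- have at least two pieces been merged?
// A run grows left to right and is eventually returned to the free lists in
// one piece by flush_cur_free_chunk(freeFinger(), <length of the run>).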
7623 
7624 size_t SweepClosure::do_garbage_chunk(FreeChunk* fc) {
7625   // This is a chunk of garbage.  It is not in any free list.
7626   // Add it to a free list or let it possibly be coalesced into
7627   // a larger chunk.
7628   HeapWord* const addr = (HeapWord*) fc;
7629   const size_t size = CompactibleFreeListSpace::adjustObjectSize(oop(addr)->size());
7630 
7631   if (_sp->adaptive_freelists()) {
7632     // Verify that the bit map has no bits marked between
7633     // addr and purported end of just dead object.
7634     _bitMap->verifyNoOneBitsInRange(addr + 1, addr + size);
7635 
7636     do_post_free_or_garbage_chunk(fc, size);
7637   } else {
7638     if (!inFreeRange()) {
7639       // start of a new free range
7640       assert(size > 0, "A free range should have a size");
7641       initialize_free_range(addr, false);
7642     } else {
7643       // this will be swept up when we hit the end of the
7644       // free range
7645       if (CMSTraceSweeper) {
7646         gclog_or_tty->print("  -- pick up garbage " PTR_FORMAT " (" SIZE_FORMAT ")\n", p2i(fc), size);
7647       }
7648       // If the chunk is being coalesced and the current free range is
7649       // in the free lists, remove the current free range so that it
7650       // will be returned to the free lists in its entirety - all
7651       // the coalesced pieces included.
7652       if (freeRangeInFreeLists()) {
7653         FreeChunk* ffc = (FreeChunk*)freeFinger();
7654         assert(ffc->size() == pointer_delta(addr, freeFinger()),
7655           "Size of free range is inconsistent with chunk size.");
7656         if (CMSTestInFreeList) {
7657           assert(_sp->verify_chunk_in_free_list(ffc),
7658             "free range is not in free lists");
7659         }
7660         _sp->removeFreeChunkFromFreeLists(ffc);
7661         set_freeRangeInFreeLists(false);
7662       }
7663       set_lastFreeRangeCoalesced(true);
7664     }
7665     // this will be swept up when we hit the end of the free range
7666 
7667     // Verify that the bit map has no bits marked between
7668     // addr and purported end of just dead object.
7669     _bitMap->verifyNoOneBitsInRange(addr + 1, addr + size);
7670   }
7671   assert(_limit >= addr + size,
7672          "A freshly garbage chunk can't possibly straddle over _limit");
7673   if (inFreeRange()) lookahead_and_flush(fc, size);
7674   return size;
7675 }
7676 
7677 size_t SweepClosure::do_live_chunk(FreeChunk* fc) {
7678   HeapWord* addr = (HeapWord*) fc;
7679   // The sweeper has just found a live object. Return any accumulated
7680   // left hand chunk to the free lists.
7681   if (inFreeRange()) {
7682     assert(freeFinger() < addr, "freeFinger points too high");
7683     flush_cur_free_chunk(freeFinger(), pointer_delta(addr, freeFinger()));
7684   }
7685 
7686   // This object is live: we'd normally expect this to be
7687   // an oop, and would like to assert the following:
7688   // assert(oop(addr)->is_oop(), "live block should be an oop");
7689   // However, as we commented above, this may be an object whose
7690   // header hasn't yet been initialized.
7691   size_t size;
7692   assert(_bitMap->isMarked(addr), "Tautology for this control point");
7693   if (_bitMap->isMarked(addr + 1)) {
7694     // Determine the size from the bit map, rather than trying to
7695     // compute it from the object header.
7696     HeapWord* nextOneAddr = _bitMap->getNextMarkedWordAddress(addr + 2);
7697     size = pointer_delta(nextOneAddr + 1, addr);
7698     assert(size == CompactibleFreeListSpace::adjustObjectSize(size),
7699            "alignment problem");
7700 
7701 #ifdef ASSERT
7702     if (oop(addr)->klass_or_null() != NULL) {
7703       // Ignore mark word because we are running concurrent with mutators
7704       assert(oop(addr)->is_oop(true), "live block should be an oop");
7705       assert(size ==
7706              CompactibleFreeListSpace::adjustObjectSize(oop(addr)->size()),
7707              "P-mark and computed size do not agree");
7708     }
7709 #endif
7710 
7711   } else {
7712     // This should be an initialized object that's alive.
7713     assert(oop(addr)->klass_or_null() != NULL,
7714            "Should be an initialized object");
7715     // Ignore mark word because we are running concurrent with mutators
7716     assert(oop(addr)->is_oop(true), "live block should be an oop");
7717     // Verify that the bit map has no bits marked between
7718     // addr and purported end of this block.
7719     size = CompactibleFreeListSpace::adjustObjectSize(oop(addr)->size());
7720     assert(size >= 3, "Necessary for Printezis marks to work");
7721     assert(!_bitMap->isMarked(addr+1), "Tautology for this control point");
7722     DEBUG_ONLY(_bitMap->verifyNoOneBitsInRange(addr+2, addr+size);)
7723   }
7724   return size;
7725 }
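
// Sketch of the (Printezis) size encoding consumed in do_live_chunk() above,
// used for a live object whose header may not yet be parseable; only bit map
// operations already used in this file appear here:
//
//   bit map:  [addr] [addr+1]  ......  [addr+size-1]
//                1       1                   1
//
//   // The extra bit at addr+1 says "the size is recorded in the bit map";
//   // the bit at the object's last word marks where it ends, hence:
//   HeapWord* last = _bitMap->getNextMarkedWordAddress(addr + 2);
//   size_t    size = pointer_delta(last + 1, addr);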
7726 
7727 void SweepClosure::do_post_free_or_garbage_chunk(FreeChunk* fc,
7728                                                  size_t chunkSize) {
7729   // do_post_free_or_garbage_chunk() should only be called in the case
7730   // of the adaptive free list allocator.
7731   const bool fcInFreeLists = fc->is_free();
7732   assert(_sp->adaptive_freelists(), "Should only be used in this case.");
7733   assert((HeapWord*)fc <= _limit, "sweep invariant");
7734   if (CMSTestInFreeList && fcInFreeLists) {
7735     assert(_sp->verify_chunk_in_free_list(fc), "free chunk is not in free lists");
7736   }
7737 
7738   if (CMSTraceSweeper) {
7739     gclog_or_tty->print_cr("  -- pick up another chunk at " PTR_FORMAT " (" SIZE_FORMAT ")", p2i(fc), chunkSize);
7740   }
7741 
7742   HeapWord* const fc_addr = (HeapWord*) fc;
7743 
7744   bool coalesce;
7745   const size_t left  = pointer_delta(fc_addr, freeFinger());
7746   const size_t right = chunkSize;
7747   switch (FLSCoalescePolicy) {
7748     // The numeric value forms a coalescing-aggressiveness metric
7749     case 0:  { // never coalesce
7750       coalesce = false;
7751       break;
7752     }
7753     case 1: { // coalesce if left & right chunks on overpopulated lists
7754       coalesce = _sp->coalOverPopulated(left) &&
7755                  _sp->coalOverPopulated(right);
7756       break;
7757     }
7758     case 2: { // coalesce if left chunk on overpopulated list (default)
7759       coalesce = _sp->coalOverPopulated(left);
7760       break;
7761     }
7762     case 3: { // coalesce if left OR right chunk on overpopulated list
7763       coalesce = _sp->coalOverPopulated(left) ||
7764                  _sp->coalOverPopulated(right);
7765       break;
7766     }
7767     case 4: { // always coalesce
7768       coalesce = true;
7769       break;
7770     }
7771     default:
7772      ShouldNotReachHere();
7773   }
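
  // Quick reference for the FLSCoalescePolicy values handled above:
  //   0 - never coalesce
  //   1 - coalesce only if both the left and right lists are overpopulated
  //   2 - coalesce if the left list is overpopulated (the default)
  //   3 - coalesce if either the left or the right list is overpopulated
  //   4 - always coalesce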
7774 
7775   // Should the current free range be coalesced?
7776   // If the chunk is in a free range and either we decided to coalesce above
7777   // or the chunk is near the large block at the end of the heap
7778   // (isNearLargestChunk() returns true), then coalesce this chunk.
7779   const bool doCoalesce = inFreeRange()
7780                           && (coalesce || _g->isNearLargestChunk(fc_addr));
7781   if (doCoalesce) {
7782     // Coalesce the current free range on the left with the new
7783     // chunk on the right.  If either is on a free list,
7784     // it must be removed from the list and stashed in the closure.
7785     if (freeRangeInFreeLists()) {
7786       FreeChunk* const ffc = (FreeChunk*)freeFinger();
7787       assert(ffc->size() == pointer_delta(fc_addr, freeFinger()),
7788         "Size of free range is inconsistent with chunk size.");
7789       if (CMSTestInFreeList) {
7790         assert(_sp->verify_chunk_in_free_list(ffc),
7791           "Chunk is not in free lists");
7792       }
7793       _sp->coalDeath(ffc->size());
7794       _sp->removeFreeChunkFromFreeLists(ffc);
7795       set_freeRangeInFreeLists(false);
7796     }
7797     if (fcInFreeLists) {
7798       _sp->coalDeath(chunkSize);
7799       assert(fc->size() == chunkSize,
7800         "The chunk has the wrong size or is not in the free lists");
7801       _sp->removeFreeChunkFromFreeLists(fc);
7802     }
7803     set_lastFreeRangeCoalesced(true);
7804     print_free_block_coalesced(fc);
7805   } else {  // not in a free range and/or should not coalesce
7806     // Return the current free range and start a new one.
7807     if (inFreeRange()) {
7808       // In a free range but cannot coalesce with the right hand chunk.
7809       // Put the current free range into the free lists.
7810       flush_cur_free_chunk(freeFinger(),
7811                            pointer_delta(fc_addr, freeFinger()));
7812     }
7813     // Set up for new free range.  Pass along whether the right hand
7814     // chunk is in the free lists.
7815     initialize_free_range((HeapWord*)fc, fcInFreeLists);
7816   }
7817 }
7818 
7819 // Lookahead flush:
7820 // If we are tracking a free range, and this is the last chunk that
7821 // we'll look at because its end crosses past _limit, we'll preemptively
7822 // flush it along with any free range we may be holding on to. Note that
7823 // this can be the case only for an already free or freshly garbage
7824 // chunk. If this block is an object, it can never straddle
7825 // over _limit. The "straddling" occurs when _limit is set at
7826 // the previous end of the space when this cycle started, and
7827 // a subsequent heap expansion caused the previously co-terminal
7828 // free block to be coalesced with the newly expanded portion,
7829 // thus rendering _limit a non-block-boundary, making it dangerous
7830 // for the sweeper to step over and examine.
7831 void SweepClosure::lookahead_and_flush(FreeChunk* fc, size_t chunk_size) {
7832   assert(inFreeRange(), "Should only be called if currently in a free range.");
7833   HeapWord* const eob = ((HeapWord*)fc) + chunk_size;
7834   assert(_sp->used_region().contains(eob - 1),
7835          err_msg("eob = " PTR_FORMAT " eob-1 = " PTR_FORMAT " _limit = " PTR_FORMAT
7836                  " out of bounds wrt _sp = [" PTR_FORMAT "," PTR_FORMAT ")"
7837                  " when examining fc = " PTR_FORMAT "(" SIZE_FORMAT ")",
7838                  p2i(eob), p2i(eob-1), p2i(_limit), p2i(_sp->bottom()), p2i(_sp->end()), p2i(fc), chunk_size));
7839   if (eob >= _limit) {
7840     assert(eob == _limit || fc->is_free(), "Only a free chunk should allow us to cross over the limit");
7841     if (CMSTraceSweeper) {
7842       gclog_or_tty->print_cr("_limit " PTR_FORMAT " reached or crossed by block "
7843                              "[" PTR_FORMAT "," PTR_FORMAT ") in space "
7844                              "[" PTR_FORMAT "," PTR_FORMAT ")",
7845                              p2i(_limit), p2i(fc), p2i(eob), p2i(_sp->bottom()), p2i(_sp->end()));
7846     }
7847     // Return the storage we are tracking back into the free lists.
7848     if (CMSTraceSweeper) {
7849       gclog_or_tty->print_cr("Flushing ... ");
7850     }
7851     assert(freeFinger() < eob, "Error");
7852     flush_cur_free_chunk(freeFinger(), pointer_delta(eob, freeFinger()));
7853   }
7854 }
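
// Picture of the straddle case described above lookahead_and_flush()
// (illustrative only):
//
//   sweep begins:   | ... |   free block F   |     <-- end of space
//                                            ^
//                                         _limit == old end of space
//   heap expands and F is coalesced with the new portion:
//                   | ... |   free block F + expansion   |
//                                            ^
//                    _limit now falls inside the block (eob > _limit), so it
//                    is no longer a block boundary and the tracked free
//                    range is flushed preemptively above.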
7855 
7856 void SweepClosure::flush_cur_free_chunk(HeapWord* chunk, size_t size) {
7857   assert(inFreeRange(), "Should only be called if currently in a free range.");
7858   assert(size > 0,
7859     "A zero sized chunk cannot be added to the free lists.");
7860   if (!freeRangeInFreeLists()) {
7861     if (CMSTestInFreeList) {
7862       FreeChunk* fc = (FreeChunk*) chunk;
7863       fc->set_size(size);
7864       assert(!_sp->verify_chunk_in_free_list(fc),
7865         "chunk should not be in free lists yet");
7866     }
7867     if (CMSTraceSweeper) {
7868       gclog_or_tty->print_cr(" -- add free block " PTR_FORMAT " (" SIZE_FORMAT ") to free lists",
7869                     p2i(chunk), size);
7870     }
7871     // A new free range is going to be starting.  The current
7872     // free range has not been added to the free lists yet or
7873     // was removed so add it back.
7874     // If the current free range was coalesced, then the death
7875     // of the free range was recorded.  Record a birth now.
7876     if (lastFreeRangeCoalesced()) {
7877       _sp->coalBirth(size);
7878     }
7879     _sp->addChunkAndRepairOffsetTable(chunk, size,
7880             lastFreeRangeCoalesced());
7881   } else if (CMSTraceSweeper) {
7882     gclog_or_tty->print_cr("Already in free list: nothing to flush");
7883   }
7884   set_inFreeRange(false);
7885   set_freeRangeInFreeLists(false);
7886 }
7887 
7888 // We take a break if we've been at this for a while,
7889 // so as to avoid monopolizing the locks involved.
7890 void SweepClosure::do_yield_work(HeapWord* addr) {
7891   // Return current free chunk being used for coalescing (if any)
7892   // to the appropriate freelist.  After yielding, the next
7893   // free block encountered will start a coalescing range of
7894   // free blocks.  If the next free block is adjacent to the
7895   // chunk just flushed, they will need to wait for the next
7896   // sweep to be coalesced.
7897   if (inFreeRange()) {
7898     flush_cur_free_chunk(freeFinger(), pointer_delta(addr, freeFinger()));
7899   }
7900 
7901   // First give up the locks, then yield, then re-lock.
7902   // We should probably use a constructor/destructor idiom to
7903   // do this unlock/lock or modify the MutexUnlocker class to
7904   // serve our purpose. XXX
7905   assert_lock_strong(_bitMap->lock());
7906   assert_lock_strong(_freelistLock);
7907   assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
7908          "CMS thread should hold CMS token");
7909   _bitMap->lock()->unlock();
7910   _freelistLock->unlock();
7911   ConcurrentMarkSweepThread::desynchronize(true);
7912   _collector->stopTimer();
7913   if (PrintCMSStatistics != 0) {
7914     _collector->incrementYields();
7915   }
7916 
7917   // See the comment in coordinator_yield()
7918   for (unsigned i = 0; i < CMSYieldSleepCount &&
7919                        ConcurrentMarkSweepThread::should_yield() &&
7920                        !CMSCollector::foregroundGCIsActive(); ++i) {
7921     os::sleep(Thread::current(), 1, false);
7922   }
7923 
7924   ConcurrentMarkSweepThread::synchronize(true);
7925   _freelistLock->lock();
7926   _bitMap->lock()->lock_without_safepoint_check();
7927   _collector->startTimer();
7928 }
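
// A minimal sketch of the constructor/destructor idiom suggested by the
// XXX comment in do_yield_work() above. ScopedSweepUnlocker is hypothetical
// (no such HotSpot class exists); it only reuses calls already made above:
//
//   class ScopedSweepUnlocker : public StackObj {
//     CMSBitMap* _bm;
//     Mutex*     _fl;
//    public:
//     ScopedSweepUnlocker(CMSBitMap* bm, Mutex* fl) : _bm(bm), _fl(fl) {
//       _bm->lock()->unlock();      // release in the same order as above
//       _fl->unlock();
//     }
//     ~ScopedSweepUnlocker() {
//       _fl->lock();                // reacquire on scope exit
//       _bm->lock()->lock_without_safepoint_check();
//     }
//   };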
7929 
7930 #ifndef PRODUCT
7931 // This is actually very useful in a product build if it can
7932 // be called from the debugger.  Compile it into the product
7933 // as needed.
7934 bool debug_verify_chunk_in_free_list(FreeChunk* fc) {
7935   return debug_cms_space->verify_chunk_in_free_list(fc);
7936 }
7937 #endif
7938 
7939 void SweepClosure::print_free_block_coalesced(FreeChunk* fc) const {
7940   if (CMSTraceSweeper) {
7941     gclog_or_tty->print_cr("Sweep:coal_free_blk " PTR_FORMAT " (" SIZE_FORMAT ")",
7942                            p2i(fc), fc->size());
7943   }
7944 }
7945 
7946 // CMSIsAliveClosure
7947 bool CMSIsAliveClosure::do_object_b(oop obj) {
7948   HeapWord* addr = (HeapWord*)obj;
7949   return addr != NULL &&
7950          (!_span.contains(addr) || _bit_map->isMarked(addr));
7951 }
7952 
7953 
7954 CMSKeepAliveClosure::CMSKeepAliveClosure(CMSCollector* collector,
7955                       MemRegion span,
7956                       CMSBitMap* bit_map, CMSMarkStack* mark_stack,
7957                       bool cpc):
7958   _collector(collector),
7959   _span(span),
7960   _bit_map(bit_map),
7961   _mark_stack(mark_stack),
7962   _concurrent_precleaning(cpc) {
7963   assert(!_span.is_empty(), "Empty span could spell trouble");
7964 }
7965 
7966 
7967 // CMSKeepAliveClosure: the serial version
7968 void CMSKeepAliveClosure::do_oop(oop obj) {
7969   HeapWord* addr = (HeapWord*)obj;
7970   if (_span.contains(addr) &&
7971       !_bit_map->isMarked(addr)) {
7972     _bit_map->mark(addr);
7973     bool simulate_overflow = false;
7974     NOT_PRODUCT(
7975       if (CMSMarkStackOverflowALot &&
7976           _collector->simulate_overflow()) {
7977         // simulate a stack overflow
7978         simulate_overflow = true;
7979       }
7980     )
7981     if (simulate_overflow || !_mark_stack->push(obj)) {
7982       if (_concurrent_precleaning) {
7983         // We dirty the overflown object and let the remark
7984         // phase deal with it.
7985         assert(_collector->overflow_list_is_empty(), "Error");
7986         // In the case of object arrays, we need to dirty all of
7987         // the cards that the object spans. No locking or atomics
7988         // are needed since no one else can be mutating the mod union
7989         // table.
7990         if (obj->is_objArray()) {
7991           size_t sz = obj->size();
7992           HeapWord* end_card_addr =
7993             (HeapWord*)round_to((intptr_t)(addr+sz), CardTableModRefBS::card_size);
7994           MemRegion redirty_range = MemRegion(addr, end_card_addr);
7995           assert(!redirty_range.is_empty(), "Arithmetical tautology");
7996           _collector->_modUnionTable.mark_range(redirty_range);
7997         } else {
7998           _collector->_modUnionTable.mark(addr);
7999         }
8000         _collector->_ser_kac_preclean_ovflw++;
8001       } else {
8002         _collector->push_on_overflow_list(obj);
8003         _collector->_ser_kac_ovflw++;
8004       }
8005     }
8006   }
8007 }
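
// Worked picture (illustrative) of the card arithmetic used above for an
// overflowing objArray during precleaning; card_size is typically 512 bytes:
//
//   // addr      = first word of the array
//   // addr + sz = first word past the array
//   HeapWord* end_card_addr =
//     (HeapWord*)round_to((intptr_t)(addr + sz), CardTableModRefBS::card_size);
//   // MemRegion(addr, end_card_addr) is card-aligned on the right, so
//   // mark_range() dirties every mod union table bit the array overlaps
//   // and the remark phase will rescan the whole array.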
8008 
8009 void CMSKeepAliveClosure::do_oop(oop* p)       { CMSKeepAliveClosure::do_oop_work(p); }
8010 void CMSKeepAliveClosure::do_oop(narrowOop* p) { CMSKeepAliveClosure::do_oop_work(p); }
8011 
8012 // CMSParKeepAliveClosure: a parallel version of the above.
8013 // The work queues are private to each closure (thread),
8014 // but (may be) available for stealing by other threads.
8015 void CMSParKeepAliveClosure::do_oop(oop obj) {
8016   HeapWord* addr = (HeapWord*)obj;
8017   if (_span.contains(addr) &&
8018       !_bit_map->isMarked(addr)) {
8019     // In general, during recursive tracing, several threads
8020     // may be concurrently getting here; the first one to
8021     // "tag" it, claims it.
8022     if (_bit_map->par_mark(addr)) {
8023       bool res = _work_queue->push(obj);
8024       assert(res, "Low water mark should be much less than capacity");
8025       // Do a recursive trim in the hope that this will keep
8026       // stack usage lower, but leave some oops for potential stealers
8027       trim_queue(_low_water_mark);
8028     } // Else, another thread got there first
8029   }
8030 }
8031 
8032 void CMSParKeepAliveClosure::do_oop(oop* p)       { CMSParKeepAliveClosure::do_oop_work(p); }
8033 void CMSParKeepAliveClosure::do_oop(narrowOop* p) { CMSParKeepAliveClosure::do_oop_work(p); }
8034 
8035 void CMSParKeepAliveClosure::trim_queue(uint max) {
8036   while (_work_queue->size() > max) {
8037     oop new_oop;
8038     if (_work_queue->pop_local(new_oop)) {
8039       assert(new_oop != NULL && new_oop->is_oop(), "Expected an oop");
8040       assert(_bit_map->isMarked((HeapWord*)new_oop),
8041              "no white objects on this stack!");
8042       assert(_span.contains((HeapWord*)new_oop), "Out of bounds oop");
8043       // iterate over the oops in this oop, marking and pushing
8044       // the ones in CMS heap (i.e. in _span).
8045       new_oop->oop_iterate(&_mark_and_push);
8046     }
8047   }
8048 }
8049 
8050 CMSInnerParMarkAndPushClosure::CMSInnerParMarkAndPushClosure(
8051                                 CMSCollector* collector,
8052                                 MemRegion span, CMSBitMap* bit_map,
8053                                 OopTaskQueue* work_queue):
8054   _collector(collector),
8055   _span(span),
8056   _bit_map(bit_map),
8057   _work_queue(work_queue) { }
8058 
8059 void CMSInnerParMarkAndPushClosure::do_oop(oop obj) {
8060   HeapWord* addr = (HeapWord*)obj;
8061   if (_span.contains(addr) &&
8062       !_bit_map->isMarked(addr)) {
8063     if (_bit_map->par_mark(addr)) {
8064       bool simulate_overflow = false;
8065       NOT_PRODUCT(
8066         if (CMSMarkStackOverflowALot &&
8067             _collector->par_simulate_overflow()) {
8068           // simulate a stack overflow
8069           simulate_overflow = true;
8070         }
8071       )
8072       if (simulate_overflow || !_work_queue->push(obj)) {
8073         _collector->par_push_on_overflow_list(obj);
8074         _collector->_par_kac_ovflw++;
8075       }
8076     } // Else another thread got there already
8077   }
8078 }
8079 
8080 void CMSInnerParMarkAndPushClosure::do_oop(oop* p)       { CMSInnerParMarkAndPushClosure::do_oop_work(p); }
8081 void CMSInnerParMarkAndPushClosure::do_oop(narrowOop* p) { CMSInnerParMarkAndPushClosure::do_oop_work(p); }
8082 
8083 //////////////////////////////////////////////////////////////////
8084 //  CMSExpansionCause                /////////////////////////////
8085 //////////////////////////////////////////////////////////////////
8086 const char* CMSExpansionCause::to_string(CMSExpansionCause::Cause cause) {
8087   switch (cause) {
8088     case _no_expansion:
8089       return "No expansion";
8090     case _satisfy_free_ratio:
8091       return "Free ratio";
8092     case _satisfy_promotion:
8093       return "Satisfy promotion";
8094     case _satisfy_allocation:
8095       return "allocation";
8096     case _allocate_par_lab:
8097       return "Par LAB";
8098     case _allocate_par_spooling_space:
8099       return "Par Spooling Space";
8100     case _adaptive_size_policy:
8101       return "Ergonomics";
8102     default:
8103       return "unknown";
8104   }
8105 }
8106 
8107 void CMSDrainMarkingStackClosure::do_void() {
8108   // the max number to take from overflow list at a time
8109   const size_t num = _mark_stack->capacity()/4;
8110   assert(!_concurrent_precleaning || _collector->overflow_list_is_empty(),
8111          "Overflow list should be NULL during concurrent phases");
8112   while (!_mark_stack->isEmpty() ||
8113          // if stack is empty, check the overflow list
8114          _collector->take_from_overflow_list(num, _mark_stack)) {
8115     oop obj = _mark_stack->pop();
8116     HeapWord* addr = (HeapWord*)obj;
8117     assert(_span.contains(addr), "Should be within span");
8118     assert(_bit_map->isMarked(addr), "Should be marked");
8119     assert(obj->is_oop(), "Should be an oop");
8120     obj->oop_iterate(_keep_alive);
8121   }
8122 }
8123 
8124 void CMSParDrainMarkingStackClosure::do_void() {
8125   // drain queue
8126   trim_queue(0);
8127 }
8128 
8129 // Trim our work_queue so its length is below max at return
8130 void CMSParDrainMarkingStackClosure::trim_queue(uint max) {
8131   while (_work_queue->size() > max) {
8132     oop new_oop;
8133     if (_work_queue->pop_local(new_oop)) {
8134       assert(new_oop->is_oop(), "Expected an oop");
8135       assert(_bit_map->isMarked((HeapWord*)new_oop),
8136              "no white objects on this stack!");
8137       assert(_span.contains((HeapWord*)new_oop), "Out of bounds oop");
8138       // iterate over the oops in this oop, marking and pushing
8139       // the ones in CMS heap (i.e. in _span).
8140       new_oop->oop_iterate(&_mark_and_push);
8141     }
8142   }
8143 }
8144 
8145 ////////////////////////////////////////////////////////////////////
8146 // Support for Marking Stack Overflow list handling and related code
8147 ////////////////////////////////////////////////////////////////////
8148 // Much of the following code is similar in shape and spirit to the
8149 // code used in ParNewGC. We should try to share that code
8150 // as much as possible in the future.
8151 
8152 #ifndef PRODUCT
8153 // Debugging support for CMSStackOverflowALot
8154 
8155 // It's OK to call this multi-threaded;  the worst thing
8156 // that can happen is that we'll get a bunch of closely
8157 // spaced simulated overflows, but that's OK, in fact
8158 // probably good as it would exercise the overflow code
8159 // under contention.
8160 bool CMSCollector::simulate_overflow() {
8161   if (_overflow_counter-- <= 0) { // just being defensive
8162     _overflow_counter = CMSMarkStackOverflowInterval;
8163     return true;
8164   } else {
8165     return false;
8166   }
8167 }
8168 
8169 bool CMSCollector::par_simulate_overflow() {
8170   return simulate_overflow();
8171 }
8172 #endif
8173 
8174 // Single-threaded
8175 bool CMSCollector::take_from_overflow_list(size_t num, CMSMarkStack* stack) {
8176   assert(stack->isEmpty(), "Expected precondition");
8177   assert(stack->capacity() > num, "Shouldn't bite off more than we can chew");
8178   size_t i = num;
8179   oop  cur = _overflow_list;
8180   const markOop proto = markOopDesc::prototype();
8181   NOT_PRODUCT(ssize_t n = 0;)
8182   for (oop next; i > 0 && cur != NULL; cur = next, i--) {
8183     next = oop(cur->mark());
8184     cur->set_mark(proto);   // until proven otherwise
8185     assert(cur->is_oop(), "Should be an oop");
8186     bool res = stack->push(cur);
8187     assert(res, "Bit off more than we can chew?");
8188     NOT_PRODUCT(n++;)
8189   }
8190   _overflow_list = cur;
8191 #ifndef PRODUCT
8192   assert(_num_par_pushes >= n, "Too many pops?");
8193   _num_par_pushes -= n;
8194 #endif
8195   return !stack->isEmpty();
8196 }
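
// Sketch of the mark-word threading used for the overflow list (this mirrors
// push_on_overflow_list() below and the pop loop above):
//
//   push:  p->set_mark((markOop)_overflow_list);   // old head becomes "next"
//          _overflow_list = p;
//   pop:   oop p = _overflow_list;
//          _overflow_list = oop(p->mark());        // follow the "next" link
//          p->set_mark(markOopDesc::prototype());  // restore a sane header
//
// Mark words that must be preserved (see preserve_mark_if_necessary()) are
// saved on side stacks before being overwritten with the list link.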
8197 
8198 #define BUSY  (cast_to_oop<intptr_t>(0x1aff1aff))
8199 // (MT-safe) Get a prefix of at most "num" from the list.
8200 // The overflow list is chained through the mark word of
8201 // each object in the list. We fetch the entire list,
8202 // break off a prefix of the right size and return the
8203 // remainder. If other threads try to take objects from
8204 // the overflow list at that time, they will wait for
8205 // some time to see if data becomes available. If (and
8206 // only if) another thread places one or more object(s)
8207 // on the global list before we have returned the suffix
8208 // to the global list, we will walk down our local list
8209 // to find its end and append the global list to
8210 // our suffix before returning it. This suffix walk can
8211 // prove to be expensive (quadratic in the amount of traffic)
8212 // when there are many objects in the overflow list and
8213 // there is much producer-consumer contention on the list.
8214 // *NOTE*: The overflow list manipulation code here and
8215 // in ParNewGeneration:: are very similar in shape,
8216 // except that in the ParNew case we use the old (from/eden)
8217 // copy of the object to thread the list via its klass word.
8218 // Because of the common code, if you make any changes in
8219 // the code below, please check the ParNew version to see if
8220 // similar changes might be needed.
8221 // CR 6797058 has been filed to consolidate the common code.
8222 bool CMSCollector::par_take_from_overflow_list(size_t num,
8223                                                OopTaskQueue* work_q,
8224                                                int no_of_gc_threads) {
8225   assert(work_q->size() == 0, "First empty local work queue");
8226   assert(num < work_q->max_elems(), "Can't bite more than we can chew");
8227   if (_overflow_list == NULL) {
8228     return false;
8229   }
8230   // Grab the entire list; we'll put back a suffix
8231   oop prefix = cast_to_oop(Atomic::xchg_ptr(BUSY, &_overflow_list));
8232   Thread* tid = Thread::current();
8233   // Before "no_of_gc_threads" was introduced CMSOverflowSpinCount was
8234   // set to ParallelGCThreads.
8235   size_t CMSOverflowSpinCount = (size_t) no_of_gc_threads; // was ParallelGCThreads;
8236   size_t sleep_time_millis = MAX2((size_t)1, num/100);
8237   // If the list is busy, we spin for a short while,
8238   // sleeping between attempts to get the list.
8239   for (size_t spin = 0; prefix == BUSY && spin < CMSOverflowSpinCount; spin++) {
8240     os::sleep(tid, sleep_time_millis, false);
8241     if (_overflow_list == NULL) {
8242       // Nothing left to take
8243       return false;
8244     } else if (_overflow_list != BUSY) {
8245       // Try and grab the prefix
8246       prefix = cast_to_oop(Atomic::xchg_ptr(BUSY, &_overflow_list));
8247     }
8248   }
8249   // If the list was found to be empty, or we spun long
8250   // enough, we give up and return empty-handed. If we leave
8251   // the list in the BUSY state below, it must be the case that
8252   // some other thread holds the overflow list and will set it
8253   // to a non-BUSY state in the future.
8254   if (prefix == NULL || prefix == BUSY) {
8255      // Nothing to take or waited long enough
8256      if (prefix == NULL) {
8257        // Write back the NULL in case we overwrote it with BUSY above
8258        // and it is still the same value.
8259        (void) Atomic::cmpxchg_ptr(NULL, &_overflow_list, BUSY);
8260      }
8261      return false;
8262   }
8263   assert(prefix != NULL && prefix != BUSY, "Error");
8264   size_t i = num;
8265   oop cur = prefix;
8266   // Walk down the first "num" objects, unless we reach the end.
8267   for (; i > 1 && cur->mark() != NULL; cur = oop(cur->mark()), i--);
8268   if (cur->mark() == NULL) {
8269     // We have "num" or fewer elements in the list, so there
8270     // is nothing to return to the global list.
8271     // Write back the NULL in lieu of the BUSY we wrote
8272     // above, if it is still the same value.
8273     if (_overflow_list == BUSY) {
8274       (void) Atomic::cmpxchg_ptr(NULL, &_overflow_list, BUSY);
8275     }
8276   } else {
8277     // Chop off the suffix and return it to the global list.
8278     assert(cur->mark() != BUSY, "Error");
8279     oop suffix_head = cur->mark(); // suffix will be put back on global list
8280     cur->set_mark(NULL);           // break off suffix
8281     // It's possible that the list is still in the empty(busy) state
8282     // we left it in a short while ago; in that case we may be
8283     // able to place back the suffix without incurring the cost
8284     // of a walk down the list.
8285     oop observed_overflow_list = _overflow_list;
8286     oop cur_overflow_list = observed_overflow_list;
8287     bool attached = false;
8288     while (observed_overflow_list == BUSY || observed_overflow_list == NULL) {
8289       observed_overflow_list =
8290         (oop) Atomic::cmpxchg_ptr(suffix_head, &_overflow_list, cur_overflow_list);
8291       if (cur_overflow_list == observed_overflow_list) {
8292         attached = true;
8293         break;
8294       } else cur_overflow_list = observed_overflow_list;
8295     }
8296     if (!attached) {
8297       // Too bad, someone else sneaked in (at least) an element; we'll need
8298       // to do a splice. Find tail of suffix so we can prepend suffix to global
8299       // list.
8300       for (cur = suffix_head; cur->mark() != NULL; cur = (oop)(cur->mark()));
8301       oop suffix_tail = cur;
8302       assert(suffix_tail != NULL && suffix_tail->mark() == NULL,
8303              "Tautology");
8304       observed_overflow_list = _overflow_list;
8305       do {
8306         cur_overflow_list = observed_overflow_list;
8307         if (cur_overflow_list != BUSY) {
8308           // Do the splice ...
8309           suffix_tail->set_mark(markOop(cur_overflow_list));
8310         } else { // cur_overflow_list == BUSY
8311           suffix_tail->set_mark(NULL);
8312         }
8313         // ... and try to place spliced list back on overflow_list ...
8314         observed_overflow_list =
8315           (oop) Atomic::cmpxchg_ptr(suffix_head, &_overflow_list, cur_overflow_list);
8316       } while (cur_overflow_list != observed_overflow_list);
8317       // ... until we have succeeded in doing so.
8318     }
8319   }
8320 
8321   // Push the prefix elements on work_q
8322   assert(prefix != NULL, "control point invariant");
8323   const markOop proto = markOopDesc::prototype();
8324   oop next;
8325   NOT_PRODUCT(ssize_t n = 0;)
8326   for (cur = prefix; cur != NULL; cur = next) {
8327     next = oop(cur->mark());
8328     cur->set_mark(proto);   // until proven otherwise
8329     assert(cur->is_oop(), "Should be an oop");
8330     bool res = work_q->push(cur);
8331     assert(res, "Bit off more than we can chew?");
8332     NOT_PRODUCT(n++;)
8333   }
8334 #ifndef PRODUCT
8335   assert(_num_par_pushes >= n, "Too many pops?");
8336   Atomic::add_ptr(-(intptr_t)n, &_num_par_pushes);
8337 #endif
8338   return true;
8339 }
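
// Condensed recap of the claim-and-splice protocol implemented above
// (illustrative only; BUSY is the sentinel #defined before this function):
//
//   1. prefix = xchg(&_overflow_list, BUSY);        // claim the whole list
//   2. walk "num" elements down the prefix; cur ends at the last one kept
//   3. suffix = cur->mark(); cur->set_mark(NULL);   // detach the remainder
//   4. try to cmpxchg the suffix back in place of BUSY (or NULL); if some
//      other thread pushed objects in the meantime, walk to the suffix tail
//      and splice (suffix + newly pushed objects) back with further cmpxchgs
//   5. push the prefix elements onto the local work queue, restoring each
//      object's prototype mark word on the way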
8340 
8341 // Single-threaded
8342 void CMSCollector::push_on_overflow_list(oop p) {
8343   NOT_PRODUCT(_num_par_pushes++;)
8344   assert(p->is_oop(), "Not an oop");
8345   preserve_mark_if_necessary(p);
8346   p->set_mark((markOop)_overflow_list);
8347   _overflow_list = p;
8348 }
8349 
8350 // Multi-threaded; use CAS to prepend to overflow list
8351 void CMSCollector::par_push_on_overflow_list(oop p) {
8352   NOT_PRODUCT(Atomic::inc_ptr(&_num_par_pushes);)
8353   assert(p->is_oop(), "Not an oop");
8354   par_preserve_mark_if_necessary(p);
8355   oop observed_overflow_list = _overflow_list;
8356   oop cur_overflow_list;
8357   do {
8358     cur_overflow_list = observed_overflow_list;
8359     if (cur_overflow_list != BUSY) {
8360       p->set_mark(markOop(cur_overflow_list));
8361     } else {
8362       p->set_mark(NULL);
8363     }
8364     observed_overflow_list =
8365       (oop) Atomic::cmpxchg_ptr(p, &_overflow_list, cur_overflow_list);
8366   } while (cur_overflow_list != observed_overflow_list);
8367 }
8368 #undef BUSY
8369 
8370 // Single threaded
8371 // General Note on GrowableArray: pushes may silently fail
8372 // because we are (temporarily) out of C-heap for expanding
8373 // the stack. The problem is quite ubiquitous and affects
8374 // a lot of code in the JVM. The prudent thing for GrowableArray
8375 // to do (for now) is to exit with an error. However, that may
8376 // be too draconian in some cases because the caller may be
8377 // able to recover without much harm. For such cases, we
8378 // should probably introduce a "soft_push" method which returns
8379 // an indication of success or failure with the assumption that
8380 // the caller may be able to recover from a failure; code in
8381 // the VM can then be changed, incrementally, to deal with such
8382 // failures where possible, thus, incrementally hardening the VM
8383 // in such low resource situations.
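
// A minimal sketch of the "soft_push" idea floated above. Everything here
// except GrowableArray itself is hypothetical (reserve_one_more() does not
// exist); it only illustrates the suggested shape of such an API:
//
//   template <typename E>
//   bool soft_push(GrowableArray<E>* a, const E& e) {
//     if (!a->reserve_one_more()) {  // hypothetical: expand without aborting
//       return false;                // caller decides how to recover
//     }
//     a->push(e);                    // cannot fail once space is reserved
//     return true;
//   }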
8384 void CMSCollector::preserve_mark_work(oop p, markOop m) {
8385   _preserved_oop_stack.push(p);
8386   _preserved_mark_stack.push(m);
8387   assert(m == p->mark(), "Mark word changed");
8388   assert(_preserved_oop_stack.size() == _preserved_mark_stack.size(),
8389          "bijection");
8390 }
8391 
8392 // Single threaded
8393 void CMSCollector::preserve_mark_if_necessary(oop p) {
8394   markOop m = p->mark();
8395   if (m->must_be_preserved(p)) {
8396     preserve_mark_work(p, m);
8397   }
8398 }
8399 
8400 void CMSCollector::par_preserve_mark_if_necessary(oop p) {
8401   markOop m = p->mark();
8402   if (m->must_be_preserved(p)) {
8403     MutexLockerEx x(ParGCRareEvent_lock, Mutex::_no_safepoint_check_flag);
8404     // Even though we read the mark word without holding
8405     // the lock, we are assured that it will not change
8406     // because we "own" this oop, so no other thread can
8407     // be trying to push it on the overflow list; see
8408     // the assertion in preserve_mark_work() that checks
8409     // that m == p->mark().
8410     preserve_mark_work(p, m);
8411   }
8412 }
8413 
8414 // We should be able to do this multi-threaded,
8415 // a chunk of stack being a task (this is
8416 // correct because each oop only ever appears
8417 // once in the overflow list). However, it's
8418 // not very easy to completely overlap this with
8419 // other operations, so will generally not be done
8420 // until all work's been completed. Because we
8421 // expect the preserved oop stack (set) to be small,
8422 // it's probably fine to do this single-threaded.
8423 // We can explore cleverer concurrent/overlapped/parallel
8424 // processing of preserved marks if we feel the
8425 // need for this in the future. Stack overflow should
8426 // be so rare in practice and, when it happens, its
8427 // effect on performance so great that this will
8428 // likely just be in the noise anyway.
8429 void CMSCollector::restore_preserved_marks_if_any() {
8430   assert(SafepointSynchronize::is_at_safepoint(),
8431          "world should be stopped");
8432   assert(Thread::current()->is_ConcurrentGC_thread() ||
8433          Thread::current()->is_VM_thread(),
8434          "should be single-threaded");
8435   assert(_preserved_oop_stack.size() == _preserved_mark_stack.size(),
8436          "bijection");
8437 
8438   while (!_preserved_oop_stack.is_empty()) {
8439     oop p = _preserved_oop_stack.pop();
8440     assert(p->is_oop(), "Should be an oop");
8441     assert(_span.contains(p), "oop should be in _span");
8442     assert(p->mark() == markOopDesc::prototype(),
8443            "Set when taken from overflow list");
8444     markOop m = _preserved_mark_stack.pop();
8445     p->set_mark(m);
8446   }
8447   assert(_preserved_mark_stack.is_empty() && _preserved_oop_stack.is_empty(),
8448          "stacks were cleared above");
8449 }
8450 
8451 #ifndef PRODUCT
8452 bool CMSCollector::no_preserved_marks() const {
8453   return _preserved_mark_stack.is_empty() && _preserved_oop_stack.is_empty();
8454 }
8455 #endif
8456 
8457 // Transfer some number of overflown objects to usual marking
8458 // stack. Return true if some objects were transferred.
8459 bool MarkRefsIntoAndScanClosure::take_from_overflow_list() {
8460   size_t num = MIN2((size_t)(_mark_stack->capacity() - _mark_stack->length())/4,
8461                     (size_t)ParGCDesiredObjsFromOverflowList);
8462 
8463   bool res = _collector->take_from_overflow_list(num, _mark_stack);
8464   assert(_collector->overflow_list_is_empty() || res,
8465          "If list is not empty, we should have taken something");
8466   assert(!res || !_mark_stack->isEmpty(),
8467          "If we took something, it should now be on our stack");
8468   return res;
8469 }
8470 
8471 size_t MarkDeadObjectsClosure::do_blk(HeapWord* addr) {
8472   size_t res = _sp->block_size_no_stall(addr, _collector);
8473   if (_sp->block_is_obj(addr)) {
8474     if (_live_bit_map->isMarked(addr)) {
8475       // It can't have been dead in a previous cycle
8476       guarantee(!_dead_bit_map->isMarked(addr), "No resurrection!");
8477     } else {
8478       _dead_bit_map->mark(addr);      // mark the dead object
8479     }
8480   }
8481   // Could be 0, if the block size could not be computed without stalling.
8482   return res;
8483 }
8484 
8485 TraceCMSMemoryManagerStats::TraceCMSMemoryManagerStats(CMSCollector::CollectorState phase, GCCause::Cause cause): TraceMemoryManagerStats() {
8486 
8487   switch (phase) {
8488     case CMSCollector::InitialMarking:
8489       initialize(true  /* fullGC */ ,
8490                  cause /* cause of the GC */,
8491                  true  /* recordGCBeginTime */,
8492                  true  /* recordPreGCUsage */,
8493                  false /* recordPeakUsage */,
8494                  false /* recordPostGCusage */,
8495                  true  /* recordAccumulatedGCTime */,
8496                  false /* recordGCEndTime */,
8497                  false /* countCollection */  );
8498       break;
8499 
8500     case CMSCollector::FinalMarking:
8501       initialize(true  /* fullGC */ ,
8502                  cause /* cause of the GC */,
8503                  false /* recordGCBeginTime */,
8504                  false /* recordPreGCUsage */,
8505                  false /* recordPeakUsage */,
8506                  false /* recordPostGCusage */,
8507                  true  /* recordAccumulatedGCTime */,
8508                  false /* recordGCEndTime */,
8509                  false /* countCollection */  );
8510       break;
8511 
8512     case CMSCollector::Sweeping:
8513       initialize(true  /* fullGC */ ,
8514                  cause /* cause of the GC */,
8515                  false /* recordGCBeginTime */,
8516                  false /* recordPreGCUsage */,
8517                  true  /* recordPeakUsage */,
8518                  true  /* recordPostGCusage */,
8519                  false /* recordAccumulatedGCTime */,
8520                  true  /* recordGCEndTime */,
8521                  true  /* countCollection */  );
8522       break;
8523 
8524     default:
8525       ShouldNotReachHere();
8526   }
8527 }