1 /*
   2  * Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 #include "classfile/classLoaderData.hpp"
  27 #include "classfile/symbolTable.hpp"
  28 #include "classfile/systemDictionary.hpp"
  29 #include "code/codeCache.hpp"
  30 #include "gc_implementation/concurrentMarkSweep/cmsAdaptiveSizePolicy.hpp"
  31 #include "gc_implementation/concurrentMarkSweep/cmsCollectorPolicy.hpp"
  32 #include "gc_implementation/concurrentMarkSweep/cmsGCAdaptivePolicyCounters.hpp"
  33 #include "gc_implementation/concurrentMarkSweep/cmsOopClosures.inline.hpp"
  34 #include "gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.hpp"
  35 #include "gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.inline.hpp"
  36 #include "gc_implementation/concurrentMarkSweep/concurrentMarkSweepThread.hpp"
  37 #include "gc_implementation/concurrentMarkSweep/vmCMSOperations.hpp"
  38 #include "gc_implementation/parNew/parNewGeneration.hpp"
  39 #include "gc_implementation/shared/collectorCounters.hpp"
  40 #include "gc_implementation/shared/gcTimer.hpp"
  41 #include "gc_implementation/shared/gcTrace.hpp"
  42 #include "gc_implementation/shared/gcTraceTime.hpp"
  43 #include "gc_implementation/shared/isGCActiveMark.hpp"
  44 #include "gc_interface/collectedHeap.inline.hpp"
  45 #include "memory/allocation.hpp"
  46 #include "memory/cardTableRS.hpp"
  47 #include "memory/collectorPolicy.hpp"
  48 #include "memory/gcLocker.inline.hpp"
  49 #include "memory/genCollectedHeap.hpp"
  50 #include "memory/genMarkSweep.hpp"
  51 #include "memory/genOopClosures.inline.hpp"
  52 #include "memory/iterator.hpp"
  53 #include "memory/referencePolicy.hpp"
  54 #include "memory/resourceArea.hpp"
  55 #include "memory/tenuredGeneration.hpp"
  56 #include "oops/oop.inline.hpp"
  57 #include "prims/jvmtiExport.hpp"
  58 #include "runtime/globals_extension.hpp"
  59 #include "runtime/handles.inline.hpp"
  60 #include "runtime/java.hpp"
  61 #include "runtime/vmThread.hpp"
  62 #include "services/memoryService.hpp"
  63 #include "services/runtimeService.hpp"
  64 
  65 // statics
  66 CMSCollector* ConcurrentMarkSweepGeneration::_collector = NULL;
  67 bool CMSCollector::_full_gc_requested = false;
  68 GCCause::Cause CMSCollector::_full_gc_cause = GCCause::_no_gc;
  69 
  70 //////////////////////////////////////////////////////////////////
  71 // In support of CMS/VM thread synchronization
  72 //////////////////////////////////////////////////////////////////
  73 // We split use of the CGC_lock into 2 "levels".
  74 // The low-level locking is of the usual CGC_lock monitor. We introduce
  75 // a higher level "token" (hereafter "CMS token") built on top of the
  76 // low level monitor (hereafter "CGC lock").
  77 // The token-passing protocol gives priority to the VM thread. The
  78 // CMS-lock doesn't provide any fairness guarantees, but clients
  79 // should ensure that it is only held for very short, bounded
  80 // durations.
  81 //
  82 // When either of the CMS thread or the VM thread is involved in
  83 // collection operations during which it does not want the other
  84 // thread to interfere, it obtains the CMS token.
  85 //
  86 // If either thread tries to get the token while the other has
  87 // it, that thread waits. However, if the VM thread and CMS thread
  88 // both want the token, then the VM thread gets priority while the
  89 // CMS thread waits. This ensures, for instance, that the "concurrent"
  90 // phases of the CMS thread's work do not block out the VM thread
  91 // for long periods of time as the CMS thread continues to hog
  92 // the token. (See bug 4616232).
  93 //
  94 // The baton-passing functions are, however, controlled by the
  95 // flags _foregroundGCShouldWait and _foregroundGCIsActive,
  96 // and here the low-level CMS lock, not the high level token,
  97 // ensures mutual exclusion.
  98 //
  99 // Two important conditions that we have to satisfy:
 100 // 1. if a thread does a low-level wait on the CMS lock, then it
  101 //    relinquishes the CMS token if it was holding that token
 102 //    when it acquired the low-level CMS lock.
 103 // 2. any low-level notifications on the low-level lock
 104 //    should only be sent when a thread has relinquished the token.
 105 //
 106 // In the absence of either property, we'd have potential deadlock.
 107 //
 108 // We protect each of the CMS (concurrent and sequential) phases
 109 // with the CMS _token_, not the CMS _lock_.
 110 //
 111 // The only code protected by CMS lock is the token acquisition code
 112 // itself, see ConcurrentMarkSweepThread::[de]synchronize(), and the
 113 // baton-passing code.
 114 //
  115 // Unfortunately, I couldn't come up with a good abstraction to factor and
  116 // hide the naked CGC_lock manipulation in the baton-passing code
  117 // further below. That's something we should try to do. Also, the proof
  118 // of correctness of this 2-level locking scheme is far from obvious,
  119 // and potentially quite slippery. We have an uneasy suspicion, for instance,
  120 // that there may be a theoretical possibility of delay/starvation in the
  121 // low-level lock/wait/notify scheme used for the baton-passing because of
  122 // potential interference with the priority scheme embodied in the
 123 // CMS-token-passing protocol. See related comments at a CGC_lock->wait()
 124 // invocation further below and marked with "XXX 20011219YSR".
 125 // Indeed, as we note elsewhere, this may become yet more slippery
 126 // in the presence of multiple CMS and/or multiple VM threads. XXX
 127 
 128 class CMSTokenSync: public StackObj {
 129  private:
 130   bool _is_cms_thread;
 131  public:
 132   CMSTokenSync(bool is_cms_thread):
 133     _is_cms_thread(is_cms_thread) {
 134     assert(is_cms_thread == Thread::current()->is_ConcurrentGC_thread(),
 135            "Incorrect argument to constructor");
 136     ConcurrentMarkSweepThread::synchronize(_is_cms_thread);
 137   }
 138 
 139   ~CMSTokenSync() {
 140     assert(_is_cms_thread ?
 141              ConcurrentMarkSweepThread::cms_thread_has_cms_token() :
 142              ConcurrentMarkSweepThread::vm_thread_has_cms_token(),
 143           "Incorrect state");
 144     ConcurrentMarkSweepThread::desynchronize(_is_cms_thread);
 145   }
 146 };
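// A sketch of typical usage (the real call sites appear further below in
// this file; the braces here are only illustrative):
//
//   {
//     CMSTokenSync ts(true /* is_cms_thread */);
//     ... work that must hold the CMS token ...
//   }  // token relinquished by the destructor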
 147 
 148 // Convenience class that does a CMSTokenSync, and then acquires
  149 // up to three locks.
 150 class CMSTokenSyncWithLocks: public CMSTokenSync {
 151  private:
 152   // Note: locks are acquired in textual declaration order
 153   // and released in the opposite order
 154   MutexLockerEx _locker1, _locker2, _locker3;
 155  public:
 156   CMSTokenSyncWithLocks(bool is_cms_thread, Mutex* mutex1,
 157                         Mutex* mutex2 = NULL, Mutex* mutex3 = NULL):
 158     CMSTokenSync(is_cms_thread),
 159     _locker1(mutex1, Mutex::_no_safepoint_check_flag),
 160     _locker2(mutex2, Mutex::_no_safepoint_check_flag),
 161     _locker3(mutex3, Mutex::_no_safepoint_check_flag)
 162   { }
 163 };
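// A sketch of typical usage (the particular lock is only an example):
//
//   CMSTokenSyncWithLocks ts(true /* is_cms_thread */, bitMapLock());
//
// acquires the CMS token and then the given lock(s) without safepoint
// checks; everything is released, in reverse order, when ts goes out of scope.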
 164 
 165 
 166 // Wrapper class to temporarily disable icms during a foreground cms collection.
 167 class ICMSDisabler: public StackObj {
 168  public:
 169   // The ctor disables icms and wakes up the thread so it notices the change;
 170   // the dtor re-enables icms.  Note that the CMSCollector methods will check
 171   // CMSIncrementalMode.
 172   ICMSDisabler()  { CMSCollector::disable_icms(); CMSCollector::start_icms(); }
 173   ~ICMSDisabler() { CMSCollector::enable_icms(); }
 174 };
 175 
 176 //////////////////////////////////////////////////////////////////
 177 //  Concurrent Mark-Sweep Generation /////////////////////////////
 178 //////////////////////////////////////////////////////////////////
 179 
 180 NOT_PRODUCT(CompactibleFreeListSpace* debug_cms_space;)
 181 
 182 // This struct contains per-thread things necessary to support parallel
 183 // young-gen collection.
 184 class CMSParGCThreadState: public CHeapObj<mtGC> {
 185  public:
 186   CFLS_LAB lab;
 187   PromotionInfo promo;
 188 
 189   // Constructor.
 190   CMSParGCThreadState(CompactibleFreeListSpace* cfls) : lab(cfls) {
 191     promo.setSpace(cfls);
 192   }
 193 };
 194 
 195 ConcurrentMarkSweepGeneration::ConcurrentMarkSweepGeneration(
 196      ReservedSpace rs, size_t initial_byte_size, int level,
 197      CardTableRS* ct, bool use_adaptive_freelists,
 198      FreeBlockDictionary<FreeChunk>::DictionaryChoice dictionaryChoice) :
 199   CardGeneration(rs, initial_byte_size, level, ct),
 200   _dilatation_factor(((double)MinChunkSize)/((double)(CollectedHeap::min_fill_size()))),
 201   _debug_collection_type(Concurrent_collection_type),
 202   _did_compact(false)
 203 {
 204   HeapWord* bottom = (HeapWord*) _virtual_space.low();
 205   HeapWord* end    = (HeapWord*) _virtual_space.high();
 206 
 207   _direct_allocated_words = 0;
 208   NOT_PRODUCT(
 209     _numObjectsPromoted = 0;
 210     _numWordsPromoted = 0;
 211     _numObjectsAllocated = 0;
 212     _numWordsAllocated = 0;
 213   )
 214 
 215   _cmsSpace = new CompactibleFreeListSpace(_bts, MemRegion(bottom, end),
 216                                            use_adaptive_freelists,
 217                                            dictionaryChoice);
 218   NOT_PRODUCT(debug_cms_space = _cmsSpace;)
 219   if (_cmsSpace == NULL) {
 220     vm_exit_during_initialization(
 221       "CompactibleFreeListSpace allocation failure");
 222   }
 223   _cmsSpace->_gen = this;
 224 
 225   _gc_stats = new CMSGCStats();
 226 
 227   // Verify the assumption that FreeChunk::_prev and OopDesc::_klass
 228   // offsets match. The ability to tell free chunks from objects
 229   // depends on this property.
 230   debug_only(
 231     FreeChunk* junk = NULL;
 232     assert(UseCompressedKlassPointers ||
 233            junk->prev_addr() == (void*)(oop(junk)->klass_addr()),
 234            "Offset of FreeChunk::_prev within FreeChunk must match"
 235            "  that of OopDesc::_klass within OopDesc");
 236   )
 237   if (CollectedHeap::use_parallel_gc_threads()) {
 238     typedef CMSParGCThreadState* CMSParGCThreadStatePtr;
 239     _par_gc_thread_states =
 240       NEW_C_HEAP_ARRAY(CMSParGCThreadStatePtr, ParallelGCThreads, mtGC);
 241     if (_par_gc_thread_states == NULL) {
 242       vm_exit_during_initialization("Could not allocate par gc structs");
 243     }
 244     for (uint i = 0; i < ParallelGCThreads; i++) {
 245       _par_gc_thread_states[i] = new CMSParGCThreadState(cmsSpace());
 246       if (_par_gc_thread_states[i] == NULL) {
 247         vm_exit_during_initialization("Could not allocate par gc structs");
 248       }
 249     }
 250   } else {
 251     _par_gc_thread_states = NULL;
 252   }
 253   _incremental_collection_failed = false;
 254   // The "dilatation_factor" is the expansion that can occur on
 255   // account of the fact that the minimum object size in the CMS
 256   // generation may be larger than that in, say, a contiguous young
  257   // generation.
  258   // Ideally, in the calculation below, we'd compute the dilatation
  259   // factor as: MinChunkSize/(promoting_gen's min object size)
  260   // Since we do not have such a general query interface for the
  261   // promoting generation, we'll instead just use the minimum
 262   // object size (which today is a header's worth of space);
 263   // note that all arithmetic is in units of HeapWords.
 264   assert(MinChunkSize >= CollectedHeap::min_fill_size(), "just checking");
 265   assert(_dilatation_factor >= 1.0, "from previous assert");
 266 }
 267 
 268 
 269 // The field "_initiating_occupancy" represents the occupancy percentage
 270 // at which we trigger a new collection cycle.  Unless explicitly specified
 271 // via CMSInitiatingOccupancyFraction (argument "io" below), it
 272 // is calculated by:
 273 //
 274 //   Let "f" be MinHeapFreeRatio in
 275 //
  276 //    _initiating_occupancy = 100-f +
 277 //                           f * (CMSTriggerRatio/100)
 278 //   where CMSTriggerRatio is the argument "tr" below.
 279 //
 280 // That is, if we assume the heap is at its desired maximum occupancy at the
 281 // end of a collection, we let CMSTriggerRatio of the (purported) free
 282 // space be allocated before initiating a new collection cycle.
 283 //
 284 void ConcurrentMarkSweepGeneration::init_initiating_occupancy(intx io, uintx tr) {
 285   assert(io <= 100 && tr <= 100, "Check the arguments");
 286   if (io >= 0) {
 287     _initiating_occupancy = (double)io / 100.0;
 288   } else {
 289     _initiating_occupancy = ((100 - MinHeapFreeRatio) +
 290                              (double)(tr * MinHeapFreeRatio) / 100.0)
 291                             / 100.0;
 292   }
 293 }
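// Worked example for the formula above (values are illustrative; they happen
// to match common defaults): with CMSInitiatingOccupancyFraction left
// negative (not explicitly set), MinHeapFreeRatio = 40 and
// CMSTriggerRatio = 80, the else-branch computes
//   _initiating_occupancy = ((100 - 40) + 80 * 40 / 100) / 100
//                         = (60 + 32) / 100
//                         = 0.92
// i.e. a new cycle is initiated once the generation is roughly 92% occupied.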
 294 
 295 void ConcurrentMarkSweepGeneration::ref_processor_init() {
 296   assert(collector() != NULL, "no collector");
 297   collector()->ref_processor_init();
 298 }
 299 
 300 void CMSCollector::ref_processor_init() {
 301   if (_ref_processor == NULL) {
 302     // Allocate and initialize a reference processor
 303     _ref_processor =
 304       new ReferenceProcessor(_span,                               // span
 305                              (ParallelGCThreads > 1) && ParallelRefProcEnabled, // mt processing
 306                              (int) ParallelGCThreads,             // mt processing degree
 307                              _cmsGen->refs_discovery_is_mt(),     // mt discovery
 308                              (int) MAX2(ConcGCThreads, ParallelGCThreads), // mt discovery degree
 309                              _cmsGen->refs_discovery_is_atomic(), // discovery is not atomic
 310                              &_is_alive_closure,                  // closure for liveness info
 311                              false);                              // next field updates do not need write barrier
 312     // Initialize the _ref_processor field of CMSGen
 313     _cmsGen->set_ref_processor(_ref_processor);
 314 
 315   }
 316 }
 317 
 318 CMSAdaptiveSizePolicy* CMSCollector::size_policy() {
 319   GenCollectedHeap* gch = GenCollectedHeap::heap();
 320   assert(gch->kind() == CollectedHeap::GenCollectedHeap,
 321     "Wrong type of heap");
 322   CMSAdaptiveSizePolicy* sp = (CMSAdaptiveSizePolicy*)
 323     gch->gen_policy()->size_policy();
 324   assert(sp->is_gc_cms_adaptive_size_policy(),
 325     "Wrong type of size policy");
 326   return sp;
 327 }
 328 
 329 CMSGCAdaptivePolicyCounters* CMSCollector::gc_adaptive_policy_counters() {
 330   CMSGCAdaptivePolicyCounters* results =
 331     (CMSGCAdaptivePolicyCounters*) collector_policy()->counters();
 332   assert(
 333     results->kind() == GCPolicyCounters::CMSGCAdaptivePolicyCountersKind,
 334     "Wrong gc policy counter kind");
 335   return results;
 336 }
 337 
 338 
 339 void ConcurrentMarkSweepGeneration::initialize_performance_counters() {
 340 
 341   const char* gen_name = "old";
 342 
 343   // Generation Counters - generation 1, 1 subspace
 344   _gen_counters = new GenerationCounters(gen_name, 1, 1, &_virtual_space);
 345 
 346   _space_counters = new GSpaceCounters(gen_name, 0,
 347                                        _virtual_space.reserved_size(),
 348                                        this, _gen_counters);
 349 }
 350 
 351 CMSStats::CMSStats(ConcurrentMarkSweepGeneration* cms_gen, unsigned int alpha):
 352   _cms_gen(cms_gen)
 353 {
 354   assert(alpha <= 100, "bad value");
 355   _saved_alpha = alpha;
 356 
 357   // Initialize the alphas to the bootstrap value of 100.
 358   _gc0_alpha = _cms_alpha = 100;
 359 
 360   _cms_begin_time.update();
 361   _cms_end_time.update();
 362 
 363   _gc0_duration = 0.0;
 364   _gc0_period = 0.0;
 365   _gc0_promoted = 0;
 366 
 367   _cms_duration = 0.0;
 368   _cms_period = 0.0;
 369   _cms_allocated = 0;
 370 
 371   _cms_used_at_gc0_begin = 0;
 372   _cms_used_at_gc0_end = 0;
 373   _allow_duty_cycle_reduction = false;
 374   _valid_bits = 0;
 375   _icms_duty_cycle = CMSIncrementalDutyCycle;
 376 }
 377 
 378 double CMSStats::cms_free_adjustment_factor(size_t free) const {
 379   // TBD: CR 6909490
 380   return 1.0;
 381 }
 382 
 383 void CMSStats::adjust_cms_free_adjustment_factor(bool fail, size_t free) {
 384 }
 385 
  386 // If promotion failure handling is on, use
  387 // the padded average size of promotions from each
  388 // young generation collection.
 389 double CMSStats::time_until_cms_gen_full() const {
 390   size_t cms_free = _cms_gen->cmsSpace()->free();
 391   GenCollectedHeap* gch = GenCollectedHeap::heap();
 392   size_t expected_promotion = MIN2(gch->get_gen(0)->capacity(),
 393                                    (size_t) _cms_gen->gc_stats()->avg_promoted()->padded_average());
 394   if (cms_free > expected_promotion) {
 395     // Start a cms collection if there isn't enough space to promote
 396     // for the next minor collection.  Use the padded average as
 397     // a safety factor.
 398     cms_free -= expected_promotion;
 399 
 400     // Adjust by the safety factor.
 401     double cms_free_dbl = (double)cms_free;
 402     double cms_adjustment = (100.0 - CMSIncrementalSafetyFactor)/100.0;
 403     // Apply a further correction factor which tries to adjust
  404     // for recent occurrences of concurrent mode failures.
 405     cms_adjustment = cms_adjustment * cms_free_adjustment_factor(cms_free);
 406     cms_free_dbl = cms_free_dbl * cms_adjustment;
 407 
 408     if (PrintGCDetails && Verbose) {
 409       gclog_or_tty->print_cr("CMSStats::time_until_cms_gen_full: cms_free "
 410         SIZE_FORMAT " expected_promotion " SIZE_FORMAT,
 411         cms_free, expected_promotion);
 412       gclog_or_tty->print_cr("  cms_free_dbl %f cms_consumption_rate %f",
 413         cms_free_dbl, cms_consumption_rate() + 1.0);
 414     }
 415     // Add 1 in case the consumption rate goes to zero.
 416     return cms_free_dbl / (cms_consumption_rate() + 1.0);
 417   }
 418   return 0.0;
 419 }
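// Illustrative example (working in MB for readability; the code uses bytes):
// with cms_free = 200, expected_promotion = 20 and the default
// CMSIncrementalSafetyFactor of 10, the padded free space is
// (200 - 20) * 0.9 = 162; at a measured consumption rate of 5 MB/s
// (+1 to avoid division by zero) this predicts about 162 / 6 = 27 seconds
// until the cms generation is full.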
 420 
 421 // Compare the duration of the cms collection to the
 422 // time remaining before the cms generation is empty.
 423 // Note that the time from the start of the cms collection
 424 // to the start of the cms sweep (less than the total
  425 // duration of the cms collection) could be used instead.  That
  426 // approach was tried, and some applications experienced
  427 // promotion failures early in execution, possibly
  428 // because the averages were not accurate
  429 // enough at the beginning.
 430 double CMSStats::time_until_cms_start() const {
 431   // We add "gc0_period" to the "work" calculation
 432   // below because this query is done (mostly) at the
 433   // end of a scavenge, so we need to conservatively
 434   // account for that much possible delay
 435   // in the query so as to avoid concurrent mode failures
 436   // due to starting the collection just a wee bit too
 437   // late.
 438   double work = cms_duration() + gc0_period();
 439   double deadline = time_until_cms_gen_full();
 440   // If a concurrent mode failure occurred recently, we want to be
 441   // more conservative and halve our expected time_until_cms_gen_full()
 442   if (work > deadline) {
 443     if (Verbose && PrintGCDetails) {
 444       gclog_or_tty->print(
 445         " CMSCollector: collect because of anticipated promotion "
 446         "before full %3.7f + %3.7f > %3.7f ", cms_duration(),
 447         gc0_period(), time_until_cms_gen_full());
 448     }
 449     return 0.0;
 450   }
 451   return work - deadline;
 452 }
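// Illustrative example: if a cms cycle has historically taken 4 s, the
// scavenge period is 1 s and time_until_cms_gen_full() is 12 s, then
// work = 5 s does not exceed the 12 s deadline, so the start of the next
// cycle can still be deferred; once work exceeds the deadline the method
// returns 0.0, which callers treat as "start a cycle now".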
 453 
 454 // Return a duty cycle based on old_duty_cycle and new_duty_cycle, limiting the
 455 // amount of change to prevent wild oscillation.
 456 unsigned int CMSStats::icms_damped_duty_cycle(unsigned int old_duty_cycle,
 457                                               unsigned int new_duty_cycle) {
 458   assert(old_duty_cycle <= 100, "bad input value");
 459   assert(new_duty_cycle <= 100, "bad input value");
 460 
 461   // Note:  use subtraction with caution since it may underflow (values are
 462   // unsigned).  Addition is safe since we're in the range 0-100.
 463   unsigned int damped_duty_cycle = new_duty_cycle;
 464   if (new_duty_cycle < old_duty_cycle) {
 465     const unsigned int largest_delta = MAX2(old_duty_cycle / 4, 5U);
 466     if (new_duty_cycle + largest_delta < old_duty_cycle) {
 467       damped_duty_cycle = old_duty_cycle - largest_delta;
 468     }
 469   } else if (new_duty_cycle > old_duty_cycle) {
 470     const unsigned int largest_delta = MAX2(old_duty_cycle / 4, 15U);
 471     if (new_duty_cycle > old_duty_cycle + largest_delta) {
 472       damped_duty_cycle = MIN2(old_duty_cycle + largest_delta, 100U);
 473     }
 474   }
 475   assert(damped_duty_cycle <= 100, "invalid duty cycle computed");
 476 
 477   if (CMSTraceIncrementalPacing) {
 478     gclog_or_tty->print(" [icms_damped_duty_cycle(%d,%d) = %d] ",
 479                            old_duty_cycle, new_duty_cycle, damped_duty_cycle);
 480   }
 481   return damped_duty_cycle;
 482 }
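// Illustrative example: moving down from an old duty cycle of 40 to a
// requested 10, largest_delta = MAX2(40 / 4, 5U) = 10 and, since
// 10 + 10 < 40, the result is damped to 40 - 10 = 30.  Moving up from 40
// to 90, largest_delta = MAX2(40 / 4, 15U) = 15, so the result is limited
// to MIN2(40 + 15, 100U) = 55.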
 483 
 484 unsigned int CMSStats::icms_update_duty_cycle_impl() {
 485   assert(CMSIncrementalPacing && valid(),
 486          "should be handled in icms_update_duty_cycle()");
 487 
 488   double cms_time_so_far = cms_timer().seconds();
 489   double scaled_duration = cms_duration_per_mb() * _cms_used_at_gc0_end / M;
 490   double scaled_duration_remaining = fabsd(scaled_duration - cms_time_so_far);
 491 
 492   // Avoid division by 0.
 493   double time_until_full = MAX2(time_until_cms_gen_full(), 0.01);
 494   double duty_cycle_dbl = 100.0 * scaled_duration_remaining / time_until_full;
 495 
 496   unsigned int new_duty_cycle = MIN2((unsigned int)duty_cycle_dbl, 100U);
 497   if (new_duty_cycle > _icms_duty_cycle) {
 498     // Avoid very small duty cycles (1 or 2); 0 is allowed.
 499     if (new_duty_cycle > 2) {
 500       _icms_duty_cycle = icms_damped_duty_cycle(_icms_duty_cycle,
 501                                                 new_duty_cycle);
 502     }
 503   } else if (_allow_duty_cycle_reduction) {
 504     // The duty cycle is reduced only once per cms cycle (see record_cms_end()).
 505     new_duty_cycle = icms_damped_duty_cycle(_icms_duty_cycle, new_duty_cycle);
 506     // Respect the minimum duty cycle.
 507     unsigned int min_duty_cycle = (unsigned int)CMSIncrementalDutyCycleMin;
 508     _icms_duty_cycle = MAX2(new_duty_cycle, min_duty_cycle);
 509   }
 510 
 511   if (PrintGCDetails || CMSTraceIncrementalPacing) {
 512     gclog_or_tty->print(" icms_dc=%d ", _icms_duty_cycle);
 513   }
 514 
 515   _allow_duty_cycle_reduction = false;
 516   return _icms_duty_cycle;
 517 }
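// Illustrative example: if the scaled estimate of remaining cms work is
// 2 seconds and time_until_full is 8 seconds, duty_cycle_dbl =
// 100.0 * 2 / 8 = 25, so icms requests roughly a 25% duty cycle (subject
// to the damping and minimum-duty-cycle handling above).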
 518 
 519 #ifndef PRODUCT
 520 void CMSStats::print_on(outputStream *st) const {
 521   st->print(" gc0_alpha=%d,cms_alpha=%d", _gc0_alpha, _cms_alpha);
 522   st->print(",gc0_dur=%g,gc0_per=%g,gc0_promo=" SIZE_FORMAT,
 523                gc0_duration(), gc0_period(), gc0_promoted());
 524   st->print(",cms_dur=%g,cms_dur_per_mb=%g,cms_per=%g,cms_alloc=" SIZE_FORMAT,
 525             cms_duration(), cms_duration_per_mb(),
 526             cms_period(), cms_allocated());
 527   st->print(",cms_since_beg=%g,cms_since_end=%g",
 528             cms_time_since_begin(), cms_time_since_end());
 529   st->print(",cms_used_beg=" SIZE_FORMAT ",cms_used_end=" SIZE_FORMAT,
 530             _cms_used_at_gc0_begin, _cms_used_at_gc0_end);
 531   if (CMSIncrementalMode) {
 532     st->print(",dc=%d", icms_duty_cycle());
 533   }
 534 
 535   if (valid()) {
 536     st->print(",promo_rate=%g,cms_alloc_rate=%g",
 537               promotion_rate(), cms_allocation_rate());
 538     st->print(",cms_consumption_rate=%g,time_until_full=%g",
 539               cms_consumption_rate(), time_until_cms_gen_full());
 540   }
 541   st->print(" ");
 542 }
 543 #endif // #ifndef PRODUCT
 544 
 545 CMSCollector::CollectorState CMSCollector::_collectorState =
 546                              CMSCollector::Idling;
 547 bool CMSCollector::_foregroundGCIsActive = false;
 548 bool CMSCollector::_foregroundGCShouldWait = false;
 549 
 550 CMSCollector::CMSCollector(ConcurrentMarkSweepGeneration* cmsGen,
 551                            CardTableRS*                   ct,
 552                            ConcurrentMarkSweepPolicy*     cp):
 553   _cmsGen(cmsGen),
 554   _ct(ct),
 555   _ref_processor(NULL),    // will be set later
 556   _conc_workers(NULL),     // may be set later
 557   _abort_preclean(false),
 558   _start_sampling(false),
 559   _between_prologue_and_epilogue(false),
 560   _markBitMap(0, Mutex::leaf + 1, "CMS_markBitMap_lock"),
 561   _modUnionTable((CardTableModRefBS::card_shift - LogHeapWordSize),
 562                  -1 /* lock-free */, "No_lock" /* dummy */),
 563   _modUnionClosure(&_modUnionTable),
 564   _modUnionClosurePar(&_modUnionTable),
 565   // Adjust my span to cover old (cms) gen
 566   _span(cmsGen->reserved()),
 567   // Construct the is_alive_closure with _span & markBitMap
 568   _is_alive_closure(_span, &_markBitMap),
 569   _restart_addr(NULL),
 570   _overflow_list(NULL),
 571   _stats(cmsGen),
 572   _eden_chunk_array(NULL),     // may be set in ctor body
 573   _eden_chunk_capacity(0),     // -- ditto --
 574   _eden_chunk_index(0),        // -- ditto --
 575   _survivor_plab_array(NULL),  // -- ditto --
 576   _survivor_chunk_array(NULL), // -- ditto --
 577   _survivor_chunk_capacity(0), // -- ditto --
 578   _survivor_chunk_index(0),    // -- ditto --
 579   _ser_pmc_preclean_ovflw(0),
 580   _ser_kac_preclean_ovflw(0),
 581   _ser_pmc_remark_ovflw(0),
 582   _par_pmc_remark_ovflw(0),
 583   _ser_kac_ovflw(0),
 584   _par_kac_ovflw(0),
 585 #ifndef PRODUCT
 586   _num_par_pushes(0),
 587 #endif
 588   _collection_count_start(0),
 589   _verifying(false),
 590   _icms_start_limit(NULL),
 591   _icms_stop_limit(NULL),
 592   _verification_mark_bm(0, Mutex::leaf + 1, "CMS_verification_mark_bm_lock"),
 593   _completed_initialization(false),
 594   _collector_policy(cp),
 595   _should_unload_classes(false),
 596   _concurrent_cycles_since_last_unload(0),
 597   _roots_scanning_options(0),
 598   _inter_sweep_estimate(CMS_SweepWeight, CMS_SweepPadding),
 599   _intra_sweep_estimate(CMS_SweepWeight, CMS_SweepPadding),
 600   _gc_tracer_cm(new (ResourceObj::C_HEAP, mtGC) CMSTracer()),
 601   _gc_timer_cm(new (ResourceObj::C_HEAP, mtGC) ConcurrentGCTimer()),
 602   _cms_start_registered(false)
 603 {
 604   if (ExplicitGCInvokesConcurrentAndUnloadsClasses) {
 605     ExplicitGCInvokesConcurrent = true;
 606   }
 607   // Now expand the span and allocate the collection support structures
 608   // (MUT, marking bit map etc.) to cover both generations subject to
 609   // collection.
 610 
 611   // For use by dirty card to oop closures.
 612   _cmsGen->cmsSpace()->set_collector(this);
 613 
 614   // Allocate MUT and marking bit map
 615   {
 616     MutexLockerEx x(_markBitMap.lock(), Mutex::_no_safepoint_check_flag);
 617     if (!_markBitMap.allocate(_span)) {
 618       warning("Failed to allocate CMS Bit Map");
 619       return;
 620     }
 621     assert(_markBitMap.covers(_span), "_markBitMap inconsistency?");
 622   }
 623   {
 624     _modUnionTable.allocate(_span);
 625     assert(_modUnionTable.covers(_span), "_modUnionTable inconsistency?");
 626   }
 627 
 628   if (!_markStack.allocate(MarkStackSize)) {
 629     warning("Failed to allocate CMS Marking Stack");
 630     return;
 631   }
 632 
 633   // Support for multi-threaded concurrent phases
 634   if (CMSConcurrentMTEnabled) {
 635     if (FLAG_IS_DEFAULT(ConcGCThreads)) {
 636       // just for now
 637       FLAG_SET_DEFAULT(ConcGCThreads, (ParallelGCThreads + 3)/4);
 638     }
 639     if (ConcGCThreads > 1) {
 640       _conc_workers = new YieldingFlexibleWorkGang("Parallel CMS Threads",
 641                                  ConcGCThreads, true);
 642       if (_conc_workers == NULL) {
 643         warning("GC/CMS: _conc_workers allocation failure: "
 644               "forcing -CMSConcurrentMTEnabled");
 645         CMSConcurrentMTEnabled = false;
 646       } else {
 647         _conc_workers->initialize_workers();
 648       }
 649     } else {
 650       CMSConcurrentMTEnabled = false;
 651     }
 652   }
 653   if (!CMSConcurrentMTEnabled) {
 654     ConcGCThreads = 0;
 655   } else {
 656     // Turn off CMSCleanOnEnter optimization temporarily for
 657     // the MT case where it's not fixed yet; see 6178663.
 658     CMSCleanOnEnter = false;
 659   }
 660   assert((_conc_workers != NULL) == (ConcGCThreads > 1),
 661          "Inconsistency");
 662 
 663   // Parallel task queues; these are shared for the
 664   // concurrent and stop-world phases of CMS, but
 665   // are not shared with parallel scavenge (ParNew).
 666   {
 667     uint i;
 668     uint num_queues = (uint) MAX2(ParallelGCThreads, ConcGCThreads);
 669 
 670     if ((CMSParallelRemarkEnabled || CMSConcurrentMTEnabled
 671          || ParallelRefProcEnabled)
 672         && num_queues > 0) {
 673       _task_queues = new OopTaskQueueSet(num_queues);
 674       if (_task_queues == NULL) {
 675         warning("task_queues allocation failure.");
 676         return;
 677       }
 678       _hash_seed = NEW_C_HEAP_ARRAY(int, num_queues, mtGC);
 679       if (_hash_seed == NULL) {
 680         warning("_hash_seed array allocation failure");
 681         return;
 682       }
 683 
 684       typedef Padded<OopTaskQueue> PaddedOopTaskQueue;
 685       for (i = 0; i < num_queues; i++) {
 686         PaddedOopTaskQueue *q = new PaddedOopTaskQueue();
 687         if (q == NULL) {
 688           warning("work_queue allocation failure.");
 689           return;
 690         }
 691         _task_queues->register_queue(i, q);
 692       }
 693       for (i = 0; i < num_queues; i++) {
 694         _task_queues->queue(i)->initialize();
 695         _hash_seed[i] = 17;  // copied from ParNew
 696       }
 697     }
 698   }
 699 
  700   _cmsGen->init_initiating_occupancy(CMSInitiatingOccupancyFraction, CMSTriggerRatio);
 701 
  702   // CMSBootstrapOccupancy is a percentage (0-100); convert it to a fraction.
 703   _bootstrap_occupancy = ((double)CMSBootstrapOccupancy)/(double)100;
 704 
 705   _full_gcs_since_conc_gc = 0;
 706 
 707   // Now tell CMS generations the identity of their collector
 708   ConcurrentMarkSweepGeneration::set_collector(this);
 709 
 710   // Create & start a CMS thread for this CMS collector
 711   _cmsThread = ConcurrentMarkSweepThread::start(this);
 712   assert(cmsThread() != NULL, "CMS Thread should have been created");
 713   assert(cmsThread()->collector() == this,
 714          "CMS Thread should refer to this gen");
 715   assert(CGC_lock != NULL, "Where's the CGC_lock?");
 716 
 717   // Support for parallelizing young gen rescan
 718   GenCollectedHeap* gch = GenCollectedHeap::heap();
 719   _young_gen = gch->prev_gen(_cmsGen);
 720   if (gch->supports_inline_contig_alloc()) {
 721     _top_addr = gch->top_addr();
 722     _end_addr = gch->end_addr();
 723     assert(_young_gen != NULL, "no _young_gen");
 724     _eden_chunk_index = 0;
 725     _eden_chunk_capacity = (_young_gen->max_capacity()+CMSSamplingGrain)/CMSSamplingGrain;
 726     _eden_chunk_array = NEW_C_HEAP_ARRAY(HeapWord*, _eden_chunk_capacity, mtGC);
 727     if (_eden_chunk_array == NULL) {
 728       _eden_chunk_capacity = 0;
 729       warning("GC/CMS: _eden_chunk_array allocation failure");
 730     }
 731   }
 732   assert(_eden_chunk_array != NULL || _eden_chunk_capacity == 0, "Error");
 733 
 734   // Support for parallelizing survivor space rescan
 735   if ((CMSParallelRemarkEnabled && CMSParallelSurvivorRemarkEnabled) || CMSParallelInitialMarkEnabled) {
 736     const size_t max_plab_samples =
 737       ((DefNewGeneration*)_young_gen)->max_survivor_size()/MinTLABSize;
 738 
 739     _survivor_plab_array  = NEW_C_HEAP_ARRAY(ChunkArray, ParallelGCThreads, mtGC);
 740     _survivor_chunk_array = NEW_C_HEAP_ARRAY(HeapWord*, 2*max_plab_samples, mtGC);
 741     _cursor               = NEW_C_HEAP_ARRAY(size_t, ParallelGCThreads, mtGC);
 742     if (_survivor_plab_array == NULL || _survivor_chunk_array == NULL
 743         || _cursor == NULL) {
 744       warning("Failed to allocate survivor plab/chunk array");
 745       if (_survivor_plab_array  != NULL) {
 746         FREE_C_HEAP_ARRAY(ChunkArray, _survivor_plab_array, mtGC);
 747         _survivor_plab_array = NULL;
 748       }
 749       if (_survivor_chunk_array != NULL) {
 750         FREE_C_HEAP_ARRAY(HeapWord*, _survivor_chunk_array, mtGC);
 751         _survivor_chunk_array = NULL;
 752       }
 753       if (_cursor != NULL) {
 754         FREE_C_HEAP_ARRAY(size_t, _cursor, mtGC);
 755         _cursor = NULL;
 756       }
 757     } else {
 758       _survivor_chunk_capacity = 2*max_plab_samples;
 759       for (uint i = 0; i < ParallelGCThreads; i++) {
 760         HeapWord** vec = NEW_C_HEAP_ARRAY(HeapWord*, max_plab_samples, mtGC);
 761         if (vec == NULL) {
 762           warning("Failed to allocate survivor plab array");
 763           for (int j = i; j > 0; j--) {
 764             FREE_C_HEAP_ARRAY(HeapWord*, _survivor_plab_array[j-1].array(), mtGC);
 765           }
 766           FREE_C_HEAP_ARRAY(ChunkArray, _survivor_plab_array, mtGC);
 767           FREE_C_HEAP_ARRAY(HeapWord*, _survivor_chunk_array, mtGC);
 768           _survivor_plab_array = NULL;
 769           _survivor_chunk_array = NULL;
 770           _survivor_chunk_capacity = 0;
 771           break;
 772         } else {
 773           ChunkArray* cur =
 774             ::new (&_survivor_plab_array[i]) ChunkArray(vec,
 775                                                         max_plab_samples);
 776           assert(cur->end() == 0, "Should be 0");
 777           assert(cur->array() == vec, "Should be vec");
 778           assert(cur->capacity() == max_plab_samples, "Error");
 779         }
 780       }
 781     }
 782   }
 783   assert(   (   _survivor_plab_array  != NULL
 784              && _survivor_chunk_array != NULL)
 785          || (   _survivor_chunk_capacity == 0
 786              && _survivor_chunk_index == 0),
 787          "Error");
 788 
 789   // Choose what strong roots should be scanned depending on verification options
 790   if (!CMSClassUnloadingEnabled) {
 791     // If class unloading is disabled we want to include all classes into the root set.
 792     add_root_scanning_option(SharedHeap::SO_AllClasses);
 793   } else {
 794     add_root_scanning_option(SharedHeap::SO_SystemClasses);
 795   }
 796 
 797   NOT_PRODUCT(_overflow_counter = CMSMarkStackOverflowInterval;)
 798   _gc_counters = new CollectorCounters("CMS", 1);
 799   _completed_initialization = true;
 800   _inter_sweep_timer.start();  // start of time
 801 }
 802 
 803 const char* ConcurrentMarkSweepGeneration::name() const {
 804   return "concurrent mark-sweep generation";
 805 }
 806 void ConcurrentMarkSweepGeneration::update_counters() {
 807   if (UsePerfData) {
 808     _space_counters->update_all();
 809     _gen_counters->update_all();
 810   }
 811 }
 812 
  813 // This is an optimized version of update_counters(); it takes the
 814 // used value as a parameter rather than computing it.
 815 //
 816 void ConcurrentMarkSweepGeneration::update_counters(size_t used) {
 817   if (UsePerfData) {
 818     _space_counters->update_used(used);
 819     _space_counters->update_capacity();
 820     _gen_counters->update_all();
 821   }
 822 }
 823 
 824 void ConcurrentMarkSweepGeneration::print() const {
 825   Generation::print();
 826   cmsSpace()->print();
 827 }
 828 
 829 #ifndef PRODUCT
 830 void ConcurrentMarkSweepGeneration::print_statistics() {
 831   cmsSpace()->printFLCensus(0);
 832 }
 833 #endif
 834 
 835 void ConcurrentMarkSweepGeneration::printOccupancy(const char *s) {
 836   GenCollectedHeap* gch = GenCollectedHeap::heap();
 837   if (PrintGCDetails) {
 838     if (Verbose) {
 839       gclog_or_tty->print("[%d %s-%s: "SIZE_FORMAT"("SIZE_FORMAT")]",
 840         level(), short_name(), s, used(), capacity());
 841     } else {
 842       gclog_or_tty->print("[%d %s-%s: "SIZE_FORMAT"K("SIZE_FORMAT"K)]",
 843         level(), short_name(), s, used() / K, capacity() / K);
 844     }
 845   }
 846   if (Verbose) {
 847     gclog_or_tty->print(" "SIZE_FORMAT"("SIZE_FORMAT")",
 848               gch->used(), gch->capacity());
 849   } else {
 850     gclog_or_tty->print(" "SIZE_FORMAT"K("SIZE_FORMAT"K)",
 851               gch->used() / K, gch->capacity() / K);
 852   }
 853 }
 854 
 855 size_t
 856 ConcurrentMarkSweepGeneration::contiguous_available() const {
 857   // dld proposes an improvement in precision here. If the committed
 858   // part of the space ends in a free block we should add that to
 859   // uncommitted size in the calculation below. Will make this
 860   // change later, staying with the approximation below for the
 861   // time being. -- ysr.
 862   return MAX2(_virtual_space.uncommitted_size(), unsafe_max_alloc_nogc());
 863 }
 864 
 865 size_t
 866 ConcurrentMarkSweepGeneration::unsafe_max_alloc_nogc() const {
 867   return _cmsSpace->max_alloc_in_words() * HeapWordSize;
 868 }
 869 
 870 size_t ConcurrentMarkSweepGeneration::max_available() const {
 871   return free() + _virtual_space.uncommitted_size();
 872 }
 873 
 874 bool ConcurrentMarkSweepGeneration::promotion_attempt_is_safe(size_t max_promotion_in_bytes) const {
 875   size_t available = max_available();
 876   size_t av_promo  = (size_t)gc_stats()->avg_promoted()->padded_average();
 877   bool   res = (available >= av_promo) || (available >= max_promotion_in_bytes);
 878   if (Verbose && PrintGCDetails) {
 879     gclog_or_tty->print_cr(
 880       "CMS: promo attempt is%s safe: available("SIZE_FORMAT") %s av_promo("SIZE_FORMAT"),"
 881       "max_promo("SIZE_FORMAT")",
 882       res? "":" not", available, res? ">=":"<",
 883       av_promo, max_promotion_in_bytes);
 884   }
 885   return res;
 886 }
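// Illustrative example: with available = 50 MB, a padded average promotion
// of 8 MB and max_promotion_in_bytes = 60 MB, the attempt is reported safe
// because available >= av_promo holds, even though available is smaller
// than max_promotion_in_bytes.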
 887 
  888 // At a promotion failure, dump information on block layout in the heap
 889 // (cms old generation).
 890 void ConcurrentMarkSweepGeneration::promotion_failure_occurred() {
 891   if (CMSDumpAtPromotionFailure) {
 892     cmsSpace()->dump_at_safepoint_with_locks(collector(), gclog_or_tty);
 893   }
 894 }
 895 
 896 CompactibleSpace*
 897 ConcurrentMarkSweepGeneration::first_compaction_space() const {
 898   return _cmsSpace;
 899 }
 900 
 901 void ConcurrentMarkSweepGeneration::reset_after_compaction() {
  902   // Clear the promotion information.  These pointers can be adjusted
  903   // along with all the other pointers into the heap, but
  904   // compaction is expected to be a rare event with
  905   // a heap using cms, so don't do it without seeing the need.
 906   if (CollectedHeap::use_parallel_gc_threads()) {
 907     for (uint i = 0; i < ParallelGCThreads; i++) {
 908       _par_gc_thread_states[i]->promo.reset();
 909     }
 910   }
 911 }
 912 
 913 void ConcurrentMarkSweepGeneration::space_iterate(SpaceClosure* blk, bool usedOnly) {
 914   blk->do_space(_cmsSpace);
 915 }
 916 
 917 void ConcurrentMarkSweepGeneration::compute_new_size() {
 918   assert_locked_or_safepoint(Heap_lock);
 919 
 920   // If incremental collection failed, we just want to expand
 921   // to the limit.
 922   if (incremental_collection_failed()) {
 923     clear_incremental_collection_failed();
 924     grow_to_reserved();
 925     return;
 926   }
 927 
 928   // The heap has been compacted but not reset yet.
 929   // Any metric such as free() or used() will be incorrect.
 930 
 931   CardGeneration::compute_new_size();
 932 
 933   // Reset again after a possible resizing
 934   if (did_compact()) {
 935     cmsSpace()->reset_after_compaction();
 936   }
 937 }
 938 
 939 void ConcurrentMarkSweepGeneration::compute_new_size_free_list() {
 940   assert_locked_or_safepoint(Heap_lock);
 941 
 942   // If incremental collection failed, we just want to expand
 943   // to the limit.
 944   if (incremental_collection_failed()) {
 945     clear_incremental_collection_failed();
 946     grow_to_reserved();
 947     return;
 948   }
 949 
 950   double free_percentage = ((double) free()) / capacity();
 951   double desired_free_percentage = (double) MinHeapFreeRatio / 100;
 952   double maximum_free_percentage = (double) MaxHeapFreeRatio / 100;
 953 
 954   // compute expansion delta needed for reaching desired free percentage
 955   if (free_percentage < desired_free_percentage) {
 956     size_t desired_capacity = (size_t)(used() / ((double) 1 - desired_free_percentage));
 957     assert(desired_capacity >= capacity(), "invalid expansion size");
 958     size_t expand_bytes = MAX2(desired_capacity - capacity(), MinHeapDeltaBytes);
 959     if (PrintGCDetails && Verbose) {
 960       size_t desired_capacity = (size_t)(used() / ((double) 1 - desired_free_percentage));
 961       gclog_or_tty->print_cr("\nFrom compute_new_size: ");
 962       gclog_or_tty->print_cr("  Free fraction %f", free_percentage);
 963       gclog_or_tty->print_cr("  Desired free fraction %f",
 964         desired_free_percentage);
 965       gclog_or_tty->print_cr("  Maximum free fraction %f",
 966         maximum_free_percentage);
  967       gclog_or_tty->print_cr("  Capacity "SIZE_FORMAT, capacity()/1000);
 968       gclog_or_tty->print_cr("  Desired capacity "SIZE_FORMAT,
 969         desired_capacity/1000);
 970       int prev_level = level() - 1;
 971       if (prev_level >= 0) {
 972         size_t prev_size = 0;
 973         GenCollectedHeap* gch = GenCollectedHeap::heap();
 974         Generation* prev_gen = gch->_gens[prev_level];
  975         prev_size = prev_gen->capacity();
  976         gclog_or_tty->print_cr("  Younger gen size "SIZE_FORMAT,
  977                                prev_size/1000);
 978       }
 979       gclog_or_tty->print_cr("  unsafe_max_alloc_nogc "SIZE_FORMAT,
 980         unsafe_max_alloc_nogc()/1000);
 981       gclog_or_tty->print_cr("  contiguous available "SIZE_FORMAT,
 982         contiguous_available()/1000);
 983       gclog_or_tty->print_cr("  Expand by "SIZE_FORMAT" (bytes)",
 984         expand_bytes);
 985     }
 986     // safe if expansion fails
 987     expand(expand_bytes, 0, CMSExpansionCause::_satisfy_free_ratio);
 988     if (PrintGCDetails && Verbose) {
 989       gclog_or_tty->print_cr("  Expanded free fraction %f",
 990         ((double) free()) / capacity());
 991     }
 992   } else {
 993     size_t desired_capacity = (size_t)(used() / ((double) 1 - desired_free_percentage));
 994     assert(desired_capacity <= capacity(), "invalid expansion size");
 995     size_t shrink_bytes = capacity() - desired_capacity;
 996     // Don't shrink unless the delta is greater than the minimum shrink we want
 997     if (shrink_bytes >= MinHeapDeltaBytes) {
 998       shrink_free_list_by(shrink_bytes);
 999     }
1000   }
1001 }
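// Illustrative example for the expansion branch above: with used() = 600 MB,
// capacity() = 700 MB and MinHeapFreeRatio = 40, free_percentage is about
// 0.14 < 0.40, so desired_capacity = 600 / (1 - 0.40) = 1000 MB and the
// generation requests an expansion of
// MAX2(1000 MB - 700 MB, MinHeapDeltaBytes) = 300 MB.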
1002 
1003 Mutex* ConcurrentMarkSweepGeneration::freelistLock() const {
1004   return cmsSpace()->freelistLock();
1005 }
1006 
1007 HeapWord* ConcurrentMarkSweepGeneration::allocate(size_t size,
1008                                                   bool   tlab) {
1009   CMSSynchronousYieldRequest yr;
1010   MutexLockerEx x(freelistLock(),
1011                   Mutex::_no_safepoint_check_flag);
1012   return have_lock_and_allocate(size, tlab);
1013 }
1014 
1015 HeapWord* ConcurrentMarkSweepGeneration::have_lock_and_allocate(size_t size,
1016                                                   bool   tlab /* ignored */) {
1017   assert_lock_strong(freelistLock());
1018   size_t adjustedSize = CompactibleFreeListSpace::adjustObjectSize(size);
1019   HeapWord* res = cmsSpace()->allocate(adjustedSize);
1020   // Allocate the object live (grey) if the background collector has
1021   // started marking. This is necessary because the marker may
1022   // have passed this address and consequently this object will
1023   // not otherwise be greyed and would be incorrectly swept up.
1024   // Note that if this object contains references, the writing
1025   // of those references will dirty the card containing this object
1026   // allowing the object to be blackened (and its references scanned)
1027   // either during a preclean phase or at the final checkpoint.
1028   if (res != NULL) {
1029     // We may block here with an uninitialized object with
1030     // its mark-bit or P-bits not yet set. Such objects need
1031     // to be safely navigable by block_start().
1032     assert(oop(res)->klass_or_null() == NULL, "Object should be uninitialized here.");
1033     assert(!((FreeChunk*)res)->is_free(), "Error, block will look free but show wrong size");
1034     collector()->direct_allocated(res, adjustedSize);
1035     _direct_allocated_words += adjustedSize;
1036     // allocation counters
1037     NOT_PRODUCT(
1038       _numObjectsAllocated++;
1039       _numWordsAllocated += (int)adjustedSize;
1040     )
1041   }
1042   return res;
1043 }
1044 
1045 // In the case of direct allocation by mutators in a generation that
1046 // is being concurrently collected, the object must be allocated
1047 // live (grey) if the background collector has started marking.
1048 // This is necessary because the marker may
1049 // have passed this address and consequently this object will
1050 // not otherwise be greyed and would be incorrectly swept up.
1051 // Note that if this object contains references, the writing
1052 // of those references will dirty the card containing this object
1053 // allowing the object to be blackened (and its references scanned)
1054 // either during a preclean phase or at the final checkpoint.
1055 void CMSCollector::direct_allocated(HeapWord* start, size_t size) {
1056   assert(_markBitMap.covers(start, size), "Out of bounds");
1057   if (_collectorState >= Marking) {
1058     MutexLockerEx y(_markBitMap.lock(),
1059                     Mutex::_no_safepoint_check_flag);
1060     // [see comments preceding SweepClosure::do_blk() below for details]
1061     //
1062     // Can the P-bits be deleted now?  JJJ
1063     //
1064     // 1. need to mark the object as live so it isn't collected
1065     // 2. need to mark the 2nd bit to indicate the object may be uninitialized
1066     // 3. need to mark the end of the object so marking, precleaning or sweeping
1067     //    can skip over uninitialized or unparsable objects. An allocated
1068     //    object is considered uninitialized for our purposes as long as
1069     //    its klass word is NULL.  All old gen objects are parsable
 1070     //    as soon as they are initialized.
1071     _markBitMap.mark(start);          // object is live
1072     _markBitMap.mark(start + 1);      // object is potentially uninitialized?
1073     _markBitMap.mark(start + size - 1);
1074                                       // mark end of object
1075   }
1076   // check that oop looks uninitialized
1077   assert(oop(start)->klass_or_null() == NULL, "_klass should be NULL");
1078 }
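// Illustrative example: for a directly allocated block of 8 heap words
// starting at address A, the code above sets mark bits at A (live),
// A + 1 (possibly uninitialized) and A + 7 (end of block), so marking,
// precleaning and sweeping can step over the block even before its klass
// word has been installed.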
1079 
1080 void CMSCollector::promoted(bool par, HeapWord* start,
1081                             bool is_obj_array, size_t obj_size) {
1082   assert(_markBitMap.covers(start), "Out of bounds");
1083   // See comment in direct_allocated() about when objects should
1084   // be allocated live.
1085   if (_collectorState >= Marking) {
1086     // we already hold the marking bit map lock, taken in
1087     // the prologue
1088     if (par) {
1089       _markBitMap.par_mark(start);
1090     } else {
1091       _markBitMap.mark(start);
1092     }
1093     // We don't need to mark the object as uninitialized (as
1094     // in direct_allocated above) because this is being done with the
1095     // world stopped and the object will be initialized by the
1096     // time the marking, precleaning or sweeping get to look at it.
1097     // But see the code for copying objects into the CMS generation,
1098     // where we need to ensure that concurrent readers of the
1099     // block offset table are able to safely navigate a block that
1100     // is in flux from being free to being allocated (and in
1101     // transition while being copied into) and subsequently
1102     // becoming a bona-fide object when the copy/promotion is complete.
1103     assert(SafepointSynchronize::is_at_safepoint(),
1104            "expect promotion only at safepoints");
1105 
1106     if (_collectorState < Sweeping) {
1107       // Mark the appropriate cards in the modUnionTable, so that
1108       // this object gets scanned before the sweep. If this is
1109       // not done, CMS generation references in the object might
1110       // not get marked.
1111       // For the case of arrays, which are otherwise precisely
1112       // marked, we need to dirty the entire array, not just its head.
1113       if (is_obj_array) {
1114         // The [par_]mark_range() method expects mr.end() below to
1115         // be aligned to the granularity of a bit's representation
1116         // in the heap. In the case of the MUT below, that's a
1117         // card size.
1118         MemRegion mr(start,
1119                      (HeapWord*)round_to((intptr_t)(start + obj_size),
1120                         CardTableModRefBS::card_size /* bytes */));
1121         if (par) {
1122           _modUnionTable.par_mark_range(mr);
1123         } else {
1124           _modUnionTable.mark_range(mr);
1125         }
1126       } else {  // not an obj array; we can just mark the head
1127         if (par) {
1128           _modUnionTable.par_mark(start);
1129         } else {
1130           _modUnionTable.mark(start);
1131         }
1132       }
1133     }
1134   }
1135 }
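// Illustrative example: when promoting an object array of 300 heap words
// starting at address A, with 512-byte cards (64 words on a 64-bit VM), the
// MemRegion above runs from A to (A + 300 words) rounded up to the next
// card boundary, so every card the array overlaps is dirtied rather than
// just the card holding its header.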
1136 
1137 static inline size_t percent_of_space(Space* space, HeapWord* addr)
1138 {
1139   size_t delta = pointer_delta(addr, space->bottom());
1140   return (size_t)(delta * 100.0 / (space->capacity() / HeapWordSize));
1141 }
1142 
1143 void CMSCollector::icms_update_allocation_limits()
1144 {
1145   Generation* gen0 = GenCollectedHeap::heap()->get_gen(0);
1146   EdenSpace* eden = gen0->as_DefNewGeneration()->eden();
1147 
1148   const unsigned int duty_cycle = stats().icms_update_duty_cycle();
1149   if (CMSTraceIncrementalPacing) {
1150     stats().print();
1151   }
1152 
1153   assert(duty_cycle <= 100, "invalid duty cycle");
1154   if (duty_cycle != 0) {
1155     // The duty_cycle is a percentage between 0 and 100; convert to words and
1156     // then compute the offset from the endpoints of the space.
1157     size_t free_words = eden->free() / HeapWordSize;
1158     double free_words_dbl = (double)free_words;
1159     size_t duty_cycle_words = (size_t)(free_words_dbl * duty_cycle / 100.0);
1160     size_t offset_words = (free_words - duty_cycle_words) / 2;
1161 
1162     _icms_start_limit = eden->top() + offset_words;
1163     _icms_stop_limit = eden->end() - offset_words;
1164 
1165     // The limits may be adjusted (shifted to the right) by
1166     // CMSIncrementalOffset, to allow the application more mutator time after a
1167     // young gen gc (when all mutators were stopped) and before CMS starts and
1168     // takes away one or more cpus.
1169     if (CMSIncrementalOffset != 0) {
1170       double adjustment_dbl = free_words_dbl * CMSIncrementalOffset / 100.0;
1171       size_t adjustment = (size_t)adjustment_dbl;
1172       HeapWord* tmp_stop = _icms_stop_limit + adjustment;
1173       if (tmp_stop > _icms_stop_limit && tmp_stop < eden->end()) {
1174         _icms_start_limit += adjustment;
1175         _icms_stop_limit = tmp_stop;
1176       }
1177     }
1178   }
1179   if (duty_cycle == 0 || (_icms_start_limit == _icms_stop_limit)) {
1180     _icms_start_limit = _icms_stop_limit = eden->end();
1181   }
1182 
1183   // Install the new start limit.
1184   eden->set_soft_end(_icms_start_limit);
1185 
1186   if (CMSTraceIncrementalMode) {
1187     gclog_or_tty->print(" icms alloc limits:  "
1188                            PTR_FORMAT "," PTR_FORMAT
1189                            " (" SIZE_FORMAT "%%," SIZE_FORMAT "%%) ",
1190                            _icms_start_limit, _icms_stop_limit,
1191                            percent_of_space(eden, _icms_start_limit),
1192                            percent_of_space(eden, _icms_stop_limit));
1193     if (Verbose) {
1194       gclog_or_tty->print("eden:  ");
1195       eden->print_on(gclog_or_tty);
1196     }
1197   }
1198 }
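// Illustrative example: with 1000 free words in eden and a duty cycle of 30,
// duty_cycle_words = 300 and offset_words = (1000 - 300) / 2 = 350, so the
// icms window [_icms_start_limit, _icms_stop_limit) covers the middle 300
// words of the free part of eden (before any CMSIncrementalOffset shift).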
1199 
1200 // Any changes here should try to maintain the invariant
1201 // that if this method is called with _icms_start_limit
1202 // and _icms_stop_limit both NULL, then it should return NULL
1203 // and not notify the icms thread.
1204 HeapWord*
1205 CMSCollector::allocation_limit_reached(Space* space, HeapWord* top,
1206                                        size_t word_size)
1207 {
1208   // A start_limit equal to end() means the duty cycle is 0, so treat that as a
1209   // nop.
1210   if (CMSIncrementalMode && _icms_start_limit != space->end()) {
1211     if (top <= _icms_start_limit) {
1212       if (CMSTraceIncrementalMode) {
1213         space->print_on(gclog_or_tty);
1214         gclog_or_tty->stamp();
1215         gclog_or_tty->print_cr(" start limit top=" PTR_FORMAT
1216                                ", new limit=" PTR_FORMAT
1217                                " (" SIZE_FORMAT "%%)",
1218                                top, _icms_stop_limit,
1219                                percent_of_space(space, _icms_stop_limit));
1220       }
1221       ConcurrentMarkSweepThread::start_icms();
1222       assert(top < _icms_stop_limit, "Tautology");
1223       if (word_size < pointer_delta(_icms_stop_limit, top)) {
1224         return _icms_stop_limit;
1225       }
1226 
1227       // The allocation will cross both the _start and _stop limits, so do the
1228       // stop notification also and return end().
1229       if (CMSTraceIncrementalMode) {
1230         space->print_on(gclog_or_tty);
1231         gclog_or_tty->stamp();
1232         gclog_or_tty->print_cr(" +stop limit top=" PTR_FORMAT
1233                                ", new limit=" PTR_FORMAT
1234                                " (" SIZE_FORMAT "%%)",
1235                                top, space->end(),
1236                                percent_of_space(space, space->end()));
1237       }
1238       ConcurrentMarkSweepThread::stop_icms();
1239       return space->end();
1240     }
1241 
1242     if (top <= _icms_stop_limit) {
1243       if (CMSTraceIncrementalMode) {
1244         space->print_on(gclog_or_tty);
1245         gclog_or_tty->stamp();
1246         gclog_or_tty->print_cr(" stop limit top=" PTR_FORMAT
1247                                ", new limit=" PTR_FORMAT
1248                                " (" SIZE_FORMAT "%%)",
1249                                top, space->end(),
1250                                percent_of_space(space, space->end()));
1251       }
1252       ConcurrentMarkSweepThread::stop_icms();
1253       return space->end();
1254     }
1255 
1256     if (CMSTraceIncrementalMode) {
1257       space->print_on(gclog_or_tty);
1258       gclog_or_tty->stamp();
1259       gclog_or_tty->print_cr(" end limit top=" PTR_FORMAT
1260                              ", new limit=" PTR_FORMAT,
1261                              top, NULL);
1262     }
1263   }
1264 
1265   return NULL;
1266 }
1267 
1268 oop ConcurrentMarkSweepGeneration::promote(oop obj, size_t obj_size) {
1269   assert(obj_size == (size_t)obj->size(), "bad obj_size passed in");
1270   // allocate, copy and if necessary update promoinfo --
1271   // delegate to underlying space.
1272   assert_lock_strong(freelistLock());
1273 
1274 #ifndef PRODUCT
1275   if (Universe::heap()->promotion_should_fail()) {
1276     return NULL;
1277   }
1278 #endif  // #ifndef PRODUCT
1279 
1280   oop res = _cmsSpace->promote(obj, obj_size);
1281   if (res == NULL) {
1282     // expand and retry
1283     size_t s = _cmsSpace->expansionSpaceRequired(obj_size);  // HeapWords
1284     expand(s*HeapWordSize, MinHeapDeltaBytes,
1285       CMSExpansionCause::_satisfy_promotion);
1286     // Since there's currently no next generation, we don't try to promote
1287     // into a more senior generation.
1288     assert(next_gen() == NULL, "assumption, based upon which no attempt "
1289                                "is made to pass on a possibly failing "
1290                                "promotion to next generation");
1291     res = _cmsSpace->promote(obj, obj_size);
1292   }
1293   if (res != NULL) {
1294     // See comment in allocate() about when objects should
1295     // be allocated live.
1296     assert(obj->is_oop(), "Will dereference klass pointer below");
1297     collector()->promoted(false,           // Not parallel
1298                           (HeapWord*)res, obj->is_objArray(), obj_size);
1299     // promotion counters
1300     NOT_PRODUCT(
1301       _numObjectsPromoted++;
1302       _numWordsPromoted +=
1303         (int)(CompactibleFreeListSpace::adjustObjectSize(obj->size()));
1304     )
1305   }
1306   return res;
1307 }
1308 
1309 
1310 HeapWord*
1311 ConcurrentMarkSweepGeneration::allocation_limit_reached(Space* space,
1312                                              HeapWord* top,
1313                                              size_t word_sz)
1314 {
1315   return collector()->allocation_limit_reached(space, top, word_sz);
1316 }
1317 
1318 // IMPORTANT: Notes on object size recognition in CMS.
1319 // ---------------------------------------------------
// A block of storage in the CMS generation is always in
// one of three states: a free block (FREE); an allocated
// object (OBJECT), whose size() method reports the correct size;
// or an intermediate state (TRANSIENT), in which its size cannot
// be accurately determined.
1325 // STATE IDENTIFICATION:   (32 bit and 64 bit w/o COOPS)
1326 // -----------------------------------------------------
1327 // FREE:      klass_word & 1 == 1; mark_word holds block size
1328 //
1329 // OBJECT:    klass_word installed; klass_word != 0 && klass_word & 1 == 0;
1330 //            obj->size() computes correct size
1331 //
1332 // TRANSIENT: klass_word == 0; size is indeterminate until we become an OBJECT
1333 //
1334 // STATE IDENTIFICATION: (64 bit+COOPS)
1335 // ------------------------------------
1336 // FREE:      mark_word & CMS_FREE_BIT == 1; mark_word & ~CMS_FREE_BIT gives block_size
1337 //
1338 // OBJECT:    klass_word installed; klass_word != 0;
1339 //            obj->size() computes correct size
1340 //
1341 // TRANSIENT: klass_word == 0; size is indeterminate until we become an OBJECT
1342 //
1343 //
1344 // STATE TRANSITION DIAGRAM
1345 //
1346 //        mut / parnew                     mut  /  parnew
1347 // FREE --------------------> TRANSIENT ---------------------> OBJECT --|
1348 //  ^                                                                   |
1349 //  |------------------------ DEAD <------------------------------------|
1350 //         sweep                            mut
1351 //
1352 // While a block is in TRANSIENT state its size cannot be determined
1353 // so readers will either need to come back later or stall until
1354 // the size can be determined. Note that for the case of direct
1355 // allocation, P-bits, when available, may be used to determine the
1356 // size of an object that may not yet have been initialized.
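//
// Illustrative sketch only (not code used by the collector): under the
// 32-bit / 64-bit-without-COOPS rules above, a reader classifying a block
// might reason roughly as follows, retrying (or consulting P-bits, when
// available) in the TRANSIENT case:
//
//   if (klass_word & 1)        // FREE: mark_word holds the block size
//   else if (klass_word == 0)  // TRANSIENT: size indeterminate; retry/stall
//   else                       // OBJECT: obj->size() reports the correct size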
1357 
1358 // Things to support parallel young-gen collection.
1359 oop
1360 ConcurrentMarkSweepGeneration::par_promote(int thread_num,
1361                                            oop old, markOop m,
1362                                            size_t word_sz) {
1363 #ifndef PRODUCT
1364   if (Universe::heap()->promotion_should_fail()) {
1365     return NULL;
1366   }
1367 #endif  // #ifndef PRODUCT
1368 
1369   CMSParGCThreadState* ps = _par_gc_thread_states[thread_num];
1370   PromotionInfo* promoInfo = &ps->promo;
  // If we are tracking promotions, first ensure space for
  // promotion (including spooling space for saving the header if necessary),
  // then allocate and copy, and finally track the promoted info if needed.
1374   // When tracking (see PromotionInfo::track()), the mark word may
1375   // be displaced and in this case restoration of the mark word
1376   // occurs in the (oop_since_save_marks_)iterate phase.
1377   if (promoInfo->tracking() && !promoInfo->ensure_spooling_space()) {
1378     // Out of space for allocating spooling buffers;
1379     // try expanding and allocating spooling buffers.
1380     if (!expand_and_ensure_spooling_space(promoInfo)) {
1381       return NULL;
1382     }
1383   }
1384   assert(promoInfo->has_spooling_space(), "Control point invariant");
1385   const size_t alloc_sz = CompactibleFreeListSpace::adjustObjectSize(word_sz);
1386   HeapWord* obj_ptr = ps->lab.alloc(alloc_sz);
1387   if (obj_ptr == NULL) {
1388      obj_ptr = expand_and_par_lab_allocate(ps, alloc_sz);
1389      if (obj_ptr == NULL) {
1390        return NULL;
1391      }
1392   }
1393   oop obj = oop(obj_ptr);
1394   OrderAccess::storestore();
1395   assert(obj->klass_or_null() == NULL, "Object should be uninitialized here.");
1396   assert(!((FreeChunk*)obj_ptr)->is_free(), "Error, block will look free but show wrong size");
1397   // IMPORTANT: See note on object initialization for CMS above.
1398   // Otherwise, copy the object.  Here we must be careful to insert the
1399   // klass pointer last, since this marks the block as an allocated object.
1400   // Except with compressed oops it's the mark word.
1401   HeapWord* old_ptr = (HeapWord*)old;
1402   // Restore the mark word copied above.
1403   obj->set_mark(m);
1404   assert(obj->klass_or_null() == NULL, "Object should be uninitialized here.");
1405   assert(!((FreeChunk*)obj_ptr)->is_free(), "Error, block will look free but show wrong size");
1406   OrderAccess::storestore();
1407 
1408   if (UseCompressedKlassPointers) {
1409     // Copy gap missed by (aligned) header size calculation below
1410     obj->set_klass_gap(old->klass_gap());
1411   }
1412   if (word_sz > (size_t)oopDesc::header_size()) {
1413     Copy::aligned_disjoint_words(old_ptr + oopDesc::header_size(),
1414                                  obj_ptr + oopDesc::header_size(),
1415                                  word_sz - oopDesc::header_size());
1416   }
1417 
1418   // Now we can track the promoted object, if necessary.  We take care
1419   // to delay the transition from uninitialized to full object
  // (i.e., insertion of the klass pointer) until after tracking, so that
  // the object atomically becomes a promoted object.
1422   if (promoInfo->tracking()) {
1423     promoInfo->track((PromotedObject*)obj, old->klass());
1424   }
1425   assert(obj->klass_or_null() == NULL, "Object should be uninitialized here.");
1426   assert(!((FreeChunk*)obj_ptr)->is_free(), "Error, block will look free but show wrong size");
1427   assert(old->is_oop(), "Will use and dereference old klass ptr below");
1428 
1429   // Finally, install the klass pointer (this should be volatile).
1430   OrderAccess::storestore();
1431   obj->set_klass(old->klass());
1432   // We should now be able to calculate the right size for this object
1433   assert(obj->is_oop() && obj->size() == (int)word_sz, "Error, incorrect size computed for promoted object");
1434 
1435   collector()->promoted(true,          // parallel
1436                         obj_ptr, old->is_objArray(), word_sz);
1437 
1438   NOT_PRODUCT(
1439     Atomic::inc_ptr(&_numObjectsPromoted);
1440     Atomic::add_ptr(alloc_sz, &_numWordsPromoted);
1441   )
1442 
1443   return obj;
1444 }
1445 
1446 void
1447 ConcurrentMarkSweepGeneration::
1448 par_promote_alloc_undo(int thread_num,
1449                        HeapWord* obj, size_t word_sz) {
1450   // CMS does not support promotion undo.
1451   ShouldNotReachHere();
1452 }
1453 
1454 void
1455 ConcurrentMarkSweepGeneration::
1456 par_promote_alloc_done(int thread_num) {
1457   CMSParGCThreadState* ps = _par_gc_thread_states[thread_num];
1458   ps->lab.retire(thread_num);
1459 }
1460 
1461 void
1462 ConcurrentMarkSweepGeneration::
1463 par_oop_since_save_marks_iterate_done(int thread_num) {
1464   CMSParGCThreadState* ps = _par_gc_thread_states[thread_num];
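  // The closure passed below is NULL: the iteration is done purely for its
  // side effects, e.g. restoring any mark words displaced (spooled) during
  // promotion tracking; see the comment in par_promote() above.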
1465   ParScanWithoutBarrierClosure* dummy_cl = NULL;
1466   ps->promo.promoted_oops_iterate_nv(dummy_cl);
1467 }
1468 
1469 bool ConcurrentMarkSweepGeneration::should_collect(bool   full,
1470                                                    size_t size,
1471                                                    bool   tlab)
1472 {
1473   // We allow a STW collection only if a full
1474   // collection was requested.
1475   return full || should_allocate(size, tlab); // FIX ME !!!
1476   // This and promotion failure handling are connected at the
1477   // hip and should be fixed by untying them.
1478 }
1479 
1480 bool CMSCollector::shouldConcurrentCollect() {
1481   if (_full_gc_requested) {
1482     if (Verbose && PrintGCDetails) {
1483       gclog_or_tty->print_cr("CMSCollector: collect because of explicit "
                             "gc request (or gc_locker)");
1485     }
1486     return true;
1487   }
1488 
1489   // For debugging purposes, change the type of collection.
1490   // If the rotation is not on the concurrent collection
1491   // type, don't start a concurrent collection.
1492   NOT_PRODUCT(
1493     if (RotateCMSCollectionTypes &&
1494         (_cmsGen->debug_collection_type() !=
1495           ConcurrentMarkSweepGeneration::Concurrent_collection_type)) {
1496       assert(_cmsGen->debug_collection_type() !=
1497         ConcurrentMarkSweepGeneration::Unknown_collection_type,
1498         "Bad cms collection type");
1499       return false;
1500     }
1501   )
1502 
1503   FreelistLocker x(this);
1504   // ------------------------------------------------------------------
1505   // Print out lots of information which affects the initiation of
1506   // a collection.
1507   if (PrintCMSInitiationStatistics && stats().valid()) {
1508     gclog_or_tty->print("CMSCollector shouldConcurrentCollect: ");
1509     gclog_or_tty->stamp();
1510     gclog_or_tty->print_cr("");
1511     stats().print_on(gclog_or_tty);
1512     gclog_or_tty->print_cr("time_until_cms_gen_full %3.7f",
1513       stats().time_until_cms_gen_full());
1514     gclog_or_tty->print_cr("free="SIZE_FORMAT, _cmsGen->free());
1515     gclog_or_tty->print_cr("contiguous_available="SIZE_FORMAT,
1516                            _cmsGen->contiguous_available());
1517     gclog_or_tty->print_cr("promotion_rate=%g", stats().promotion_rate());
1518     gclog_or_tty->print_cr("cms_allocation_rate=%g", stats().cms_allocation_rate());
1519     gclog_or_tty->print_cr("occupancy=%3.7f", _cmsGen->occupancy());
1520     gclog_or_tty->print_cr("initiatingOccupancy=%3.7f", _cmsGen->initiating_occupancy());
1521     gclog_or_tty->print_cr("metadata initialized %d",
1522       MetaspaceGC::should_concurrent_collect());
1523   }
1524   // ------------------------------------------------------------------
1525 
1526   // If the estimated time to complete a cms collection (cms_duration())
1527   // is less than the estimated time remaining until the cms generation
1528   // is full, start a collection.
1529   if (!UseCMSInitiatingOccupancyOnly) {
1530     if (stats().valid()) {
1531       if (stats().time_until_cms_start() == 0.0) {
1532         return true;
1533       }
1534     } else {
1535       // We want to conservatively collect somewhat early in order
      // to try to "bootstrap" our CMS/promotion statistics;
1537       // this branch will not fire after the first successful CMS
1538       // collection because the stats should then be valid.
1539       if (_cmsGen->occupancy() >= _bootstrap_occupancy) {
1540         if (Verbose && PrintGCDetails) {
1541           gclog_or_tty->print_cr(
1542             " CMSCollector: collect for bootstrapping statistics:"
1543             " occupancy = %f, boot occupancy = %f", _cmsGen->occupancy(),
1544             _bootstrap_occupancy);
1545         }
1546         return true;
1547       }
1548     }
1549   }
1550 
  // Otherwise, we start a collection cycle if the
  // old gen wants a collection cycle started. It may use
  // an appropriate criterion for making this decision.
1554   // XXX We need to make sure that the gen expansion
1555   // criterion dovetails well with this. XXX NEED TO FIX THIS
1556   if (_cmsGen->should_concurrent_collect()) {
1557     if (Verbose && PrintGCDetails) {
1558       gclog_or_tty->print_cr("CMS old gen initiated");
1559     }
1560     return true;
1561   }
1562 
1563   // We start a collection if we believe an incremental collection may fail;
1564   // this is not likely to be productive in practice because it's probably too
1565   // late anyway.
1566   GenCollectedHeap* gch = GenCollectedHeap::heap();
1567   assert(gch->collector_policy()->is_two_generation_policy(),
1568          "You may want to check the correctness of the following");
1569   if (gch->incremental_collection_will_fail(true /* consult_young */)) {
1570     if (Verbose && PrintGCDetails) {
1571       gclog_or_tty->print("CMSCollector: collect because incremental collection will fail ");
1572     }
1573     return true;
1574   }
1575 
1576   if (MetaspaceGC::should_concurrent_collect()) {
    if (Verbose && PrintGCDetails) {
      gclog_or_tty->print("CMSCollector: collect for metadata allocation ");
    }
    return true;
  }
1582 
1583   return false;
1584 }
1585 
1586 void CMSCollector::set_did_compact(bool v) { _cmsGen->set_did_compact(v); }
1587 
1588 // Clear _expansion_cause fields of constituent generations
1589 void CMSCollector::clear_expansion_cause() {
1590   _cmsGen->clear_expansion_cause();
1591 }
1592 
// We should be conservative in starting a collection cycle.  Starting
// too eagerly risks collecting too often in the extreme; collecting
// too rarely falls back on full collections, which work, even if they
// are not optimal in terms of concurrent work.
// As a workaround for collecting too eagerly, use the flag
1598 // UseCMSInitiatingOccupancyOnly.  This also has the advantage of
1599 // giving the user an easily understandable way of controlling the
1600 // collections.
1601 // We want to start a new collection cycle if any of the following
1602 // conditions hold:
1603 // . our current occupancy exceeds the configured initiating occupancy
1604 //   for this generation, or
1605 // . we recently needed to expand this space and have not, since that
1606 //   expansion, done a collection of this generation, or
1607 // . the underlying space believes that it may be a good idea to initiate
1608 //   a concurrent collection (this may be based on criteria such as the
1609 //   following: the space uses linear allocation and linear allocation is
1610 //   going to fail, or there is believed to be excessive fragmentation in
1611 //   the generation, etc... or ...
1612 // [.(currently done by CMSCollector::shouldConcurrentCollect() only for
1613 //   the case of the old generation; see CR 6543076):
1614 //   we may be approaching a point at which allocation requests may fail because
1615 //   we will be out of sufficient free space given allocation rate estimates.]
1616 bool ConcurrentMarkSweepGeneration::should_concurrent_collect() const {
1617 
1618   assert_lock_strong(freelistLock());
1619   if (occupancy() > initiating_occupancy()) {
1620     if (PrintGCDetails && Verbose) {
1621       gclog_or_tty->print(" %s: collect because of occupancy %f / %f  ",
1622         short_name(), occupancy(), initiating_occupancy());
1623     }
1624     return true;
1625   }
1626   if (UseCMSInitiatingOccupancyOnly) {
1627     return false;
1628   }
1629   if (expansion_cause() == CMSExpansionCause::_satisfy_allocation) {
1630     if (PrintGCDetails && Verbose) {
1631       gclog_or_tty->print(" %s: collect because expanded for allocation ",
1632         short_name());
1633     }
1634     return true;
1635   }
1636   if (_cmsSpace->should_concurrent_collect()) {
1637     if (PrintGCDetails && Verbose) {
1638       gclog_or_tty->print(" %s: collect because cmsSpace says so ",
1639         short_name());
1640     }
1641     return true;
1642   }
1643   return false;
1644 }
1645 
1646 void ConcurrentMarkSweepGeneration::collect(bool   full,
1647                                             bool   clear_all_soft_refs,
1648                                             size_t size,
1649                                             bool   tlab)
1650 {
1651   collector()->collect(full, clear_all_soft_refs, size, tlab);
1652 }
1653 
1654 void CMSCollector::collect(bool   full,
1655                            bool   clear_all_soft_refs,
1656                            size_t size,
1657                            bool   tlab)
1658 {
1659   if (!UseCMSCollectionPassing && _collectorState > Idling) {
1660     // For debugging purposes skip the collection if the state
1661     // is not currently idle
1662     if (TraceCMSState) {
1663       gclog_or_tty->print_cr("Thread " INTPTR_FORMAT " skipped full:%d CMS state %d",
1664         Thread::current(), full, _collectorState);
1665     }
1666     return;
1667   }
1668 
1669   // The following "if" branch is present for defensive reasons.
1670   // In the current uses of this interface, it can be replaced with:
  // assert(!GC_locker::is_active(), "Can't be called otherwise");
1672   // But I am not placing that assert here to allow future
1673   // generality in invoking this interface.
1674   if (GC_locker::is_active()) {
1675     // A consistency test for GC_locker
1676     assert(GC_locker::needs_gc(), "Should have been set already");
1677     // Skip this foreground collection, instead
1678     // expanding the heap if necessary.
1679     // Need the free list locks for the call to free() in compute_new_size()
1680     compute_new_size();
1681     return;
1682   }
1683   acquire_control_and_collect(full, clear_all_soft_refs);
1684   _full_gcs_since_conc_gc++;
1685 }
1686 
1687 void CMSCollector::request_full_gc(unsigned int full_gc_count, GCCause::Cause cause) {
1688   GenCollectedHeap* gch = GenCollectedHeap::heap();
1689   unsigned int gc_count = gch->total_full_collections();
1690   if (gc_count == full_gc_count) {
1691     MutexLockerEx y(CGC_lock, Mutex::_no_safepoint_check_flag);
1692     _full_gc_requested = true;
1693     _full_gc_cause = cause;
1694     CGC_lock->notify();   // nudge CMS thread
1695   } else {
1696     assert(gc_count > full_gc_count, "Error: causal loop");
1697   }
1698 }
1699 
1700 bool CMSCollector::is_external_interruption() {
1701   GCCause::Cause cause = GenCollectedHeap::heap()->gc_cause();
1702   return GCCause::is_user_requested_gc(cause) ||
1703          GCCause::is_serviceability_requested_gc(cause);
1704 }
1705 
1706 void CMSCollector::report_concurrent_mode_interruption() {
1707   if (is_external_interruption()) {
1708     if (PrintGCDetails) {
1709       gclog_or_tty->print(" (concurrent mode interrupted)");
1710     }
1711   } else {
1712     if (PrintGCDetails) {
1713       gclog_or_tty->print(" (concurrent mode failure)");
1714     }
1715     _gc_tracer_cm->report_concurrent_mode_failure();
1716   }
1717 }
1718 
1719 
1720 // The foreground and background collectors need to coordinate in order
1721 // to make sure that they do not mutually interfere with CMS collections.
1722 // When a background collection is active,
1723 // the foreground collector may need to take over (preempt) and
1724 // synchronously complete an ongoing collection. Depending on the
1725 // frequency of the background collections and the heap usage
// of the application, this preemption can be rare or frequent.
// There are only certain
// points in the background collection at which the "collection-baton"
// can be passed to the foreground collector.
1730 //
1731 // The foreground collector will wait for the baton before
1732 // starting any part of the collection.  The foreground collector
1733 // will only wait at one location.
1734 //
1735 // The background collector will yield the baton before starting a new
1736 // phase of the collection (e.g., before initial marking, marking from roots,
1737 // precleaning, final re-mark, sweep etc.)  This is normally done at the head
1738 // of the loop which switches the phases. The background collector does some
1739 // of the phases (initial mark, final re-mark) with the world stopped.
1740 // Because of locking involved in stopping the world,
1741 // the foreground collector should not block waiting for the background
1742 // collector when it is doing a stop-the-world phase.  The background
1743 // collector will yield the baton at an additional point just before
1744 // it enters a stop-the-world phase.  Once the world is stopped, the
1745 // background collector checks the phase of the collection.  If the
1746 // phase has not changed, it proceeds with the collection.  If the
1747 // phase has changed, it skips that phase of the collection.  See
1748 // the comments on the use of the Heap_lock in collect_in_background().
1749 //
// Variables used in baton passing.
1751 //   _foregroundGCIsActive - Set to true by the foreground collector when
1752 //      it wants the baton.  The foreground clears it when it has finished
1753 //      the collection.
1754 //   _foregroundGCShouldWait - Set to true by the background collector
//      when it is running.  The foreground collector waits while
1756 //      _foregroundGCShouldWait is true.
1757 //  CGC_lock - monitor used to protect access to the above variables
1758 //      and to notify the foreground and background collectors.
1759 //  _collectorState - current state of the CMS collection.
1760 //
1761 // The foreground collector
1762 //   acquires the CGC_lock
1763 //   sets _foregroundGCIsActive
1764 //   waits on the CGC_lock for _foregroundGCShouldWait to be false
1765 //     various locks acquired in preparation for the collection
1766 //     are released so as not to block the background collector
1767 //     that is in the midst of a collection
1768 //   proceeds with the collection
1769 //   clears _foregroundGCIsActive
1770 //   returns
1771 //
1772 // The background collector in a loop iterating on the phases of the
1773 //      collection
1774 //   acquires the CGC_lock
1775 //   sets _foregroundGCShouldWait
1776 //   if _foregroundGCIsActive is set
//     clears _foregroundGCShouldWait, notifies CGC_lock
//     waits on CGC_lock for _foregroundGCIsActive to become false
1779 //     and exits the loop.
1780 //   otherwise
1781 //     proceed with that phase of the collection
1782 //     if the phase is a stop-the-world phase,
1783 //       yield the baton once more just before enqueueing
1784 //       the stop-world CMS operation (executed by the VM thread).
1785 //   returns after all phases of the collection are done
1786 //
1787 
1788 void CMSCollector::acquire_control_and_collect(bool full,
1789         bool clear_all_soft_refs) {
1790   assert(SafepointSynchronize::is_at_safepoint(), "should be at safepoint");
1791   assert(!Thread::current()->is_ConcurrentGC_thread(),
1792          "shouldn't try to acquire control from self!");
1793 
1794   // Start the protocol for acquiring control of the
1795   // collection from the background collector (aka CMS thread).
1796   assert(ConcurrentMarkSweepThread::vm_thread_has_cms_token(),
1797          "VM thread should have CMS token");
1798   // Remember the possibly interrupted state of an ongoing
1799   // concurrent collection
1800   CollectorState first_state = _collectorState;
1801 
1802   // Signal to a possibly ongoing concurrent collection that
1803   // we want to do a foreground collection.
1804   _foregroundGCIsActive = true;
1805 
1806   // Disable incremental mode during a foreground collection.
1807   ICMSDisabler icms_disabler;
1808 
  // Release locks and wait for a notify from the background collector;
  // releasing the locks is only necessary for phases which
1811   // do yields to improve the granularity of the collection.
1812   assert_lock_strong(bitMapLock());
1813   // We need to lock the Free list lock for the space that we are
1814   // currently collecting.
1815   assert(haveFreelistLocks(), "Must be holding free list locks");
1816   bitMapLock()->unlock();
1817   releaseFreelistLocks();
1818   {
1819     MutexLockerEx x(CGC_lock, Mutex::_no_safepoint_check_flag);
1820     if (_foregroundGCShouldWait) {
1821       // We are going to be waiting for action for the CMS thread;
1822       // it had better not be gone (for instance at shutdown)!
1823       assert(ConcurrentMarkSweepThread::cmst() != NULL,
1824              "CMS thread must be running");
1825       // Wait here until the background collector gives us the go-ahead
1826       ConcurrentMarkSweepThread::clear_CMS_flag(
1827         ConcurrentMarkSweepThread::CMS_vm_has_token);  // release token
1828       // Get a possibly blocked CMS thread going:
1829       //   Note that we set _foregroundGCIsActive true above,
1830       //   without protection of the CGC_lock.
1831       CGC_lock->notify();
1832       assert(!ConcurrentMarkSweepThread::vm_thread_wants_cms_token(),
1833              "Possible deadlock");
1834       while (_foregroundGCShouldWait) {
1835         // wait for notification
1836         CGC_lock->wait(Mutex::_no_safepoint_check_flag);
1837         // Possibility of delay/starvation here, since CMS token does
        // not know to give priority to VM thread? Actually, I think
1839         // there wouldn't be any delay/starvation, but the proof of
1840         // that "fact" (?) appears non-trivial. XXX 20011219YSR
1841       }
1842       ConcurrentMarkSweepThread::set_CMS_flag(
1843         ConcurrentMarkSweepThread::CMS_vm_has_token);
1844     }
1845   }
1846   // The CMS_token is already held.  Get back the other locks.
1847   assert(ConcurrentMarkSweepThread::vm_thread_has_cms_token(),
1848          "VM thread should have CMS token");
1849   getFreelistLocks();
1850   bitMapLock()->lock_without_safepoint_check();
1851   if (TraceCMSState) {
1852     gclog_or_tty->print_cr("CMS foreground collector has asked for control "
1853       INTPTR_FORMAT " with first state %d", Thread::current(), first_state);
1854     gclog_or_tty->print_cr("    gets control with state %d", _collectorState);
1855   }
1856 
1857   // Check if we need to do a compaction, or if not, whether
1858   // we need to start the mark-sweep from scratch.
1859   bool should_compact    = false;
1860   bool should_start_over = false;
1861   decide_foreground_collection_type(clear_all_soft_refs,
1862     &should_compact, &should_start_over);
1863 
1864 NOT_PRODUCT(
1865   if (RotateCMSCollectionTypes) {
1866     if (_cmsGen->debug_collection_type() ==
1867         ConcurrentMarkSweepGeneration::MSC_foreground_collection_type) {
1868       should_compact = true;
1869     } else if (_cmsGen->debug_collection_type() ==
1870                ConcurrentMarkSweepGeneration::MS_foreground_collection_type) {
1871       should_compact = false;
1872     }
1873   }
1874 )
1875 
1876   if (first_state > Idling) {
1877     report_concurrent_mode_interruption();
1878   }
1879 
1880   set_did_compact(should_compact);
1881   if (should_compact) {
1882     // If the collection is being acquired from the background
1883     // collector, there may be references on the discovered
1884     // references lists that have NULL referents (being those
1885     // that were concurrently cleared by a mutator) or
1886     // that are no longer active (having been enqueued concurrently
1887     // by the mutator).
1888     // Scrub the list of those references because Mark-Sweep-Compact
1889     // code assumes referents are not NULL and that all discovered
1890     // Reference objects are active.
1891     ref_processor()->clean_up_discovered_references();
1892 
1893     if (first_state > Idling) {
1894       save_heap_summary();
1895     }
1896 
1897     do_compaction_work(clear_all_soft_refs);
1898 
1899     // Has the GC time limit been exceeded?
1900     DefNewGeneration* young_gen = _young_gen->as_DefNewGeneration();
1901     size_t max_eden_size = young_gen->max_capacity() -
1902                            young_gen->to()->capacity() -
1903                            young_gen->from()->capacity();
1904     GenCollectedHeap* gch = GenCollectedHeap::heap();
1905     GCCause::Cause gc_cause = gch->gc_cause();
1906     size_policy()->check_gc_overhead_limit(_young_gen->used(),
1907                                            young_gen->eden()->used(),
1908                                            _cmsGen->max_capacity(),
1909                                            max_eden_size,
1910                                            full,
1911                                            gc_cause,
1912                                            gch->collector_policy());
1913   } else {
1914     do_mark_sweep_work(clear_all_soft_refs, first_state,
1915       should_start_over);
1916   }
1917   // Reset the expansion cause, now that we just completed
1918   // a collection cycle.
1919   clear_expansion_cause();
1920   _foregroundGCIsActive = false;
1921   return;
1922 }
1923 
1924 // Resize the tenured generation
1925 // after obtaining the free list locks for the
1926 // two generations.
1927 void CMSCollector::compute_new_size() {
1928   assert_locked_or_safepoint(Heap_lock);
1929   FreelistLocker z(this);
1930   MetaspaceGC::compute_new_size();
1931   _cmsGen->compute_new_size_free_list();
1932 }
1933 
1934 // A work method used by foreground collection to determine
1935 // what type of collection (compacting or not, continuing or fresh)
1936 // it should do.
1937 // NOTE: the intent is to make UseCMSCompactAtFullCollection
1938 // and CMSCompactWhenClearAllSoftRefs the default in the future
1939 // and do away with the flags after a suitable period.
1940 void CMSCollector::decide_foreground_collection_type(
1941   bool clear_all_soft_refs, bool* should_compact,
1942   bool* should_start_over) {
1943   // Normally, we'll compact only if the UseCMSCompactAtFullCollection
  // flag is set, and either a System.gc() has been requested, or
  // the number of full gc's since the last concurrent cycle
  // has exceeded the threshold set by CMSFullGCsBeforeCompaction,
  // or an incremental collection has failed.
1948   GenCollectedHeap* gch = GenCollectedHeap::heap();
1949   assert(gch->collector_policy()->is_two_generation_policy(),
1950          "You may want to check the correctness of the following");
1951   // Inform cms gen if this was due to partial collection failing.
1952   // The CMS gen may use this fact to determine its expansion policy.
1953   if (gch->incremental_collection_will_fail(false /* don't consult_young */)) {
1954     assert(!_cmsGen->incremental_collection_failed(),
1955            "Should have been noticed, reacted to and cleared");
1956     _cmsGen->set_incremental_collection_failed();
1957   }
1958   *should_compact =
1959     UseCMSCompactAtFullCollection &&
1960     ((_full_gcs_since_conc_gc >= CMSFullGCsBeforeCompaction) ||
1961      GCCause::is_user_requested_gc(gch->gc_cause()) ||
1962      gch->incremental_collection_will_fail(true /* consult_young */));
1963   *should_start_over = false;
1964   if (clear_all_soft_refs && !*should_compact) {
1965     // We are about to do a last ditch collection attempt
1966     // so it would normally make sense to do a compaction
1967     // to reclaim as much space as possible.
1968     if (CMSCompactWhenClearAllSoftRefs) {
1969       // Default: The rationale is that in this case either
1970       // we are past the final marking phase, in which case
1971       // we'd have to start over, or so little has been done
1972       // that there's little point in saving that work. Compaction
1973       // appears to be the sensible choice in either case.
1974       *should_compact = true;
1975     } else {
1976       // We have been asked to clear all soft refs, but not to
1977       // compact. Make sure that we aren't past the final checkpoint
1978       // phase, for that is where we process soft refs. If we are already
1979       // past that phase, we'll need to redo the refs discovery phase and
1980       // if necessary clear soft refs that weren't previously
1981       // cleared. We do so by remembering the phase in which
1982       // we came in, and if we are past the refs processing
1983       // phase, we'll choose to just redo the mark-sweep
1984       // collection from scratch.
1985       if (_collectorState > FinalMarking) {
1986         // We are past the refs processing phase;
1987         // start over and do a fresh synchronous CMS cycle
1988         _collectorState = Resetting; // skip to reset to start new cycle
1989         reset(false /* == !asynch */);
1990         *should_start_over = true;
1991       } // else we can continue a possibly ongoing current cycle
1992     }
1993   }
1994 }
1995 
1996 // A work method used by the foreground collector to do
1997 // a mark-sweep-compact.
1998 void CMSCollector::do_compaction_work(bool clear_all_soft_refs) {
1999   GenCollectedHeap* gch = GenCollectedHeap::heap();
2000 
2001   STWGCTimer* gc_timer = GenMarkSweep::gc_timer();
2002   gc_timer->register_gc_start(os::elapsed_counter());
2003 
2004   SerialOldTracer* gc_tracer = GenMarkSweep::gc_tracer();
2005   gc_tracer->report_gc_start(gch->gc_cause(), gc_timer->gc_start());
2006 
2007   GCTraceTime t("CMS:MSC ", PrintGCDetails && Verbose, true, NULL);
2008   if (PrintGC && Verbose && !(GCCause::is_user_requested_gc(gch->gc_cause()))) {
2009     gclog_or_tty->print_cr("Compact ConcurrentMarkSweepGeneration after %d "
2010       "collections passed to foreground collector", _full_gcs_since_conc_gc);
2011   }
2012 
2013   // Sample collection interval time and reset for collection pause.
2014   if (UseAdaptiveSizePolicy) {
2015     size_policy()->msc_collection_begin();
2016   }
2017 
2018   // Temporarily widen the span of the weak reference processing to
2019   // the entire heap.
2020   MemRegion new_span(GenCollectedHeap::heap()->reserved_region());
2021   ReferenceProcessorSpanMutator rp_mut_span(ref_processor(), new_span);
2022   // Temporarily, clear the "is_alive_non_header" field of the
2023   // reference processor.
2024   ReferenceProcessorIsAliveMutator rp_mut_closure(ref_processor(), NULL);
2025   // Temporarily make reference _processing_ single threaded (non-MT).
2026   ReferenceProcessorMTProcMutator rp_mut_mt_processing(ref_processor(), false);
2027   // Temporarily make refs discovery atomic
2028   ReferenceProcessorAtomicMutator rp_mut_atomic(ref_processor(), true);
2029   // Temporarily make reference _discovery_ single threaded (non-MT)
2030   ReferenceProcessorMTDiscoveryMutator rp_mut_discovery(ref_processor(), false);
2031 
2032   ref_processor()->set_enqueuing_is_done(false);
2033   ref_processor()->enable_discovery(false /*verify_disabled*/, false /*check_no_refs*/);
2034   ref_processor()->setup_policy(clear_all_soft_refs);
2035   // If an asynchronous collection finishes, the _modUnionTable is
2036   // all clear.  If we are assuming the collection from an asynchronous
2037   // collection, clear the _modUnionTable.
2038   assert(_collectorState != Idling || _modUnionTable.isAllClear(),
2039     "_modUnionTable should be clear if the baton was not passed");
2040   _modUnionTable.clear_all();
2041   assert(_collectorState != Idling || _ct->klass_rem_set()->mod_union_is_clear(),
    "mod union for klasses should be clear if the baton was not passed");
2043   _ct->klass_rem_set()->clear_mod_union();
2044 
2045   // We must adjust the allocation statistics being maintained
2046   // in the free list space. We do so by reading and clearing
2047   // the sweep timer and updating the block flux rate estimates below.
2048   assert(!_intra_sweep_timer.is_active(), "_intra_sweep_timer should be inactive");
2049   if (_inter_sweep_timer.is_active()) {
2050     _inter_sweep_timer.stop();
2051     // Note that we do not use this sample to update the _inter_sweep_estimate.
2052     _cmsGen->cmsSpace()->beginSweepFLCensus((float)(_inter_sweep_timer.seconds()),
2053                                             _inter_sweep_estimate.padded_average(),
2054                                             _intra_sweep_estimate.padded_average());
2055   }
2056 
2057   GenMarkSweep::invoke_at_safepoint(_cmsGen->level(),
2058     ref_processor(), clear_all_soft_refs);
2059   #ifdef ASSERT
2060     CompactibleFreeListSpace* cms_space = _cmsGen->cmsSpace();
2061     size_t free_size = cms_space->free();
2062     assert(free_size ==
2063            pointer_delta(cms_space->end(), cms_space->compaction_top())
2064            * HeapWordSize,
2065       "All the free space should be compacted into one chunk at top");
2066     assert(cms_space->dictionary()->total_chunk_size(
2067                                       debug_only(cms_space->freelistLock())) == 0 ||
2068            cms_space->totalSizeInIndexedFreeLists() == 0,
2069       "All the free space should be in a single chunk");
2070     size_t num = cms_space->totalCount();
2071     assert((free_size == 0 && num == 0) ||
2072            (free_size > 0  && (num == 1 || num == 2)),
2073          "There should be at most 2 free chunks after compaction");
2074   #endif // ASSERT
2075   _collectorState = Resetting;
2076   assert(_restart_addr == NULL,
2077          "Should have been NULL'd before baton was passed");
2078   reset(false /* == !asynch */);
2079   _cmsGen->reset_after_compaction();
2080   _concurrent_cycles_since_last_unload = 0;
2081 
2082   // Clear any data recorded in the PLAB chunk arrays.
2083   if (_survivor_plab_array != NULL) {
2084     reset_survivor_plab_arrays();
2085   }
2086 
2087   // Adjust the per-size allocation stats for the next epoch.
2088   _cmsGen->cmsSpace()->endSweepFLCensus(sweep_count() /* fake */);
2089   // Restart the "inter sweep timer" for the next epoch.
2090   _inter_sweep_timer.reset();
2091   _inter_sweep_timer.start();
2092 
2093   // Sample collection pause time and reset for collection interval.
2094   if (UseAdaptiveSizePolicy) {
2095     size_policy()->msc_collection_end(gch->gc_cause());
2096   }
2097 
2098   gc_timer->register_gc_end(os::elapsed_counter());
2099 
2100   gc_tracer->report_gc_end(gc_timer->gc_end(), gc_timer->time_partitions());
2101 
2102   // For a mark-sweep-compact, compute_new_size() will be called
2103   // in the heap's do_collection() method.
2104 }
2105 
2106 // A work method used by the foreground collector to do
// a mark-sweep, after taking over from a possibly ongoing
2108 // concurrent mark-sweep collection.
2109 void CMSCollector::do_mark_sweep_work(bool clear_all_soft_refs,
2110   CollectorState first_state, bool should_start_over) {
2111   if (PrintGC && Verbose) {
2112     gclog_or_tty->print_cr("Pass concurrent collection to foreground "
2113       "collector with count %d",
2114       _full_gcs_since_conc_gc);
2115   }
2116   switch (_collectorState) {
2117     case Idling:
2118       if (first_state == Idling || should_start_over) {
        // The background GC was not active, or should be
        // restarted from scratch; start the cycle.
2121         _collectorState = InitialMarking;
2122       }
2123       // If first_state was not Idling, then a background GC
2124       // was in progress and has now finished.  No need to do it
2125       // again.  Leave the state as Idling.
2126       break;
2127     case Precleaning:
2128       // In the foreground case don't do the precleaning since
2129       // it is not done concurrently and there is extra work
2130       // required.
2131       _collectorState = FinalMarking;
2132   }
2133   collect_in_foreground(clear_all_soft_refs, GenCollectedHeap::heap()->gc_cause());
2134 
2135   // For a mark-sweep, compute_new_size() will be called
2136   // in the heap's do_collection() method.
2137 }
2138 
2139 
2140 void CMSCollector::getFreelistLocks() const {
2141   // Get locks for all free lists in all generations that this
2142   // collector is responsible for
2143   _cmsGen->freelistLock()->lock_without_safepoint_check();
2144 }
2145 
2146 void CMSCollector::releaseFreelistLocks() const {
2147   // Release locks for all free lists in all generations that this
2148   // collector is responsible for
2149   _cmsGen->freelistLock()->unlock();
2150 }
2151 
2152 bool CMSCollector::haveFreelistLocks() const {
2153   // Check locks for all free lists in all generations that this
2154   // collector is responsible for
2155   assert_lock_strong(_cmsGen->freelistLock());
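  // PRODUCT_ONLY expands ShouldNotReachHere() only in product builds;
  // this method is intended for use in debug-only (assertion) checks.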
2156   PRODUCT_ONLY(ShouldNotReachHere());
2157   return true;
2158 }
2159 
2160 // A utility class that is used by the CMS collector to
2161 // temporarily "release" the foreground collector from its
2162 // usual obligation to wait for the background collector to
2163 // complete an ongoing phase before proceeding.
2164 class ReleaseForegroundGC: public StackObj {
2165  private:
2166   CMSCollector* _c;
2167  public:
2168   ReleaseForegroundGC(CMSCollector* c) : _c(c) {
2169     assert(_c->_foregroundGCShouldWait, "Else should not need to call");
2170     MutexLockerEx x(CGC_lock, Mutex::_no_safepoint_check_flag);
2171     // allow a potentially blocked foreground collector to proceed
2172     _c->_foregroundGCShouldWait = false;
2173     if (_c->_foregroundGCIsActive) {
2174       CGC_lock->notify();
2175     }
2176     assert(!ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
2177            "Possible deadlock");
2178   }
2179 
2180   ~ReleaseForegroundGC() {
2181     assert(!_c->_foregroundGCShouldWait, "Usage protocol violation?");
2182     MutexLockerEx x(CGC_lock, Mutex::_no_safepoint_check_flag);
2183     _c->_foregroundGCShouldWait = true;
2184   }
2185 };
2186 
2187 // There are separate collect_in_background and collect_in_foreground because of
2188 // the different locking requirements of the background collector and the
2189 // foreground collector.  There was originally an attempt to share
2190 // one "collect" method between the background collector and the foreground
// collector, but the if-then-else logic required made it cleaner to have
2192 // separate methods.
2193 void CMSCollector::collect_in_background(bool clear_all_soft_refs, GCCause::Cause cause) {
2194   assert(Thread::current()->is_ConcurrentGC_thread(),
2195     "A CMS asynchronous collection is only allowed on a CMS thread.");
2196 
2197   GenCollectedHeap* gch = GenCollectedHeap::heap();
2198   {
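    // These locks are acquired without safepoint checks; this code runs in
    // the CMS (concurrent GC, non-Java) thread, as asserted above.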
2199     bool safepoint_check = Mutex::_no_safepoint_check_flag;
2200     MutexLockerEx hl(Heap_lock, safepoint_check);
2201     FreelistLocker fll(this);
2202     MutexLockerEx x(CGC_lock, safepoint_check);
2203     if (_foregroundGCIsActive || !UseAsyncConcMarkSweepGC) {
2204       // The foreground collector is active or we're
2205       // not using asynchronous collections.  Skip this
2206       // background collection.
2207       assert(!_foregroundGCShouldWait, "Should be clear");
2208       return;
2209     } else {
2210       assert(_collectorState == Idling, "Should be idling before start.");
2211       _collectorState = InitialMarking;
2212       register_gc_start(cause);
2213       // Reset the expansion cause, now that we are about to begin
2214       // a new cycle.
2215       clear_expansion_cause();
2216 
      // Clear the MetaspaceGC flag now that a concurrent collection
      // is starting; it is cleared again after the collection completes.
2219       MetaspaceGC::set_should_concurrent_collect(false);
2220     }
2221     // Decide if we want to enable class unloading as part of the
2222     // ensuing concurrent GC cycle.
2223     update_should_unload_classes();
2224     _full_gc_requested = false;           // acks all outstanding full gc requests
2225     _full_gc_cause = GCCause::_no_gc;
2226     // Signal that we are about to start a collection
2227     gch->increment_total_full_collections();  // ... starting a collection cycle
2228     _collection_count_start = gch->total_full_collections();
2229   }
2230 
2231   // Used for PrintGC
2232   size_t prev_used;
2233   if (PrintGC && Verbose) {
2234     prev_used = _cmsGen->used(); // XXXPERM
2235   }
2236 
2237   // The change of the collection state is normally done at this level;
2238   // the exceptions are phases that are executed while the world is
2239   // stopped.  For those phases the change of state is done while the
2240   // world is stopped.  For baton passing purposes this allows the
2241   // background collector to finish the phase and change state atomically.
2242   // The foreground collector cannot wait on a phase that is done
2243   // while the world is stopped because the foreground collector already
2244   // has the world stopped and would deadlock.
2245   while (_collectorState != Idling) {
2246     if (TraceCMSState) {
2247       gclog_or_tty->print_cr("Thread " INTPTR_FORMAT " in CMS state %d",
2248         Thread::current(), _collectorState);
2249     }
2250     // The foreground collector
2251     //   holds the Heap_lock throughout its collection.
2252     //   holds the CMS token (but not the lock)
2253     //     except while it is waiting for the background collector to yield.
2254     //
2255     // The foreground collector should be blocked (not for long)
2256     //   if the background collector is about to start a phase
2257     //   executed with world stopped.  If the background
2258     //   collector has already started such a phase, the
2259     //   foreground collector is blocked waiting for the
2260     //   Heap_lock.  The stop-world phases (InitialMarking and FinalMarking)
2261     //   are executed in the VM thread.
2262     //
2263     // The locking order is
2264     //   PendingListLock (PLL)  -- if applicable (FinalMarking)
2265     //   Heap_lock  (both this & PLL locked in VM_CMS_Operation::prologue())
2266     //   CMS token  (claimed in
2267     //                stop_world_and_do() -->
2268     //                  safepoint_synchronize() -->
2269     //                    CMSThread::synchronize())
2270 
2271     {
2272       // Check if the FG collector wants us to yield.
2273       CMSTokenSync x(true); // is cms thread
2274       if (waitForForegroundGC()) {
2275         // We yielded to a foreground GC, nothing more to be
2276         // done this round.
2277         assert(_foregroundGCShouldWait == false, "We set it to false in "
2278                "waitForForegroundGC()");
2279         if (TraceCMSState) {
2280           gclog_or_tty->print_cr("CMS Thread " INTPTR_FORMAT
2281             " exiting collection CMS state %d",
2282             Thread::current(), _collectorState);
2283         }
2284         return;
2285       } else {
2286         // The background collector can run but check to see if the
2287         // foreground collector has done a collection while the
2288         // background collector was waiting to get the CGC_lock
2289         // above.  If yes, break so that _foregroundGCShouldWait
2290         // is cleared before returning.
2291         if (_collectorState == Idling) {
2292           break;
2293         }
2294       }
2295     }
2296 
2297     assert(_foregroundGCShouldWait, "Foreground collector, if active, "
2298       "should be waiting");
2299 
2300     switch (_collectorState) {
2301       case InitialMarking:
2302         {
2303           ReleaseForegroundGC x(this);
2304           stats().record_cms_begin();
2305           VM_CMS_Initial_Mark initial_mark_op(this);
2306           VMThread::execute(&initial_mark_op);
2307         }
2308         // The collector state may be any legal state at this point
2309         // since the background collector may have yielded to the
2310         // foreground collector.
2311         break;
2312       case Marking:
2313         // initial marking in checkpointRootsInitialWork has been completed
2314         if (markFromRoots(true)) { // we were successful
2315           assert(_collectorState == Precleaning, "Collector state should "
2316             "have changed");
2317         } else {
2318           assert(_foregroundGCIsActive, "Internal state inconsistency");
2319         }
2320         break;
2321       case Precleaning:
2322         if (UseAdaptiveSizePolicy) {
2323           size_policy()->concurrent_precleaning_begin();
2324         }
2325         // marking from roots in markFromRoots has been completed
2326         preclean();
2327         if (UseAdaptiveSizePolicy) {
2328           size_policy()->concurrent_precleaning_end();
2329         }
2330         assert(_collectorState == AbortablePreclean ||
2331                _collectorState == FinalMarking,
2332                "Collector state should have changed");
2333         break;
2334       case AbortablePreclean:
2335         if (UseAdaptiveSizePolicy) {
          size_policy()->concurrent_phases_resume();
2337         }
2338         abortable_preclean();
2339         if (UseAdaptiveSizePolicy) {
2340           size_policy()->concurrent_precleaning_end();
2341         }
2342         assert(_collectorState == FinalMarking, "Collector state should "
2343           "have changed");
2344         break;
2345       case FinalMarking:
2346         {
2347           ReleaseForegroundGC x(this);
2348 
2349           VM_CMS_Final_Remark final_remark_op(this);
2350           VMThread::execute(&final_remark_op);
2351         }
2352         assert(_foregroundGCShouldWait, "block post-condition");
2353         break;
2354       case Sweeping:
2355         if (UseAdaptiveSizePolicy) {
2356           size_policy()->concurrent_sweeping_begin();
2357         }
2358         // final marking in checkpointRootsFinal has been completed
2359         sweep(true);
2360         assert(_collectorState == Resizing, "Collector state change "
2361           "to Resizing must be done under the free_list_lock");
2362         _full_gcs_since_conc_gc = 0;
2363 
2364         // Stop the timers for adaptive size policy for the concurrent phases
2365         if (UseAdaptiveSizePolicy) {
2366           size_policy()->concurrent_sweeping_end();
2367           size_policy()->concurrent_phases_end(gch->gc_cause(),
2368                                              gch->prev_gen(_cmsGen)->capacity(),
2369                                              _cmsGen->free());
2370         }
2371 
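      // Note the deliberate fall through from Sweeping into Resizing:
      // sweep() has already advanced _collectorState to Resizing, so the
      // resizing work is done in this same pass through the switch.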
2372       case Resizing: {
2373         // Sweeping has been completed...
2374         // At this point the background collection has completed.
2375         // Don't move the call to compute_new_size() down
2376         // into code that might be executed if the background
2377         // collection was preempted.
2378         {
2379           ReleaseForegroundGC x(this);   // unblock FG collection
2380           MutexLockerEx       y(Heap_lock, Mutex::_no_safepoint_check_flag);
2381           CMSTokenSync        z(true);   // not strictly needed.
2382           if (_collectorState == Resizing) {
2383             compute_new_size();
2384             save_heap_summary();
2385             _collectorState = Resetting;
2386           } else {
2387             assert(_collectorState == Idling, "The state should only change"
2388                    " because the foreground collector has finished the collection");
2389           }
2390         }
2391         break;
2392       }
2393       case Resetting:
2394         // CMS heap resizing has been completed
2395         reset(true);
2396         assert(_collectorState == Idling, "Collector state should "
2397           "have changed");
2398 
2399         MetaspaceGC::set_should_concurrent_collect(false);
2400 
2401         stats().record_cms_end();
2402         // Don't move the concurrent_phases_end() and compute_new_size()
2403         // calls to here because a preempted background collection
        // has its state set to "Resetting".
2405         break;
2406       case Idling:
2407       default:
2408         ShouldNotReachHere();
2409         break;
2410     }
2411     if (TraceCMSState) {
2412       gclog_or_tty->print_cr("  Thread " INTPTR_FORMAT " done - next CMS state %d",
2413         Thread::current(), _collectorState);
2414     }
2415     assert(_foregroundGCShouldWait, "block post-condition");
2416   }
2417 
2418   // Should this be in gc_epilogue?
2419   collector_policy()->counters()->update_counters();
2420 
2421   {
2422     // Clear _foregroundGCShouldWait and, in the event that the
2423     // foreground collector is waiting, notify it, before
2424     // returning.
2425     MutexLockerEx x(CGC_lock, Mutex::_no_safepoint_check_flag);
2426     _foregroundGCShouldWait = false;
2427     if (_foregroundGCIsActive) {
2428       CGC_lock->notify();
2429     }
2430     assert(!ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
2431            "Possible deadlock");
2432   }
2433   if (TraceCMSState) {
2434     gclog_or_tty->print_cr("CMS Thread " INTPTR_FORMAT
2435       " exiting collection CMS state %d",
2436       Thread::current(), _collectorState);
2437   }
2438   if (PrintGC && Verbose) {
2439     _cmsGen->print_heap_change(prev_used);
2440   }
2441 }
2442 
2443 void CMSCollector::register_foreground_gc_start(GCCause::Cause cause) {
2444   if (!_cms_start_registered) {
2445     register_gc_start(cause);
2446   }
2447 }
2448 
2449 void CMSCollector::register_gc_start(GCCause::Cause cause) {
2450   _cms_start_registered = true;
2451   _gc_timer_cm->register_gc_start(os::elapsed_counter());
2452   _gc_tracer_cm->report_gc_start(cause, _gc_timer_cm->gc_start());
2453 }
2454 
2455 void CMSCollector::register_gc_end() {
2456   if (_cms_start_registered) {
2457     report_heap_summary(GCWhen::AfterGC);
2458 
2459     _gc_timer_cm->register_gc_end(os::elapsed_counter());
2460     _gc_tracer_cm->report_gc_end(_gc_timer_cm->gc_end(), _gc_timer_cm->time_partitions());
2461     _cms_start_registered = false;
2462   }
2463 }
2464 
2465 void CMSCollector::save_heap_summary() {
2466   GenCollectedHeap* gch = GenCollectedHeap::heap();
2467   _last_heap_summary = gch->create_heap_summary();
2468   _last_metaspace_summary = gch->create_metaspace_summary();
2469 }
2470 
2471 void CMSCollector::report_heap_summary(GCWhen::Type when) {
2472   _gc_tracer_cm->report_gc_heap_summary(when, _last_heap_summary, _last_metaspace_summary);
2473 }
2474 
2475 void CMSCollector::collect_in_foreground(bool clear_all_soft_refs, GCCause::Cause cause) {
2476   assert(_foregroundGCIsActive && !_foregroundGCShouldWait,
2477          "Foreground collector should be waiting, not executing");
  assert(Thread::current()->is_VM_thread(), "A foreground collection "
2479     "may only be done by the VM Thread with the world stopped");
2480   assert(ConcurrentMarkSweepThread::vm_thread_has_cms_token(),
2481          "VM thread should have CMS token");
2482 
2483   NOT_PRODUCT(GCTraceTime t("CMS:MS (foreground) ", PrintGCDetails && Verbose,
2484     true, NULL);)
2485   if (UseAdaptiveSizePolicy) {
2486     size_policy()->ms_collection_begin();
2487   }
2488   COMPILER2_PRESENT(DerivedPointerTableDeactivate dpt_deact);
2489 
2490   HandleMark hm;  // Discard invalid handles created during verification
2491 
2492   if (VerifyBeforeGC &&
2493       GenCollectedHeap::heap()->total_collections() >= VerifyGCStartAt) {
2494     Universe::verify();
2495   }
2496 
2497   // Snapshot the soft reference policy to be used in this collection cycle.
2498   ref_processor()->setup_policy(clear_all_soft_refs);
2499 
2500   bool init_mark_was_synchronous = false; // until proven otherwise
2501   while (_collectorState != Idling) {
2502     if (TraceCMSState) {
2503       gclog_or_tty->print_cr("Thread " INTPTR_FORMAT " in CMS state %d",
2504         Thread::current(), _collectorState);
2505     }
2506     switch (_collectorState) {
2507       case InitialMarking:
2508         register_foreground_gc_start(cause);
2509         init_mark_was_synchronous = true;  // fact to be exploited in re-mark
2510         checkpointRootsInitial(false);
2511         assert(_collectorState == Marking, "Collector state should have changed"
2512           " within checkpointRootsInitial()");
2513         break;
2514       case Marking:
2515         // initial marking in checkpointRootsInitialWork has been completed
2516         if (VerifyDuringGC &&
2517             GenCollectedHeap::heap()->total_collections() >= VerifyGCStartAt) {
2518           Universe::verify("Verify before initial mark: ");
2519         }
2520         {
2521           bool res = markFromRoots(false);
2522           assert(res && _collectorState == FinalMarking, "Collector state should "
2523             "have changed");
2524           break;
2525         }
2526       case FinalMarking:
2527         if (VerifyDuringGC &&
2528             GenCollectedHeap::heap()->total_collections() >= VerifyGCStartAt) {
2529           Universe::verify("Verify before re-mark: ");
2530         }
2531         checkpointRootsFinal(false, clear_all_soft_refs,
2532                              init_mark_was_synchronous);
2533         assert(_collectorState == Sweeping, "Collector state should not "
2534           "have changed within checkpointRootsFinal()");
2535         break;
2536       case Sweeping:
2537         // final marking in checkpointRootsFinal has been completed
2538         if (VerifyDuringGC &&
2539             GenCollectedHeap::heap()->total_collections() >= VerifyGCStartAt) {
2540           Universe::verify("Verify before sweep: ");
2541         }
2542         sweep(false);
2543         assert(_collectorState == Resizing, "Incorrect state");
2544         break;
2545       case Resizing: {
2546         // Sweeping has been completed; the actual resize in this case
2547         // is done separately; nothing to be done in this state.
2548         _collectorState = Resetting;
2549         break;
2550       }
2551       case Resetting:
2552         // The heap has been resized.
2553         if (VerifyDuringGC &&
2554             GenCollectedHeap::heap()->total_collections() >= VerifyGCStartAt) {
2555           Universe::verify("Verify before reset: ");
2556         }
2557         save_heap_summary();
2558         reset(false);
2559         assert(_collectorState == Idling, "Collector state should "
2560           "have changed");
2561         break;
2562       case Precleaning:
2563       case AbortablePreclean:
2564         // Elide the preclean phase
2565         _collectorState = FinalMarking;
2566         break;
2567       default:
2568         ShouldNotReachHere();
2569     }
2570     if (TraceCMSState) {
2571       gclog_or_tty->print_cr("  Thread " INTPTR_FORMAT " done - next CMS state %d",
2572         Thread::current(), _collectorState);
2573     }
2574   }
2575 
2576   if (UseAdaptiveSizePolicy) {
2577     GenCollectedHeap* gch = GenCollectedHeap::heap();
2578     size_policy()->ms_collection_end(gch->gc_cause());
2579   }
2580 
2581   if (VerifyAfterGC &&
2582       GenCollectedHeap::heap()->total_collections() >= VerifyGCStartAt) {
2583     Universe::verify();
2584   }
2585   if (TraceCMSState) {
2586     gclog_or_tty->print_cr("CMS Thread " INTPTR_FORMAT
2587       " exiting collection CMS state %d",
2588       Thread::current(), _collectorState);
2589   }
2590 }
2591 
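     // Called by the CMS (background) thread while it holds the CMS token.
     // If a foreground collection is pending, give up the token and block
     // until the foreground collector finishes; returns true iff we yielded.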
2592 bool CMSCollector::waitForForegroundGC() {
2593   bool res = false;
2594   assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
2595          "CMS thread should have CMS token");
2596   // Block the foreground collector until the
2597   // background collector decides whether to
2598   // yield.
2599   MutexLockerEx x(CGC_lock, Mutex::_no_safepoint_check_flag);
2600   _foregroundGCShouldWait = true;
2601   if (_foregroundGCIsActive) {
2602     // The background collector yields to the
2603     // foreground collector and returns a value
2604     // indicating that it has yielded.  The foreground
2605     // collector can proceed.
2606     res = true;
2607     _foregroundGCShouldWait = false;
2608     ConcurrentMarkSweepThread::clear_CMS_flag(
2609       ConcurrentMarkSweepThread::CMS_cms_has_token);
2610     ConcurrentMarkSweepThread::set_CMS_flag(
2611       ConcurrentMarkSweepThread::CMS_cms_wants_token);
2612     // Get a possibly blocked foreground thread going
2613     CGC_lock->notify();
2614     if (TraceCMSState) {
2615       gclog_or_tty->print_cr("CMS Thread " INTPTR_FORMAT " waiting at CMS state %d",
2616         Thread::current(), _collectorState);
2617     }
2618     while (_foregroundGCIsActive) {
2619       CGC_lock->wait(Mutex::_no_safepoint_check_flag);
2620     }
2621     ConcurrentMarkSweepThread::set_CMS_flag(
2622       ConcurrentMarkSweepThread::CMS_cms_has_token);
2623     ConcurrentMarkSweepThread::clear_CMS_flag(
2624       ConcurrentMarkSweepThread::CMS_cms_wants_token);
2625   }
2626   if (TraceCMSState) {
2627     gclog_or_tty->print_cr("CMS Thread " INTPTR_FORMAT " continuing at CMS state %d",
2628       Thread::current(), _collectorState);
2629   }
2630   return res;
2631 }
2632 
2633 // Because of the need to lock the free lists and other structures in
2634 // the collector, common to all the generations that the collector is
2635 // collecting, we need the gc_prologues of individual CMS generations to
2636 // delegate to their collector. It may have been simpler had the
2637 // current infrastructure allowed one to call a prologue on a
2638 // collector. In the absence of that we have the generation's
2639 // prologue delegate to the collector, which delegates back
2640 // some "local" work to a worker method in the individual generations
2641 // that it's responsible for collecting, while itself doing any
2642 // work common to all generations it's responsible for. A similar
2643 // comment applies to the gc_epilogue()s.
2644 // The role of the variable _between_prologue_and_epilogue is to
2645 // enforce the invocation protocol.
2646 void CMSCollector::gc_prologue(bool full) {
2647   // Call gc_prologue_work() for the CMSGen
2648   // we are responsible for.
2649 
2650   // The following locking discipline assumes that we are only called
2651   // when the world is stopped.
2652   assert(SafepointSynchronize::is_at_safepoint(), "world is stopped assumption");
2653 
2654   // The CMSCollector prologue must call the gc_prologues for the
2655   // "generations" that it's responsible
2656   // for.
2657 
2658   assert(   Thread::current()->is_VM_thread()
2659          || (   CMSScavengeBeforeRemark
2660              && Thread::current()->is_ConcurrentGC_thread()),
2661          "Incorrect thread type for prologue execution");
2662 
2663   if (_between_prologue_and_epilogue) {
2664     // We have already been invoked; this is a gc_prologue delegation
2665     // from yet another CMS generation that we are responsible for, just
2666     // ignore it since all relevant work has already been done.
2667     return;
2668   }
2669 
2670   // set a bit saying prologue has been called; cleared in epilogue
2671   _between_prologue_and_epilogue = true;
2672   // Claim locks for common data structures, then call gc_prologue_work()
2673   // for each CMSGen.
2674 
2675   getFreelistLocks();   // gets free list locks on constituent spaces
2676   bitMapLock()->lock_without_safepoint_check();
2677 
2678   // Should call gc_prologue_work() for all cms gens we are responsible for
2679   bool duringMarking =    _collectorState >= Marking
2680                          && _collectorState < Sweeping;
2681 
2682   // The young collections clear the modified oops state, which tells if
2683   // there are any modified oops in the class. The remark phase also needs
2684   // that information. Tell the young collection to save the union of all
2685   // modified klasses.
2686   if (duringMarking) {
2687     _ct->klass_rem_set()->set_accumulate_modified_oops(true);
2688   }
2689 
2690   bool registerClosure = duringMarking;
2691 
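       // Choose the closure used to record dirty cards into the mod union
       // table before they are consumed by a younger generation collection;
       // the parallel variant tolerates concurrent updates by multiple GC
       // worker threads.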
2692   ModUnionClosure* muc = CollectedHeap::use_parallel_gc_threads() ?
2693                                                &_modUnionClosurePar
2694                                                : &_modUnionClosure;
2695   _cmsGen->gc_prologue_work(full, registerClosure, muc);
2696 
2697   if (!full) {
2698     stats().record_gc0_begin();
2699   }
2700 }
2701 
2702 void ConcurrentMarkSweepGeneration::gc_prologue(bool full) {
2703 
2704   _capacity_at_prologue = capacity();
2705   _used_at_prologue = used();
2706 
2707   // Delegate to CMScollector which knows how to coordinate between
2708   // this and any other CMS generations that it is responsible for
2709   // collecting.
2710   collector()->gc_prologue(full);
2711 }
2712 
2713 // This is a "private" interface for use by this generation's CMSCollector.
2714 // Not to be called directly by any other entity (for instance,
2715 // GenCollectedHeap, which calls the "public" gc_prologue method above).
2716 void ConcurrentMarkSweepGeneration::gc_prologue_work(bool full,
2717   bool registerClosure, ModUnionClosure* modUnionClosure) {
2718   assert(!incremental_collection_failed(), "Shouldn't be set yet");
2719   assert(cmsSpace()->preconsumptionDirtyCardClosure() == NULL,
2720     "Should be NULL");
2721   if (registerClosure) {
2722     cmsSpace()->setPreconsumptionDirtyCardClosure(modUnionClosure);
2723   }
2724   cmsSpace()->gc_prologue();
2725   // Clear stat counters
2726   NOT_PRODUCT(
2727     assert(_numObjectsPromoted == 0, "check");
2728     assert(_numWordsPromoted   == 0, "check");
2729     if (Verbose && PrintGC) {
2730       gclog_or_tty->print("Allocated "SIZE_FORMAT" objects, "
2731                           SIZE_FORMAT" bytes concurrently",
2732       _numObjectsAllocated, _numWordsAllocated*sizeof(HeapWord));
2733     }
2734     _numObjectsAllocated = 0;
2735     _numWordsAllocated   = 0;
2736   )
2737 }
2738 
2739 void CMSCollector::gc_epilogue(bool full) {
2740   // The following locking discipline assumes that we are only called
2741   // when the world is stopped.
2742   assert(SafepointSynchronize::is_at_safepoint(),
2743          "world is stopped assumption");
2744 
2745   // Currently the CMS epilogue (see CompactibleFreeListSpace) merely checks
2746   // if linear allocation blocks need to be appropriately marked to allow
2747   // the blocks to be parsable. We also check here whether we need to nudge the
2748   // CMS collector thread to start a new cycle (if it's not already active).
2749   assert(   Thread::current()->is_VM_thread()
2750          || (   CMSScavengeBeforeRemark
2751              && Thread::current()->is_ConcurrentGC_thread()),
2752          "Incorrect thread type for epilogue execution");
2753 
2754   if (!_between_prologue_and_epilogue) {
2755     // We have already been invoked; this is a gc_epilogue delegation
2756     // from yet another CMS generation that we are responsible for, just
2757     // ignore it since all relevant work has already been done.
2758     return;
2759   }
2760   assert(haveFreelistLocks(), "must have freelist locks");
2761   assert_lock_strong(bitMapLock());
2762 
2763   _ct->klass_rem_set()->set_accumulate_modified_oops(false);
2764 
2765   _cmsGen->gc_epilogue_work(full);
2766 
2767   if (_collectorState == AbortablePreclean || _collectorState == Precleaning) {
2768     // in case sampling was not already enabled, enable it
2769     _start_sampling = true;
2770   }
2771   // reset _eden_chunk_array so sampling starts afresh
2772   _eden_chunk_index = 0;
2773 
2774   size_t cms_used   = _cmsGen->cmsSpace()->used();
2775 
2776   // update performance counters - this uses a special version of
2777   // update_counters() that allows the utilization to be passed as a
2778   // parameter, avoiding multiple calls to used().
2779   //
2780   _cmsGen->update_counters(cms_used);
2781 
2782   if (CMSIncrementalMode) {
2783     icms_update_allocation_limits();
2784   }
2785 
2786   bitMapLock()->unlock();
2787   releaseFreelistLocks();
2788 
2789   if (!CleanChunkPoolAsync) {
2790     Chunk::clean_chunk_pool();
2791   }
2792 
2793   set_did_compact(false);
2794   _between_prologue_and_epilogue = false;  // ready for next cycle
2795 }
2796 
2797 void ConcurrentMarkSweepGeneration::gc_epilogue(bool full) {
2798   collector()->gc_epilogue(full);
2799 
2800   // Also reset promotion tracking in par gc thread states.
2801   if (CollectedHeap::use_parallel_gc_threads()) {
2802     for (uint i = 0; i < ParallelGCThreads; i++) {
2803       _par_gc_thread_states[i]->promo.stopTrackingPromotions(i);
2804     }
2805   }
2806 }
2807 
2808 void ConcurrentMarkSweepGeneration::gc_epilogue_work(bool full) {
2809   assert(!incremental_collection_failed(), "Should have been cleared");
2810   cmsSpace()->setPreconsumptionDirtyCardClosure(NULL);
2811   cmsSpace()->gc_epilogue();
2812   // Print stat counters
2813   NOT_PRODUCT(
2814     assert(_numObjectsAllocated == 0, "check");
2815     assert(_numWordsAllocated == 0, "check");
2816     if (Verbose && PrintGC) {
2817       gclog_or_tty->print("Promoted "SIZE_FORMAT" objects, "
2818                           SIZE_FORMAT" bytes",
2819                  _numObjectsPromoted, _numWordsPromoted*sizeof(HeapWord));
2820     }
2821     _numObjectsPromoted = 0;
2822     _numWordsPromoted   = 0;
2823   )
2824 
2825   if (PrintGC && Verbose) {
2826     // The call down the chain in contiguous_available() needs the
2827     // freelistLock, so print this out before releasing the freelistLock.
2828     gclog_or_tty->print(" Contiguous available "SIZE_FORMAT" bytes ",
2829                         contiguous_available());
2830   }
2831 }
2832 
2833 #ifndef PRODUCT
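     // Debug-only: does the current thread hold the CMS token, either
     // directly or, for a GC worker thread, indirectly via the VM thread
     // together with ParGCRareEvent_lock?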
2834 bool CMSCollector::have_cms_token() {
2835   Thread* thr = Thread::current();
2836   if (thr->is_VM_thread()) {
2837     return ConcurrentMarkSweepThread::vm_thread_has_cms_token();
2838   } else if (thr->is_ConcurrentGC_thread()) {
2839     return ConcurrentMarkSweepThread::cms_thread_has_cms_token();
2840   } else if (thr->is_GC_task_thread()) {
2841     return ConcurrentMarkSweepThread::vm_thread_has_cms_token() &&
2842            ParGCRareEvent_lock->owned_by_self();
2843   }
2844   return false;
2845 }
2846 #endif
2847 
2848 // Check reachability of the given heap address in CMS generation,
2849 // treating all other generations as roots.
2850 bool CMSCollector::is_cms_reachable(HeapWord* addr) {
2851   // We could "guarantee" below, rather than assert, but I'll
2852   // leave these as "asserts" so that an adventurous debugger
2853   // could try this in the product build if some subset of
2854   // the conditions were met, provided they were interested in the
2855   // results and knew that the computation below wouldn't interfere
2856   // with other concurrent computations mutating the structures
2857   // being read or written.
2858   assert(SafepointSynchronize::is_at_safepoint(),
2859          "Else mutations in object graph will make answer suspect");
2860   assert(have_cms_token(), "Should hold cms token");
2861   assert(haveFreelistLocks(), "must hold free list locks");
2862   assert_lock_strong(bitMapLock());
2863 
2864   // Clear the marking bit map array before starting, but, just
2865   // for kicks, first report if the given address is already marked
2866   gclog_or_tty->print_cr("Start: Address 0x%x is%s marked", addr,
2867                 _markBitMap.isMarked(addr) ? "" : " not");
2868 
2869   if (verify_after_remark()) {
2870     MutexLockerEx x(verification_mark_bm()->lock(), Mutex::_no_safepoint_check_flag);
2871     bool result = verification_mark_bm()->isMarked(addr);
2872     gclog_or_tty->print_cr("TransitiveMark: Address 0x%x %s marked", addr,
2873                            result ? "IS" : "is NOT");
2874     return result;
2875   } else {
2876     gclog_or_tty->print_cr("Could not compute result");
2877     return false;
2878   }
2879 }
2880 
2881 
2882 void
2883 CMSCollector::print_on_error(outputStream* st) {
2884   CMSCollector* collector = ConcurrentMarkSweepGeneration::_collector;
2885   if (collector != NULL) {
2886     CMSBitMap* bitmap = &collector->_markBitMap;
2887     st->print_cr("Marking Bits: (CMSBitMap*) " PTR_FORMAT, bitmap);
2888     bitmap->print_on_error(st, " Bits: ");
2889 
2890     st->cr();
2891 
2892     CMSBitMap* mut_bitmap = &collector->_modUnionTable;
2893     st->print_cr("Mod Union Table: (CMSBitMap*) " PTR_FORMAT, mut_bitmap);
2894     mut_bitmap->print_on_error(st, " Bits: ");
2895   }
2896 }
2897 
2898 ////////////////////////////////////////////////////////
2899 // CMS Verification Support
2900 ////////////////////////////////////////////////////////
2901 // Following the remark phase, the following invariant
2902 // should hold -- each object in the CMS heap which is
2903 // marked in markBitMap() should be marked in the verification_mark_bm().
2904 
2905 class VerifyMarkedClosure: public BitMapClosure {
2906   CMSBitMap* _marks;
2907   bool       _failed;
2908 
2909  public:
2910   VerifyMarkedClosure(CMSBitMap* bm): _marks(bm), _failed(false) {}
2911 
2912   bool do_bit(size_t offset) {
2913     HeapWord* addr = _marks->offsetToHeapWord(offset);
2914     if (!_marks->isMarked(addr)) {
2915       oop(addr)->print_on(gclog_or_tty);
2916       gclog_or_tty->print_cr(" ("INTPTR_FORMAT" should have been marked)", addr);
2917       _failed = true;
2918     }
2919     return true;
2920   }
2921 
2922   bool failed() { return _failed; }
2923 };
2924 
2925 bool CMSCollector::verify_after_remark(bool silent) {
2926   if (!silent) gclog_or_tty->print(" [Verifying CMS Marking... ");
2927   MutexLockerEx ml(verification_mark_bm()->lock(), Mutex::_no_safepoint_check_flag);
2928   static bool init = false;
2929 
2930   assert(SafepointSynchronize::is_at_safepoint(),
2931          "Else mutations in object graph will make answer suspect");
2932   assert(have_cms_token(),
2933          "Else there may be mutual interference in use of "
2934          "verification data structures");
2935   assert(_collectorState > Marking && _collectorState <= Sweeping,
2936          "Else marking info checked here may be obsolete");
2937   assert(haveFreelistLocks(), "must hold free list locks");
2938   assert_lock_strong(bitMapLock());
2939 
2940 
2941   // Allocate marking bit map if not already allocated
2942   if (!init) { // first time
2943     if (!verification_mark_bm()->allocate(_span)) {
2944       return false;
2945     }
2946     init = true;
2947   }
2948 
2949   assert(verification_mark_stack()->isEmpty(), "Should be empty");
2950 
2951   // Turn off refs discovery -- so we will be tracing through refs.
2952   // This is as intended, because by this time
2953   // GC must already have cleared any refs that need to be cleared,
2954   // and traced those that need to be marked; moreover,
2955   // the marking done here is not going to interfere in any
2956   // way with the marking information used by GC.
2957   NoRefDiscovery no_discovery(ref_processor());
2958 
2959   COMPILER2_PRESENT(DerivedPointerTableDeactivate dpt_deact;)
2960 
2961   // Clear any marks from a previous round
2962   verification_mark_bm()->clear_all();
2963   assert(verification_mark_stack()->isEmpty(), "markStack should be empty");
2964   verify_work_stacks_empty();
2965 
2966   GenCollectedHeap* gch = GenCollectedHeap::heap();
2967   gch->ensure_parsability(false);  // fill TLABs, but no need to retire them
2968   // Update the saved marks which may affect the root scans.
2969   gch->save_marks();
2970 
2971   if (CMSRemarkVerifyVariant == 1) {
2972     // In this first variant of verification, we complete
2973     // all marking, then check if the new marks-vector is
2974     // a subset of the CMS marks-vector.
2975     verify_after_remark_work_1();
2976   } else if (CMSRemarkVerifyVariant == 2) {
2977     // In this second variant of verification, we flag an error
2978     // (i.e. an object reachable in the new marks-vector not reachable
2979     // in the CMS marks-vector) immediately, also indicating the
2980     // identity of an object (A) that references the unmarked object (B) --
2981     // presumably, a mutation to A failed to be picked up by preclean/remark?
2982     verify_after_remark_work_2();
2983   } else {
2984     warning("Unrecognized value %d for CMSRemarkVerifyVariant",
2985             CMSRemarkVerifyVariant);
2986   }
2987   if (!silent) gclog_or_tty->print(" done] ");
2988   return true;
2989 }
2990 
2991 void CMSCollector::verify_after_remark_work_1() {
2992   ResourceMark rm;
2993   HandleMark  hm;
2994   GenCollectedHeap* gch = GenCollectedHeap::heap();
2995 
2996   // Get a clear set of claim bits for the strong roots processing to work with.
2997   ClassLoaderDataGraph::clear_claimed_marks();
2998 
2999   // Mark from roots one level into CMS
3000   MarkRefsIntoClosure notOlder(_span, verification_mark_bm());
3001   gch->rem_set()->prepare_for_younger_refs_iterate(false); // Not parallel.
3002 
3003   gch->gen_process_strong_roots(_cmsGen->level(),
3004                                 true,   // younger gens are roots
3005                                 true,   // activate StrongRootsScope
3006                                 false,  // not scavenging
3007                                 SharedHeap::ScanningOption(roots_scanning_options()),
3008                                 &notOlder,
3009                                 true,   // walk code active on stacks
3010                                 NULL,
3011                                 NULL); // SSS: Provide correct closure
3012 
3013   // Now mark from the roots
3014   MarkFromRootsClosure markFromRootsClosure(this, _span,
3015     verification_mark_bm(), verification_mark_stack(),
3016     false /* don't yield */, true /* verifying */);
3017   assert(_restart_addr == NULL, "Expected pre-condition");
3018   verification_mark_bm()->iterate(&markFromRootsClosure);
3019   while (_restart_addr != NULL) {
3020     // Deal with stack overflow: by restarting at the indicated
3021     // address.
3022     HeapWord* ra = _restart_addr;
3023     markFromRootsClosure.reset(ra);
3024     _restart_addr = NULL;
3025     verification_mark_bm()->iterate(&markFromRootsClosure, ra, _span.end());
3026   }
3027   assert(verification_mark_stack()->isEmpty(), "Should have been drained");
3028   verify_work_stacks_empty();
3029 
3030   // Marking completed -- now verify that each bit marked in
3031   // verification_mark_bm() is also marked in markBitMap(); flag all
3032   // errors by printing corresponding objects.
3033   VerifyMarkedClosure vcl(markBitMap());
3034   verification_mark_bm()->iterate(&vcl);
3035   if (vcl.failed()) {
3036     gclog_or_tty->print("Verification failed");
3037     Universe::heap()->print_on(gclog_or_tty);
3038     fatal("CMS: failed marking verification after remark");
3039   }
3040 }
3041 
3042 class VerifyKlassOopsKlassClosure : public KlassClosure {
3043   class VerifyKlassOopsClosure : public OopClosure {
3044     CMSBitMap* _bitmap;
3045    public:
3046     VerifyKlassOopsClosure(CMSBitMap* bitmap) : _bitmap(bitmap) { }
3047     void do_oop(oop* p)       { guarantee(*p == NULL || _bitmap->isMarked((HeapWord*) *p), "Should be marked"); }
3048     void do_oop(narrowOop* p) { ShouldNotReachHere(); }
3049   } _oop_closure;
3050  public:
3051   VerifyKlassOopsKlassClosure(CMSBitMap* bitmap) : _oop_closure(bitmap) {}
3052   void do_klass(Klass* k) {
3053     k->oops_do(&_oop_closure);
3054   }
3055 };
3056 
3057 void CMSCollector::verify_after_remark_work_2() {
3058   ResourceMark rm;
3059   HandleMark  hm;
3060   GenCollectedHeap* gch = GenCollectedHeap::heap();
3061 
3062   // Get a clear set of claim bits for the strong roots processing to work with.
3063   ClassLoaderDataGraph::clear_claimed_marks();
3064 
3065   // Mark from roots one level into CMS
3066   MarkRefsIntoVerifyClosure notOlder(_span, verification_mark_bm(),
3067                                      markBitMap());
3068   CMKlassClosure klass_closure(&notOlder);
3069 
3070   gch->rem_set()->prepare_for_younger_refs_iterate(false); // Not parallel.
3071   gch->gen_process_strong_roots(_cmsGen->level(),
3072                                 true,   // younger gens are roots
3073                                 true,   // activate StrongRootsScope
3074                                 false,  // not scavenging
3075                                 SharedHeap::ScanningOption(roots_scanning_options()),
3076                                 &notOlder,
3077                                 true,   // walk code active on stacks
3078                                 NULL,
3079                                 &klass_closure);
3080 
3081   // Now mark from the roots
3082   MarkFromRootsVerifyClosure markFromRootsClosure(this, _span,
3083     verification_mark_bm(), markBitMap(), verification_mark_stack());
3084   assert(_restart_addr == NULL, "Expected pre-condition");
3085   verification_mark_bm()->iterate(&markFromRootsClosure);
3086   while (_restart_addr != NULL) {
3087     // Deal with stack overflow: by restarting at the indicated
3088     // address.
3089     HeapWord* ra = _restart_addr;
3090     markFromRootsClosure.reset(ra);
3091     _restart_addr = NULL;
3092     verification_mark_bm()->iterate(&markFromRootsClosure, ra, _span.end());
3093   }
3094   assert(verification_mark_stack()->isEmpty(), "Should have been drained");
3095   verify_work_stacks_empty();
3096 
3097   VerifyKlassOopsKlassClosure verify_klass_oops(verification_mark_bm());
3098   ClassLoaderDataGraph::classes_do(&verify_klass_oops);
3099 
3100   // Marking completed -- now verify that each bit marked in
3101   // verification_mark_bm() is also marked in markBitMap(); flag all
3102   // errors by printing corresponding objects.
3103   VerifyMarkedClosure vcl(markBitMap());
3104   verification_mark_bm()->iterate(&vcl);
3105   assert(!vcl.failed(), "Else verification above should not have succeeded");
3106 }
3107 
3108 void ConcurrentMarkSweepGeneration::save_marks() {
3109   // delegate to CMS space
3110   cmsSpace()->save_marks();
3111   for (uint i = 0; i < ParallelGCThreads; i++) {
3112     _par_gc_thread_states[i]->promo.startTrackingPromotions();
3113   }
3114 }
3115 
3116 bool ConcurrentMarkSweepGeneration::no_allocs_since_save_marks() {
3117   return cmsSpace()->no_allocs_since_save_marks();
3118 }
3119 
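     // Expands into the oop_since_save_marks_iterate##nv_suffix definitions:
     // each delegates to the CMS space and then advances the saved marks so
     // that a later call visits only objects allocated or promoted since
     // this one.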
3120 #define CMS_SINCE_SAVE_MARKS_DEFN(OopClosureType, nv_suffix)    \
3121                                                                 \
3122 void ConcurrentMarkSweepGeneration::                            \
3123 oop_since_save_marks_iterate##nv_suffix(OopClosureType* cl) {   \
3124   cl->set_generation(this);                                     \
3125   cmsSpace()->oop_since_save_marks_iterate##nv_suffix(cl);      \
3126   cl->reset_generation();                                       \
3127   save_marks();                                                 \
3128 }
3129 
3130 ALL_SINCE_SAVE_MARKS_CLOSURES(CMS_SINCE_SAVE_MARKS_DEFN)
3131 
3132 void
3133 ConcurrentMarkSweepGeneration::object_iterate_since_last_GC(ObjectClosure* blk)
3134 {
3135   // Not currently implemented; need to do the following. -- ysr.
3136   // dld -- I think that is used for some sort of allocation profiler.  So it
3137   // really means the objects allocated by the mutator since the last
3138   // GC.  We could potentially implement this cheaply by recording only
3139   // the direct allocations in a side data structure.
3140   //
3141   // I think we probably ought not to be required to support these
3142   // iterations at any arbitrary point; I think there ought to be some
3143   // call to enable/disable allocation profiling in a generation/space,
3144   // and the iterator ought to return the objects allocated in the
3145   // gen/space since the enable call, or the last iterator call (which
3146   // will probably be at a GC.)  That way, for gens like CM&S that would
3147   // require some extra data structure to support this, we only pay the
3148   // cost when it's in use...
3149   cmsSpace()->object_iterate_since_last_GC(blk);
3150 }
3151 
3152 void
3153 ConcurrentMarkSweepGeneration::younger_refs_iterate(OopsInGenClosure* cl) {
3154   cl->set_generation(this);
3155   younger_refs_in_space_iterate(_cmsSpace, cl);
3156   cl->reset_generation();
3157 }
3158 
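     // The iteration methods below take the free list lock unless the caller
     // already holds it, so they can be used both from within the collector's
     // own locking protocol and from outside it.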
3159 void
3160 ConcurrentMarkSweepGeneration::oop_iterate(MemRegion mr, ExtendedOopClosure* cl) {
3161   if (freelistLock()->owned_by_self()) {
3162     Generation::oop_iterate(mr, cl);
3163   } else {
3164     MutexLockerEx x(freelistLock(), Mutex::_no_safepoint_check_flag);
3165     Generation::oop_iterate(mr, cl);
3166   }
3167 }
3168 
3169 void
3170 ConcurrentMarkSweepGeneration::oop_iterate(ExtendedOopClosure* cl) {
3171   if (freelistLock()->owned_by_self()) {
3172     Generation::oop_iterate(cl);
3173   } else {
3174     MutexLockerEx x(freelistLock(), Mutex::_no_safepoint_check_flag);
3175     Generation::oop_iterate(cl);
3176   }
3177 }
3178 
3179 void
3180 ConcurrentMarkSweepGeneration::object_iterate(ObjectClosure* cl) {
3181   if (freelistLock()->owned_by_self()) {
3182     Generation::object_iterate(cl);
3183   } else {
3184     MutexLockerEx x(freelistLock(), Mutex::_no_safepoint_check_flag);
3185     Generation::object_iterate(cl);
3186   }
3187 }
3188 
3189 void
3190 ConcurrentMarkSweepGeneration::safe_object_iterate(ObjectClosure* cl) {
3191   if (freelistLock()->owned_by_self()) {
3192     Generation::safe_object_iterate(cl);
3193   } else {
3194     MutexLockerEx x(freelistLock(), Mutex::_no_safepoint_check_flag);
3195     Generation::safe_object_iterate(cl);
3196   }
3197 }
3198 
3199 void
3200 ConcurrentMarkSweepGeneration::post_compact() {
3201 }
3202 
3203 void
3204 ConcurrentMarkSweepGeneration::prepare_for_verify() {
3205   // Fix the linear allocation blocks to look like free blocks.
3206 
3207   // Locks are normally acquired/released in gc_prologue/gc_epilogue, but those
3208   // are not called when the heap is verified during universe initialization and
3209   // at vm shutdown.
3210   if (freelistLock()->owned_by_self()) {
3211     cmsSpace()->prepare_for_verify();
3212   } else {
3213     MutexLockerEx fll(freelistLock(), Mutex::_no_safepoint_check_flag);
3214     cmsSpace()->prepare_for_verify();
3215   }
3216 }
3217 
3218 void
3219 ConcurrentMarkSweepGeneration::verify() {
3220   // Locks are normally acquired/released in gc_prologue/gc_epilogue, but those
3221   // are not called when the heap is verified during universe initialization and
3222   // at vm shutdown.
3223   if (freelistLock()->owned_by_self()) {
3224     cmsSpace()->verify();
3225   } else {
3226     MutexLockerEx fll(freelistLock(), Mutex::_no_safepoint_check_flag);
3227     cmsSpace()->verify();
3228   }
3229 }
3230 
3231 void CMSCollector::verify() {
3232   _cmsGen->verify();
3233 }
3234 
3235 #ifndef PRODUCT
3236 bool CMSCollector::overflow_list_is_empty() const {
3237   assert(_num_par_pushes >= 0, "Inconsistency");
3238   if (_overflow_list == NULL) {
3239     assert(_num_par_pushes == 0, "Inconsistency");
3240   }
3241   return _overflow_list == NULL;
3242 }
3243 
3244 // The methods verify_work_stacks_empty() and verify_overflow_empty()
3245 // merely consolidate assertion checks that appear to occur together frequently.
3246 void CMSCollector::verify_work_stacks_empty() const {
3247   assert(_markStack.isEmpty(), "Marking stack should be empty");
3248   assert(overflow_list_is_empty(), "Overflow list should be empty");
3249 }
3250 
3251 void CMSCollector::verify_overflow_empty() const {
3252   assert(overflow_list_is_empty(), "Overflow list should be empty");
3253   assert(no_preserved_marks(), "No preserved marks");
3254 }
3255 #endif // PRODUCT
3256 
3257 // Decide if we want to enable class unloading as part of the
3258 // ensuing concurrent GC cycle. We will collect and
3259 // unload classes if it's the case that:
3260 // (1) an explicit gc request has been made and the flag
3261 //     ExplicitGCInvokesConcurrentAndUnloadsClasses is set, OR
3262 // (2) (a) class unloading is enabled at the command line, and
3263 //     (b) old gen is getting really full
3264 // NOTE: Provided there is no change in the state of the heap between
3265 // calls to this method, it should have idempotent results. Moreover,
3266 // its results should be monotonically increasing (i.e. going from 0 to 1,
3267 // but not 1 to 0) between successive calls between which the heap was
3268 // not collected. For the implementation below, it must thus rely on
3269 // the property that concurrent_cycles_since_last_unload()
3270 // will not decrease unless a collection cycle happened and that
3271 // _cmsGen->is_too_full() is
3272 // itself also monotonic in that sense. See check_monotonicity()
3273 // below.
3274 void CMSCollector::update_should_unload_classes() {
3275   _should_unload_classes = false;
3276   // Condition 1 above
3277   if (_full_gc_requested && ExplicitGCInvokesConcurrentAndUnloadsClasses) {
3278     _should_unload_classes = true;
3279   } else if (CMSClassUnloadingEnabled) { // Condition 2.a above
3280     // Disjuncts 2.b.(i,ii,iii) above
3281     _should_unload_classes = (concurrent_cycles_since_last_unload() >=
3282                               CMSClassUnloadingMaxInterval)
3283                            || _cmsGen->is_too_full();
3284   }
3285 }
3286 
3287 bool ConcurrentMarkSweepGeneration::is_too_full() const {
3288   bool res = should_concurrent_collect();
3289   res = res && (occupancy() > (double)CMSIsTooFullPercentage/100.0);
3290   return res;
3291 }
3292 
3293 void CMSCollector::setup_cms_unloading_and_verification_state() {
3294   const  bool should_verify =   VerifyBeforeGC || VerifyAfterGC || VerifyDuringGC
3295                              || VerifyBeforeExit;
3296   const  int  rso           =   SharedHeap::SO_Strings | SharedHeap::SO_CodeCache;
3297 
3298   if (should_unload_classes()) {   // Should unload classes this cycle
3299     remove_root_scanning_option(rso);  // Shrink the root set appropriately
3300     set_verifying(should_verify);    // Set verification state for this cycle
3301     return;                            // Nothing else needs to be done at this time
3302   }
3303 
3304   // Not unloading classes this cycle
3305   assert(!should_unload_classes(), "Inconsistency!");
3306   if ((!verifying() || unloaded_classes_last_cycle()) && should_verify) {
3307     // Include symbols, strings and code cache elements to prevent their resurrection.
3308     add_root_scanning_option(rso);
3309     set_verifying(true);
3310   } else if (verifying() && !should_verify) {
3311     // We were verifying, but some verification flags got disabled.
3312     set_verifying(false);
3313     // Exclude symbols, strings and code cache elements from root scanning to
3314     // reduce IM and RM pauses.
3315     remove_root_scanning_option(rso);
3316   }
3317 }
3318 
3319 
3320 #ifndef PRODUCT
3321 HeapWord* CMSCollector::block_start(const void* p) const {
3322   const HeapWord* addr = (HeapWord*)p;
3323   if (_span.contains(p)) {
3324     if (_cmsGen->cmsSpace()->is_in_reserved(addr)) {
3325       return _cmsGen->cmsSpace()->block_start(p);
3326     }
3327   }
3328   return NULL;
3329 }
3330 #endif
3331 
3332 HeapWord*
3333 ConcurrentMarkSweepGeneration::expand_and_allocate(size_t word_size,
3334                                                    bool   tlab,
3335                                                    bool   parallel) {
3336   CMSSynchronousYieldRequest yr;
3337   assert(!tlab, "Can't deal with TLAB allocation");
3338   MutexLockerEx x(freelistLock(), Mutex::_no_safepoint_check_flag);
3339   expand(word_size*HeapWordSize, MinHeapDeltaBytes,
3340     CMSExpansionCause::_satisfy_allocation);
3341   if (GCExpandToAllocateDelayMillis > 0) {
3342     os::sleep(Thread::current(), GCExpandToAllocateDelayMillis, false);
3343   }
3344   return have_lock_and_allocate(word_size, tlab);
3345 }
3346 
3347 // YSR: All of this generation expansion/shrinking stuff is an exact copy of
3348 // OneContigSpaceCardGeneration, which makes me wonder if we should move this
3349 // to CardGeneration and share it...
3350 bool ConcurrentMarkSweepGeneration::expand(size_t bytes, size_t expand_bytes) {
3351   return CardGeneration::expand(bytes, expand_bytes);
3352 }
3353 
3354 void ConcurrentMarkSweepGeneration::expand(size_t bytes, size_t expand_bytes,
3355   CMSExpansionCause::Cause cause)
3356 {
3357 
3358   bool success = expand(bytes, expand_bytes);
3359 
3360   // remember why we expanded; this information is used
3361   // by shouldConcurrentCollect() when making decisions on whether to start
3362   // a new CMS cycle.
3363   if (success) {
3364     set_expansion_cause(cause);
3365     if (PrintGCDetails && Verbose) {
3366       gclog_or_tty->print_cr("Expanded CMS gen for %s",
3367         CMSExpansionCause::to_string(cause));
3368     }
3369   }
3370 }
3371 
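     // Refill a parallel promotion LAB, expanding the generation if needed.
     // The loop retries because a competing thread may consume the space
     // exposed by our expansion before we manage to allocate from it.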
3372 HeapWord* ConcurrentMarkSweepGeneration::expand_and_par_lab_allocate(CMSParGCThreadState* ps, size_t word_sz) {
3373   HeapWord* res = NULL;
3374   MutexLocker x(ParGCRareEvent_lock);
3375   while (true) {
3376     // Expansion by some other thread might make alloc OK now:
3377     res = ps->lab.alloc(word_sz);
3378     if (res != NULL) return res;
3379     // If there's not enough expansion space available, give up.
3380     if (_virtual_space.uncommitted_size() < (word_sz * HeapWordSize)) {
3381       return NULL;
3382     }
3383     // Otherwise, we try expansion.
3384     expand(word_sz*HeapWordSize, MinHeapDeltaBytes,
3385       CMSExpansionCause::_allocate_par_lab);
3386     // Now go around the loop and try alloc again;
3387     // A competing par_promote might beat us to the expansion space,
3388     // so we may go around the loop again if promotion fails again.
3389     if (GCExpandToAllocateDelayMillis > 0) {
3390       os::sleep(Thread::current(), GCExpandToAllocateDelayMillis, false);
3391     }
3392   }
3393 }
3394 
3395 
3396 bool ConcurrentMarkSweepGeneration::expand_and_ensure_spooling_space(
3397   PromotionInfo* promo) {
3398   MutexLocker x(ParGCRareEvent_lock);
3399   size_t refill_size_bytes = promo->refillSize() * HeapWordSize;
3400   while (true) {
3401     // Expansion by some other thread might make alloc OK now:
3402     if (promo->ensure_spooling_space()) {
3403       assert(promo->has_spooling_space(),
3404              "Post-condition of successful ensure_spooling_space()");
3405       return true;
3406     }
3407     // If there's not enough expansion space available, give up.
3408     if (_virtual_space.uncommitted_size() < refill_size_bytes) {
3409       return false;
3410     }
3411     // Otherwise, we try expansion.
3412     expand(refill_size_bytes, MinHeapDeltaBytes,
3413       CMSExpansionCause::_allocate_par_spooling_space);
3414     // Now go around the loop and try alloc again;
3415     // A competing allocation might beat us to the expansion space,
3416     // so we may go around the loop again if allocation fails again.
3417     if (GCExpandToAllocateDelayMillis > 0) {
3418       os::sleep(Thread::current(), GCExpandToAllocateDelayMillis, false);
3419     }
3420   }
3421 }
3422 
3423 
3424 void ConcurrentMarkSweepGeneration::shrink_by(size_t bytes) {
3425   assert_locked_or_safepoint(ExpandHeap_lock);
3426   // Shrink committed space
3427   _virtual_space.shrink_by(bytes);
3428   // Shrink space; this also shrinks the space's BOT
3429   _cmsSpace->set_end((HeapWord*) _virtual_space.high());
3430   size_t new_word_size = heap_word_size(_cmsSpace->capacity());
3431   // Shrink the shared block offset array
3432   _bts->resize(new_word_size);
3433   MemRegion mr(_cmsSpace->bottom(), new_word_size);
3434   // Shrink the card table
3435   Universe::heap()->barrier_set()->resize_covered_region(mr);
3436 
3437   if (Verbose && PrintGC) {
3438     size_t new_mem_size = _virtual_space.committed_size();
3439     size_t old_mem_size = new_mem_size + bytes;
3440     gclog_or_tty->print_cr("Shrinking %s from " SIZE_FORMAT "K to " SIZE_FORMAT "K",
3441                   name(), old_mem_size/K, new_mem_size/K);
3442   }
3443 }
3444 
3445 void ConcurrentMarkSweepGeneration::shrink(size_t bytes) {
3446   assert_locked_or_safepoint(Heap_lock);
3447   size_t size = ReservedSpace::page_align_size_down(bytes);
3448   if (size > 0) {
3449     shrink_by(size);
3450   }
3451 }
3452 
3453 bool ConcurrentMarkSweepGeneration::grow_by(size_t bytes) {
3454   assert_locked_or_safepoint(Heap_lock);
3455   bool result = _virtual_space.expand_by(bytes);
3456   if (result) {
3457     size_t new_word_size =
3458       heap_word_size(_virtual_space.committed_size());
3459     MemRegion mr(_cmsSpace->bottom(), new_word_size);
3460     _bts->resize(new_word_size);  // resize the block offset shared array
3461     Universe::heap()->barrier_set()->resize_covered_region(mr);
3462     // Hmmmm... why doesn't CFLS::set_end verify locking?
3463     // This is quite ugly; FIX ME XXX
3464     _cmsSpace->assert_locked(freelistLock());
3465     _cmsSpace->set_end((HeapWord*)_virtual_space.high());
3466 
3467     // update the space and generation capacity counters
3468     if (UsePerfData) {
3469       _space_counters->update_capacity();
3470       _gen_counters->update_all();
3471     }
3472 
3473     if (Verbose && PrintGC) {
3474       size_t new_mem_size = _virtual_space.committed_size();
3475       size_t old_mem_size = new_mem_size - bytes;
3476       gclog_or_tty->print_cr("Expanding %s from " SIZE_FORMAT "K by " SIZE_FORMAT "K to " SIZE_FORMAT "K",
3477                     name(), old_mem_size/K, bytes/K, new_mem_size/K);
3478     }
3479   }
3480   return result;
3481 }
3482 
3483 bool ConcurrentMarkSweepGeneration::grow_to_reserved() {
3484   assert_locked_or_safepoint(Heap_lock);
3485   bool success = true;
3486   const size_t remaining_bytes = _virtual_space.uncommitted_size();
3487   if (remaining_bytes > 0) {
3488     success = grow_by(remaining_bytes);
3489     DEBUG_ONLY(if (!success) warning("grow to reserved failed");)
3490   }
3491   return success;
3492 }
3493 
3494 void ConcurrentMarkSweepGeneration::shrink_free_list_by(size_t bytes) {
3495   assert_locked_or_safepoint(Heap_lock);
3496   assert_lock_strong(freelistLock());
3497   if (PrintGCDetails && Verbose) {
3498     warning("Shrinking of CMS not yet implemented");
3499   }
3500   return;
3501 }
3502 
3503 
3504 // Simple ctor/dtor wrapper for accounting & timer chores around concurrent
3505 // phases.
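     //
     // Typical usage, as in the concurrent phases further below:
     //
     //   CMSPhaseAccounting pa(this, "mark", !PrintGCDetails);
     //   ... do the concurrent work ...
     //
     // With PrintGCDetails enabled, the constructor and destructor emit
     // lines of the form [CMS-concurrent-mark-start] and
     // [CMS-concurrent-mark: <cpu>/<wall> secs] respectively.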
3506 class CMSPhaseAccounting: public StackObj {
3507  public:
3508   CMSPhaseAccounting(CMSCollector *collector,
3509                      const char *phase,
3510                      bool print_cr = true);
3511   ~CMSPhaseAccounting();
3512 
3513  private:
3514   CMSCollector *_collector;
3515   const char *_phase;
3516   elapsedTimer _wallclock;
3517   bool _print_cr;
3518 
3519  public:
3520   // Not MT-safe; so do not pass around these StackObj's
3521   // where they may be accessed by other threads.
3522   jlong wallclock_millis() {
3523     assert(_wallclock.is_active(), "Wall clock should not stop");
3524     _wallclock.stop();  // to record time
3525     jlong ret = _wallclock.milliseconds();
3526     _wallclock.start(); // restart
3527     return ret;
3528   }
3529 };
3530 
3531 CMSPhaseAccounting::CMSPhaseAccounting(CMSCollector *collector,
3532                                        const char *phase,
3533                                        bool print_cr) :
3534   _collector(collector), _phase(phase), _print_cr(print_cr) {
3535 
3536   if (PrintCMSStatistics != 0) {
3537     _collector->resetYields();
3538   }
3539   if (PrintGCDetails) {
3540     gclog_or_tty->date_stamp(PrintGCDateStamps);
3541     gclog_or_tty->stamp(PrintGCTimeStamps);
3542     gclog_or_tty->print_cr("[%s-concurrent-%s-start]",
3543       _collector->cmsGen()->short_name(), _phase);
3544   }
3545   _collector->resetTimer();
3546   _wallclock.start();
3547   _collector->startTimer();
3548 }
3549 
3550 CMSPhaseAccounting::~CMSPhaseAccounting() {
3551   assert(_wallclock.is_active(), "Wall clock should not have stopped");
3552   _collector->stopTimer();
3553   _wallclock.stop();
3554   if (PrintGCDetails) {
3555     gclog_or_tty->date_stamp(PrintGCDateStamps);
3556     gclog_or_tty->stamp(PrintGCTimeStamps);
3557     gclog_or_tty->print("[%s-concurrent-%s: %3.3f/%3.3f secs]",
3558                  _collector->cmsGen()->short_name(),
3559                  _phase, _collector->timerValue(), _wallclock.seconds());
3560     if (_print_cr) {
3561       gclog_or_tty->print_cr("");
3562     }
3563     if (PrintCMSStatistics != 0) {
3564       gclog_or_tty->print_cr(" (CMS-concurrent-%s yielded %d times)", _phase,
3565                     _collector->yields());
3566     }
3567   }
3568 }
3569 
3570 // CMS work
3571 
3572 // The common parts of CMSParInitialMarkTask and CMSParRemarkTask.
3573 class CMSParMarkTask : public AbstractGangTask {
3574  protected:
3575   CMSCollector*     _collector;
3576   int               _n_workers;
3577   CMSParMarkTask(const char* name, CMSCollector* collector, int n_workers) :
3578       AbstractGangTask(name),
3579       _collector(collector),
3580       _n_workers(n_workers) {}
3581   // Work method in support of parallel rescan ... of young gen spaces
3582   void do_young_space_rescan(uint worker_id, OopsInGenClosure* cl,
3583                              ContiguousSpace* space,
3584                              HeapWord** chunk_array, size_t chunk_top);
3585   void work_on_young_gen_roots(uint worker_id, OopsInGenClosure* cl);
3586 };
3587 
3588 // Parallel initial mark task
3589 class CMSParInitialMarkTask: public CMSParMarkTask {
3590  public:
3591   CMSParInitialMarkTask(CMSCollector* collector, int n_workers) :
3592       CMSParMarkTask("Scan roots and young gen for initial mark in parallel",
3593                      collector, n_workers) {}
3594   void work(uint worker_id);
3595 };
3596 
3597 // Checkpoint the roots into this generation from outside
3598 // this generation. [Note this initial checkpoint need only
3599 // be approximate -- we'll do a catch up phase subsequently.]
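     // The asynch flag distinguishes the concurrent (background) entry, which
     // must acquire bitMapLock() here, from the foreground entry, which runs
     // at a safepoint with the lock already held.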
3600 void CMSCollector::checkpointRootsInitial(bool asynch) {
3601   assert(_collectorState == InitialMarking, "Wrong collector state");
3602   check_correct_thread_executing();
3603   TraceCMSMemoryManagerStats tms(_collectorState,GenCollectedHeap::heap()->gc_cause());
3604 
3605   save_heap_summary();
3606   report_heap_summary(GCWhen::BeforeGC);
3607 
3608   ReferenceProcessor* rp = ref_processor();
3609   SpecializationStats::clear();
3610   assert(_restart_addr == NULL, "Control point invariant");
3611   if (asynch) {
3612     // acquire locks for subsequent manipulations
3613     MutexLockerEx x(bitMapLock(),
3614                     Mutex::_no_safepoint_check_flag);
3615     checkpointRootsInitialWork(asynch);
3616     // enable ("weak") refs discovery
3617     rp->enable_discovery(true /*verify_disabled*/, true /*check_no_refs*/);
3618     _collectorState = Marking;
3619   } else {
3620     // (Weak) Refs discovery: this is controlled from genCollectedHeap::do_collection
3621     // which recognizes if we are a CMS generation, and doesn't try to turn on
3622     // discovery; verify that they aren't meddling.
3623     assert(!rp->discovery_is_atomic(),
3624            "incorrect setting of discovery predicate");
3625     assert(!rp->discovery_enabled(), "genCollectedHeap shouldn't control "
3626            "ref discovery for this generation kind");
3627     // already have locks
3628     checkpointRootsInitialWork(asynch);
3629     // now enable ("weak") refs discovery
3630     rp->enable_discovery(true /*verify_disabled*/, false /*verify_no_refs*/);
3631     _collectorState = Marking;
3632   }
3633   SpecializationStats::print();
3634 }
3635 
3636 void CMSCollector::checkpointRootsInitialWork(bool asynch) {
3637   assert(SafepointSynchronize::is_at_safepoint(), "world should be stopped");
3638   assert(_collectorState == InitialMarking, "just checking");
3639 
3640   // If there has not been a GC[n-1] since last GC[n] cycle completed,
3641   // precede our marking with a collection of all
3642   // younger generations to keep floating garbage to a minimum.
3643   // XXX: we won't do this for now -- it's an optimization to be done later.
3644 
3645   // already have locks
3646   assert_lock_strong(bitMapLock());
3647   assert(_markBitMap.isAllClear(), "was reset at end of previous cycle");
3648 
3649   // Setup the verification and class unloading state for this
3650   // CMS collection cycle.
3651   setup_cms_unloading_and_verification_state();
3652 
3653   NOT_PRODUCT(GCTraceTime t("\ncheckpointRootsInitialWork",
3654     PrintGCDetails && Verbose, true, _gc_timer_cm);)
3655   if (UseAdaptiveSizePolicy) {
3656     size_policy()->checkpoint_roots_initial_begin();
3657   }
3658 
3659   // Reset all the PLAB chunk arrays if necessary.
3660   if (_survivor_plab_array != NULL && !CMSPLABRecordAlways) {
3661     reset_survivor_plab_arrays();
3662   }
3663 
3664   ResourceMark rm;
3665   HandleMark  hm;
3666 
3667   FalseClosure falseClosure;
3668   // In the case of a synchronous collection, we will elide the
3669   // remark step, so it's important to catch all the nmethod oops
3670   // in this step.
3671   // The final 'true' flag to gen_process_strong_roots will ensure this.
3672   // If 'async' is true, we can relax the nmethod tracing.
3673   MarkRefsIntoClosure notOlder(_span, &_markBitMap);
3674   GenCollectedHeap* gch = GenCollectedHeap::heap();
3675 
3676   verify_work_stacks_empty();
3677   verify_overflow_empty();
3678 
3679   gch->ensure_parsability(false);  // fill TLABs, but no need to retire them
3680   // Update the saved marks which may affect the root scans.
3681   gch->save_marks();
3682 
3683   // weak reference processing has not started yet.
3684   ref_processor()->set_enqueuing_is_done(false);
3685 
3686   // Need to remember all newly created CLDs,
3687   // so that we can guarantee that the remark finds them.
3688   ClassLoaderDataGraph::remember_new_clds(true);
3689 
3690   // Whenever a CLD is found, it will be claimed before proceeding to mark
3691   // the klasses. The claimed marks need to be cleared before marking starts.
3692   ClassLoaderDataGraph::clear_claimed_marks();
3693 
3694   {
3695     COMPILER2_PRESENT(DerivedPointerTableDeactivate dpt_deact;)
3696     if (CMSParallelInitialMarkEnabled && CollectedHeap::use_parallel_gc_threads()) {
3697       // The parallel version.
3698       FlexibleWorkGang* workers = gch->workers();
3699       assert(workers != NULL, "Need parallel worker threads.");
3700       int n_workers = workers->active_workers();
3701       CMSParInitialMarkTask tsk(this, n_workers);
3702       gch->set_par_threads(n_workers);
3703       initialize_sequential_subtasks_for_young_gen_rescan(n_workers);
3704       if (n_workers > 1) {
3705         GenCollectedHeap::StrongRootsScope srs(gch);
3706         workers->run_task(&tsk);
3707       } else {
3708         GenCollectedHeap::StrongRootsScope srs(gch);
3709         tsk.work(0);
3710       }
3711       gch->set_par_threads(0);
3712     } else {
3713       // The serial version.
3714       CMKlassClosure klass_closure(&notOlder);
3715       gch->rem_set()->prepare_for_younger_refs_iterate(false); // Not parallel.
3716       gch->gen_process_strong_roots(_cmsGen->level(),
3717                                     true,   // younger gens are roots
3718                                     true,   // activate StrongRootsScope
3719                                     false,  // not scavenging
3720                                     SharedHeap::ScanningOption(roots_scanning_options()),
3721                                     &notOlder,
3722                                     true,   // walk all of code cache if (so & SO_CodeCache)
3723                                     NULL,
3724                                     &klass_closure);
3725     }
3726   }
3727 
3728   // Clear mod-union table; it will be dirtied in the prologue of
3729   // CMS generation per each younger generation collection.
3730 
3731   assert(_modUnionTable.isAllClear(),
3732        "Was cleared in most recent final checkpoint phase"
3733        " or no bits are set in the gc_prologue before the start of the next "
3734        "subsequent marking phase.");
3735 
3736   assert(_ct->klass_rem_set()->mod_union_is_clear(), "Must be");
3737 
3738   // Save the end of the used_region of the constituent generations
3739   // to be used to limit the extent of sweep in each generation.
3740   save_sweep_limits();
3741   if (UseAdaptiveSizePolicy) {
3742     size_policy()->checkpoint_roots_initial_end(gch->gc_cause());
3743   }
3744   verify_overflow_empty();
3745 }
3746 
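     // In the concurrent (asynch) case, returns false if marking is abandoned
     // because a foreground collection has requested the CMS token; the
     // foreground collector then redoes the marking from scratch.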
3747 bool CMSCollector::markFromRoots(bool asynch) {
3748   // we might be tempted to assert that:
3749   // assert(asynch == !SafepointSynchronize::is_at_safepoint(),
3750   //        "inconsistent argument?");
3751   // However that wouldn't be right, because it's possible that
3752   // a safepoint is indeed in progress as a younger generation
3753   // stop-the-world GC happens even as we mark in this generation.
3754   assert(_collectorState == Marking, "inconsistent state?");
3755   check_correct_thread_executing();
3756   verify_overflow_empty();
3757 
3758   bool res;
3759   if (asynch) {
3760 
3761     // Start the timers for adaptive size policy for the concurrent phases
3762     // Do it here so that the foreground MS can use the concurrent
3763     // timer since a foreground MS might have the sweep done concurrently
3764     // or STW.
3765     if (UseAdaptiveSizePolicy) {
3766       size_policy()->concurrent_marking_begin();
3767     }
3768 
3769     // Weak ref discovery note: We may be discovering weak
3770     // refs in this generation concurrent (but interleaved) with
3771     // weak ref discovery by a younger generation collector.
3772 
3773     CMSTokenSyncWithLocks ts(true, bitMapLock());
3774     TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
3775     CMSPhaseAccounting pa(this, "mark", !PrintGCDetails);
3776     res = markFromRootsWork(asynch);
3777     if (res) {
3778       _collectorState = Precleaning;
3779     } else { // We failed and a foreground collection wants to take over
3780       assert(_foregroundGCIsActive, "internal state inconsistency");
3781       assert(_restart_addr == NULL,  "foreground will restart from scratch");
3782       if (PrintGCDetails) {
3783         gclog_or_tty->print_cr("bailing out to foreground collection");
3784       }
3785     }
3786     if (UseAdaptiveSizePolicy) {
3787       size_policy()->concurrent_marking_end();
3788     }
3789   } else {
3790     assert(SafepointSynchronize::is_at_safepoint(),
3791            "inconsistent with asynch == false");
3792     if (UseAdaptiveSizePolicy) {
3793       size_policy()->ms_collection_marking_begin();
3794     }
3795     // already have locks
3796     res = markFromRootsWork(asynch);
3797     _collectorState = FinalMarking;
3798     if (UseAdaptiveSizePolicy) {
3799       GenCollectedHeap* gch = GenCollectedHeap::heap();
3800       size_policy()->ms_collection_marking_end(gch->gc_cause());
3801     }
3802   }
3803   verify_overflow_empty();
3804   return res;
3805 }
3806 
3807 bool CMSCollector::markFromRootsWork(bool asynch) {
3808   // iterate over marked bits in bit map, doing a full scan and mark
3809   // from these roots using the following algorithm:
3810   // . if oop is to the right of the current scan pointer,
3811   //   mark corresponding bit (we'll process it later)
3812   // . else (oop is to left of current scan pointer)
3813   //   push oop on marking stack
3814   // . drain the marking stack
3815 
3816   // Note that when we do a marking step we need to hold the
3817   // bit map lock -- recall that direct allocation (by mutators)
3818   // and promotion (by younger generation collectors) is also
3819   // marking the bit map. [the so-called allocate live policy.]
3820   // Because the implementation of bit map marking is not
3821   // robust wrt simultaneous marking of bits in the same word,
3822   // we need to make sure that there is no such interference
3823   // between concurrent such updates.
3824 
3825   // already have locks
3826   assert_lock_strong(bitMapLock());
3827 
3828   verify_work_stacks_empty();
3829   verify_overflow_empty();
3830   bool result = false;
3831   if (CMSConcurrentMTEnabled && ConcGCThreads > 0) {
3832     result = do_marking_mt(asynch);
3833   } else {
3834     result = do_marking_st(asynch);
3835   }
3836   return result;
3837 }
3838 
3839 // Forward decl
3840 class CMSConcMarkingTask;
3841 
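     // A ParallelTaskTerminator whose yield() is overridden so that worker
     // threads spinning in termination also honor CMS yield requests (see
     // the task's should_yield() below).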
3842 class CMSConcMarkingTerminator: public ParallelTaskTerminator {
3843   CMSCollector*       _collector;
3844   CMSConcMarkingTask* _task;
3845  public:
3846   virtual void yield();
3847 
3848   // "n_threads" is the number of threads to be terminated.
3849   // "queue_set" is a set of work queues of other threads.
3850   // "collector" is the CMS collector associated with this task terminator.
3851   // "yield" indicates whether we need the gang as a whole to yield.
3852   CMSConcMarkingTerminator(int n_threads, TaskQueueSetSuper* queue_set, CMSCollector* collector) :
3853     ParallelTaskTerminator(n_threads, queue_set),
3854     _collector(collector) { }
3855 
3856   void set_task(CMSConcMarkingTask* task) {
3857     _task = task;
3858   }
3859 };
3860 
3861 class CMSConcMarkingTerminatorTerminator: public TerminatorTerminator {
3862   CMSConcMarkingTask* _task;
3863  public:
3864   bool should_exit_termination();
3865   void set_task(CMSConcMarkingTask* task) {
3866     _task = task;
3867   }
3868 };
3869 
3870 // MT Concurrent Marking Task
3871 class CMSConcMarkingTask: public YieldingFlexibleGangTask {
3872   CMSCollector* _collector;
3873   int           _n_workers;                  // requested/desired # workers
3874   bool          _asynch;
3875   bool          _result;
3876   CompactibleFreeListSpace*  _cms_space;
3877   char          _pad_front[64];   // padding to ...
3878   HeapWord*     _global_finger;   // ... avoid sharing cache line
3879   char          _pad_back[64];
3880   HeapWord*     _restart_addr;
3881 
3882   //  Exposed here for yielding support
3883   Mutex* const _bit_map_lock;
3884 
3885   // The per thread work queues, available here for stealing
3886   OopTaskQueueSet*  _task_queues;
3887 
3888   // Termination (and yielding) support
3889   CMSConcMarkingTerminator _term;
3890   CMSConcMarkingTerminatorTerminator _term_term;
3891 
3892  public:
3893   CMSConcMarkingTask(CMSCollector* collector,
3894                  CompactibleFreeListSpace* cms_space,
3895                  bool asynch,
3896                  YieldingFlexibleWorkGang* workers,
3897                  OopTaskQueueSet* task_queues):
3898     YieldingFlexibleGangTask("Concurrent marking done multi-threaded"),
3899     _collector(collector),
3900     _cms_space(cms_space),
3901     _asynch(asynch), _n_workers(0), _result(true),
3902     _task_queues(task_queues),
3903     _term(_n_workers, task_queues, _collector),
3904     _bit_map_lock(collector->bitMapLock())
3905   {
3906     _requested_size = _n_workers;
3907     _term.set_task(this);
3908     _term_term.set_task(this);
3909     _restart_addr = _global_finger = _cms_space->bottom();
3910   }
3911 
3912 
3913   OopTaskQueueSet* task_queues()  { return _task_queues; }
3914 
3915   OopTaskQueue* work_queue(int i) { return task_queues()->queue(i); }
3916 
3917   HeapWord** global_finger_addr() { return &_global_finger; }
3918 
3919   CMSConcMarkingTerminator* terminator() { return &_term; }
3920 
3921   virtual void set_for_termination(int active_workers) {
3922     terminator()->reset_for_reuse(active_workers);
3923   }
3924 
3925   void work(uint worker_id);
3926   bool should_yield() {
3927     return    ConcurrentMarkSweepThread::should_yield()
3928            && !_collector->foregroundGCIsActive()
3929            && _asynch;
3930   }
3931 
3932   virtual void coordinator_yield();  // stuff done by coordinator
3933   bool result() { return _result; }
3934 
3935   void reset(HeapWord* ra) {
3936     assert(_global_finger >= _cms_space->end(),  "Postcondition of ::work(i)");
3937     _restart_addr = _global_finger = ra;
3938     _term.reset_for_reuse();
3939   }
3940 
3941   static bool get_work_from_overflow_stack(CMSMarkStack* ovflw_stk,
3942                                            OopTaskQueue* work_q);
3943 
3944  private:
3945   void do_scan_and_mark(int i, CompactibleFreeListSpace* sp);
3946   void do_work_steal(int i);
3947   void bump_global_finger(HeapWord* f);
3948 };
3949 
3950 bool CMSConcMarkingTerminatorTerminator::should_exit_termination() {
3951   assert(_task != NULL, "Error");
3952   return _task->yielding();
3953   // Note that we do not need the disjunct || _task->should_yield() above
3954   // because we want terminating threads to yield only if the task
3955   // is already in the midst of yielding, which happens only after at least one
3956   // thread has yielded.
3957 }
3958 
3959 void CMSConcMarkingTerminator::yield() {
3960   if (_task->should_yield()) {
3961     _task->yield();
3962   } else {
3963     ParallelTaskTerminator::yield();
3964   }
3965 }
3966 
3967 ////////////////////////////////////////////////////////////////
3968 // Concurrent Marking Algorithm Sketch
3969 ////////////////////////////////////////////////////////////////
3970 // Until all tasks exhausted (both spaces):
3971 // -- claim next available chunk
3972 // -- bump global finger via CAS
3973 // -- find first object that starts in this chunk
3974 //    and start scanning bitmap from that position
3975 // -- scan marked objects for oops
3976 // -- CAS-mark target, and if successful:
3977 //    . if target oop is above global finger (volatile read)
3978 //      nothing to do
3979 //    . if target oop is in chunk and above local finger
3980 //        then nothing to do
3981 //    . else push on work-queue
3982 // -- Deal with possible overflow issues:
3983 //    . local work-queue overflow causes stuff to be pushed on
3984 //      global (common) overflow queue
3985 //    . always first empty local work queue
3986 //    . then get a batch of oops from global work queue if any
3987 //    . then do work stealing
3988 // -- When all tasks claimed (both spaces)
3989 //    and local work queue empty,
3990 //    then in a loop do:
3991 //    . check global overflow stack; steal a batch of oops and trace
//    . try to steal from other threads if the global overflow stack is empty
3993 //    . if neither is available, offer termination
3994 // -- Terminate and return result
3995 //
3996 void CMSConcMarkingTask::work(uint worker_id) {
3997   elapsedTimer _timer;
3998   ResourceMark rm;
3999   HandleMark hm;
4000 
4001   DEBUG_ONLY(_collector->verify_overflow_empty();)
4002 
4003   // Before we begin work, our work queue should be empty
4004   assert(work_queue(worker_id)->size() == 0, "Expected to be empty");
4005   // Scan the bitmap covering _cms_space, tracing through grey objects.
4006   _timer.start();
4007   do_scan_and_mark(worker_id, _cms_space);
4008   _timer.stop();
4009   if (PrintCMSStatistics != 0) {
4010     gclog_or_tty->print_cr("Finished cms space scanning in %dth thread: %3.3f sec",
4011       worker_id, _timer.seconds());
4012       // XXX: need xxx/xxx type of notation, two timers
4013   }
4014 
4015   // ... do work stealing
4016   _timer.reset();
4017   _timer.start();
4018   do_work_steal(worker_id);
4019   _timer.stop();
4020   if (PrintCMSStatistics != 0) {
4021     gclog_or_tty->print_cr("Finished work stealing in %dth thread: %3.3f sec",
4022       worker_id, _timer.seconds());
4023       // XXX: need xxx/xxx type of notation, two timers
4024   }
4025   assert(_collector->_markStack.isEmpty(), "Should have been emptied");
4026   assert(work_queue(worker_id)->size() == 0, "Should have been emptied");
4027   // Note that under the current task protocol, the
  // following assertion is true even if the spaces
4029   // expanded since the completion of the concurrent
4030   // marking. XXX This will likely change under a strict
4031   // ABORT semantics.
4032   // After perm removal the comparison was changed to
4033   // greater than or equal to from strictly greater than.
4034   // Before perm removal the highest address sweep would
4035   // have been at the end of perm gen but now is at the
4036   // end of the tenured gen.
4037   assert(_global_finger >=  _cms_space->end(),
4038          "All tasks have been completed");
4039   DEBUG_ONLY(_collector->verify_overflow_empty();)
4040 }
4041 
4042 void CMSConcMarkingTask::bump_global_finger(HeapWord* f) {
4043   HeapWord* read = _global_finger;
4044   HeapWord* cur  = read;
4045   while (f > read) {
4046     cur = read;
4047     read = (HeapWord*) Atomic::cmpxchg_ptr(f, &_global_finger, cur);
4048     if (cur == read) {
4049       // our cas succeeded
4050       assert(_global_finger >= f, "protocol consistency");
4051       break;
4052     }
4053   }
4054 }
4055 
4056 // This is really inefficient, and should be redone by
4057 // using (not yet available) block-read and -write interfaces to the
4058 // stack and the work_queue. XXX FIX ME !!!
4059 bool CMSConcMarkingTask::get_work_from_overflow_stack(CMSMarkStack* ovflw_stk,
4060                                                       OopTaskQueue* work_q) {
4061   // Fast lock-free check
4062   if (ovflw_stk->length() == 0) {
4063     return false;
4064   }
4065   assert(work_q->size() == 0, "Shouldn't steal");
4066   MutexLockerEx ml(ovflw_stk->par_lock(),
4067                    Mutex::_no_safepoint_check_flag);
  // Grab up to 1/4 of the remaining capacity of the work queue
4069   size_t num = MIN2((size_t)(work_q->max_elems() - work_q->size())/4,
4070                     (size_t)ParGCDesiredObjsFromOverflowList);
4071   num = MIN2(num, ovflw_stk->length());
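  // For illustration (hypothetical numbers): with a work queue capacity of
  // 16K entries, an empty queue, and ParGCDesiredObjsFromOverflowList set
  // to 20, this moves min(16K/4, 20, ovflw_stk->length()) oops, i.e. at
  // most 20 per call.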
4072   for (int i = (int) num; i > 0; i--) {
4073     oop cur = ovflw_stk->pop();
4074     assert(cur != NULL, "Counted wrong?");
4075     work_q->push(cur);
4076   }
4077   return num > 0;
4078 }
4079 
4080 void CMSConcMarkingTask::do_scan_and_mark(int i, CompactibleFreeListSpace* sp) {
4081   SequentialSubTasksDone* pst = sp->conc_par_seq_tasks();
4082   int n_tasks = pst->n_tasks();
4083   // We allow that there may be no tasks to do here because
4084   // we are restarting after a stack overflow.
4085   assert(pst->valid() || n_tasks == 0, "Uninitialized use?");
4086   uint nth_task = 0;
4087 
4088   HeapWord* aligned_start = sp->bottom();
4089   if (sp->used_region().contains(_restart_addr)) {
4090     // Align down to a card boundary for the start of 0th task
4091     // for this space.
4092     aligned_start =
4093       (HeapWord*)align_size_down((uintptr_t)_restart_addr,
4094                                  CardTableModRefBS::card_size);
4095   }
4096 
4097   size_t chunk_size = sp->marking_task_size();
4098   while (!pst->is_task_claimed(/* reference */ nth_task)) {
4099     // Having claimed the nth task in this space,
4100     // compute the chunk that it corresponds to:
4101     MemRegion span = MemRegion(aligned_start + nth_task*chunk_size,
4102                                aligned_start + (nth_task+1)*chunk_size);
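    // (Illustration: with a hypothetical chunk_size of 1M words, the task
    // with nth_task == 3 claims [aligned_start + 3M, aligned_start + 4M).)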
4103     // Try and bump the global finger via a CAS;
4104     // note that we need to do the global finger bump
4105     // _before_ taking the intersection below, because
4106     // the task corresponding to that region will be
4107     // deemed done even if the used_region() expands
4108     // because of allocation -- as it almost certainly will
4109     // during start-up while the threads yield in the
4110     // closure below.
4111     HeapWord* finger = span.end();
4112     bump_global_finger(finger);   // atomically
4113     // There are null tasks here corresponding to chunks
4114     // beyond the "top" address of the space.
4115     span = span.intersection(sp->used_region());
4116     if (!span.is_empty()) {  // Non-null task
4117       HeapWord* prev_obj;
4118       assert(!span.contains(_restart_addr) || nth_task == 0,
4119              "Inconsistency");
4120       if (nth_task == 0) {
4121         // For the 0th task, we'll not need to compute a block_start.
4122         if (span.contains(_restart_addr)) {
4123           // In the case of a restart because of stack overflow,
4124           // we might additionally skip a chunk prefix.
4125           prev_obj = _restart_addr;
4126         } else {
4127           prev_obj = span.start();
4128         }
4129       } else {
4130         // We want to skip the first object because
4131         // the protocol is to scan any object in its entirety
4132         // that _starts_ in this span; a fortiori, any
4133         // object starting in an earlier span is scanned
4134         // as part of an earlier claimed task.
4135         // Below we use the "careful" version of block_start
4136         // so we do not try to navigate uninitialized objects.
4137         prev_obj = sp->block_start_careful(span.start());
4138         // Below we use a variant of block_size that uses the
4139         // Printezis bits to avoid waiting for allocated
4140         // objects to become initialized/parsable.
4141         while (prev_obj < span.start()) {
4142           size_t sz = sp->block_size_no_stall(prev_obj, _collector);
4143           if (sz > 0) {
4144             prev_obj += sz;
4145           } else {
4146             // In this case we may end up doing a bit of redundant
4147             // scanning, but that appears unavoidable, short of
4148             // locking the free list locks; see bug 6324141.
4149             break;
4150           }
4151         }
4152       }
4153       if (prev_obj < span.end()) {
4154         MemRegion my_span = MemRegion(prev_obj, span.end());
4155         // Do the marking work within a non-empty span --
4156         // the last argument to the constructor indicates whether the
4157         // iteration should be incremental with periodic yields.
4158         Par_MarkFromRootsClosure cl(this, _collector, my_span,
4159                                     &_collector->_markBitMap,
4160                                     work_queue(i),
4161                                     &_collector->_markStack,
4162                                     _asynch);
4163         _collector->_markBitMap.iterate(&cl, my_span.start(), my_span.end());
4164       } // else nothing to do for this task
4165     }   // else nothing to do for this task
4166   }
4167   // We'd be tempted to assert here that since there are no
4168   // more tasks left to claim in this space, the global_finger
4169   // must exceed space->top() and a fortiori space->end(). However,
4170   // that would not quite be correct because the bumping of
4171   // global_finger occurs strictly after the claiming of a task,
4172   // so by the time we reach here the global finger may not yet
4173   // have been bumped up by the thread that claimed the last
4174   // task.
4175   pst->all_tasks_completed();
4176 }
4177 
4178 class Par_ConcMarkingClosure: public CMSOopClosure {
4179  private:
4180   CMSCollector* _collector;
4181   CMSConcMarkingTask* _task;
4182   MemRegion     _span;
4183   CMSBitMap*    _bit_map;
4184   CMSMarkStack* _overflow_stack;
4185   OopTaskQueue* _work_queue;
4186  protected:
4187   DO_OOP_WORK_DEFN
4188  public:
4189   Par_ConcMarkingClosure(CMSCollector* collector, CMSConcMarkingTask* task, OopTaskQueue* work_queue,
4190                          CMSBitMap* bit_map, CMSMarkStack* overflow_stack):
4191     CMSOopClosure(collector->ref_processor()),
4192     _collector(collector),
4193     _task(task),
4194     _span(collector->_span),
4195     _work_queue(work_queue),
4196     _bit_map(bit_map),
4197     _overflow_stack(overflow_stack)
4198   { }
4199   virtual void do_oop(oop* p);
4200   virtual void do_oop(narrowOop* p);
4201 
4202   void trim_queue(size_t max);
4203   void handle_stack_overflow(HeapWord* lost);
4204   void do_yield_check() {
4205     if (_task->should_yield()) {
4206       _task->yield();
4207     }
4208   }
4209 };
4210 
4211 // Grey object scanning during work stealing phase --
4212 // the salient assumption here is that any references
4213 // that are in these stolen objects being scanned must
4214 // already have been initialized (else they would not have
4215 // been published), so we do not need to check for
4216 // uninitialized objects before pushing here.
4217 void Par_ConcMarkingClosure::do_oop(oop obj) {
4218   assert(obj->is_oop_or_null(true), "expected an oop or NULL");
4219   HeapWord* addr = (HeapWord*)obj;
4220   // Check if oop points into the CMS generation
4221   // and is not marked
4222   if (_span.contains(addr) && !_bit_map->isMarked(addr)) {
4223     // a white object ...
4224     // If we manage to "claim" the object, by being the
4225     // first thread to mark it, then we push it on our
4226     // marking stack
4227     if (_bit_map->par_mark(addr)) {     // ... now grey
4228       // push on work queue (grey set)
4229       bool simulate_overflow = false;
4230       NOT_PRODUCT(
4231         if (CMSMarkStackOverflowALot &&
4232             _collector->simulate_overflow()) {
4233           // simulate a stack overflow
4234           simulate_overflow = true;
4235         }
4236       )
4237       if (simulate_overflow ||
4238           !(_work_queue->push(obj) || _overflow_stack->par_push(obj))) {
4239         // stack overflow
4240         if (PrintCMSStatistics != 0) {
4241           gclog_or_tty->print_cr("CMS marking stack overflow (benign) at "
4242                                  SIZE_FORMAT, _overflow_stack->capacity());
4243         }
4244         // We cannot assert that the overflow stack is full because
4245         // it may have been emptied since.
4246         assert(simulate_overflow ||
4247                _work_queue->size() == _work_queue->max_elems(),
4248               "Else push should have succeeded");
4249         handle_stack_overflow(addr);
4250       }
4251     } // Else, some other thread got there first
4252     do_yield_check();
4253   }
4254 }
4255 
4256 void Par_ConcMarkingClosure::do_oop(oop* p)       { Par_ConcMarkingClosure::do_oop_work(p); }
4257 void Par_ConcMarkingClosure::do_oop(narrowOop* p) { Par_ConcMarkingClosure::do_oop_work(p); }
4258 
4259 void Par_ConcMarkingClosure::trim_queue(size_t max) {
4260   while (_work_queue->size() > max) {
4261     oop new_oop;
4262     if (_work_queue->pop_local(new_oop)) {
4263       assert(new_oop->is_oop(), "Should be an oop");
4264       assert(_bit_map->isMarked((HeapWord*)new_oop), "Grey object");
4265       assert(_span.contains((HeapWord*)new_oop), "Not in span");
4266       new_oop->oop_iterate(this);  // do_oop() above
4267       do_yield_check();
4268     }
4269   }
4270 }
4271 
4272 // Upon stack overflow, we discard (part of) the stack,
4273 // remembering the least address amongst those discarded
4274 // in CMSCollector's _restart_address.
4275 void Par_ConcMarkingClosure::handle_stack_overflow(HeapWord* lost) {
4276   // We need to do this under a mutex to prevent other
4277   // workers from interfering with the work done below.
4278   MutexLockerEx ml(_overflow_stack->par_lock(),
4279                    Mutex::_no_safepoint_check_flag);
4280   // Remember the least grey address discarded
4281   HeapWord* ra = (HeapWord*)_overflow_stack->least_value(lost);
4282   _collector->lower_restart_addr(ra);
4283   _overflow_stack->reset();  // discard stack contents
4284   _overflow_stack->expand(); // expand the stack if possible
4285 }
4286 
4287 
4288 void CMSConcMarkingTask::do_work_steal(int i) {
4289   OopTaskQueue* work_q = work_queue(i);
4290   oop obj_to_scan;
4291   CMSBitMap* bm = &(_collector->_markBitMap);
4292   CMSMarkStack* ovflw = &(_collector->_markStack);
4293   int* seed = _collector->hash_seed(i);
4294   Par_ConcMarkingClosure cl(_collector, this, work_q, bm, ovflw);
4295   while (true) {
4296     cl.trim_queue(0);
4297     assert(work_q->size() == 0, "Should have been emptied above");
4298     if (get_work_from_overflow_stack(ovflw, work_q)) {
4299       // Can't assert below because the work obtained from the
4300       // overflow stack may already have been stolen from us.
4301       // assert(work_q->size() > 0, "Work from overflow stack");
4302       continue;
4303     } else if (task_queues()->steal(i, seed, /* reference */ obj_to_scan)) {
4304       assert(obj_to_scan->is_oop(), "Should be an oop");
4305       assert(bm->isMarked((HeapWord*)obj_to_scan), "Grey object");
4306       obj_to_scan->oop_iterate(&cl);
4307     } else if (terminator()->offer_termination(&_term_term)) {
4308       assert(work_q->size() == 0, "Impossible!");
4309       break;
4310     } else if (yielding() || should_yield()) {
4311       yield();
4312     }
4313   }
4314 }
4315 
4316 // This is run by the CMS (coordinator) thread.
4317 void CMSConcMarkingTask::coordinator_yield() {
4318   assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
4319          "CMS thread should hold CMS token");
4320   // First give up the locks, then yield, then re-lock
4321   // We should probably use a constructor/destructor idiom to
4322   // do this unlock/lock or modify the MutexUnlocker class to
4323   // serve our purpose. XXX
4324   assert_lock_strong(_bit_map_lock);
4325   _bit_map_lock->unlock();
4326   ConcurrentMarkSweepThread::desynchronize(true);
4327   ConcurrentMarkSweepThread::acknowledge_yield_request();
4328   _collector->stopTimer();
4329   if (PrintCMSStatistics != 0) {
4330     _collector->incrementYields();
4331   }
4332   _collector->icms_wait();
4333 
4334   // It is possible for whichever thread initiated the yield request
4335   // not to get a chance to wake up and take the bitmap lock between
4336   // this thread releasing it and reacquiring it. So, while the
4337   // should_yield() flag is on, let's sleep for a bit to give the
4338   // other thread a chance to wake up. The limit imposed on the number
  // of iterations is defensive, to avoid any unforeseen circumstances
4340   // putting us into an infinite loop. Since it's always been this
4341   // (coordinator_yield()) method that was observed to cause the
4342   // problem, we are using a parameter (CMSCoordinatorYieldSleepCount)
4343   // which is by default non-zero. For the other seven methods that
  // also perform the yield operation, we are using a different
4345   // parameter (CMSYieldSleepCount) which is by default zero. This way we
4346   // can enable the sleeping for those methods too, if necessary.
4347   // See 6442774.
4348   //
4349   // We really need to reconsider the synchronization between the GC
4350   // thread and the yield-requesting threads in the future and we
4351   // should really use wait/notify, which is the recommended
4352   // way of doing this type of interaction. Additionally, we should
  // consolidate the eight yield methods, which are almost identical,
  // into one for better maintainability and readability. See 6445193.
4356   //
4357   // Tony 2006.06.29
4358   for (unsigned i = 0; i < CMSCoordinatorYieldSleepCount &&
4359                    ConcurrentMarkSweepThread::should_yield() &&
4360                    !CMSCollector::foregroundGCIsActive(); ++i) {
4361     os::sleep(Thread::current(), 1, false);
4362     ConcurrentMarkSweepThread::acknowledge_yield_request();
4363   }
4364 
4365   ConcurrentMarkSweepThread::synchronize(true);
4366   _bit_map_lock->lock_without_safepoint_check();
4367   _collector->startTimer();
4368 }
4369 
4370 bool CMSCollector::do_marking_mt(bool asynch) {
4371   assert(ConcGCThreads > 0 && conc_workers() != NULL, "precondition");
4372   int num_workers = AdaptiveSizePolicy::calc_active_conc_workers(
4373                                        conc_workers()->total_workers(),
4374                                        conc_workers()->active_workers(),
4375                                        Threads::number_of_non_daemon_threads());
4376   conc_workers()->set_active_workers(num_workers);
4377 
4378   CompactibleFreeListSpace* cms_space  = _cmsGen->cmsSpace();
4379 
4380   CMSConcMarkingTask tsk(this,
4381                          cms_space,
4382                          asynch,
4383                          conc_workers(),
4384                          task_queues());
4385 
4386   // Since the actual number of workers we get may be different
4387   // from the number we requested above, do we need to do anything different
  // below? In particular, maybe we need to subclass the SequentialSubTasksDone
4389   // class?? XXX
  cms_space->initialize_sequential_subtasks_for_marking(num_workers);
4391 
4392   // Refs discovery is already non-atomic.
4393   assert(!ref_processor()->discovery_is_atomic(), "Should be non-atomic");
4394   assert(ref_processor()->discovery_is_mt(), "Discovery should be MT");
4395   conc_workers()->start_task(&tsk);
4396   while (tsk.yielded()) {
4397     tsk.coordinator_yield();
4398     conc_workers()->continue_task(&tsk);
4399   }
4400   // If the task was aborted, _restart_addr will be non-NULL
4401   assert(tsk.completed() || _restart_addr != NULL, "Inconsistency");
4402   while (_restart_addr != NULL) {
4403     // XXX For now we do not make use of ABORTED state and have not
4404     // yet implemented the right abort semantics (even in the original
4405     // single-threaded CMS case). That needs some more investigation
4406     // and is deferred for now; see CR# TBF. 07252005YSR. XXX
4407     assert(!CMSAbortSemantics || tsk.aborted(), "Inconsistency");
4408     // If _restart_addr is non-NULL, a marking stack overflow
4409     // occurred; we need to do a fresh marking iteration from the
4410     // indicated restart address.
4411     if (_foregroundGCIsActive && asynch) {
4412       // We may be running into repeated stack overflows, having
4413       // reached the limit of the stack size, while making very
4414       // slow forward progress. It may be best to bail out and
4415       // let the foreground collector do its job.
4416       // Clear _restart_addr, so that foreground GC
4417       // works from scratch. This avoids the headache of
4418       // a "rescan" which would otherwise be needed because
4419       // of the dirty mod union table & card table.
4420       _restart_addr = NULL;
4421       return false;
4422     }
4423     // Adjust the task to restart from _restart_addr
4424     tsk.reset(_restart_addr);
    cms_space->initialize_sequential_subtasks_for_marking(num_workers,
                                                          _restart_addr);
4427     _restart_addr = NULL;
4428     // Get the workers going again
4429     conc_workers()->start_task(&tsk);
4430     while (tsk.yielded()) {
4431       tsk.coordinator_yield();
4432       conc_workers()->continue_task(&tsk);
4433     }
4434   }
4435   assert(tsk.completed(), "Inconsistency");
4436   assert(tsk.result() == true, "Inconsistency");
4437   return true;
4438 }
4439 
4440 bool CMSCollector::do_marking_st(bool asynch) {
4441   ResourceMark rm;
4442   HandleMark   hm;
4443 
4444   // Temporarily make refs discovery single threaded (non-MT)
4445   ReferenceProcessorMTDiscoveryMutator rp_mut_discovery(ref_processor(), false);
4446   MarkFromRootsClosure markFromRootsClosure(this, _span, &_markBitMap,
4447     &_markStack, CMSYield && asynch);
4448   // the last argument to iterate indicates whether the iteration
4449   // should be incremental with periodic yields.
4450   _markBitMap.iterate(&markFromRootsClosure);
4451   // If _restart_addr is non-NULL, a marking stack overflow
4452   // occurred; we need to do a fresh iteration from the
4453   // indicated restart address.
4454   while (_restart_addr != NULL) {
4455     if (_foregroundGCIsActive && asynch) {
4456       // We may be running into repeated stack overflows, having
4457       // reached the limit of the stack size, while making very
4458       // slow forward progress. It may be best to bail out and
4459       // let the foreground collector do its job.
4460       // Clear _restart_addr, so that foreground GC
4461       // works from scratch. This avoids the headache of
4462       // a "rescan" which would otherwise be needed because
4463       // of the dirty mod union table & card table.
4464       _restart_addr = NULL;
4465       return false;  // indicating failure to complete marking
4466     }
4467     // Deal with stack overflow:
4468     // we restart marking from _restart_addr
4469     HeapWord* ra = _restart_addr;
4470     markFromRootsClosure.reset(ra);
4471     _restart_addr = NULL;
4472     _markBitMap.iterate(&markFromRootsClosure, ra, _span.end());
4473   }
4474   return true;
4475 }
4476 
4477 void CMSCollector::preclean() {
4478   check_correct_thread_executing();
4479   assert(Thread::current()->is_ConcurrentGC_thread(), "Wrong thread");
4480   verify_work_stacks_empty();
4481   verify_overflow_empty();
4482   _abort_preclean = false;
4483   if (CMSPrecleaningEnabled) {
4484     _eden_chunk_index = 0;
4485     size_t used = get_eden_used();
4486     size_t capacity = get_eden_capacity();
4487     // Don't start sampling unless we will get sufficiently
4488     // many samples.
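    // For example (hypothetical values): with CMSScheduleRemarkSamplingRatio == 5
    // and CMSScheduleRemarkEdenPenetration == 50, the threshold in the test
    // below is capacity/500 * 50, i.e. 10% of eden capacity, which is one
    // CMSScheduleRemarkSamplingRatio'th of the occupancy at which the remark
    // will be scheduled.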
4489     if (used < (capacity/(CMSScheduleRemarkSamplingRatio * 100)
4490                 * CMSScheduleRemarkEdenPenetration)) {
4491       _start_sampling = true;
4492     } else {
4493       _start_sampling = false;
4494     }
4495     TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
4496     CMSPhaseAccounting pa(this, "preclean", !PrintGCDetails);
4497     preclean_work(CMSPrecleanRefLists1, CMSPrecleanSurvivors1);
4498   }
4499   CMSTokenSync x(true); // is cms thread
4500   if (CMSPrecleaningEnabled) {
4501     sample_eden();
4502     _collectorState = AbortablePreclean;
4503   } else {
4504     _collectorState = FinalMarking;
4505   }
4506   verify_work_stacks_empty();
4507   verify_overflow_empty();
4508 }
4509 
4510 // Try and schedule the remark such that young gen
4511 // occupancy is CMSScheduleRemarkEdenPenetration %.
4512 void CMSCollector::abortable_preclean() {
4513   check_correct_thread_executing();
4514   assert(CMSPrecleaningEnabled,  "Inconsistent control state");
4515   assert(_collectorState == AbortablePreclean, "Inconsistent control state");
4516 
4517   // If Eden's current occupancy is below this threshold,
4518   // immediately schedule the remark; else preclean
4519   // past the next scavenge in an effort to
  // schedule the pause as described above. By choosing
4521   // CMSScheduleRemarkEdenSizeThreshold >= max eden size
4522   // we will never do an actual abortable preclean cycle.
4523   if (get_eden_used() > CMSScheduleRemarkEdenSizeThreshold) {
4524     TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
4525     CMSPhaseAccounting pa(this, "abortable-preclean", !PrintGCDetails);
4526     // We need more smarts in the abortable preclean
4527     // loop below to deal with cases where allocation
4528     // in young gen is very very slow, and our precleaning
4529     // is running a losing race against a horde of
4530     // mutators intent on flooding us with CMS updates
4531     // (dirty cards).
4532     // One, admittedly dumb, strategy is to give up
4533     // after a certain number of abortable precleaning loops
4534     // or after a certain maximum time. We want to make
4535     // this smarter in the next iteration.
4536     // XXX FIX ME!!! YSR
4537     size_t loops = 0, workdone = 0, cumworkdone = 0, waited = 0;
4538     while (!(should_abort_preclean() ||
4539              ConcurrentMarkSweepThread::should_terminate())) {
4540       workdone = preclean_work(CMSPrecleanRefLists2, CMSPrecleanSurvivors2);
4541       cumworkdone += workdone;
4542       loops++;
4543       // Voluntarily terminate abortable preclean phase if we have
4544       // been at it for too long.
4545       if ((CMSMaxAbortablePrecleanLoops != 0) &&
4546           loops >= CMSMaxAbortablePrecleanLoops) {
4547         if (PrintGCDetails) {
4548           gclog_or_tty->print(" CMS: abort preclean due to loops ");
4549         }
4550         break;
4551       }
4552       if (pa.wallclock_millis() > CMSMaxAbortablePrecleanTime) {
4553         if (PrintGCDetails) {
4554           gclog_or_tty->print(" CMS: abort preclean due to time ");
4555         }
4556         break;
4557       }
4558       // If we are doing little work each iteration, we should
4559       // take a short break.
4560       if (workdone < CMSAbortablePrecleanMinWorkPerIteration) {
4561         // Sleep for some time, waiting for work to accumulate
4562         stopTimer();
4563         cmsThread()->wait_on_cms_lock(CMSAbortablePrecleanWaitMillis);
4564         startTimer();
4565         waited++;
4566       }
4567     }
4568     if (PrintCMSStatistics > 0) {
4569       gclog_or_tty->print(" [%d iterations, %d waits, %d cards)] ",
4570                           loops, waited, cumworkdone);
4571     }
4572   }
4573   CMSTokenSync x(true); // is cms thread
4574   if (_collectorState != Idling) {
4575     assert(_collectorState == AbortablePreclean,
4576            "Spontaneous state transition?");
4577     _collectorState = FinalMarking;
4578   } // Else, a foreground collection completed this CMS cycle.
4579   return;
4580 }
4581 
4582 // Respond to an Eden sampling opportunity
4583 void CMSCollector::sample_eden() {
4584   // Make sure a young gc cannot sneak in between our
4585   // reading and recording of a sample.
4586   assert(Thread::current()->is_ConcurrentGC_thread(),
4587          "Only the cms thread may collect Eden samples");
4588   assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
4589          "Should collect samples while holding CMS token");
4590   if (!_start_sampling) {
4591     return;
4592   }
4593   if (_eden_chunk_array) {
4594     if (_eden_chunk_index < _eden_chunk_capacity) {
4595       _eden_chunk_array[_eden_chunk_index] = *_top_addr;   // take sample
4596       assert(_eden_chunk_array[_eden_chunk_index] <= *_end_addr,
4597              "Unexpected state of Eden");
4598       // We'd like to check that what we just sampled is an oop-start address;
4599       // however, we cannot do that here since the object may not yet have been
4600       // initialized. So we'll instead do the check when we _use_ this sample
4601       // later.
4602       if (_eden_chunk_index == 0 ||
4603           (pointer_delta(_eden_chunk_array[_eden_chunk_index],
4604                          _eden_chunk_array[_eden_chunk_index-1])
4605            >= CMSSamplingGrain)) {
4606         _eden_chunk_index++;  // commit sample
4607       }
4608     }
4609   }
4610   if ((_collectorState == AbortablePreclean) && !_abort_preclean) {
4611     size_t used = get_eden_used();
4612     size_t capacity = get_eden_capacity();
4613     assert(used <= capacity, "Unexpected state of Eden");
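    // For example, with a hypothetical CMSScheduleRemarkEdenPenetration of 50,
    // abortable precleaning is asked to stop once eden is more than half full,
    // so that the remark can be scheduled near the requested eden occupancy.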
4614     if (used >  (capacity/100 * CMSScheduleRemarkEdenPenetration)) {
4615       _abort_preclean = true;
4616     }
4617   }
4618 }
4619 
4620 
4621 size_t CMSCollector::preclean_work(bool clean_refs, bool clean_survivor) {
4622   assert(_collectorState == Precleaning ||
4623          _collectorState == AbortablePreclean, "incorrect state");
4624   ResourceMark rm;
4625   HandleMark   hm;
4626 
4627   // Precleaning is currently not MT but the reference processor
4628   // may be set for MT.  Disable it temporarily here.
4629   ReferenceProcessor* rp = ref_processor();
4630   ReferenceProcessorMTDiscoveryMutator rp_mut_discovery(rp, false);
4631 
4632   // Do one pass of scrubbing the discovered reference lists
4633   // to remove any reference objects with strongly-reachable
4634   // referents.
4635   if (clean_refs) {
4636     CMSPrecleanRefsYieldClosure yield_cl(this);
4637     assert(rp->span().equals(_span), "Spans should be equal");
4638     CMSKeepAliveClosure keep_alive(this, _span, &_markBitMap,
4639                                    &_markStack, true /* preclean */);
4640     CMSDrainMarkingStackClosure complete_trace(this,
4641                                    _span, &_markBitMap, &_markStack,
4642                                    &keep_alive, true /* preclean */);
4643 
4644     // We don't want this step to interfere with a young
4645     // collection because we don't want to take CPU
4646     // or memory bandwidth away from the young GC threads
4647     // (which may be as many as there are CPUs).
4648     // Note that we don't need to protect ourselves from
4649     // interference with mutators because they can't
4650     // manipulate the discovered reference lists nor affect
4651     // the computed reachability of the referents, the
4652     // only properties manipulated by the precleaning
4653     // of these reference lists.
4654     stopTimer();
4655     CMSTokenSyncWithLocks x(true /* is cms thread */,
4656                             bitMapLock());
4657     startTimer();
4658     sample_eden();
4659 
4660     // The following will yield to allow foreground
4661     // collection to proceed promptly. XXX YSR:
4662     // The code in this method may need further
4663     // tweaking for better performance and some restructuring
4664     // for cleaner interfaces.
4665     GCTimer *gc_timer = NULL; // Currently not tracing concurrent phases
4666     rp->preclean_discovered_references(
4667           rp->is_alive_non_header(), &keep_alive, &complete_trace, &yield_cl,
4668           gc_timer);
4669   }
4670 
4671   if (clean_survivor) {  // preclean the active survivor space(s)
4672     assert(_young_gen->kind() == Generation::DefNew ||
4673            _young_gen->kind() == Generation::ParNew ||
4674            _young_gen->kind() == Generation::ASParNew,
4675          "incorrect type for cast");
4676     DefNewGeneration* dng = (DefNewGeneration*)_young_gen;
4677     PushAndMarkClosure pam_cl(this, _span, ref_processor(),
4678                              &_markBitMap, &_modUnionTable,
4679                              &_markStack, true /* precleaning phase */);
4680     stopTimer();
4681     CMSTokenSyncWithLocks ts(true /* is cms thread */,
4682                              bitMapLock());
4683     startTimer();
4684     unsigned int before_count =
4685       GenCollectedHeap::heap()->total_collections();
4686     SurvivorSpacePrecleanClosure
4687       sss_cl(this, _span, &_markBitMap, &_markStack,
4688              &pam_cl, before_count, CMSYield);
4689     dng->from()->object_iterate_careful(&sss_cl);
4690     dng->to()->object_iterate_careful(&sss_cl);
4691   }
4692   MarkRefsIntoAndScanClosure
4693     mrias_cl(_span, ref_processor(), &_markBitMap, &_modUnionTable,
4694              &_markStack, this, CMSYield,
4695              true /* precleaning phase */);
4696   // CAUTION: The following closure has persistent state that may need to
4697   // be reset upon a decrease in the sequence of addresses it
4698   // processes.
4699   ScanMarkedObjectsAgainCarefullyClosure
4700     smoac_cl(this, _span,
4701       &_markBitMap, &_markStack, &mrias_cl, CMSYield);
4702 
4703   // Preclean dirty cards in ModUnionTable and CardTable using
4704   // appropriate convergence criterion;
4705   // repeat CMSPrecleanIter times unless we find that
4706   // we are losing.
4707   assert(CMSPrecleanIter < 10, "CMSPrecleanIter is too large");
4708   assert(CMSPrecleanNumerator < CMSPrecleanDenominator,
4709          "Bad convergence multiplier");
4710   assert(CMSPrecleanThreshold >= 100,
4711          "Unreasonably low CMSPrecleanThreshold");
4712 
4713   size_t numIter, cumNumCards, lastNumCards, curNumCards;
4714   for (numIter = 0, cumNumCards = lastNumCards = curNumCards = 0;
4715        numIter < CMSPrecleanIter;
4716        numIter++, lastNumCards = curNumCards, cumNumCards += curNumCards) {
4717     curNumCards  = preclean_mod_union_table(_cmsGen, &smoac_cl);
4718     if (Verbose && PrintGCDetails) {
4719       gclog_or_tty->print(" (modUnionTable: %d cards)", curNumCards);
4720     }
4721     // Either there are very few dirty cards, so re-mark
4722     // pause will be small anyway, or our pre-cleaning isn't
4723     // that much faster than the rate at which cards are being
4724     // dirtied, so we might as well stop and re-mark since
4725     // precleaning won't improve our re-mark time by much.
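    // In other words, we stop once curNumCards/lastNumCards exceeds
    // CMSPrecleanNumerator/CMSPrecleanDenominator.  For example, with
    // hypothetical values Numerator == 2 and Denominator == 3, a further
    // iteration is attempted only while each pass finds at most 2/3 as
    // many dirty cards as the previous one.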
4726     if (curNumCards <= CMSPrecleanThreshold ||
4727         (numIter > 0 &&
4728          (curNumCards * CMSPrecleanDenominator >
4729          lastNumCards * CMSPrecleanNumerator))) {
4730       numIter++;
4731       cumNumCards += curNumCards;
4732       break;
4733     }
4734   }
4735 
4736   preclean_klasses(&mrias_cl, _cmsGen->freelistLock());
4737 
4738   curNumCards = preclean_card_table(_cmsGen, &smoac_cl);
4739   cumNumCards += curNumCards;
4740   if (PrintGCDetails && PrintCMSStatistics != 0) {
4741     gclog_or_tty->print_cr(" (cardTable: %d cards, re-scanned %d cards, %d iterations)",
4742                   curNumCards, cumNumCards, numIter);
4743   }
4744   return cumNumCards;   // as a measure of useful work done
4745 }
4746 
4747 // PRECLEANING NOTES:
4748 // Precleaning involves:
4749 // . reading the bits of the modUnionTable and clearing the set bits.
4750 // . For the cards corresponding to the set bits, we scan the
4751 //   objects on those cards. This means we need the free_list_lock
4752 //   so that we can safely iterate over the CMS space when scanning
4753 //   for oops.
4754 // . When we scan the objects, we'll be both reading and setting
4755 //   marks in the marking bit map, so we'll need the marking bit map.
4756 // . For protecting _collector_state transitions, we take the CGC_lock.
//   Note that any races in the reading of card table entries by the
4758 //   CMS thread on the one hand and the clearing of those entries by the
4759 //   VM thread or the setting of those entries by the mutator threads on the
4760 //   other are quite benign. However, for efficiency it makes sense to keep
4761 //   the VM thread from racing with the CMS thread while the latter is
//   recording dirty card info in the modUnionTable. We therefore also use the
4763 //   CGC_lock to protect the reading of the card table and the mod union
//   table by the CMS thread.
4765 // . We run concurrently with mutator updates, so scanning
4766 //   needs to be done carefully  -- we should not try to scan
4767 //   potentially uninitialized objects.
4768 //
4769 // Locking strategy: While holding the CGC_lock, we scan over and
4770 // reset a maximal dirty range of the mod union / card tables, then lock
4771 // the free_list_lock and bitmap lock to do a full marking, then
4772 // release these locks; and repeat the cycle. This allows for a
4773 // certain amount of fairness in the sharing of these locks between
4774 // the CMS collector on the one hand, and the VM thread and the
4775 // mutators on the other.
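//
// A rough sketch of one iteration of that cycle, as implemented by
// preclean_mod_union_table() and preclean_card_table() below:
//
//   while more of the generation remains to be precleaned:
//     { holding the CGC_lock (CMSTokenSync) }
//       read and reset the next maximal dirty range of the
//       mod union table / card table
//     { holding free_list_lock and bitMapLock (CMSTokenSyncWithLocks) }
//       carefully scan and mark the objects on the cards in that range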
4776 
4777 // NOTE: preclean_mod_union_table() and preclean_card_table()
4778 // further below are largely identical; if you need to modify
4779 // one of these methods, please check the other method too.
4780 
4781 size_t CMSCollector::preclean_mod_union_table(
4782   ConcurrentMarkSweepGeneration* gen,
4783   ScanMarkedObjectsAgainCarefullyClosure* cl) {
4784   verify_work_stacks_empty();
4785   verify_overflow_empty();
4786 
4787   // strategy: starting with the first card, accumulate contiguous
4788   // ranges of dirty cards; clear these cards, then scan the region
4789   // covered by these cards.
4790 
4791   // Since all of the MUT is committed ahead, we can just use
4792   // that, in case the generations expand while we are precleaning.
4793   // It might also be fine to just use the committed part of the
4794   // generation, but we might potentially miss cards when the
4795   // generation is rapidly expanding while we are in the midst
4796   // of precleaning.
4797   HeapWord* startAddr = gen->reserved().start();
4798   HeapWord* endAddr   = gen->reserved().end();
4799 
4800   cl->setFreelistLock(gen->freelistLock());   // needed for yielding
4801 
4802   size_t numDirtyCards, cumNumDirtyCards;
4803   HeapWord *nextAddr, *lastAddr;
4804   for (cumNumDirtyCards = numDirtyCards = 0,
4805        nextAddr = lastAddr = startAddr;
4806        nextAddr < endAddr;
4807        nextAddr = lastAddr, cumNumDirtyCards += numDirtyCards) {
4808 
4809     ResourceMark rm;
4810     HandleMark   hm;
4811 
4812     MemRegion dirtyRegion;
4813     {
4814       stopTimer();
4815       // Potential yield point
4816       CMSTokenSync ts(true);
4817       startTimer();
4818       sample_eden();
      // Get dirty region starting at nextAddr (inclusive),
4820       // simultaneously clearing it.
4821       dirtyRegion =
4822         _modUnionTable.getAndClearMarkedRegion(nextAddr, endAddr);
4823       assert(dirtyRegion.start() >= nextAddr,
4824              "returned region inconsistent?");
4825     }
4826     // Remember where the next search should begin.
4827     // The returned region (if non-empty) is a right open interval,
    // so lastAddr is obtained from the right end of that
4829     // interval.
4830     lastAddr = dirtyRegion.end();
4831     // Should do something more transparent and less hacky XXX
4832     numDirtyCards =
4833       _modUnionTable.heapWordDiffToOffsetDiff(dirtyRegion.word_size());
4834 
4835     // We'll scan the cards in the dirty region (with periodic
4836     // yields for foreground GC as needed).
4837     if (!dirtyRegion.is_empty()) {
4838       assert(numDirtyCards > 0, "consistency check");
4839       HeapWord* stop_point = NULL;
4840       stopTimer();
4841       // Potential yield point
4842       CMSTokenSyncWithLocks ts(true, gen->freelistLock(),
4843                                bitMapLock());
4844       startTimer();
4845       {
4846         verify_work_stacks_empty();
4847         verify_overflow_empty();
4848         sample_eden();
4849         stop_point =
4850           gen->cmsSpace()->object_iterate_careful_m(dirtyRegion, cl);
4851       }
4852       if (stop_point != NULL) {
4853         // The careful iteration stopped early either because it found an
4854         // uninitialized object, or because we were in the midst of an
4855         // "abortable preclean", which should now be aborted. Redirty
4856         // the bits corresponding to the partially-scanned or unscanned
4857         // cards. We'll either restart at the next block boundary or
4858         // abort the preclean.
4859         assert((_collectorState == AbortablePreclean && should_abort_preclean()),
4860                "Should only be AbortablePreclean.");
4861         _modUnionTable.mark_range(MemRegion(stop_point, dirtyRegion.end()));
4862         if (should_abort_preclean()) {
4863           break; // out of preclean loop
4864         } else {
4865           // Compute the next address at which preclean should pick up;
4866           // might need bitMapLock in order to read P-bits.
4867           lastAddr = next_card_start_after_block(stop_point);
4868         }
4869       }
4870     } else {
4871       assert(lastAddr == endAddr, "consistency check");
4872       assert(numDirtyCards == 0, "consistency check");
4873       break;
4874     }
4875   }
4876   verify_work_stacks_empty();
4877   verify_overflow_empty();
4878   return cumNumDirtyCards;
4879 }
4880 
4881 // NOTE: preclean_mod_union_table() above and preclean_card_table()
4882 // below are largely identical; if you need to modify
4883 // one of these methods, please check the other method too.
4884 
4885 size_t CMSCollector::preclean_card_table(ConcurrentMarkSweepGeneration* gen,
4886   ScanMarkedObjectsAgainCarefullyClosure* cl) {
  // strategy: it's similar to preclean_mod_union_table above, in that
4888   // we accumulate contiguous ranges of dirty cards, mark these cards
4889   // precleaned, then scan the region covered by these cards.
4890   HeapWord* endAddr   = (HeapWord*)(gen->_virtual_space.high());
4891   HeapWord* startAddr = (HeapWord*)(gen->_virtual_space.low());
4892 
4893   cl->setFreelistLock(gen->freelistLock());   // needed for yielding
4894 
4895   size_t numDirtyCards, cumNumDirtyCards;
4896   HeapWord *lastAddr, *nextAddr;
4897 
4898   for (cumNumDirtyCards = numDirtyCards = 0,
4899        nextAddr = lastAddr = startAddr;
4900        nextAddr < endAddr;
4901        nextAddr = lastAddr, cumNumDirtyCards += numDirtyCards) {
4902 
4903     ResourceMark rm;
4904     HandleMark   hm;
4905 
4906     MemRegion dirtyRegion;
4907     {
4908       // See comments in "Precleaning notes" above on why we
4909       // do this locking. XXX Could the locking overheads be
4910       // too high when dirty cards are sparse? [I don't think so.]
4911       stopTimer();
4912       CMSTokenSync x(true); // is cms thread
4913       startTimer();
4914       sample_eden();
4915       // Get and clear dirty region from card table
4916       dirtyRegion = _ct->ct_bs()->dirty_card_range_after_reset(
4917                                     MemRegion(nextAddr, endAddr),
4918                                     true,
4919                                     CardTableModRefBS::precleaned_card_val());
4920 
4921       assert(dirtyRegion.start() >= nextAddr,
4922              "returned region inconsistent?");
4923     }
4924     lastAddr = dirtyRegion.end();
4925     numDirtyCards =
4926       dirtyRegion.word_size()/CardTableModRefBS::card_size_in_words;
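    // (Arithmetic note: with 512-byte cards and 8-byte HeapWords (typical
    // values), card_size_in_words is 64, so e.g. a 6400-word dirty region
    // corresponds to 100 cards.)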
4927 
4928     if (!dirtyRegion.is_empty()) {
4929       stopTimer();
4930       CMSTokenSyncWithLocks ts(true, gen->freelistLock(), bitMapLock());
4931       startTimer();
4932       sample_eden();
4933       verify_work_stacks_empty();
4934       verify_overflow_empty();
4935       HeapWord* stop_point =
4936         gen->cmsSpace()->object_iterate_careful_m(dirtyRegion, cl);
4937       if (stop_point != NULL) {
4938         assert((_collectorState == AbortablePreclean && should_abort_preclean()),
4939                "Should only be AbortablePreclean.");
4940         _ct->ct_bs()->invalidate(MemRegion(stop_point, dirtyRegion.end()));
4941         if (should_abort_preclean()) {
4942           break; // out of preclean loop
4943         } else {
4944           // Compute the next address at which preclean should pick up.
4945           lastAddr = next_card_start_after_block(stop_point);
4946         }
4947       }
4948     } else {
4949       break;
4950     }
4951   }
4952   verify_work_stacks_empty();
4953   verify_overflow_empty();
4954   return cumNumDirtyCards;
4955 }
4956 
4957 class PrecleanKlassClosure : public KlassClosure {
4958   CMKlassClosure _cm_klass_closure;
4959  public:
4960   PrecleanKlassClosure(OopClosure* oop_closure) : _cm_klass_closure(oop_closure) {}
4961   void do_klass(Klass* k) {
4962     if (k->has_accumulated_modified_oops()) {
4963       k->clear_accumulated_modified_oops();
4964 
4965       _cm_klass_closure.do_klass(k);
4966     }
4967   }
4968 };
4969 
// The freelist lock is needed to prevent asserts; is it really needed?
4971 void CMSCollector::preclean_klasses(MarkRefsIntoAndScanClosure* cl, Mutex* freelistLock) {
4972 
4973   cl->set_freelistLock(freelistLock);
4974 
4975   CMSTokenSyncWithLocks ts(true, freelistLock, bitMapLock());
4976 
4977   // SSS: Add equivalent to ScanMarkedObjectsAgainCarefullyClosure::do_yield_check and should_abort_preclean?
4978   // SSS: We should probably check if precleaning should be aborted, at suitable intervals?
4979   PrecleanKlassClosure preclean_klass_closure(cl);
4980   ClassLoaderDataGraph::classes_do(&preclean_klass_closure);
4981 
4982   verify_work_stacks_empty();
4983   verify_overflow_empty();
4984 }
4985 
4986 void CMSCollector::checkpointRootsFinal(bool asynch,
4987   bool clear_all_soft_refs, bool init_mark_was_synchronous) {
4988   assert(_collectorState == FinalMarking, "incorrect state transition?");
4989   check_correct_thread_executing();
4990   // world is stopped at this checkpoint
4991   assert(SafepointSynchronize::is_at_safepoint(),
4992          "world should be stopped");
4993   TraceCMSMemoryManagerStats tms(_collectorState,GenCollectedHeap::heap()->gc_cause());
4994 
4995   verify_work_stacks_empty();
4996   verify_overflow_empty();
4997 
4998   SpecializationStats::clear();
4999   if (PrintGCDetails) {
5000     gclog_or_tty->print("[YG occupancy: "SIZE_FORMAT" K ("SIZE_FORMAT" K)]",
5001                         _young_gen->used() / K,
5002                         _young_gen->capacity() / K);
5003   }
5004   if (asynch) {
5005     if (CMSScavengeBeforeRemark) {
5006       GenCollectedHeap* gch = GenCollectedHeap::heap();
      // Temporarily set the flag to false; GCH->do_collection expects it
      // to be false and will set it to true.
5009       FlagSetting fl(gch->_is_gc_active, false);
5010       NOT_PRODUCT(GCTraceTime t("Scavenge-Before-Remark",
5011         PrintGCDetails && Verbose, true, _gc_timer_cm);)
5012       int level = _cmsGen->level() - 1;
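      // "level" is the young generation's level (one below the CMS
      // generation); passing it as max_level below restricts the
      // collection to the generations at or below it, i.e. this is
      // just a young collection.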
5013       if (level >= 0) {
5014         gch->do_collection(true,        // full (i.e. force, see below)
5015                            false,       // !clear_all_soft_refs
5016                            0,           // size
5017                            false,       // is_tlab
5018                            level        // max_level
5019                           );
5020       }
5021     }
5022     FreelistLocker x(this);
5023     MutexLockerEx y(bitMapLock(),
5024                     Mutex::_no_safepoint_check_flag);
5025     assert(!init_mark_was_synchronous, "but that's impossible!");
5026     checkpointRootsFinalWork(asynch, clear_all_soft_refs, false);
5027   } else {
5028     // already have all the locks
5029     checkpointRootsFinalWork(asynch, clear_all_soft_refs,
5030                              init_mark_was_synchronous);
5031   }
5032   verify_work_stacks_empty();
5033   verify_overflow_empty();
5034   SpecializationStats::print();
5035 }
5036 
5037 void CMSCollector::checkpointRootsFinalWork(bool asynch,
5038   bool clear_all_soft_refs, bool init_mark_was_synchronous) {
5039 
5040   NOT_PRODUCT(GCTraceTime tr("checkpointRootsFinalWork", PrintGCDetails, false, _gc_timer_cm);)
5041 
5042   assert(haveFreelistLocks(), "must have free list locks");
5043   assert_lock_strong(bitMapLock());
5044 
5045   if (UseAdaptiveSizePolicy) {
5046     size_policy()->checkpoint_roots_final_begin();
5047   }
5048 
5049   ResourceMark rm;
5050   HandleMark   hm;
5051 
5052   GenCollectedHeap* gch = GenCollectedHeap::heap();
5053 
5054   if (should_unload_classes()) {
5055     CodeCache::gc_prologue();
5056   }
5057   assert(haveFreelistLocks(), "must have free list locks");
5058   assert_lock_strong(bitMapLock());
5059 
5060   if (!init_mark_was_synchronous) {
5061     // We might assume that we need not fill TLAB's when
5062     // CMSScavengeBeforeRemark is set, because we may have just done
5063     // a scavenge which would have filled all TLAB's -- and besides
5064     // Eden would be empty. This however may not always be the case --
5065     // for instance although we asked for a scavenge, it may not have
5066     // happened because of a JNI critical section. We probably need
5067     // a policy for deciding whether we can in that case wait until
5068     // the critical section releases and then do the remark following
5069     // the scavenge, and skip it here. In the absence of that policy,
5070     // or of an indication of whether the scavenge did indeed occur,
5071     // we cannot rely on TLAB's having been filled and must do
5072     // so here just in case a scavenge did not happen.
5073     gch->ensure_parsability(false);  // fill TLAB's, but no need to retire them
5074     // Update the saved marks which may affect the root scans.
5075     gch->save_marks();
5076 
5077     {
5078       COMPILER2_PRESENT(DerivedPointerTableDeactivate dpt_deact;)
5079 
5080       // Note on the role of the mod union table:
5081       // Since the marker in "markFromRoots" marks concurrently with
5082       // mutators, it is possible for some reachable objects not to have been
5083       // scanned. For instance, an only reference to an object A was
5084       // placed in object B after the marker scanned B. Unless B is rescanned,
5085       // A would be collected. Such updates to references in marked objects
5086       // are detected via the mod union table which is the set of all cards
5087       // dirtied since the first checkpoint in this GC cycle and prior to
5088       // the most recent young generation GC, minus those cleaned up by the
5089       // concurrent precleaning.
5090       if (CMSParallelRemarkEnabled && CollectedHeap::use_parallel_gc_threads()) {
5091         GCTraceTime t("Rescan (parallel) ", PrintGCDetails, false, _gc_timer_cm);
5092         do_remark_parallel();
5093       } else {
5094         GCTraceTime t("Rescan (non-parallel) ", PrintGCDetails, false,
5095                     _gc_timer_cm);
5096         do_remark_non_parallel();
5097       }
5098     }
5099   } else {
5100     assert(!asynch, "Can't have init_mark_was_synchronous in asynch mode");
5101     // The initial mark was stop-world, so there's no rescanning to
5102     // do; go straight on to the next step below.
5103   }
5104   verify_work_stacks_empty();
5105   verify_overflow_empty();
5106 
5107   {
5108     NOT_PRODUCT(GCTraceTime ts("refProcessingWork", PrintGCDetails, false, _gc_timer_cm);)
5109     refProcessingWork(asynch, clear_all_soft_refs);
5110   }
5111   verify_work_stacks_empty();
5112   verify_overflow_empty();
5113 
5114   if (should_unload_classes()) {
5115     CodeCache::gc_epilogue();
5116   }
5117   JvmtiExport::gc_epilogue();
5118 
5119   // If we encountered any (marking stack / work queue) overflow
5120   // events during the current CMS cycle, take appropriate
5121   // remedial measures, where possible, so as to try and avoid
5122   // recurrence of that condition.
5123   assert(_markStack.isEmpty(), "No grey objects");
5124   size_t ser_ovflw = _ser_pmc_remark_ovflw + _ser_pmc_preclean_ovflw +
5125                      _ser_kac_ovflw        + _ser_kac_preclean_ovflw;
5126   if (ser_ovflw > 0) {
5127     if (PrintCMSStatistics != 0) {
5128       gclog_or_tty->print_cr("Marking stack overflow (benign) "
5129         "(pmc_pc="SIZE_FORMAT", pmc_rm="SIZE_FORMAT", kac="SIZE_FORMAT
5130         ", kac_preclean="SIZE_FORMAT")",
5131         _ser_pmc_preclean_ovflw, _ser_pmc_remark_ovflw,
5132         _ser_kac_ovflw, _ser_kac_preclean_ovflw);
5133     }
5134     _markStack.expand();
5135     _ser_pmc_remark_ovflw = 0;
5136     _ser_pmc_preclean_ovflw = 0;
5137     _ser_kac_preclean_ovflw = 0;
5138     _ser_kac_ovflw = 0;
5139   }
5140   if (_par_pmc_remark_ovflw > 0 || _par_kac_ovflw > 0) {
5141     if (PrintCMSStatistics != 0) {
5142       gclog_or_tty->print_cr("Work queue overflow (benign) "
5143         "(pmc_rm="SIZE_FORMAT", kac="SIZE_FORMAT")",
5144         _par_pmc_remark_ovflw, _par_kac_ovflw);
5145     }
5146     _par_pmc_remark_ovflw = 0;
5147     _par_kac_ovflw = 0;
5148   }
5149   if (PrintCMSStatistics != 0) {
5150      if (_markStack._hit_limit > 0) {
5151        gclog_or_tty->print_cr(" (benign) Hit max stack size limit ("SIZE_FORMAT")",
5152                               _markStack._hit_limit);
5153      }
5154      if (_markStack._failed_double > 0) {
5155        gclog_or_tty->print_cr(" (benign) Failed stack doubling ("SIZE_FORMAT"),"
5156                               " current capacity "SIZE_FORMAT,
5157                               _markStack._failed_double,
5158                               _markStack.capacity());
5159      }
5160   }
5161   _markStack._hit_limit = 0;
5162   _markStack._failed_double = 0;
5163 
5164   if ((VerifyAfterGC || VerifyDuringGC) &&
5165       GenCollectedHeap::heap()->total_collections() >= VerifyGCStartAt) {
5166     verify_after_remark();
5167   }
5168 
5169   _gc_tracer_cm->report_object_count_after_gc(&_is_alive_closure);
5170 
5171   // Change under the freelistLocks.
5172   _collectorState = Sweeping;
5173   // Call isAllClear() under bitMapLock
5174   assert(_modUnionTable.isAllClear(),
5175       "Should be clear by end of the final marking");
5176   assert(_ct->klass_rem_set()->mod_union_is_clear(),
5177       "Should be clear by end of the final marking");
5178   if (UseAdaptiveSizePolicy) {
5179     size_policy()->checkpoint_roots_final_end(gch->gc_cause());
5180   }
5181 }
5182 
5183 void CMSParInitialMarkTask::work(uint worker_id) {
5184   elapsedTimer _timer;
5185   ResourceMark rm;
5186   HandleMark   hm;
5187 
5188   // ---------- scan from roots --------------
5189   _timer.start();
5190   GenCollectedHeap* gch = GenCollectedHeap::heap();
5191   Par_MarkRefsIntoClosure par_mri_cl(_collector->_span, &(_collector->_markBitMap));
5192   CMKlassClosure klass_closure(&par_mri_cl);
5193 
5194   // ---------- young gen roots --------------
5195   {
5196     work_on_young_gen_roots(worker_id, &par_mri_cl);
5197     _timer.stop();
5198     if (PrintCMSStatistics != 0) {
5199       gclog_or_tty->print_cr(
5200         "Finished young gen initial mark scan work in %dth thread: %3.3f sec",
5201         worker_id, _timer.seconds());
5202     }
5203   }
5204 
5205   // ---------- remaining roots --------------
5206   _timer.reset();
5207   _timer.start();
5208   gch->gen_process_strong_roots(_collector->_cmsGen->level(),
5209                                 false,     // yg was scanned above
5210                                 false,     // this is parallel code
5211                                 false,     // not scavenging
5212                                 SharedHeap::ScanningOption(_collector->CMSCollector::roots_scanning_options()),
5213                                 &par_mri_cl,
5214                                 true,   // walk all of code cache if (so & SO_CodeCache)
5215                                 NULL,
5216                                 &klass_closure);
5217   assert(_collector->should_unload_classes()
5218          || (_collector->CMSCollector::roots_scanning_options() & SharedHeap::SO_CodeCache),
5219          "if we didn't scan the code cache, we have to be ready to drop nmethods with expired weak oops");
5220   _timer.stop();
5221   if (PrintCMSStatistics != 0) {
5222     gclog_or_tty->print_cr(
5223       "Finished remaining root initial mark scan work in %dth thread: %3.3f sec",
5224       worker_id, _timer.seconds());
5225   }
5226 }
5227 
5228 // Parallel remark task
5229 class CMSParRemarkTask: public CMSParMarkTask {
5230   CompactibleFreeListSpace* _cms_space;
5231 
5232   // The per-thread work queues, available here for stealing.
5233   OopTaskQueueSet*       _task_queues;
5234   ParallelTaskTerminator _term;
5235 
5236  public:
5237   // A value of 0 passed to n_workers will cause the number of
5238   // workers to be taken from the active workers in the work gang.
5239   CMSParRemarkTask(CMSCollector* collector,
5240                    CompactibleFreeListSpace* cms_space,
5241                    int n_workers, FlexibleWorkGang* workers,
5242                    OopTaskQueueSet* task_queues):
5243     CMSParMarkTask("Rescan roots and grey objects in parallel",
5244                    collector, n_workers),
5245     _cms_space(cms_space),
5246     _task_queues(task_queues),
5247     _term(n_workers, task_queues) { }
5248 
5249   OopTaskQueueSet* task_queues() { return _task_queues; }
5250 
5251   OopTaskQueue* work_queue(int i) { return task_queues()->queue(i); }
5252 
5253   ParallelTaskTerminator* terminator() { return &_term; }
5254   int n_workers() { return _n_workers; }
5255 
5256   void work(uint worker_id);
5257 
5258  private:
5259   // ... of dirty cards in old space
5260   void do_dirty_card_rescan_tasks(CompactibleFreeListSpace* sp, int i,
5261                                   Par_MarkRefsIntoAndScanClosure* cl);
5262 
5263   // ... work stealing for the above
5264   void do_work_steal(int i, Par_MarkRefsIntoAndScanClosure* cl, int* seed);
5265 };
5266 
5267 class RemarkKlassClosure : public KlassClosure {
5268   CMKlassClosure _cm_klass_closure;
5269  public:
5270   RemarkKlassClosure(OopClosure* oop_closure) : _cm_klass_closure(oop_closure) {}
5271   void do_klass(Klass* k) {
5272     // Check if we have modified any oops in the Klass during the concurrent marking.
5273     if (k->has_accumulated_modified_oops()) {
5274       k->clear_accumulated_modified_oops();
5275 
5276       // We could have transferred the current modified marks to the accumulated marks,
5277       // like we do with the Card Table to Mod Union Table. But it's not really necessary.
5278     } else if (k->has_modified_oops()) {
5279       // Don't clear anything, this info is needed by the next young collection.
5280     } else {
5281       // No modified oops in the Klass.
5282       return;
5283     }
5284 
5285     // The klass has modified fields, need to scan the klass.
5286     _cm_klass_closure.do_klass(k);
5287   }
5288 };
5289 
5290 void CMSParMarkTask::work_on_young_gen_roots(uint worker_id, OopsInGenClosure* cl) {
5291   DefNewGeneration* dng = _collector->_young_gen->as_DefNewGeneration();
5292   EdenSpace* eden_space = dng->eden();
5293   ContiguousSpace* from_space = dng->from();
5294   ContiguousSpace* to_space   = dng->to();
5295 
5296   HeapWord** eca = _collector->_eden_chunk_array;
5297   size_t     ect = _collector->_eden_chunk_index;
5298   HeapWord** sca = _collector->_survivor_chunk_array;
5299   size_t     sct = _collector->_survivor_chunk_index;
5300 
5301   assert(ect <= _collector->_eden_chunk_capacity, "out of bounds");
5302   assert(sct <= _collector->_survivor_chunk_capacity, "out of bounds");
5303 
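       // To-space carries no chunk samples and is rescanned as a single task;
       // from-space and eden are partitioned using the survivor and eden chunk
       // boundaries sampled earlier in the cycle (see do_young_space_rescan).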
5304   do_young_space_rescan(worker_id, cl, to_space, NULL, 0);
5305   do_young_space_rescan(worker_id, cl, from_space, sca, sct);
5306   do_young_space_rescan(worker_id, cl, eden_space, eca, ect);
5307 }
5308 
5309 // work_queue(i) is passed to the closure
5310 // Par_MarkRefsIntoAndScanClosure.  The "i" parameter
5311 // also is passed to do_dirty_card_rescan_tasks() and to
5312 // do_work_steal() to select the i-th task_queue.
5313 
5314 void CMSParRemarkTask::work(uint worker_id) {
5315   elapsedTimer _timer;
5316   ResourceMark rm;
5317   HandleMark   hm;
5318 
5319   // ---------- rescan from roots --------------
5320   _timer.start();
5321   GenCollectedHeap* gch = GenCollectedHeap::heap();
5322   Par_MarkRefsIntoAndScanClosure par_mrias_cl(_collector,
5323     _collector->_span, _collector->ref_processor(),
5324     &(_collector->_markBitMap),
5325     work_queue(worker_id));
5326 
5327   // Rescan young gen roots first since these are likely
5328   // coarsely partitioned and may, on that account, constitute
5329   // the critical path; thus, it's best to start off that
5330   // work first.
5331   // ---------- young gen roots --------------
5332   {
5333     work_on_young_gen_roots(worker_id, &par_mrias_cl);
5334     _timer.stop();
5335     if (PrintCMSStatistics != 0) {
5336       gclog_or_tty->print_cr(
5337         "Finished young gen rescan work in %dth thread: %3.3f sec",
5338         worker_id, _timer.seconds());
5339     }
5340   }
5341 
5342   // ---------- remaining roots --------------
5343   _timer.reset();
5344   _timer.start();
5345   gch->gen_process_strong_roots(_collector->_cmsGen->level(),
5346                                 false,     // yg was scanned above
5347                                 false,     // this is parallel code
5348                                 false,     // not scavenging
5349                                 SharedHeap::ScanningOption(_collector->CMSCollector::roots_scanning_options()),
5350                                 &par_mrias_cl,
5351                                 true,   // walk all of code cache if (so & SO_CodeCache)
5352                                 NULL,
5353                                 NULL);     // The dirty klasses will be handled below
5354   assert(_collector->should_unload_classes()
5355          || (_collector->CMSCollector::roots_scanning_options() & SharedHeap::SO_CodeCache),
5356          "if we didn't scan the code cache, we have to be ready to drop nmethods with expired weak oops");
5357   _timer.stop();
5358   if (PrintCMSStatistics != 0) {
5359     gclog_or_tty->print_cr(
5360       "Finished remaining root rescan work in %dth thread: %3.3f sec",
5361       worker_id, _timer.seconds());
5362   }
5363 
5364   // ---------- unhandled CLD scanning ----------
5365   if (worker_id == 0) { // Single threaded at the moment.
5366     _timer.reset();
5367     _timer.start();
5368 
5369     // Scan all new class loader data objects and new dependencies that were
5370     // introduced during concurrent marking.
5371     ResourceMark rm;
5372     GrowableArray<ClassLoaderData*>* array = ClassLoaderDataGraph::new_clds();
5373     for (int i = 0; i < array->length(); i++) {
5374       par_mrias_cl.do_class_loader_data(array->at(i));
5375     }
5376 
5377     // We don't need to keep track of new CLDs anymore.
5378     ClassLoaderDataGraph::remember_new_clds(false);
5379 
5380     _timer.stop();
5381     if (PrintCMSStatistics != 0) {
5382       gclog_or_tty->print_cr(
5383           "Finished unhandled CLD scanning work in %dth thread: %3.3f sec",
5384           worker_id, _timer.seconds());
5385     }
5386   }
5387 
5388   // ---------- dirty klass scanning ----------
5389   if (worker_id == 0) { // Single threaded at the moment.
5390     _timer.reset();
5391     _timer.start();
5392 
5393     // Scan all classes that were dirtied during the concurrent marking phase.
5394     RemarkKlassClosure remark_klass_closure(&par_mrias_cl);
5395     ClassLoaderDataGraph::classes_do(&remark_klass_closure);
5396 
5397     _timer.stop();
5398     if (PrintCMSStatistics != 0) {
5399       gclog_or_tty->print_cr(
5400           "Finished dirty klass scanning work in %dth thread: %3.3f sec",
5401           worker_id, _timer.seconds());
5402     }
5403   }
5404 
5405   // We might have added oops to ClassLoaderData::_handles during the
5406   // concurrent marking phase. These oops point to newly allocated objects
5407   // that are guaranteed to be kept alive. Either by the direct allocation
5408   // code, or when the young collector processes the strong roots. Hence,
5409   // we don't have to revisit the _handles block during the remark phase.
5410 
5411   // ---------- rescan dirty cards ------------
5412   _timer.reset();
5413   _timer.start();
5414 
5415   // Do the rescan tasks for each of the two spaces
5416   // (cms_space) in turn.
5417   // "worker_id" is passed to select the task_queue for "worker_id"
5418   do_dirty_card_rescan_tasks(_cms_space, worker_id, &par_mrias_cl);
5419   _timer.stop();
5420   if (PrintCMSStatistics != 0) {
5421     gclog_or_tty->print_cr(
5422       "Finished dirty card rescan work in %dth thread: %3.3f sec",
5423       worker_id, _timer.seconds());
5424   }
5425 
5426   // ---------- steal work from other threads ...
5427   // ---------- ... and drain overflow list.
5428   _timer.reset();
5429   _timer.start();
5430   do_work_steal(worker_id, &par_mrias_cl, _collector->hash_seed(worker_id));
5431   _timer.stop();
5432   if (PrintCMSStatistics != 0) {
5433     gclog_or_tty->print_cr(
5434       "Finished work stealing in %dth thread: %3.3f sec",
5435       worker_id, _timer.seconds());
5436   }
5437 }
5438 
5439 // Note that the worker_id parameter is not used.
5440 void
5441 CMSParMarkTask::do_young_space_rescan(uint worker_id,
5442   OopsInGenClosure* cl, ContiguousSpace* space,
5443   HeapWord** chunk_array, size_t chunk_top) {
5444   // Until all tasks completed:
5445   // . claim an unclaimed task
5446   // . compute region boundaries corresponding to task claimed
5447   //   using chunk_array
5448   // . par_oop_iterate(cl) over that region
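       //
       // For example, with chunk_array = {a, b} and chunk_top == 2, the three
       // claimable tasks cover [bottom, a), [a, b) and [b, top).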
5449 
5450   ResourceMark rm;
5451   HandleMark   hm;
5452 
5453   SequentialSubTasksDone* pst = space->par_seq_tasks();
5454   assert(pst->valid(), "Uninitialized use?");
5455 
5456   uint nth_task = 0;
5457   uint n_tasks  = pst->n_tasks();
5458 
5459   HeapWord *start, *end;
5460   while (!pst->is_task_claimed(/* reference */ nth_task)) {
5461     // We claimed task # nth_task; compute its boundaries.
5462     if (chunk_top == 0) {  // no samples were taken
5463       assert(nth_task == 0 && n_tasks == 1, "Can have only 1 EdenSpace task");
5464       start = space->bottom();
5465       end   = space->top();
5466     } else if (nth_task == 0) {
5467       start = space->bottom();
5468       end   = chunk_array[nth_task];
5469     } else if (nth_task < (uint)chunk_top) {
5470       assert(nth_task >= 1, "Control point invariant");
5471       start = chunk_array[nth_task - 1];
5472       end   = chunk_array[nth_task];
5473     } else {
5474       assert(nth_task == (uint)chunk_top, "Control point invariant");
5475       start = chunk_array[chunk_top - 1];
5476       end   = space->top();
5477     }
5478     MemRegion mr(start, end);
5479     // Verify that mr is in space
5480     assert(mr.is_empty() || space->used_region().contains(mr),
5481            "Should be in space");
5482     // Verify that "start" is an object boundary
5483     assert(mr.is_empty() || oop(mr.start())->is_oop(),
5484            "Should be an oop");
5485     space->par_oop_iterate(mr, cl);
5486   }
5487   pst->all_tasks_completed();
5488 }
5489 
5490 void
5491 CMSParRemarkTask::do_dirty_card_rescan_tasks(
5492   CompactibleFreeListSpace* sp, int i,
5493   Par_MarkRefsIntoAndScanClosure* cl) {
5494   // Until all tasks completed:
5495   // . claim an unclaimed task
5496   // . compute region boundaries corresponding to task claimed
5497   // . transfer dirty bits ct->mut for that region
5498   // . apply rescanclosure to dirty mut bits for that region
5499 
5500   ResourceMark rm;
5501   HandleMark   hm;
5502 
5503   OopTaskQueue* work_q = work_queue(i);
5504   ModUnionClosure modUnionClosure(&(_collector->_modUnionTable));
5505   // CAUTION! CAUTION! CAUTION! CAUTION! CAUTION! CAUTION! CAUTION!
5506   // CAUTION: This closure has state that persists across calls to
5507   // the work method dirty_range_iterate_clear() in that it has
5508   // embedded in it a (subtype of) UpwardsObjectClosure. The
5509   // use of that state in the embedded UpwardsObjectClosure instance
5510   // assumes that the cards are always iterated (even if in parallel
5511   // by several threads) in monotonically increasing order per each
5512   // thread. This is true of the implementation below which picks
5513   // card ranges (chunks) in monotonically increasing order globally
5514   // and, a-fortiori, in monotonically increasing order per thread
5515   // (the latter order being a subsequence of the former).
5516   // If the work code below is ever reorganized into a more chaotic
5517   // work-partitioning form than the current "sequential tasks"
5518   // paradigm, the use of that persistent state will have to be
5519   // revisited and modified appropriately. See also related
5520   // bug 4756801 work on which should examine this code to make
5521   // sure that the changes there do not run counter to the
5522   // assumptions made here and necessary for correctness and
5523   // efficiency. Note also that this code might yield inefficient
5524   // behaviour in the case of very large objects that span one or
5525   // more work chunks. Such objects would potentially be scanned
5526   // several times redundantly. Work on 4756801 should try and
5527   // address that performance anomaly if at all possible. XXX
5528   MemRegion  full_span  = _collector->_span;
5529   CMSBitMap* bm    = &(_collector->_markBitMap);     // shared
5530   MarkFromDirtyCardsClosure
5531     greyRescanClosure(_collector, full_span, // entire span of interest
5532                       sp, bm, work_q, cl);
5533 
5534   SequentialSubTasksDone* pst = sp->conc_par_seq_tasks();
5535   assert(pst->valid(), "Uninitialized use?");
5536   uint nth_task = 0;
5537   const int alignment = CardTableModRefBS::card_size * BitsPerWord;
5538   MemRegion span = sp->used_region();
5539   HeapWord* start_addr = span.start();
5540   HeapWord* end_addr = (HeapWord*)round_to((intptr_t)span.end(),
5541                                            alignment);
5542   const size_t chunk_size = sp->rescan_task_size(); // in HeapWord units
5543   assert((HeapWord*)round_to((intptr_t)start_addr, alignment) ==
5544          start_addr, "Check alignment");
5545   assert((size_t)round_to((intptr_t)chunk_size, alignment) ==
5546          chunk_size, "Check alignment");
5547 
5548   while (!pst->is_task_claimed(/* reference */ nth_task)) {
5549     // Having claimed the nth_task, compute corresponding mem-region,
5550     // which is a-fortiori aligned correctly (i.e. at a MUT boundary).
5551     // The alignment restriction ensures that we do not need any
5552     // synchronization with other gang-workers while setting or
5553     // clearing bits in this chunk of the MUT.
5554     MemRegion this_span = MemRegion(start_addr + nth_task*chunk_size,
5555                                     start_addr + (nth_task+1)*chunk_size);
5556     // The last chunk's end might be way beyond end of the
5557     // used region. In that case pull back appropriately.
5558     if (this_span.end() > end_addr) {
5559       this_span.set_end(end_addr);
5560       assert(!this_span.is_empty(), "Program logic (calculation of n_tasks)");
5561     }
5562     // Iterate over the dirty cards covering this chunk, marking them
5563     // precleaned, and setting the corresponding bits in the mod union
5564     // table. Since we have been careful to partition at Card and MUT-word
5565     // boundaries no synchronization is needed between parallel threads.
5566     _collector->_ct->ct_bs()->dirty_card_iterate(this_span,
5567                                                  &modUnionClosure);
5568 
5569     // Having transferred these marks into the modUnionTable,
5570     // rescan the marked objects on the dirty cards in the modUnionTable.
5571     // Even if this is at a synchronous collection, the initial marking
5572     // may have been done during an asynchronous collection so there
5573     // may be dirty bits in the mod-union table.
5574     _collector->_modUnionTable.dirty_range_iterate_clear(
5575                   this_span, &greyRescanClosure);
5576     _collector->_modUnionTable.verifyNoOneBitsInRange(
5577                                  this_span.start(),
5578                                  this_span.end());
5579   }
5580   pst->all_tasks_completed();  // declare that i am done
5581 }
5582 
5583 // . see if we can share work_queues with ParNew? XXX
5584 void
5585 CMSParRemarkTask::do_work_steal(int i, Par_MarkRefsIntoAndScanClosure* cl,
5586                                 int* seed) {
5587   OopTaskQueue* work_q = work_queue(i);
5588   NOT_PRODUCT(int num_steals = 0;)
5589   oop obj_to_scan;
5590   CMSBitMap* bm = &(_collector->_markBitMap);
5591 
5592   while (true) {
5593     // Completely finish any left over work from (an) earlier round(s)
5594     cl->trim_queue(0);
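         // Decide how much to take from the global overflow list: at most a
         // quarter of the free slots remaining in our work queue, capped at
         // ParGCDesiredObjsFromOverflowList.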
5595     size_t num_from_overflow_list = MIN2((size_t)(work_q->max_elems() - work_q->size())/4,
5596                                          (size_t)ParGCDesiredObjsFromOverflowList);
5597     // Now check if there's any work in the overflow list
5598     // Passing ParallelGCThreads as the third parameter, no_of_gc_threads,
5599     // only affects the number of attempts made to get work from the
5600     // overflow list and does not affect the number of workers.  Just
5601     // pass ParallelGCThreads so this behavior is unchanged.
5602     if (_collector->par_take_from_overflow_list(num_from_overflow_list,
5603                                                 work_q,
5604                                                 ParallelGCThreads)) {
5605       // found something in global overflow list;
5606       // not yet ready to go stealing work from others.
5607       // We'd like to assert(work_q->size() != 0, ...)
5608       // because we just took work from the overflow list,
5609       // but of course we can't since all of that could have
5610       // been already stolen from us.
5611       // "He giveth and He taketh away."
5612       continue;
5613     }
5614     // Verify that we have no work before we resort to stealing
5615     assert(work_q->size() == 0, "Have work, shouldn't steal");
5616     // Try to steal from other queues that have work
5617     if (task_queues()->steal(i, seed, /* reference */ obj_to_scan)) {
5618       NOT_PRODUCT(num_steals++;)
5619       assert(obj_to_scan->is_oop(), "Oops, not an oop!");
5620       assert(bm->isMarked((HeapWord*)obj_to_scan), "Stole an unmarked oop?");
5621       // Do scanning work
5622       obj_to_scan->oop_iterate(cl);
5623       // Loop around, finish this work, and try to steal some more
5624     } else if (terminator()->offer_termination()) {
5625         break;  // nirvana from the infinite cycle
5626     }
5627   }
5628   NOT_PRODUCT(
5629     if (PrintCMSStatistics != 0) {
5630       gclog_or_tty->print("\n\t(%d: stole %d oops)", i, num_steals);
5631     }
5632   )
5633   assert(work_q->size() == 0 && _collector->overflow_list_is_empty(),
5634          "Else our work is not yet done");
5635 }
5636 
5637 // Return a thread-local PLAB recording array, as appropriate.
5638 void* CMSCollector::get_data_recorder(int thr_num) {
5639   if (_survivor_plab_array != NULL &&
5640       (CMSPLABRecordAlways ||
5641        (_collectorState > Marking && _collectorState < FinalMarking))) {
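         // i.e. record PLAB samples only while precleaning is in progress
         // (between the end of concurrent marking and the final remark),
         // unless CMSPLABRecordAlways forces recording regardless of state.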
5642     assert(thr_num < (int)ParallelGCThreads, "thr_num is out of bounds");
5643     ChunkArray* ca = &_survivor_plab_array[thr_num];
5644     ca->reset();   // clear it so that fresh data is recorded
5645     return (void*) ca;
5646   } else {
5647     return NULL;
5648   }
5649 }
5650 
5651 // Reset all the thread-local PLAB recording arrays
5652 void CMSCollector::reset_survivor_plab_arrays() {
5653   for (uint i = 0; i < ParallelGCThreads; i++) {
5654     _survivor_plab_array[i].reset();
5655   }
5656 }
5657 
5658 // Merge the per-thread plab arrays into the global survivor chunk
5659 // array which will provide the partitioning of the survivor space
5660 // for CMS initial scan and rescan.
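     // Each per-thread array holds addresses in increasing order, so this
     // amounts to a k-way merge: each round picks the smallest unconsumed
     // address across all the threads' arrays.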
5661 void CMSCollector::merge_survivor_plab_arrays(ContiguousSpace* surv,
5662                                               int no_of_gc_threads) {
5663   assert(_survivor_plab_array  != NULL, "Error");
5664   assert(_survivor_chunk_array != NULL, "Error");
5665   assert(_collectorState == FinalMarking ||
5666          (CMSParallelInitialMarkEnabled && _collectorState == InitialMarking), "Error");
5667   for (int j = 0; j < no_of_gc_threads; j++) {
5668     _cursor[j] = 0;
5669   }
5670   HeapWord* top = surv->top();
5671   size_t i;
5672   for (i = 0; i < _survivor_chunk_capacity; i++) {  // all sca entries
5673     HeapWord* min_val = top;          // Higher than any PLAB address
5674     uint      min_tid = 0;            // position of min_val this round
5675     for (int j = 0; j < no_of_gc_threads; j++) {
5676       ChunkArray* cur_sca = &_survivor_plab_array[j];
5677       if (_cursor[j] == cur_sca->end()) {
5678         continue;
5679       }
5680       assert(_cursor[j] < cur_sca->end(), "ctl pt invariant");
5681       HeapWord* cur_val = cur_sca->nth(_cursor[j]);
5682       assert(surv->used_region().contains(cur_val), "Out of bounds value");
5683       if (cur_val < min_val) {
5684         min_tid = j;
5685         min_val = cur_val;
5686       } else {
5687         assert(cur_val < top, "All recorded addresses should be less");
5688       }
5689     }
5690     // At this point min_val and min_tid are respectively
5691     // the least address in _survivor_plab_array[j]->nth(_cursor[j])
5692     // and the thread (j) that witnesses that address.
5693     // We record this address in the _survivor_chunk_array[i]
5694     // and increment _cursor[min_tid] prior to the next round i.
5695     if (min_val == top) {
5696       break;
5697     }
5698     _survivor_chunk_array[i] = min_val;
5699     _cursor[min_tid]++;
5700   }
5701   // We are all done; record the size of the _survivor_chunk_array
5702   _survivor_chunk_index = i; // exclusive: [0, i)
5703   if (PrintCMSStatistics > 0) {
5704     gclog_or_tty->print(" (Survivor:" SIZE_FORMAT "chunks) ", i);
5705   }
5706   // Verify that we used up all the recorded entries
5707   #ifdef ASSERT
5708     size_t total = 0;
5709     for (int j = 0; j < no_of_gc_threads; j++) {
5710       assert(_cursor[j] == _survivor_plab_array[j].end(), "Ctl pt invariant");
5711       total += _cursor[j];
5712     }
5713     assert(total == _survivor_chunk_index, "Ctl Pt Invariant");
5714     // Check that the merged array is in sorted order
5715     if (total > 0) {
5716       for (size_t i = 0; i < total - 1; i++) {
5717         if (PrintCMSStatistics > 0) {
5718           gclog_or_tty->print(" (chunk" SIZE_FORMAT ":" INTPTR_FORMAT ") ",
5719                               i, _survivor_chunk_array[i]);
5720         }
5721         assert(_survivor_chunk_array[i] < _survivor_chunk_array[i+1],
5722                "Not sorted");
5723       }
5724     }
5725   #endif // ASSERT
5726 }
5727 
5728 // Set up the space's par_seq_tasks structure for work claiming
5729 // for parallel initial scan and rescan of young gen.
5730 // See ParRescanTask where this is currently used.
5731 void
5732 CMSCollector::
5733 initialize_sequential_subtasks_for_young_gen_rescan(int n_threads) {
5734   assert(n_threads > 0, "Unexpected n_threads argument");
5735   DefNewGeneration* dng = (DefNewGeneration*)_young_gen;
5736 
5737   // Eden space
5738   {
5739     SequentialSubTasksDone* pst = dng->eden()->par_seq_tasks();
5740     assert(!pst->valid(), "Clobbering existing data?");
5741     // Each valid entry in [0, _eden_chunk_index) represents a task.
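         // There is one more task than entries: the last task covers the tail
         // of eden above the last sampled boundary (see do_young_space_rescan).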
5742     size_t n_tasks = _eden_chunk_index + 1;
5743     assert(n_tasks == 1 || _eden_chunk_array != NULL, "Error");
5744     // Sets the condition for completion of the subtask (how many threads
5745     // need to finish in order to be done).
5746     pst->set_n_threads(n_threads);
5747     pst->set_n_tasks((int)n_tasks);
5748   }
5749 
5750   // Merge the survivor plab arrays into _survivor_chunk_array
5751   if (_survivor_plab_array != NULL) {
5752     merge_survivor_plab_arrays(dng->from(), n_threads);
5753   } else {
5754     assert(_survivor_chunk_index == 0, "Error");
5755   }
5756 
5757   // To space
5758   {
5759     SequentialSubTasksDone* pst = dng->to()->par_seq_tasks();
5760     assert(!pst->valid(), "Clobbering existing data?");
5761     // Sets the condition for completion of the subtask (how many threads
5762     // need to finish in order to be done).
5763     pst->set_n_threads(n_threads);
5764     pst->set_n_tasks(1);
5765     assert(pst->valid(), "Error");
5766   }
5767 
5768   // From space
5769   {
5770     SequentialSubTasksDone* pst = dng->from()->par_seq_tasks();
5771     assert(!pst->valid(), "Clobbering existing data?");
5772     size_t n_tasks = _survivor_chunk_index + 1;
5773     assert(n_tasks == 1 || _survivor_chunk_array != NULL, "Error");
5774     // Sets the condition for completion of the subtask (how many threads
5775     // need to finish in order to be done).
5776     pst->set_n_threads(n_threads);
5777     pst->set_n_tasks((int)n_tasks);
5778     assert(pst->valid(), "Error");
5779   }
5780 }
5781 
5782 // Parallel version of remark
5783 void CMSCollector::do_remark_parallel() {
5784   GenCollectedHeap* gch = GenCollectedHeap::heap();
5785   FlexibleWorkGang* workers = gch->workers();
5786   assert(workers != NULL, "Need parallel worker threads.");
5787   // Choose to use the number of GC workers most recently set
5788   // into "active_workers".  If active_workers is not set, set it
5789   // to ParallelGCThreads.
5790   int n_workers = workers->active_workers();
5791   if (n_workers == 0) {
5792     assert(n_workers > 0, "Should have been set during scavenge");
5793     n_workers = ParallelGCThreads;
5794     workers->set_active_workers(n_workers);
5795   }
5796   CompactibleFreeListSpace* cms_space  = _cmsGen->cmsSpace();
5797 
5798   CMSParRemarkTask tsk(this,
5799     cms_space,
5800     n_workers, workers, task_queues());
5801 
5802   // Set up for parallel process_strong_roots work.
5803   gch->set_par_threads(n_workers);
5804   // We won't be iterating over the cards in the card table updating
5805   // the younger_gen cards, so we shouldn't call the following else
5806   // the verification code as well as subsequent younger_refs_iterate
5807   // code would get confused. XXX
5808   // gch->rem_set()->prepare_for_younger_refs_iterate(true); // parallel
5809 
5810   // The young gen rescan work will not be done as part of
5811   // process_strong_roots (which currently doesn't know how to
5812   // parallelize such a scan), but rather will be broken up into
5813   // a set of parallel tasks (via the sampling that the [abortable]
5814   // preclean phase did of EdenSpace, plus the [two] tasks of
5815   // scanning the [two] survivor spaces). Further fine-grain
5816   // parallelization of the scanning of the survivor spaces
5817   // themselves, and of precleaning of the younger gen itself
5818   // is deferred to the future.
5819   initialize_sequential_subtasks_for_young_gen_rescan(n_workers);
5820 
5821   // The dirty card rescan work is broken up into a "sequence"
5822   // of parallel tasks (per constituent space) that are dynamically
5823   // claimed by the parallel threads.
5824   cms_space->initialize_sequential_subtasks_for_rescan(n_workers);
5825 
5826   // It turns out that even when we're using 1 thread, doing the work in a
5827   // separate thread causes wide variance in run times.  We can't help this
5828   // in the multi-threaded case, but we special-case n=1 here to get
5829   // repeatable measurements of the 1-thread overhead of the parallel code.
5830   if (n_workers > 1) {
5831     // Make refs discovery MT-safe, if it isn't already: it may not
5832     // necessarily be so, since it's possible that we are doing
5833     // ST marking.
5834     ReferenceProcessorMTDiscoveryMutator mt(ref_processor(), true);
5835     GenCollectedHeap::StrongRootsScope srs(gch);
5836     workers->run_task(&tsk);
5837   } else {
5838     ReferenceProcessorMTDiscoveryMutator mt(ref_processor(), false);
5839     GenCollectedHeap::StrongRootsScope srs(gch);
5840     tsk.work(0);
5841   }
5842 
5843   gch->set_par_threads(0);  // 0 ==> non-parallel.
5844   // restore, single-threaded for now, any preserved marks
5845   // as a result of work_q overflow
5846   restore_preserved_marks_if_any();
5847 }
5848 
5849 // Non-parallel version of remark
5850 void CMSCollector::do_remark_non_parallel() {
5851   ResourceMark rm;
5852   HandleMark   hm;
5853   GenCollectedHeap* gch = GenCollectedHeap::heap();
5854   ReferenceProcessorMTDiscoveryMutator mt(ref_processor(), false);
5855 
5856   MarkRefsIntoAndScanClosure
5857     mrias_cl(_span, ref_processor(), &_markBitMap, NULL /* not precleaning */,
5858              &_markStack, this,
5859              false /* should_yield */, false /* not precleaning */);
5860   MarkFromDirtyCardsClosure
5861     markFromDirtyCardsClosure(this, _span,
5862                               NULL,  // space is set further below
5863                               &_markBitMap, &_markStack, &mrias_cl);
5864   {
5865     GCTraceTime t("grey object rescan", PrintGCDetails, false, _gc_timer_cm);
5866     // Iterate over the dirty cards, setting the corresponding bits in the
5867     // mod union table.
5868     {
5869       ModUnionClosure modUnionClosure(&_modUnionTable);
5870       _ct->ct_bs()->dirty_card_iterate(
5871                       _cmsGen->used_region(),
5872                       &modUnionClosure);
5873     }
5874     // Having transferred these marks into the modUnionTable, we just need
5875     // to rescan the marked objects on the dirty cards in the modUnionTable.
5876     // The initial marking may have been done during an asynchronous
5877     // collection so there may be dirty bits in the mod-union table.
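         // The end of the rescan range is rounded up so that it covers a whole
         // number of mod union table words (each MUT word maps BitsPerWord cards).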
5878     const int alignment =
5879       CardTableModRefBS::card_size * BitsPerWord;
5880     {
5881       // ... First handle dirty cards in CMS gen
5882       markFromDirtyCardsClosure.set_space(_cmsGen->cmsSpace());
5883       MemRegion ur = _cmsGen->used_region();
5884       HeapWord* lb = ur.start();
5885       HeapWord* ub = (HeapWord*)round_to((intptr_t)ur.end(), alignment);
5886       MemRegion cms_span(lb, ub);
5887       _modUnionTable.dirty_range_iterate_clear(cms_span,
5888                                                &markFromDirtyCardsClosure);
5889       verify_work_stacks_empty();
5890       if (PrintCMSStatistics != 0) {
5891         gclog_or_tty->print(" (re-scanned "SIZE_FORMAT" dirty cards in cms gen) ",
5892           markFromDirtyCardsClosure.num_dirty_cards());
5893       }
5894     }
5895   }
5896   if (VerifyDuringGC &&
5897       GenCollectedHeap::heap()->total_collections() >= VerifyGCStartAt) {
5898     HandleMark hm;  // Discard invalid handles created during verification
5899     Universe::verify();
5900   }
5901   {
5902     GCTraceTime t("root rescan", PrintGCDetails, false, _gc_timer_cm);
5903 
5904     verify_work_stacks_empty();
5905 
5906     gch->rem_set()->prepare_for_younger_refs_iterate(false); // Not parallel.
5907     GenCollectedHeap::StrongRootsScope srs(gch);
5908     gch->gen_process_strong_roots(_cmsGen->level(),
5909                                   true,  // younger gens as roots
5910                                   false, // use the local StrongRootsScope
5911                                   false, // not scavenging
5912                                   SharedHeap::ScanningOption(roots_scanning_options()),
5913                                   &mrias_cl,
5914                                   true,   // walk code active on stacks
5915                                   NULL,
5916                                   NULL);  // The dirty klasses will be handled below
5917 
5918     assert(should_unload_classes()
5919            || (roots_scanning_options() & SharedHeap::SO_CodeCache),
5920            "if we didn't scan the code cache, we have to be ready to drop nmethods with expired weak oops");
5921   }
5922 
5923   {
5924     GCTraceTime t("visit unhandled CLDs", PrintGCDetails, false, _gc_timer_cm);
5925 
5926     verify_work_stacks_empty();
5927 
5928     // Scan all class loader data objects that might have been introduced
5929     // during concurrent marking.
5930     ResourceMark rm;
5931     GrowableArray<ClassLoaderData*>* array = ClassLoaderDataGraph::new_clds();
5932     for (int i = 0; i < array->length(); i++) {
5933       mrias_cl.do_class_loader_data(array->at(i));
5934     }
5935 
5936     // We don't need to keep track of new CLDs anymore.
5937     ClassLoaderDataGraph::remember_new_clds(false);
5938 
5939     verify_work_stacks_empty();
5940   }
5941 
5942   {
5943     GCTraceTime t("dirty klass scan", PrintGCDetails, false, _gc_timer_cm);
5944 
5945     verify_work_stacks_empty();
5946 
5947     RemarkKlassClosure remark_klass_closure(&mrias_cl);
5948     ClassLoaderDataGraph::classes_do(&remark_klass_closure);
5949 
5950     verify_work_stacks_empty();
5951   }
5952 
5953   // We might have added oops to ClassLoaderData::_handles during the
5954   // concurrent marking phase. These oops point to newly allocated objects
5955   // that are guaranteed to be kept alive. Either by the direct allocation
5956   // code, or when the young collector processes the strong roots. Hence,
5957   // we don't have to revisit the _handles block during the remark phase.
5958 
5959   verify_work_stacks_empty();
5960   // Restore evacuated mark words, if any, used for overflow list links
5961   if (!CMSOverflowEarlyRestoration) {
5962     restore_preserved_marks_if_any();
5963   }
5964   verify_overflow_empty();
5965 }
5966 
5967 ////////////////////////////////////////////////////////
5968 // Parallel Reference Processing Task Proxy Class
5969 ////////////////////////////////////////////////////////
5970 class CMSRefProcTaskProxy: public AbstractGangTaskWOopQueues {
5971   typedef AbstractRefProcTaskExecutor::ProcessTask ProcessTask;
5972   CMSCollector*          _collector;
5973   CMSBitMap*             _mark_bit_map;
5974   const MemRegion        _span;
5975   ProcessTask&           _task;
5976 
5977 public:
5978   CMSRefProcTaskProxy(ProcessTask&     task,
5979                       CMSCollector*    collector,
5980                       const MemRegion& span,
5981                       CMSBitMap*       mark_bit_map,
5982                       AbstractWorkGang* workers,
5983                       OopTaskQueueSet* task_queues):
5984     // XXX Should superclass AGTWOQ also know about AWG since it knows
5985     // about the task_queues used by the AWG? Then it could initialize
5986     // the terminator() object. See 6984287. The set_for_termination()
5987     // below is a temporary band-aid for the regression in 6984287.
5988     AbstractGangTaskWOopQueues("Process referents by policy in parallel",
5989       task_queues),
5990     _task(task),
5991     _collector(collector), _span(span), _mark_bit_map(mark_bit_map)
5992   {
5993     assert(_collector->_span.equals(_span) && !_span.is_empty(),
5994            "Inconsistency in _span");
5995     set_for_termination(workers->active_workers());
5996   }
5997 
5998   OopTaskQueueSet* task_queues() { return queues(); }
5999 
6000   OopTaskQueue* work_queue(int i) { return task_queues()->queue(i); }
6001 
6002   void do_work_steal(int i,
6003                      CMSParDrainMarkingStackClosure* drain,
6004                      CMSParKeepAliveClosure* keep_alive,
6005                      int* seed);
6006 
6007   virtual void work(uint worker_id);
6008 };
6009 
6010 void CMSRefProcTaskProxy::work(uint worker_id) {
6011   assert(_collector->_span.equals(_span), "Inconsistency in _span");
6012   CMSParKeepAliveClosure par_keep_alive(_collector, _span,
6013                                         _mark_bit_map,
6014                                         work_queue(worker_id));
6015   CMSParDrainMarkingStackClosure par_drain_stack(_collector, _span,
6016                                                  _mark_bit_map,
6017                                                  work_queue(worker_id));
6018   CMSIsAliveClosure is_alive_closure(_span, _mark_bit_map);
6019   _task.work(worker_id, is_alive_closure, par_keep_alive, par_drain_stack);
6020   if (_task.marks_oops_alive()) {
6021     do_work_steal(worker_id, &par_drain_stack, &par_keep_alive,
6022                   _collector->hash_seed(worker_id));
6023   }
6024   assert(work_queue(worker_id)->size() == 0, "work_queue should be empty");
6025   assert(_collector->_overflow_list == NULL, "non-empty _overflow_list");
6026 }
6027 
6028 class CMSRefEnqueueTaskProxy: public AbstractGangTask {
6029   typedef AbstractRefProcTaskExecutor::EnqueueTask EnqueueTask;
6030   EnqueueTask& _task;
6031 
6032 public:
6033   CMSRefEnqueueTaskProxy(EnqueueTask& task)
6034     : AbstractGangTask("Enqueue reference objects in parallel"),
6035       _task(task)
6036   { }
6037 
6038   virtual void work(uint worker_id)
6039   {
6040     _task.work(worker_id);
6041   }
6042 };
6043 
6044 CMSParKeepAliveClosure::CMSParKeepAliveClosure(CMSCollector* collector,
6045   MemRegion span, CMSBitMap* bit_map, OopTaskQueue* work_queue):
6046    _span(span),
6047    _bit_map(bit_map),
6048    _work_queue(work_queue),
6049    _mark_and_push(collector, span, bit_map, work_queue),
6050    _low_water_mark(MIN2((uint)(work_queue->max_elems()/4),
6051                         (uint)(CMSWorkQueueDrainThreshold * ParallelGCThreads)))
6052 { }
6053 
6054 // . see if we can share work_queues with ParNew? XXX
6055 void CMSRefProcTaskProxy::do_work_steal(int i,
6056   CMSParDrainMarkingStackClosure* drain,
6057   CMSParKeepAliveClosure* keep_alive,
6058   int* seed) {
6059   OopTaskQueue* work_q = work_queue(i);
6060   NOT_PRODUCT(int num_steals = 0;)
6061   oop obj_to_scan;
6062 
6063   while (true) {
6064     // Completely finish any left over work from (an) earlier round(s)
6065     drain->trim_queue(0);
6066     size_t num_from_overflow_list = MIN2((size_t)(work_q->max_elems() - work_q->size())/4,
6067                                          (size_t)ParGCDesiredObjsFromOverflowList);
6068     // Now check if there's any work in the overflow list
6069     // Passing ParallelGCThreads as the third parameter, no_of_gc_threads,
6070     // only affects the number of attempts made to get work from the
6071     // overflow list and does not affect the number of workers.  Just
6072     // pass ParallelGCThreads so this behavior is unchanged.
6073     if (_collector->par_take_from_overflow_list(num_from_overflow_list,
6074                                                 work_q,
6075                                                 ParallelGCThreads)) {
6076       // Found something in global overflow list;
6077       // not yet ready to go stealing work from others.
6078       // We'd like to assert(work_q->size() != 0, ...)
6079       // because we just took work from the overflow list,
6080       // but of course we can't, since all of that might have
6081       // been already stolen from us.
6082       continue;
6083     }
6084     // Verify that we have no work before we resort to stealing
6085     assert(work_q->size() == 0, "Have work, shouldn't steal");
6086     // Try to steal from other queues that have work
6087     if (task_queues()->steal(i, seed, /* reference */ obj_to_scan)) {
6088       NOT_PRODUCT(num_steals++;)
6089       assert(obj_to_scan->is_oop(), "Oops, not an oop!");
6090       assert(_mark_bit_map->isMarked((HeapWord*)obj_to_scan), "Stole an unmarked oop?");
6091       // Do scanning work
6092       obj_to_scan->oop_iterate(keep_alive);
6093       // Loop around, finish this work, and try to steal some more
6094     } else if (terminator()->offer_termination()) {
6095       break;  // nirvana from the infinite cycle
6096     }
6097   }
6098   NOT_PRODUCT(
6099     if (PrintCMSStatistics != 0) {
6100       gclog_or_tty->print("\n\t(%d: stole %d oops)", i, num_steals);
6101     }
6102   )
6103 }
6104 
6105 void CMSRefProcTaskExecutor::execute(ProcessTask& task)
6106 {
6107   GenCollectedHeap* gch = GenCollectedHeap::heap();
6108   FlexibleWorkGang* workers = gch->workers();
6109   assert(workers != NULL, "Need parallel worker threads.");
6110   CMSRefProcTaskProxy rp_task(task, &_collector,
6111                               _collector.ref_processor()->span(),
6112                               _collector.markBitMap(),
6113                               workers, _collector.task_queues());
6114   workers->run_task(&rp_task);
6115 }
6116 
6117 void CMSRefProcTaskExecutor::execute(EnqueueTask& task)
6118 {
6119 
6120   GenCollectedHeap* gch = GenCollectedHeap::heap();
6121   FlexibleWorkGang* workers = gch->workers();
6122   assert(workers != NULL, "Need parallel worker threads.");
6123   CMSRefEnqueueTaskProxy enq_task(task);
6124   workers->run_task(&enq_task);
6125 }
6126 
6127 void CMSCollector::refProcessingWork(bool asynch, bool clear_all_soft_refs) {
6128 
6129   ResourceMark rm;
6130   HandleMark   hm;
6131 
6132   ReferenceProcessor* rp = ref_processor();
6133   assert(rp->span().equals(_span), "Spans should be equal");
6134   assert(!rp->enqueuing_is_done(), "Enqueuing should not be complete");
6135   // Process weak references.
6136   rp->setup_policy(clear_all_soft_refs);
6137   verify_work_stacks_empty();
6138 
6139   CMSKeepAliveClosure cmsKeepAliveClosure(this, _span, &_markBitMap,
6140                                           &_markStack, false /* !preclean */);
6141   CMSDrainMarkingStackClosure cmsDrainMarkingStackClosure(this,
6142                                 _span, &_markBitMap, &_markStack,
6143                                 &cmsKeepAliveClosure, false /* !preclean */);
6144   {
6145     GCTraceTime t("weak refs processing", PrintGCDetails, false, _gc_timer_cm);
6146 
6147     ReferenceProcessorStats stats;
6148     if (rp->processing_is_mt()) {
6149       // Set the degree of MT here.  If the discovery is done MT, there
6150       // may have been a different number of threads doing the discovery
6151       // and a different number of discovered lists may have Ref objects.
6152       // That is OK as long as the Reference lists are balanced (see
6153       // balance_all_queues() and balance_queues()).
6154       GenCollectedHeap* gch = GenCollectedHeap::heap();
6155       int active_workers = ParallelGCThreads;
6156       FlexibleWorkGang* workers = gch->workers();
6157       if (workers != NULL) {
6158         active_workers = workers->active_workers();
6159         // The expectation is that active_workers will have already
6160         // been set to a reasonable value.  If it has not been set,
6161         // investigate.
6162         assert(active_workers > 0, "Should have been set during scavenge");
6163       }
6164       rp->set_active_mt_degree(active_workers);
6165       CMSRefProcTaskExecutor task_executor(*this);
6166       stats = rp->process_discovered_references(&_is_alive_closure,
6167                                         &cmsKeepAliveClosure,
6168                                         &cmsDrainMarkingStackClosure,
6169                                         &task_executor,
6170                                         _gc_timer_cm);
6171     } else {
6172       stats = rp->process_discovered_references(&_is_alive_closure,
6173                                         &cmsKeepAliveClosure,
6174                                         &cmsDrainMarkingStackClosure,
6175                                         NULL,
6176                                         _gc_timer_cm);
6177     }
6178     _gc_tracer_cm->report_gc_reference_stats(stats);
6179 
6180   }
6181 
6182   // This is the point where the entire marking should have completed.
6183   verify_work_stacks_empty();
6184 
6185   if (should_unload_classes()) {
6186     {
6187       GCTraceTime t("class unloading", PrintGCDetails, false, _gc_timer_cm);
6188 
6189       // Unload classes and purge the SystemDictionary.
6190       bool purged_class = SystemDictionary::do_unloading(&_is_alive_closure);
6191 
6192       // Unload nmethods.
6193       CodeCache::do_unloading(&_is_alive_closure, purged_class);
6194 
6195       // Prune dead klasses from subklass/sibling/implementor lists.
6196       Klass::clean_weak_klass_links(&_is_alive_closure);
6197     }
6198 
6199     {
6200       GCTraceTime t("scrub symbol table", PrintGCDetails, false, _gc_timer_cm);
6201       // Clean up unreferenced symbols in symbol table.
6202       SymbolTable::unlink();
6203     }
6204   }
6205 
6206   // CMS doesn't use the StringTable as hard roots when class unloading is turned off.
6207   // Need to check if we really scanned the StringTable.
6208   if ((roots_scanning_options() & SharedHeap::SO_Strings) == 0) {
6209     GCTraceTime t("scrub string table", PrintGCDetails, false, _gc_timer_cm);
6210     // Delete entries for dead interned strings.
6211     StringTable::unlink(&_is_alive_closure);
6212   }
6213 
6214   // Restore any preserved marks as a result of mark stack or
6215   // work queue overflow
6216   restore_preserved_marks_if_any();  // done single-threaded for now
6217 
6218   rp->set_enqueuing_is_done(true);
6219   if (rp->processing_is_mt()) {
6220     rp->balance_all_queues();
6221     CMSRefProcTaskExecutor task_executor(*this);
6222     rp->enqueue_discovered_references(&task_executor);
6223   } else {
6224     rp->enqueue_discovered_references(NULL);
6225   }
6226   rp->verify_no_references_recorded();
6227   assert(!rp->discovery_enabled(), "should have been disabled");
6228 }
6229 
6230 #ifndef PRODUCT
6231 void CMSCollector::check_correct_thread_executing() {
6232   Thread* t = Thread::current();
6233   // Only the VM thread or the CMS thread should be here.
6234   assert(t->is_ConcurrentGC_thread() || t->is_VM_thread(),
6235          "Unexpected thread type");
6236   // If this is the vm thread, the foreground process
6237   // should not be waiting.  Note that _foregroundGCIsActive is
6238   // true while the foreground collector is waiting.
6239   if (_foregroundGCShouldWait) {
6240     // We cannot be the VM thread
6241     assert(t->is_ConcurrentGC_thread(),
6242            "Should be CMS thread");
6243   } else {
6244     // We can be the CMS thread only if we are in a stop-world
6245     // phase of CMS collection.
6246     if (t->is_ConcurrentGC_thread()) {
6247       assert(_collectorState == InitialMarking ||
6248              _collectorState == FinalMarking,
6249              "Should be a stop-world phase");
6250       // The CMS thread should be holding the CMS_token.
6251       assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
6252              "Potential interference with concurrently "
6253              "executing VM thread");
6254     }
6255   }
6256 }
6257 #endif
6258 
6259 void CMSCollector::sweep(bool asynch) {
6260   assert(_collectorState == Sweeping, "just checking");
6261   check_correct_thread_executing();
6262   verify_work_stacks_empty();
6263   verify_overflow_empty();
6264   increment_sweep_count();
6265   TraceCMSMemoryManagerStats tms(_collectorState, GenCollectedHeap::heap()->gc_cause());
6266 
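       // The inter-sweep timer has been running since the end of the previous
       // sweep; sample it (and the current amount of free space) before starting
       // the intra-sweep timer, which times this sweep itself.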
6267   _inter_sweep_timer.stop();
6268   _inter_sweep_estimate.sample(_inter_sweep_timer.seconds());
6269   size_policy()->avg_cms_free_at_sweep()->sample(_cmsGen->free());
6270 
6271   assert(!_intra_sweep_timer.is_active(), "Should not be active");
6272   _intra_sweep_timer.reset();
6273   _intra_sweep_timer.start();
6274   if (asynch) {
6275     TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
6276     CMSPhaseAccounting pa(this, "sweep", !PrintGCDetails);
6277     // First sweep the old gen
6278     {
6279       CMSTokenSyncWithLocks ts(true, _cmsGen->freelistLock(),
6280                                bitMapLock());
6281       sweepWork(_cmsGen, asynch);
6282     }
6283 
6284     // Update Universe::_heap_*_at_gc figures.
6285     // We need all the free list locks to make the abstract state
6286     // transition from Sweeping to Resetting. See detailed note
6287     // further below.
6288     {
6289       CMSTokenSyncWithLocks ts(true, _cmsGen->freelistLock());
6290       // Update heap occupancy information which is used as
6291       // input to soft ref clearing policy at the next gc.
6292       Universe::update_heap_info_at_gc();
6293       _collectorState = Resizing;
6294     }
6295   } else {
6296     // already have needed locks
6297     sweepWork(_cmsGen, asynch);
6298     // Update heap occupancy information which is used as
6299     // input to soft ref clearing policy at the next gc.
6300     Universe::update_heap_info_at_gc();
6301     _collectorState = Resizing;
6302   }
6303   verify_work_stacks_empty();
6304   verify_overflow_empty();
6305 
6306   if (should_unload_classes()) {
6307     ClassLoaderDataGraph::purge();
6308   }
6309 
6310   _intra_sweep_timer.stop();
6311   _intra_sweep_estimate.sample(_intra_sweep_timer.seconds());
6312 
6313   _inter_sweep_timer.reset();
6314   _inter_sweep_timer.start();
6315 
6316   // We need to use a monotonically non-decreasing time in ms,
6317   // or we will see time-warp warnings; os::javaTimeMillis() does not
6318   // guarantee monotonicity, so derive the value from os::javaTimeNanos().
6319   jlong now = os::javaTimeNanos() / NANOSECS_PER_MILLISEC;
6320   update_time_of_last_gc(now);
6321 
6322   // NOTE on abstract state transitions:
6323   // Mutators allocate-live and/or mark the mod-union table dirty
6324   // based on the state of the collection.  The former is done in
6325   // the interval [Marking, Sweeping] and the latter in the interval
6326   // [Marking, Sweeping).  Thus the transitions into the Marking state
6327   // and out of the Sweeping state must be synchronously visible
6328   // globally to the mutators.
6329   // The transition into the Marking state happens with the world
6330   // stopped so the mutators will globally see it.  Sweeping is
6331   // done asynchronously by the background collector so the transition
6332   // from the Sweeping state to the Resizing state must be done
6333   // under the freelistLock (as is the check for whether to
6334   // allocate-live and whether to dirty the mod-union table).
6335   assert(_collectorState == Resizing, "Change of collector state to"
6336     " Resizing must be done under the freelistLocks (plural)");
6337 
6338   // Now that sweeping has been completed, we clear
6339   // the incremental_collection_failed flag,
6340   // thus inviting a younger gen collection to promote into
6341   // this generation. If such a promotion may still fail,
6342   // the flag will be set again when a young collection is
6343   // attempted.
6344   GenCollectedHeap* gch = GenCollectedHeap::heap();
6345   gch->clear_incremental_collection_failed();  // Worth retrying as fresh space may have been freed up
6346   gch->update_full_collections_completed(_collection_count_start);
6347 }
6348 
6349 // FIX ME!!! Looks like this belongs in CFLSpace, with
6350 // CMSGen merely delegating to it.
6351 void ConcurrentMarkSweepGeneration::setNearLargestChunk() {
6352   double nearLargestPercent = FLSLargestBlockCoalesceProximity;
6353   HeapWord*  minAddr        = _cmsSpace->bottom();
6354   HeapWord*  largestAddr    =
6355     (HeapWord*) _cmsSpace->dictionary()->find_largest_dict();
6356   if (largestAddr == NULL) {
6357     // The dictionary appears to be empty.  In this case
6358     // try to coalesce at the end of the heap.
6359     largestAddr = _cmsSpace->end();
6360   }
6361   size_t largestOffset     = pointer_delta(largestAddr, minAddr);
6362   size_t nearLargestOffset =
6363     (size_t)((double)largestOffset * nearLargestPercent) - MinChunkSize;
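       // i.e. position nearLargestChunk nearLargestPercent of the way from the
       // bottom of the space towards the largest free block (or towards the end
       // of the space when the dictionary is empty), backed off by MinChunkSize.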
6364   if (PrintFLSStatistics != 0) {
6365     gclog_or_tty->print_cr(
6366       "CMS: Large Block: " PTR_FORMAT ";"
6367       " Proximity: " PTR_FORMAT " -> " PTR_FORMAT,
6368       largestAddr,
6369       _cmsSpace->nearLargestChunk(), minAddr + nearLargestOffset);
6370   }
6371   _cmsSpace->set_nearLargestChunk(minAddr + nearLargestOffset);
6372 }
6373 
6374 bool ConcurrentMarkSweepGeneration::isNearLargestChunk(HeapWord* addr) {
6375   return addr >= _cmsSpace->nearLargestChunk();
6376 }
6377 
6378 FreeChunk* ConcurrentMarkSweepGeneration::find_chunk_at_end() {
6379   return _cmsSpace->find_chunk_at_end();
6380 }
6381 
6382 void ConcurrentMarkSweepGeneration::update_gc_stats(int current_level,
6383                                                     bool full) {
6384   // The next lower level has been collected.  Gather any statistics
6385   // that are of interest at this point.
6386   if (!full && (current_level + 1) == level()) {
6387     // Gather statistics on the young generation collection.
6388     collector()->stats().record_gc0_end(used());
6389   }
6390 }
6391 
6392 CMSAdaptiveSizePolicy* ConcurrentMarkSweepGeneration::size_policy() {
6393   GenCollectedHeap* gch = GenCollectedHeap::heap();
6394   assert(gch->kind() == CollectedHeap::GenCollectedHeap,
6395     "Wrong type of heap");
6396   CMSAdaptiveSizePolicy* sp = (CMSAdaptiveSizePolicy*)
6397     gch->gen_policy()->size_policy();
6398   assert(sp->is_gc_cms_adaptive_size_policy(),
6399     "Wrong type of size policy");
6400   return sp;
6401 }
6402 
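// Used under RotateCMSCollectionTypes (non-product; see CMSCollector::reset()
// below) to cycle _debug_collection_type through the CollectionTypes enum,
// wrapping around before Unknown_collection_type, so that successive cycles
// exercise the different collection code paths.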
6403 void ConcurrentMarkSweepGeneration::rotate_debug_collection_type() {
6404   if (PrintGCDetails && Verbose) {
6405     gclog_or_tty->print("Rotate from %d ", _debug_collection_type);
6406   }
6407   _debug_collection_type = (CollectionTypes) (_debug_collection_type + 1);
6408   _debug_collection_type =
6409     (CollectionTypes) (_debug_collection_type % Unknown_collection_type);
6410   if (PrintGCDetails && Verbose) {
6411     gclog_or_tty->print_cr("to %d ", _debug_collection_type);
6412   }
6413 }
6414 
6415 void CMSCollector::sweepWork(ConcurrentMarkSweepGeneration* gen,
6416   bool asynch) {
6417   // We iterate over the space(s) underlying this generation,
6418   // checking the mark bit map to see if the bits corresponding
6419   // to specific blocks are marked or not. Blocks that are
6420   // marked are live and are not swept up. All remaining blocks
6421   // are swept up, with coalescing on-the-fly as we sweep up
6422   // contiguous free and/or garbage blocks:
6423   // We need to ensure that the sweeper synchronizes with allocators
6424   // and stop-the-world collectors. In particular, the following
6425   // locks are used:
6426   // . CMS token: if this is held, a stop the world collection cannot occur
6427   // . freelistLock: if this is held no allocation can occur from this
6428   //                 generation by another thread
6429   // . bitMapLock: if this is held, no other thread can access or
6430   //               update the marking bit map
6431 
6432   // Note that we need to hold the freelistLock if we use
6433   // block iterate below; else the iterator might go awry if
6434   // a mutator (or promotion) causes block contents to change
6435   // (for instance if the allocator divvies up a block).
6436   // If we hold the free list lock, for all practical purposes
6437   // young generation GC's can't occur (they'll usually need to
6438   // promote), so we might as well prevent all young generation
6439   // GC's while we do a sweeping step. For the same reason, we might
6440   // as well take the bit map lock for the entire duration
6441 
6442   // check that we hold the requisite locks
6443   assert(have_cms_token(), "Should hold cms token");
6444   assert(   (asynch && ConcurrentMarkSweepThread::cms_thread_has_cms_token())
6445          || (!asynch && ConcurrentMarkSweepThread::vm_thread_has_cms_token()),
6446         "Should possess CMS token to sweep");
6447   assert_lock_strong(gen->freelistLock());
6448   assert_lock_strong(bitMapLock());
6449 
6450   assert(!_inter_sweep_timer.is_active(), "Was switched off in an outer context");
6451   assert(_intra_sweep_timer.is_active(),  "Was switched on  in an outer context");
6452   gen->cmsSpace()->beginSweepFLCensus((float)(_inter_sweep_timer.seconds()),
6453                                       _inter_sweep_estimate.padded_average(),
6454                                       _intra_sweep_estimate.padded_average());
6455   gen->setNearLargestChunk();
6456 
6457   {
6458     SweepClosure sweepClosure(this, gen, &_markBitMap,
6459                             CMSYield && asynch);
6460     gen->cmsSpace()->blk_iterate_careful(&sweepClosure);
6461     // We need to free-up/coalesce garbage/blocks from a
6462     // co-terminal free run. This is done in the SweepClosure
6463     // destructor; so, do not remove this scope, else the
6464     // end-of-sweep-census below will be off by a little bit.
6465   }
6466   gen->cmsSpace()->sweep_completed();
6467   gen->cmsSpace()->endSweepFLCensus(sweep_count());
6468   if (should_unload_classes()) {                // unloaded classes this cycle,
6469     _concurrent_cycles_since_last_unload = 0;   // ... reset count
6470   } else {                                      // did not unload classes,
6471     _concurrent_cycles_since_last_unload++;     // ... increment count
6472   }
6473 }
6474 
6475 // Reset CMS data structures (for now just the marking bit map)
6476 // preparatory for the next cycle.
6477 void CMSCollector::reset(bool asynch) {
6478   GenCollectedHeap* gch = GenCollectedHeap::heap();
6479   CMSAdaptiveSizePolicy* sp = size_policy();
6480   AdaptiveSizePolicyOutput(sp, gch->total_collections());
6481   if (asynch) {
6482     CMSTokenSyncWithLocks ts(true, bitMapLock());
6483 
6484     // If the state is not "Resetting", the foreground thread
6485     // has already done the collection and the resetting.
6486     if (_collectorState != Resetting) {
6487       assert(_collectorState == Idling, "The state should only change"
6488         " because the foreground collector has finished the collection");
6489       return;
6490     }
6491 
6492     // Clear the mark bitmap (no grey objects to start with)
6493     // for the next cycle.
6494     TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
6495     CMSPhaseAccounting cmspa(this, "reset", !PrintGCDetails);
6496 
6497     HeapWord* curAddr = _markBitMap.startWord();
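    // Clear the bit map in chunks of (at most) CMSBitMapYieldQuantum words,
    // giving up the bit map lock and the CMS token between chunks so that
    // a foreground (stop-the-world) collection is not held up by the reset.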
6498     while (curAddr < _markBitMap.endWord()) {
6499       size_t remaining  = pointer_delta(_markBitMap.endWord(), curAddr);
6500       MemRegion chunk(curAddr, MIN2(CMSBitMapYieldQuantum, remaining));
6501       _markBitMap.clear_large_range(chunk);
6502       if (ConcurrentMarkSweepThread::should_yield() &&
6503           !foregroundGCIsActive() &&
6504           CMSYield) {
6505         assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
6506                "CMS thread should hold CMS token");
6507         assert_lock_strong(bitMapLock());
6508         bitMapLock()->unlock();
6509         ConcurrentMarkSweepThread::desynchronize(true);
6510         ConcurrentMarkSweepThread::acknowledge_yield_request();
6511         stopTimer();
6512         if (PrintCMSStatistics != 0) {
6513           incrementYields();
6514         }
6515         icms_wait();
6516 
6517         // See the comment in coordinator_yield()
6518         for (unsigned i = 0; i < CMSYieldSleepCount &&
6519                          ConcurrentMarkSweepThread::should_yield() &&
6520                          !CMSCollector::foregroundGCIsActive(); ++i) {
6521           os::sleep(Thread::current(), 1, false);
6522           ConcurrentMarkSweepThread::acknowledge_yield_request();
6523         }
6524 
6525         ConcurrentMarkSweepThread::synchronize(true);
6526         bitMapLock()->lock_without_safepoint_check();
6527         startTimer();
6528       }
6529       curAddr = chunk.end();
6530     }
6531     // A successful mostly concurrent collection has been done.
6532     // Because only the full (i.e., concurrent mode failure) collections
6533     // are being measured for gc overhead limits, clean the "near" flag
6534     // and count.
6535     sp->reset_gc_overhead_limit_count();
6536     _collectorState = Idling;
6537   } else {
6538     // already have the lock
6539     assert(_collectorState == Resetting, "just checking");
6540     assert_lock_strong(bitMapLock());
6541     _markBitMap.clear_all();
6542     _collectorState = Idling;
6543   }
6544 
6545   // Stop incremental mode after a cycle completes, so that any future cycles
6546   // are triggered by allocation.
6547   stop_icms();
6548 
6549   NOT_PRODUCT(
6550     if (RotateCMSCollectionTypes) {
6551       _cmsGen->rotate_debug_collection_type();
6552     }
6553   )
6554 
6555   register_gc_end();
6556 }
6557 
6558 void CMSCollector::do_CMS_operation(CMS_op_type op, GCCause::Cause gc_cause) {
6559   gclog_or_tty->date_stamp(PrintGC && PrintGCDateStamps);
6560   TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
6561   GCTraceTime t(GCCauseString("GC", gc_cause), PrintGC, !PrintGCDetails, NULL);
6562   TraceCollectorStats tcs(counters());
6563 
6564   switch (op) {
6565     case CMS_op_checkpointRootsInitial: {
6566       SvcGCMarker sgcm(SvcGCMarker::OTHER);
6567       checkpointRootsInitial(true);       // asynch
6568       if (PrintGC) {
6569         _cmsGen->printOccupancy("initial-mark");
6570       }
6571       break;
6572     }
6573     case CMS_op_checkpointRootsFinal: {
6574       SvcGCMarker sgcm(SvcGCMarker::OTHER);
6575       checkpointRootsFinal(true,    // asynch
6576                            false,   // !clear_all_soft_refs
6577                            false);  // !init_mark_was_synchronous
6578       if (PrintGC) {
6579         _cmsGen->printOccupancy("remark");
6580       }
6581       break;
6582     }
6583     default:
6584       fatal("No such CMS_op");
6585   }
6586 }
6587 
6588 #ifndef PRODUCT
6589 size_t const CMSCollector::skip_header_HeapWords() {
6590   return FreeChunk::header_size();
6591 }
6592 
6593 // Try to collect here the conditions that should hold when the
6594 // CMS thread is exiting. The idea is that the foreground GC
6595 // thread should not be blocked if it wants to terminate
6596 // the CMS thread and yet continue to run the VM for a while
6597 // after that.
6598 void CMSCollector::verify_ok_to_terminate() const {
6599   assert(Thread::current()->is_ConcurrentGC_thread(),
6600          "should be called by CMS thread");
6601   assert(!_foregroundGCShouldWait, "should be false");
6602   // We could check here that all the various low-level locks
6603   // are not held by the CMS thread, but that is overkill; see
6604   // also CMSThread::verify_ok_to_terminate() where the CGC_lock
6605   // is checked.
6606 }
6607 #endif
6608 
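// A note on the encoding used below: a block carrying "Printezis marks" has,
// in addition to the mark bit at its start address, a bit set at addr + 1
// and a bit set at its last word.  This lets the size of an object whose
// klass pointer is not yet installed be recovered from the bit map alone,
// which is what the two helpers below do.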
6609 size_t CMSCollector::block_size_using_printezis_bits(HeapWord* addr) const {
6610    assert(_markBitMap.isMarked(addr) && _markBitMap.isMarked(addr + 1),
6611           "missing Printezis mark?");
6612   HeapWord* nextOneAddr = _markBitMap.getNextMarkedWordAddress(addr + 2);
6613   size_t size = pointer_delta(nextOneAddr + 1, addr);
6614   assert(size == CompactibleFreeListSpace::adjustObjectSize(size),
6615          "alignment problem");
6616   assert(size >= 3, "Necessary for Printezis marks to work");
6617   return size;
6618 }
6619 
6620 // A variant of the above (block_size_using_printezis_bits()) except
6621 // that we return 0 if the P-bits are not yet set.
6622 size_t CMSCollector::block_size_if_printezis_bits(HeapWord* addr) const {
6623   if (_markBitMap.isMarked(addr + 1)) {
6624     assert(_markBitMap.isMarked(addr), "P-bit can be set only for marked objects");
6625     HeapWord* nextOneAddr = _markBitMap.getNextMarkedWordAddress(addr + 2);
6626     size_t size = pointer_delta(nextOneAddr + 1, addr);
6627     assert(size == CompactibleFreeListSpace::adjustObjectSize(size),
6628            "alignment problem");
6629     assert(size >= 3, "Necessary for Printezis marks to work");
6630     return size;
6631   }
6632   return 0;
6633 }
6634 
6635 HeapWord* CMSCollector::next_card_start_after_block(HeapWord* addr) const {
6636   size_t sz = 0;
6637   oop p = (oop)addr;
6638   if (p->klass_or_null() != NULL) {
6639     sz = CompactibleFreeListSpace::adjustObjectSize(p->size());
6640   } else {
6641     sz = block_size_using_printezis_bits(addr);
6642   }
6643   assert(sz > 0, "size must be nonzero");
6644   HeapWord* next_block = addr + sz;
6645   HeapWord* next_card  = (HeapWord*)round_to((uintptr_t)next_block,
6646                                              CardTableModRefBS::card_size);
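  // For example (illustrative figures, assuming 512-byte cards on a 64-bit
  // VM): a block starting at byte address 0x1000 with an adjusted size of
  // 40 words (320 bytes) has next_block at 0x1140, which rounds up to the
  // next card boundary at 0x1200.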
6647   assert(round_down((uintptr_t)addr,      CardTableModRefBS::card_size) <
6648          round_down((uintptr_t)next_card, CardTableModRefBS::card_size),
6649          "must be different cards");
6650   return next_card;
6651 }
6652 
6653 
6654 // CMS Bit Map Wrapper /////////////////////////////////////////
6655 
6656 // Construct a CMS bit map infrastructure, but don't create the
6657 // bit vector itself. That is done by a separate call to CMSBitMap::allocate()
6658 // further below.
6659 CMSBitMap::CMSBitMap(int shifter, int mutex_rank, const char* mutex_name):
6660   _bm(),
6661   _shifter(shifter),
6662   _lock(mutex_rank >= 0 ? new Mutex(mutex_rank, mutex_name, true) : NULL)
6663 {
6664   _bmStartWord = 0;
6665   _bmWordSize  = 0;
6666 }
6667 
6668 bool CMSBitMap::allocate(MemRegion mr) {
6669   _bmStartWord = mr.start();
6670   _bmWordSize  = mr.word_size();
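  // Sizing note (illustrative, assuming _shifter == 0, i.e. one bit per
  // HeapWord): a 1 GB old generation is 2^27 HeapWords on a 64-bit VM, so
  // the reservation below needs 2^27 bits == 16 MB of backing store.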
6671   ReservedSpace brs(ReservedSpace::allocation_align_size_up(
6672                      (_bmWordSize >> (_shifter + LogBitsPerByte)) + 1));
6673   if (!brs.is_reserved()) {
6674     warning("CMS bit map allocation failure");
6675     return false;
6676   }
6677   // For now we'll just commit all of the bit map up front.
6678   // Later on we'll try to be more parsimonious with swap.
6679   if (!_virtual_space.initialize(brs, brs.size())) {
6680     warning("CMS bit map backing store failure");
6681     return false;
6682   }
6683   assert(_virtual_space.committed_size() == brs.size(),
6684          "didn't reserve backing store for all of CMS bit map?");
6685   _bm.set_map((BitMap::bm_word_t*)_virtual_space.low());
6686   assert(_virtual_space.committed_size() << (_shifter + LogBitsPerByte) >=
6687          _bmWordSize, "inconsistency in bit map sizing");
6688   _bm.set_size(_bmWordSize >> _shifter);
6689 
6690   // bm.clear(); // can we rely on getting zero'd memory? verify below
6691   assert(isAllClear(),
6692          "Expected zero'd memory from ReservedSpace constructor");
6693   assert(_bm.size() == heapWordDiffToOffsetDiff(sizeInWords()),
6694          "consistency check");
6695   return true;
6696 }
6697 
6698 void CMSBitMap::dirty_range_iterate_clear(MemRegion mr, MemRegionClosure* cl) {
6699   HeapWord *next_addr, *end_addr, *last_addr;
6700   assert_locked();
6701   assert(covers(mr), "out-of-range error");
6702   // XXX assert that start and end are appropriately aligned
6703   for (next_addr = mr.start(), end_addr = mr.end();
6704        next_addr < end_addr; next_addr = last_addr) {
6705     MemRegion dirty_region = getAndClearMarkedRegion(next_addr, end_addr);
6706     last_addr = dirty_region.end();
6707     if (!dirty_region.is_empty()) {
6708       cl->do_MemRegion(dirty_region);
6709     } else {
6710       assert(last_addr == end_addr, "program logic");
6711       return;
6712     }
6713   }
6714 }
6715 
6716 void CMSBitMap::print_on_error(outputStream* st, const char* prefix) const {
6717   _bm.print_on_error(st, prefix);
6718 }
6719 
6720 #ifndef PRODUCT
6721 void CMSBitMap::assert_locked() const {
6722   CMSLockVerifier::assert_locked(lock());
6723 }
6724 
6725 bool CMSBitMap::covers(MemRegion mr) const {
6726   // assert(_bm.map() == _virtual_space.low(), "map inconsistency");
6727   assert((size_t)_bm.size() == (_bmWordSize >> _shifter),
6728          "size inconsistency");
6729   return (mr.start() >= _bmStartWord) &&
6730          (mr.end()   <= endWord());
6731 }
6732 
6733 bool CMSBitMap::covers(HeapWord* start, size_t size) const {
6734   return (start >= _bmStartWord && (start + size) <= endWord());
6735 }
6736 
6737 void CMSBitMap::verifyNoOneBitsInRange(HeapWord* left, HeapWord* right) {
6738   // verify that there are no 1 bits in the interval [left, right)
6739   FalseBitMapClosure falseBitMapClosure;
6740   iterate(&falseBitMapClosure, left, right);
6741 }
6742 
6743 void CMSBitMap::region_invariant(MemRegion mr)
6744 {
6745   assert_locked();
6746   // mr = mr.intersection(MemRegion(_bmStartWord, _bmWordSize));
6747   assert(!mr.is_empty(), "unexpected empty region");
6748   assert(covers(mr), "mr should be covered by bit map");
6749   // convert address range into offset range
6750   size_t start_ofs = heapWordToOffset(mr.start());
6751   // Make sure that end() is appropriately aligned
6752   assert(mr.end() == (HeapWord*)round_to((intptr_t)mr.end(),
6753                         (1 << (_shifter+LogHeapWordSize))),
6754          "Misaligned mr.end()");
6755   size_t end_ofs   = heapWordToOffset(mr.end());
6756   assert(end_ofs > start_ofs, "Should mark at least one bit");
6757 }
6758 
6759 #endif
6760 
6761 bool CMSMarkStack::allocate(size_t size) {
6762   // allocate a stack of the requisite depth
6763   ReservedSpace rs(ReservedSpace::allocation_align_size_up(
6764                    size * sizeof(oop)));
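  // Sizing note (illustrative): each entry is a full oop pointer, so a
  // stack of, say, 32K entries reserves 32K * 8 = 256 KB of virtual space
  // on a 64-bit VM.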
6765   if (!rs.is_reserved()) {
6766     warning("CMSMarkStack allocation failure");
6767     return false;
6768   }
6769   if (!_virtual_space.initialize(rs, rs.size())) {
6770     warning("CMSMarkStack backing store failure");
6771     return false;
6772   }
6773   assert(_virtual_space.committed_size() == rs.size(),
6774          "didn't reserve backing store for all of CMS stack?");
6775   _base = (oop*)(_virtual_space.low());
6776   _index = 0;
6777   _capacity = size;
6778   NOT_PRODUCT(_max_depth = 0);
6779   return true;
6780 }
6781 
6782 // XXX FIX ME !!! In the MT case we come in here holding a
6783 // leaf lock. For printing we need to take a further lock
6784 // which has lower rank. We need to recalibrate the two
6785 // lock-ranks involved in order to be able to print the
6786 // messages below. (Or defer the printing to the caller.
6787 // For now we take the expedient path of just disabling the
6788 // messages for the problematic case.)
6789 void CMSMarkStack::expand() {
6790   assert(_capacity <= MarkStackSizeMax, "stack bigger than permitted");
6791   if (_capacity == MarkStackSizeMax) {
6792     if (_hit_limit++ == 0 && !CMSConcurrentMTEnabled && PrintGCDetails) {
6793       // We print a warning message only once per CMS cycle.
6794       gclog_or_tty->print_cr(" (benign) Hit CMSMarkStack max size limit");
6795     }
6796     return;
6797   }
6798   // Double capacity if possible
6799   size_t new_capacity = MIN2(_capacity*2, MarkStackSizeMax);
6800   // Do not give up the existing stack until we have managed to
6801   // reserve the doubled capacity that we desire.
6802   ReservedSpace rs(ReservedSpace::allocation_align_size_up(
6803                    new_capacity * sizeof(oop)));
6804   if (rs.is_reserved()) {
6805     // Release the backing store associated with old stack
6806     _virtual_space.release();
6807     // Reinitialize virtual space for new stack
6808     if (!_virtual_space.initialize(rs, rs.size())) {
6809       fatal("Not enough swap for expanded marking stack");
6810     }
6811     _base = (oop*)(_virtual_space.low());
6812     _index = 0;
6813     _capacity = new_capacity;
6814   } else if (_failed_double++ == 0 && !CMSConcurrentMTEnabled && PrintGCDetails) {
6815     // Failed to double capacity, continue;
6816     // we print a detail message only once per CMS cycle.
6817     gclog_or_tty->print(" (benign) Failed to expand marking stack from "SIZE_FORMAT"K to "
6818             SIZE_FORMAT"K",
6819             _capacity / K, new_capacity / K);
6820   }
6821 }
6822 
6823 
6824 // Closures
6825 // XXX: there seems to be a lot of code duplication here;
6826 // should refactor and consolidate common code.
6827 
6828 // This closure is used to mark refs into the CMS generation in
6829 // the CMS bit map. Called at the first checkpoint. This closure
6830 // assumes that we do not need to re-mark dirty cards; if the CMS
6831 // generation on which this is used is not the oldest
6832 // generation, then this will lose younger_gen cards!
6833 
6834 MarkRefsIntoClosure::MarkRefsIntoClosure(
6835   MemRegion span, CMSBitMap* bitMap):
6836     _span(span),
6837     _bitMap(bitMap)
6838 {
6839     assert(_ref_processor == NULL, "deliberately left NULL");
6840     assert(_bitMap->covers(_span), "_bitMap/_span mismatch");
6841 }
6842 
6843 void MarkRefsIntoClosure::do_oop(oop obj) {
6844   // if p points into _span, then mark corresponding bit in _markBitMap
6845   assert(obj->is_oop(), "expected an oop");
6846   HeapWord* addr = (HeapWord*)obj;
6847   if (_span.contains(addr)) {
6848     // this should be made more efficient
6849     _bitMap->mark(addr);
6850   }
6851 }
6852 
6853 void MarkRefsIntoClosure::do_oop(oop* p)       { MarkRefsIntoClosure::do_oop_work(p); }
6854 void MarkRefsIntoClosure::do_oop(narrowOop* p) { MarkRefsIntoClosure::do_oop_work(p); }
6855 
6856 Par_MarkRefsIntoClosure::Par_MarkRefsIntoClosure(
6857   MemRegion span, CMSBitMap* bitMap):
6858     _span(span),
6859     _bitMap(bitMap)
6860 {
6861     assert(_ref_processor == NULL, "deliberately left NULL");
6862     assert(_bitMap->covers(_span), "_bitMap/_span mismatch");
6863 }
6864 
6865 void Par_MarkRefsIntoClosure::do_oop(oop obj) {
6866   // if p points into _span, then mark corresponding bit in _markBitMap
6867   assert(obj->is_oop(), "expected an oop");
6868   HeapWord* addr = (HeapWord*)obj;
6869   if (_span.contains(addr)) {
6870     // this should be made more efficient
6871     _bitMap->par_mark(addr);
6872   }
6873 }
6874 
6875 void Par_MarkRefsIntoClosure::do_oop(oop* p)       { Par_MarkRefsIntoClosure::do_oop_work(p); }
6876 void Par_MarkRefsIntoClosure::do_oop(narrowOop* p) { Par_MarkRefsIntoClosure::do_oop_work(p); }
6877 
6878 // A variant of the above, used for CMS marking verification.
6879 MarkRefsIntoVerifyClosure::MarkRefsIntoVerifyClosure(
6880   MemRegion span, CMSBitMap* verification_bm, CMSBitMap* cms_bm):
6881     _span(span),
6882     _verification_bm(verification_bm),
6883     _cms_bm(cms_bm)
6884 {
6885     assert(_ref_processor == NULL, "deliberately left NULL");
6886     assert(_verification_bm->covers(_span), "_verification_bm/_span mismatch");
6887 }
6888 
6889 void MarkRefsIntoVerifyClosure::do_oop(oop obj) {
6890   // if p points into _span, then mark corresponding bit in _markBitMap
6891   assert(obj->is_oop(), "expected an oop");
6892   HeapWord* addr = (HeapWord*)obj;
6893   if (_span.contains(addr)) {
6894     _verification_bm->mark(addr);
6895     if (!_cms_bm->isMarked(addr)) {
6896       oop(addr)->print();
6897       gclog_or_tty->print_cr(" (" INTPTR_FORMAT " should have been marked)", addr);
6898       fatal("... aborting");
6899     }
6900   }
6901 }
6902 
6903 void MarkRefsIntoVerifyClosure::do_oop(oop* p)       { MarkRefsIntoVerifyClosure::do_oop_work(p); }
6904 void MarkRefsIntoVerifyClosure::do_oop(narrowOop* p) { MarkRefsIntoVerifyClosure::do_oop_work(p); }
6905 
6906 //////////////////////////////////////////////////
6907 // MarkRefsIntoAndScanClosure
6908 //////////////////////////////////////////////////
6909 
6910 MarkRefsIntoAndScanClosure::MarkRefsIntoAndScanClosure(MemRegion span,
6911                                                        ReferenceProcessor* rp,
6912                                                        CMSBitMap* bit_map,
6913                                                        CMSBitMap* mod_union_table,
6914                                                        CMSMarkStack*  mark_stack,
6915                                                        CMSCollector* collector,
6916                                                        bool should_yield,
6917                                                        bool concurrent_precleaning):
6918   _collector(collector),
6919   _span(span),
6920   _bit_map(bit_map),
6921   _mark_stack(mark_stack),
6922   _pushAndMarkClosure(collector, span, rp, bit_map, mod_union_table,
6923                       mark_stack, concurrent_precleaning),
6924   _yield(should_yield),
6925   _concurrent_precleaning(concurrent_precleaning),
6926   _freelistLock(NULL)
6927 {
6928   _ref_processor = rp;
6929   assert(_ref_processor != NULL, "_ref_processor shouldn't be NULL");
6930 }
6931 
6932 // This closure is used to mark refs into the CMS generation at the
6933 // second (final) checkpoint, and to scan and transitively follow
6934 // the unmarked oops. It is also used during the concurrent precleaning
6935 // phase while scanning objects on dirty cards in the CMS generation.
6936 // The marks are made in the marking bit map and the marking stack is
6937 // used for keeping the (newly) grey objects during the scan.
6938 // The parallel version (Par_...) appears further below.
6939 void MarkRefsIntoAndScanClosure::do_oop(oop obj) {
6940   if (obj != NULL) {
6941     assert(obj->is_oop(), "expected an oop");
6942     HeapWord* addr = (HeapWord*)obj;
6943     assert(_mark_stack->isEmpty(), "pre-condition (eager drainage)");
6944     assert(_collector->overflow_list_is_empty(),
6945            "overflow list should be empty");
6946     if (_span.contains(addr) &&
6947         !_bit_map->isMarked(addr)) {
6948       // mark bit map (object is now grey)
6949       _bit_map->mark(addr);
6950       // push on marking stack (stack should be empty), and drain the
6951       // stack by applying this closure to the oops in the oops popped
6952       // from the stack (i.e. blacken the grey objects)
6953       bool res = _mark_stack->push(obj);
6954       assert(res, "Should have space to push on empty stack");
6955       do {
6956         oop new_oop = _mark_stack->pop();
6957         assert(new_oop != NULL && new_oop->is_oop(), "Expected an oop");
6958         assert(_bit_map->isMarked((HeapWord*)new_oop),
6959                "only grey objects on this stack");
6960         // iterate over the oops in this oop, marking and pushing
6961         // the ones in CMS heap (i.e. in _span).
6962         new_oop->oop_iterate(&_pushAndMarkClosure);
6963         // check if it's time to yield
6964         do_yield_check();
6965       } while (!_mark_stack->isEmpty() ||
6966                (!_concurrent_precleaning && take_from_overflow_list()));
6967         // if marking stack is empty, and we are not doing this
6968         // during precleaning, then check the overflow list
6969     }
6970     assert(_mark_stack->isEmpty(), "post-condition (eager drainage)");
6971     assert(_collector->overflow_list_is_empty(),
6972            "overflow list was drained above");
6973     // We could restore evacuated mark words, if any, used for
6974     // overflow list links here because the overflow list is
6975     // provably empty here. That would reduce the maximum
6976     // size requirements for preserved_{oop,mark}_stack.
6977     // But we'll just postpone it until we are all done
6978     // so we can just stream through.
6979     if (!_concurrent_precleaning && CMSOverflowEarlyRestoration) {
6980       _collector->restore_preserved_marks_if_any();
6981       assert(_collector->no_preserved_marks(), "No preserved marks");
6982     }
6983     assert(!CMSOverflowEarlyRestoration || _collector->no_preserved_marks(),
6984            "All preserved marks should have been restored above");
6985   }
6986 }
6987 
6988 void MarkRefsIntoAndScanClosure::do_oop(oop* p)       { MarkRefsIntoAndScanClosure::do_oop_work(p); }
6989 void MarkRefsIntoAndScanClosure::do_oop(narrowOop* p) { MarkRefsIntoAndScanClosure::do_oop_work(p); }
6990 
6991 void MarkRefsIntoAndScanClosure::do_yield_work() {
6992   assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
6993          "CMS thread should hold CMS token");
6994   assert_lock_strong(_freelistLock);
6995   assert_lock_strong(_bit_map->lock());
6996   // relinquish the free_list_lock and bitMaplock()
6997   _bit_map->lock()->unlock();
6998   _freelistLock->unlock();
6999   ConcurrentMarkSweepThread::desynchronize(true);
7000   ConcurrentMarkSweepThread::acknowledge_yield_request();
7001   _collector->stopTimer();
7002   GCPauseTimer p(_collector->size_policy()->concurrent_timer_ptr());
7003   if (PrintCMSStatistics != 0) {
7004     _collector->incrementYields();
7005   }
7006   _collector->icms_wait();
7007 
7008   // See the comment in coordinator_yield()
7009   for (unsigned i = 0;
7010        i < CMSYieldSleepCount &&
7011        ConcurrentMarkSweepThread::should_yield() &&
7012        !CMSCollector::foregroundGCIsActive();
7013        ++i) {
7014     os::sleep(Thread::current(), 1, false);
7015     ConcurrentMarkSweepThread::acknowledge_yield_request();
7016   }
7017 
7018   ConcurrentMarkSweepThread::synchronize(true);
7019   _freelistLock->lock_without_safepoint_check();
7020   _bit_map->lock()->lock_without_safepoint_check();
7021   _collector->startTimer();
7022 }
7023 
7024 ///////////////////////////////////////////////////////////
7025 // Par_MarkRefsIntoAndScanClosure: a parallel version of
7026 //                                 MarkRefsIntoAndScanClosure
7027 ///////////////////////////////////////////////////////////
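// Note: _low_water_mark (initialized in the constructor below) is a quarter
// of the work queue's capacity, capped at CMSWorkQueueDrainThreshold entries
// per GC worker thread; do_oop() trims the queue back down to this mark
// after each push, bounding the amount of work any one queue accumulates.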
7028 Par_MarkRefsIntoAndScanClosure::Par_MarkRefsIntoAndScanClosure(
7029   CMSCollector* collector, MemRegion span, ReferenceProcessor* rp,
7030   CMSBitMap* bit_map, OopTaskQueue* work_queue):
7031   _span(span),
7032   _bit_map(bit_map),
7033   _work_queue(work_queue),
7034   _low_water_mark(MIN2((uint)(work_queue->max_elems()/4),
7035                        (uint)(CMSWorkQueueDrainThreshold * ParallelGCThreads))),
7036   _par_pushAndMarkClosure(collector, span, rp, bit_map, work_queue)
7037 {
7038   _ref_processor = rp;
7039   assert(_ref_processor != NULL, "_ref_processor shouldn't be NULL");
7040 }
7041 
7042 // This closure is used to mark refs into the CMS generation at the
7043 // second (final) checkpoint, and to scan and transitively follow
7044 // the unmarked oops. The marks are made in the marking bit map and
7045 // the work_queue is used for keeping the (newly) grey objects during
7046 // the scan phase whence they are also available for stealing by parallel
7047 // threads. Since the marking bit map is shared, updates are
7048 // synchronized (via CAS).
7049 void Par_MarkRefsIntoAndScanClosure::do_oop(oop obj) {
7050   if (obj != NULL) {
7051     // Ignore mark word because this could be an already marked oop
7052     // that may be chained at the end of the overflow list.
7053     assert(obj->is_oop(true), "expected an oop");
7054     HeapWord* addr = (HeapWord*)obj;
7055     if (_span.contains(addr) &&
7056         !_bit_map->isMarked(addr)) {
7057       // mark bit map (object will become grey):
7058       // It is possible for several threads to be
7059       // trying to "claim" this object concurrently;
7060       // the unique thread that succeeds in marking the
7061       // object first will do the subsequent push on
7062       // to the work queue (or overflow list).
7063       if (_bit_map->par_mark(addr)) {
7064         // push on work_queue (which may not be empty), and trim the
7065         // queue to an appropriate length by applying this closure to
7066         // the oops in the oops popped from the stack (i.e. blacken the
7067         // grey objects)
7068         bool res = _work_queue->push(obj);
7069         assert(res, "Low water mark should be less than capacity?");
7070         trim_queue(_low_water_mark);
7071       } // Else, another thread claimed the object
7072     }
7073   }
7074 }
7075 
7076 void Par_MarkRefsIntoAndScanClosure::do_oop(oop* p)       { Par_MarkRefsIntoAndScanClosure::do_oop_work(p); }
7077 void Par_MarkRefsIntoAndScanClosure::do_oop(narrowOop* p) { Par_MarkRefsIntoAndScanClosure::do_oop_work(p); }
7078 
7079 // This closure is used to rescan the marked objects on the dirty cards
7080 // in the mod union table and the card table proper.
7081 size_t ScanMarkedObjectsAgainCarefullyClosure::do_object_careful_m(
7082   oop p, MemRegion mr) {
7083 
7084   size_t size = 0;
7085   HeapWord* addr = (HeapWord*)p;
7086   DEBUG_ONLY(_collector->verify_work_stacks_empty();)
7087   assert(_span.contains(addr), "we are scanning the CMS generation");
7088   // check if it's time to yield
7089   if (do_yield_check()) {
7090     // We yielded for some foreground stop-world work,
7091     // and we have been asked to abort this ongoing preclean cycle.
7092     return 0;
7093   }
7094   if (_bitMap->isMarked(addr)) {
7095     // it's marked; is it potentially uninitialized?
7096     if (p->klass_or_null() != NULL) {
7097         // an initialized object; ignore mark word in verification below
7098         // since we are running concurrent with mutators
7099         assert(p->is_oop(true), "should be an oop");
7100         if (p->is_objArray()) {
7101           // objArrays are precisely marked; restrict scanning
7102           // to dirty cards only.
7103           size = CompactibleFreeListSpace::adjustObjectSize(
7104                    p->oop_iterate(_scanningClosure, mr));
7105         } else {
7106           // A non-array may have been imprecisely marked; we need
7107           // to scan the object in its entirety.
7108           size = CompactibleFreeListSpace::adjustObjectSize(
7109                    p->oop_iterate(_scanningClosure));
7110         }
7111         #ifdef ASSERT
7112           size_t direct_size =
7113             CompactibleFreeListSpace::adjustObjectSize(p->size());
7114           assert(size == direct_size, "Inconsistency in size");
7115           assert(size >= 3, "Necessary for Printezis marks to work");
7116           if (!_bitMap->isMarked(addr+1)) {
7117             _bitMap->verifyNoOneBitsInRange(addr+2, addr+size);
7118           } else {
7119             _bitMap->verifyNoOneBitsInRange(addr+2, addr+size-1);
7120             assert(_bitMap->isMarked(addr+size-1),
7121                    "inconsistent Printezis mark");
7122           }
7123         #endif // ASSERT
7124     } else {
7125       // an uninitialized object
7126       assert(_bitMap->isMarked(addr+1), "missing Printezis mark?");
7127       HeapWord* nextOneAddr = _bitMap->getNextMarkedWordAddress(addr + 2);
7128       size = pointer_delta(nextOneAddr + 1, addr);
7129       assert(size == CompactibleFreeListSpace::adjustObjectSize(size),
7130              "alignment problem");
7131       // Note that pre-cleaning needn't redirty the card. OopDesc::set_klass()
7132       // will dirty the card when the klass pointer is installed in the
7133       // object (signalling the completion of initialization).
7134     }
7135   } else {
7136     // Either a not yet marked object or an uninitialized object
7137     if (p->klass_or_null() == NULL) {
7138       // An uninitialized object, skip to the next card, since
7139       // we may not be able to read its P-bits yet.
7140       assert(size == 0, "Initial value");
7141     } else {
7142       // An object not (yet) reached by marking: we merely need to
7143       // compute its size so as to go look at the next block.
7144       assert(p->is_oop(true), "should be an oop");
7145       size = CompactibleFreeListSpace::adjustObjectSize(p->size());
7146     }
7147   }
7148   DEBUG_ONLY(_collector->verify_work_stacks_empty();)
7149   return size;
7150 }
7151 
7152 void ScanMarkedObjectsAgainCarefullyClosure::do_yield_work() {
7153   assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
7154          "CMS thread should hold CMS token");
7155   assert_lock_strong(_freelistLock);
7156   assert_lock_strong(_bitMap->lock());
7157   // relinquish the free_list_lock and bitMaplock()
7158   _bitMap->lock()->unlock();
7159   _freelistLock->unlock();
7160   ConcurrentMarkSweepThread::desynchronize(true);
7161   ConcurrentMarkSweepThread::acknowledge_yield_request();
7162   _collector->stopTimer();
7163   GCPauseTimer p(_collector->size_policy()->concurrent_timer_ptr());
7164   if (PrintCMSStatistics != 0) {
7165     _collector->incrementYields();
7166   }
7167   _collector->icms_wait();
7168 
7169   // See the comment in coordinator_yield()
7170   for (unsigned i = 0; i < CMSYieldSleepCount &&
7171                    ConcurrentMarkSweepThread::should_yield() &&
7172                    !CMSCollector::foregroundGCIsActive(); ++i) {
7173     os::sleep(Thread::current(), 1, false);
7174     ConcurrentMarkSweepThread::acknowledge_yield_request();
7175   }
7176 
7177   ConcurrentMarkSweepThread::synchronize(true);
7178   _freelistLock->lock_without_safepoint_check();
7179   _bitMap->lock()->lock_without_safepoint_check();
7180   _collector->startTimer();
7181 }
7182 
7183 
7184 //////////////////////////////////////////////////////////////////
7185 // SurvivorSpacePrecleanClosure
7186 //////////////////////////////////////////////////////////////////
7187 // This (single-threaded) closure is used to preclean the oops in
7188 // the survivor spaces.
7189 size_t SurvivorSpacePrecleanClosure::do_object_careful(oop p) {
7190 
7191   HeapWord* addr = (HeapWord*)p;
7192   DEBUG_ONLY(_collector->verify_work_stacks_empty();)
7193   assert(!_span.contains(addr), "we are scanning the survivor spaces");
7194   assert(p->klass_or_null() != NULL, "object should be initialized");
7195   // an initialized object; ignore mark word in verification below
7196   // since we are running concurrent with mutators
7197   assert(p->is_oop(true), "should be an oop");
7198   // Note that we do not yield while we iterate over
7199   // the interior oops of p, pushing the relevant ones
7200   // on our marking stack.
7201   size_t size = p->oop_iterate(_scanning_closure);
7202   do_yield_check();
7203   // Observe that below, we do not abandon the preclean
7204   // phase as soon as we should; rather we empty the
7205   // marking stack before returning. This is to satisfy
7206   // some existing assertions. In general, it may be a
7207   // good idea to abort immediately and complete the marking
7208   // from the grey objects at a later time.
7209   while (!_mark_stack->isEmpty()) {
7210     oop new_oop = _mark_stack->pop();
7211     assert(new_oop != NULL && new_oop->is_oop(), "Expected an oop");
7212     assert(_bit_map->isMarked((HeapWord*)new_oop),
7213            "only grey objects on this stack");
7214     // iterate over the oops in this oop, marking and pushing
7215     // the ones in CMS heap (i.e. in _span).
7216     new_oop->oop_iterate(_scanning_closure);
7217     // check if it's time to yield
7218     do_yield_check();
7219   }
7220   unsigned int after_count =
7221     GenCollectedHeap::heap()->total_collections();
7222   bool abort = (_before_count != after_count) ||
7223                _collector->should_abort_preclean();
7224   return abort ? 0 : size;
7225 }
7226 
7227 void SurvivorSpacePrecleanClosure::do_yield_work() {
7228   assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
7229          "CMS thread should hold CMS token");
7230   assert_lock_strong(_bit_map->lock());
7231   // Relinquish the bit map lock
7232   _bit_map->lock()->unlock();
7233   ConcurrentMarkSweepThread::desynchronize(true);
7234   ConcurrentMarkSweepThread::acknowledge_yield_request();
7235   _collector->stopTimer();
7236   GCPauseTimer p(_collector->size_policy()->concurrent_timer_ptr());
7237   if (PrintCMSStatistics != 0) {
7238     _collector->incrementYields();
7239   }
7240   _collector->icms_wait();
7241 
7242   // See the comment in coordinator_yield()
7243   for (unsigned i = 0; i < CMSYieldSleepCount &&
7244                        ConcurrentMarkSweepThread::should_yield() &&
7245                        !CMSCollector::foregroundGCIsActive(); ++i) {
7246     os::sleep(Thread::current(), 1, false);
7247     ConcurrentMarkSweepThread::acknowledge_yield_request();
7248   }
7249 
7250   ConcurrentMarkSweepThread::synchronize(true);
7251   _bit_map->lock()->lock_without_safepoint_check();
7252   _collector->startTimer();
7253 }
7254 
7255 // This closure is used to rescan the marked objects on the dirty cards
7256 // in the mod union table and the card table proper. In the parallel
7257 // case, although the bitMap is shared, we do a single read so the
7258 // isMarked() query is "safe".
7259 bool ScanMarkedObjectsAgainClosure::do_object_bm(oop p, MemRegion mr) {
7260   // Ignore mark word because we are running concurrent with mutators
7261   assert(p->is_oop_or_null(true), "expected an oop or null");
7262   HeapWord* addr = (HeapWord*)p;
7263   assert(_span.contains(addr), "we are scanning the CMS generation");
7264   bool is_obj_array = false;
7265   #ifdef ASSERT
7266     if (!_parallel) {
7267       assert(_mark_stack->isEmpty(), "pre-condition (eager drainage)");
7268       assert(_collector->overflow_list_is_empty(),
7269              "overflow list should be empty");
7270 
7271     }
7272   #endif // ASSERT
7273   if (_bit_map->isMarked(addr)) {
7274     // Obj arrays are precisely marked, non-arrays are not;
7275     // so we scan objArrays precisely and non-arrays in their
7276     // entirety.
7277     if (p->is_objArray()) {
7278       is_obj_array = true;
7279       if (_parallel) {
7280         p->oop_iterate(_par_scan_closure, mr);
7281       } else {
7282         p->oop_iterate(_scan_closure, mr);
7283       }
7284     } else {
7285       if (_parallel) {
7286         p->oop_iterate(_par_scan_closure);
7287       } else {
7288         p->oop_iterate(_scan_closure);
7289       }
7290     }
7291   }
7292   #ifdef ASSERT
7293     if (!_parallel) {
7294       assert(_mark_stack->isEmpty(), "post-condition (eager drainage)");
7295       assert(_collector->overflow_list_is_empty(),
7296              "overflow list should be empty");
7297 
7298     }
7299   #endif // ASSERT
7300   return is_obj_array;
7301 }
7302 
7303 MarkFromRootsClosure::MarkFromRootsClosure(CMSCollector* collector,
7304                         MemRegion span,
7305                         CMSBitMap* bitMap, CMSMarkStack*  markStack,
7306                         bool should_yield, bool verifying):
7307   _collector(collector),
7308   _span(span),
7309   _bitMap(bitMap),
7310   _mut(&collector->_modUnionTable),
7311   _markStack(markStack),
7312   _yield(should_yield),
7313   _skipBits(0)
7314 {
7315   assert(_markStack->isEmpty(), "stack should be empty");
7316   _finger = _bitMap->startWord();
7317   _threshold = _finger;
7318   assert(_collector->_restart_addr == NULL, "Sanity check");
7319   assert(_span.contains(_finger), "Out of bounds _finger?");
7320   DEBUG_ONLY(_verifying = verifying;)
7321 }
7322 
7323 void MarkFromRootsClosure::reset(HeapWord* addr) {
7324   assert(_markStack->isEmpty(), "would cause duplicates on stack");
7325   assert(_span.contains(addr), "Out of bounds _finger?");
7326   _finger = addr;
7327   _threshold = (HeapWord*)round_to(
7328                  (intptr_t)_finger, CardTableModRefBS::card_size);
7329 }
7330 
7331 // Should revisit to see if this should be restructured for
7332 // greater efficiency.
7333 bool MarkFromRootsClosure::do_bit(size_t offset) {
7334   if (_skipBits > 0) {
7335     _skipBits--;
7336     return true;
7337   }
7338   // convert offset into a HeapWord*
7339   HeapWord* addr = _bitMap->startWord() + offset;
7340   assert(_bitMap->endWord() && addr < _bitMap->endWord(),
7341          "address out of range");
7342   assert(_bitMap->isMarked(addr), "tautology");
7343   if (_bitMap->isMarked(addr+1)) {
7344     // this is an allocated but not yet initialized object
7345     assert(_skipBits == 0, "tautology");
7346     _skipBits = 2;  // skip next two marked bits ("Printezis-marks")
7347     oop p = oop(addr);
7348     if (p->klass_or_null() == NULL) {
7349       DEBUG_ONLY(if (!_verifying) {)
7350         // We re-dirty the cards on which this object lies and increase
7351         // the _threshold so that we'll come back to scan this object
7352         // during the preclean or remark phase. (CMSCleanOnEnter)
7353         if (CMSCleanOnEnter) {
7354           size_t sz = _collector->block_size_using_printezis_bits(addr);
7355           HeapWord* end_card_addr   = (HeapWord*)round_to(
7356                                          (intptr_t)(addr+sz), CardTableModRefBS::card_size);
7357           MemRegion redirty_range = MemRegion(addr, end_card_addr);
7358           assert(!redirty_range.is_empty(), "Arithmetical tautology");
7359           // Bump _threshold to end_card_addr; note that
7360           // _threshold cannot possibly exceed end_card_addr, anyhow.
7361           // This prevents future clearing of the card as the scan proceeds
7362           // to the right.
7363           assert(_threshold <= end_card_addr,
7364                  "Because we are just scanning into this object");
7365           if (_threshold < end_card_addr) {
7366             _threshold = end_card_addr;
7367           }
7368           if (p->klass_or_null() != NULL) {
7369             // Redirty the range of cards...
7370             _mut->mark_range(redirty_range);
7371           } // ...else the setting of klass will dirty the card anyway.
7372         }
7373       DEBUG_ONLY(})
7374       return true;
7375     }
7376   }
7377   scanOopsInOop(addr);
7378   return true;
7379 }
7380 
7381 // We take a break if we've been at this for a while,
7382 // so as to avoid monopolizing the locks involved.
7383 void MarkFromRootsClosure::do_yield_work() {
7384   // First give up the locks, then yield, then re-lock
7385   // We should probably use a constructor/destructor idiom to
7386   // do this unlock/lock or modify the MutexUnlocker class to
7387   // serve our purpose. XXX
7388   assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
7389          "CMS thread should hold CMS token");
7390   assert_lock_strong(_bitMap->lock());
7391   _bitMap->lock()->unlock();
7392   ConcurrentMarkSweepThread::desynchronize(true);
7393   ConcurrentMarkSweepThread::acknowledge_yield_request();
7394   _collector->stopTimer();
7395   GCPauseTimer p(_collector->size_policy()->concurrent_timer_ptr());
7396   if (PrintCMSStatistics != 0) {
7397     _collector->incrementYields();
7398   }
7399   _collector->icms_wait();
7400 
7401   // See the comment in coordinator_yield()
7402   for (unsigned i = 0; i < CMSYieldSleepCount &&
7403                        ConcurrentMarkSweepThread::should_yield() &&
7404                        !CMSCollector::foregroundGCIsActive(); ++i) {
7405     os::sleep(Thread::current(), 1, false);
7406     ConcurrentMarkSweepThread::acknowledge_yield_request();
7407   }
7408 
7409   ConcurrentMarkSweepThread::synchronize(true);
7410   _bitMap->lock()->lock_without_safepoint_check();
7411   _collector->startTimer();
7412 }
7413 
7414 void MarkFromRootsClosure::scanOopsInOop(HeapWord* ptr) {
7415   assert(_bitMap->isMarked(ptr), "expected bit to be set");
7416   assert(_markStack->isEmpty(),
7417          "should drain stack to limit stack usage");
7418   // convert ptr to an oop preparatory to scanning
7419   oop obj = oop(ptr);
7420   // Ignore mark word in verification below, since we
7421   // may be running concurrent with mutators.
7422   assert(obj->is_oop(true), "should be an oop");
7423   assert(_finger <= ptr, "_finger runneth ahead");
7424   // advance the finger to right end of this object
7425   _finger = ptr + obj->size();
7426   assert(_finger > ptr, "we just incremented it above");
7427   // On large heaps, it may take us some time to get through
7428   // the marking phase (especially if running iCMS). During
7429   // this time it's possible that a lot of mutations have
7430   // accumulated in the card table and the mod union table --
7431   // these mutation records are redundant until we have
7432   // actually traced into the corresponding card.
7433   // Here, we check whether advancing the finger would make
7434   // us cross into a new card, and if so clear corresponding
7435   // cards in the MUT (preclean them in the card-table in the
7436   // future).
7437 
7438   DEBUG_ONLY(if (!_verifying) {)
7439     // The clean-on-enter optimization is disabled by default,
7440     // until we fix 6178663.
7441     if (CMSCleanOnEnter && (_finger > _threshold)) {
7442       // [_threshold, _finger) represents the interval
7443       // of cards to be cleared  in MUT (or precleaned in card table).
7444       // The set of cards to be cleared is all those that overlap
7445       // with the interval [_threshold, _finger); note that
7446       // _threshold is always kept card-aligned but _finger isn't
7447       // always card-aligned.
7448       HeapWord* old_threshold = _threshold;
7449       assert(old_threshold == (HeapWord*)round_to(
7450               (intptr_t)old_threshold, CardTableModRefBS::card_size),
7451              "_threshold should always be card-aligned");
7452       _threshold = (HeapWord*)round_to(
7453                      (intptr_t)_finger, CardTableModRefBS::card_size);
7454       MemRegion mr(old_threshold, _threshold);
7455       assert(!mr.is_empty(), "Control point invariant");
7456       assert(_span.contains(mr), "Should clear within span");
7457       _mut->clear_range(mr);
7458     }
7459   DEBUG_ONLY(})
7460   // Note: the finger doesn't advance while we drain
7461   // the stack below.
7462   PushOrMarkClosure pushOrMarkClosure(_collector,
7463                                       _span, _bitMap, _markStack,
7464                                       _finger, this);
7465   bool res = _markStack->push(obj);
7466   assert(res, "Empty non-zero size stack should have space for single push");
7467   while (!_markStack->isEmpty()) {
7468     oop new_oop = _markStack->pop();
7469     // Skip verifying header mark word below because we are
7470     // running concurrent with mutators.
7471     assert(new_oop->is_oop(true), "Oops! expected to pop an oop");
7472     // now scan this oop's oops
7473     new_oop->oop_iterate(&pushOrMarkClosure);
7474     do_yield_check();
7475   }
7476   assert(_markStack->isEmpty(), "tautology, emphasizing post-condition");
7477 }
7478 
7479 Par_MarkFromRootsClosure::Par_MarkFromRootsClosure(CMSConcMarkingTask* task,
7480                        CMSCollector* collector, MemRegion span,
7481                        CMSBitMap* bit_map,
7482                        OopTaskQueue* work_queue,
7483                        CMSMarkStack*  overflow_stack,
7484                        bool should_yield):
7485   _collector(collector),
7486   _whole_span(collector->_span),
7487   _span(span),
7488   _bit_map(bit_map),
7489   _mut(&collector->_modUnionTable),
7490   _work_queue(work_queue),
7491   _overflow_stack(overflow_stack),
7492   _yield(should_yield),
7493   _skip_bits(0),
7494   _task(task)
7495 {
7496   assert(_work_queue->size() == 0, "work_queue should be empty");
7497   _finger = span.start();
7498   _threshold = _finger;     // XXX Defer clear-on-enter optimization for now
7499   assert(_span.contains(_finger), "Out of bounds _finger?");
7500 }
7501 
7502 // Should revisit to see if this should be restructured for
7503 // greater efficiency.
7504 bool Par_MarkFromRootsClosure::do_bit(size_t offset) {
7505   if (_skip_bits > 0) {
7506     _skip_bits--;
7507     return true;
7508   }
7509   // convert offset into a HeapWord*
7510   HeapWord* addr = _bit_map->startWord() + offset;
7511   assert(_bit_map->endWord() && addr < _bit_map->endWord(),
7512          "address out of range");
7513   assert(_bit_map->isMarked(addr), "tautology");
7514   if (_bit_map->isMarked(addr+1)) {
7515     // this is an allocated object that might not yet be initialized
7516     assert(_skip_bits == 0, "tautology");
7517     _skip_bits = 2;  // skip next two marked bits ("Printezis-marks")
7518     oop p = oop(addr);
7519     if (p->klass_or_null() == NULL) {
7520       // in the case of Clean-on-Enter optimization, redirty card
7521       // and avoid clearing the card by increasing the threshold.
7522       return true;
7523     }
7524   }
7525   scan_oops_in_oop(addr);
7526   return true;
7527 }
7528 
7529 void Par_MarkFromRootsClosure::scan_oops_in_oop(HeapWord* ptr) {
7530   assert(_bit_map->isMarked(ptr), "expected bit to be set");
7531   // Should we assert that our work queue is empty or
7532   // below some drain limit?
7533   assert(_work_queue->size() == 0,
7534          "should drain stack to limit stack usage");
7535   // convert ptr to an oop preparatory to scanning
7536   oop obj = oop(ptr);
7537   // Ignore mark word in verification below, since we
7538   // may be running concurrent with mutators.
7539   assert(obj->is_oop(true), "should be an oop");
7540   assert(_finger <= ptr, "_finger runneth ahead");
7541   // advance the finger to right end of this object
7542   _finger = ptr + obj->size();
7543   assert(_finger > ptr, "we just incremented it above");
7544   // On large heaps, it may take us some time to get through
7545   // the marking phase (especially if running iCMS). During
7546   // this time it's possible that a lot of mutations have
7547   // accumulated in the card table and the mod union table --
7548   // these mutation records are redundant until we have
7549   // actually traced into the corresponding card.
7550   // Here, we check whether advancing the finger would make
7551   // us cross into a new card, and if so clear corresponding
7552   // cards in the MUT (preclean them in the card-table in the
7553   // future).
7554 
7555   // The clean-on-enter optimization is disabled by default,
7556   // until we fix 6178663.
7557   if (CMSCleanOnEnter && (_finger > _threshold)) {
7558     // [_threshold, _finger) represents the interval
7559     // of cards to be cleared  in MUT (or precleaned in card table).
7560     // The set of cards to be cleared is all those that overlap
7561     // with the interval [_threshold, _finger); note that
7562     // _threshold is always kept card-aligned but _finger isn't
7563     // always card-aligned.
7564     HeapWord* old_threshold = _threshold;
7565     assert(old_threshold == (HeapWord*)round_to(
7566             (intptr_t)old_threshold, CardTableModRefBS::card_size),
7567            "_threshold should always be card-aligned");
7568     _threshold = (HeapWord*)round_to(
7569                    (intptr_t)_finger, CardTableModRefBS::card_size);
7570     MemRegion mr(old_threshold, _threshold);
7571     assert(!mr.is_empty(), "Control point invariant");
7572     assert(_span.contains(mr), "Should clear within span"); // _whole_span ??
7573     _mut->clear_range(mr);
7574   }
7575 
7576   // Note: the local finger doesn't advance while we drain
7577   // the stack below, but the global finger sure can and will.
7578   HeapWord** gfa = _task->global_finger_addr();
7579   Par_PushOrMarkClosure pushOrMarkClosure(_collector,
7580                                       _span, _bit_map,
7581                                       _work_queue,
7582                                       _overflow_stack,
7583                                       _finger,
7584                                       gfa, this);
7585   bool res = _work_queue->push(obj);   // overflow could occur here
7586   assert(res, "Will hold once we use workqueues");
7587   while (true) {
7588     oop new_oop;
7589     if (!_work_queue->pop_local(new_oop)) {
7590       // We emptied our work_queue; check if there's stuff that can
7591       // be gotten from the overflow stack.
7592       if (CMSConcMarkingTask::get_work_from_overflow_stack(
7593             _overflow_stack, _work_queue)) {
7594         do_yield_check();
7595         continue;
7596       } else {  // done
7597         break;
7598       }
7599     }
7600     // Skip verifying header mark word below because we are
7601     // running concurrent with mutators.
7602     assert(new_oop->is_oop(true), "Oops! expected to pop an oop");
7603     // now scan this oop's oops
7604     new_oop->oop_iterate(&pushOrMarkClosure);
7605     do_yield_check();
7606   }
7607   assert(_work_queue->size() == 0, "tautology, emphasizing post-condition");
7608 }
7609 
7610 // Yield in response to a request from VM Thread or
7611 // from mutators.
7612 void Par_MarkFromRootsClosure::do_yield_work() {
7613   assert(_task != NULL, "sanity");
7614   _task->yield();
7615 }
7616 
7617 // A variant of the above used for verifying CMS marking work.
7618 MarkFromRootsVerifyClosure::MarkFromRootsVerifyClosure(CMSCollector* collector,
7619                         MemRegion span,
7620                         CMSBitMap* verification_bm, CMSBitMap* cms_bm,
7621                         CMSMarkStack*  mark_stack):
7622   _collector(collector),
7623   _span(span),
7624   _verification_bm(verification_bm),
7625   _cms_bm(cms_bm),
7626   _mark_stack(mark_stack),
7627   _pam_verify_closure(collector, span, verification_bm, cms_bm,
7628                       mark_stack)
7629 {
7630   assert(_mark_stack->isEmpty(), "stack should be empty");
7631   _finger = _verification_bm->startWord();
7632   assert(_collector->_restart_addr == NULL, "Sanity check");
7633   assert(_span.contains(_finger), "Out of bounds _finger?");
7634 }
7635 
7636 void MarkFromRootsVerifyClosure::reset(HeapWord* addr) {
7637   assert(_mark_stack->isEmpty(), "would cause duplicates on stack");
7638   assert(_span.contains(addr), "Out of bounds _finger?");
7639   _finger = addr;
7640 }
7641 
7642 // Should revisit to see if this should be restructured for
7643 // greater efficiency.
7644 bool MarkFromRootsVerifyClosure::do_bit(size_t offset) {
7645   // convert offset into a HeapWord*
7646   HeapWord* addr = _verification_bm->startWord() + offset;
7647   assert(_verification_bm->endWord() && addr < _verification_bm->endWord(),
7648          "address out of range");
7649   assert(_verification_bm->isMarked(addr), "tautology");
7650   assert(_cms_bm->isMarked(addr), "tautology");
7651 
7652   assert(_mark_stack->isEmpty(),
7653          "should drain stack to limit stack usage");
7654   // convert addr to an oop preparatory to scanning
7655   oop obj = oop(addr);
7656   assert(obj->is_oop(), "should be an oop");
7657   assert(_finger <= addr, "_finger runneth ahead");
7658   // advance the finger to right end of this object
7659   _finger = addr + obj->size();
7660   assert(_finger > addr, "we just incremented it above");
7661   // Note: the finger doesn't advance while we drain
7662   // the stack below.
7663   bool res = _mark_stack->push(obj);
7664   assert(res, "Empty non-zero size stack should have space for single push");
7665   while (!_mark_stack->isEmpty()) {
7666     oop new_oop = _mark_stack->pop();
7667     assert(new_oop->is_oop(), "Oops! expected to pop an oop");
7668     // now scan this oop's oops
7669     new_oop->oop_iterate(&_pam_verify_closure);
7670   }
7671   assert(_mark_stack->isEmpty(), "tautology, emphasizing post-condition");
7672   return true;
7673 }
7674 
7675 PushAndMarkVerifyClosure::PushAndMarkVerifyClosure(
7676   CMSCollector* collector, MemRegion span,
7677   CMSBitMap* verification_bm, CMSBitMap* cms_bm,
7678   CMSMarkStack*  mark_stack):
7679   CMSOopClosure(collector->ref_processor()),
7680   _collector(collector),
7681   _span(span),
7682   _verification_bm(verification_bm),
7683   _cms_bm(cms_bm),
7684   _mark_stack(mark_stack)
7685 { }
7686 
7687 void PushAndMarkVerifyClosure::do_oop(oop* p)       { PushAndMarkVerifyClosure::do_oop_work(p); }
7688 void PushAndMarkVerifyClosure::do_oop(narrowOop* p) { PushAndMarkVerifyClosure::do_oop_work(p); }
7689 
7690 // Upon stack overflow, we discard (part of) the stack,
7691 // remembering the least address amongst those discarded
7692 // in CMSCollector's _restart_address.
7693 void PushAndMarkVerifyClosure::handle_stack_overflow(HeapWord* lost) {
7694   // Remember the least grey address discarded
7695   HeapWord* ra = (HeapWord*)_mark_stack->least_value(lost);
7696   _collector->lower_restart_addr(ra);
7697   _mark_stack->reset();  // discard stack contents
7698   _mark_stack->expand(); // expand the stack if possible
7699 }
7700 
7701 void PushAndMarkVerifyClosure::do_oop(oop obj) {
7702   assert(obj->is_oop_or_null(), "expected an oop or NULL");
7703   HeapWord* addr = (HeapWord*)obj;
7704   if (_span.contains(addr) && !_verification_bm->isMarked(addr)) {
7705     // Oop lies in _span and isn't yet grey or black
7706     _verification_bm->mark(addr);            // now grey
7707     if (!_cms_bm->isMarked(addr)) {
7708       oop(addr)->print();
7709       gclog_or_tty->print_cr(" (" INTPTR_FORMAT " should have been marked)",
7710                              addr);
7711       fatal("... aborting");
7712     }
7713 
7714     if (!_mark_stack->push(obj)) { // stack overflow
7715       if (PrintCMSStatistics != 0) {
7716         gclog_or_tty->print_cr("CMS marking stack overflow (benign) at "
7717                                SIZE_FORMAT, _mark_stack->capacity());
7718       }
7719       assert(_mark_stack->isFull(), "Else push should have succeeded");
7720       handle_stack_overflow(addr);
7721     }
7722     // anything including and to the right of _finger
7723     // will be scanned as we iterate over the remainder of the
7724     // bit map
7725   }
7726 }
7727 
7728 PushOrMarkClosure::PushOrMarkClosure(CMSCollector* collector,
7729                      MemRegion span,
7730                      CMSBitMap* bitMap, CMSMarkStack*  markStack,
7731                      HeapWord* finger, MarkFromRootsClosure* parent) :
7732   CMSOopClosure(collector->ref_processor()),
7733   _collector(collector),
7734   _span(span),
7735   _bitMap(bitMap),
7736   _markStack(markStack),
7737   _finger(finger),
7738   _parent(parent)
7739 { }
7740 
7741 Par_PushOrMarkClosure::Par_PushOrMarkClosure(CMSCollector* collector,
7742                      MemRegion span,
7743                      CMSBitMap* bit_map,
7744                      OopTaskQueue* work_queue,
7745                      CMSMarkStack*  overflow_stack,
7746                      HeapWord* finger,
7747                      HeapWord** global_finger_addr,
7748                      Par_MarkFromRootsClosure* parent) :
7749   CMSOopClosure(collector->ref_processor()),
7750   _collector(collector),
7751   _whole_span(collector->_span),
7752   _span(span),
7753   _bit_map(bit_map),
7754   _work_queue(work_queue),
7755   _overflow_stack(overflow_stack),
7756   _finger(finger),
7757   _global_finger_addr(global_finger_addr),
7758   _parent(parent)
7759 { }
7760 
7761 // Assumes thread-safe access by callers, who are
7762 // responsible for mutual exclusion.
7763 void CMSCollector::lower_restart_addr(HeapWord* low) {
7764   assert(_span.contains(low), "Out of bounds addr");
7765   if (_restart_addr == NULL) {
7766     _restart_addr = low;
7767   } else {
7768     _restart_addr = MIN2(_restart_addr, low);
7769   }
7770 }
7771 
7772 // Upon stack overflow, we discard (part of) the stack,
7773 // remembering the least address amongst those discarded
7774 // in CMSCollector's _restart_address.
7775 void PushOrMarkClosure::handle_stack_overflow(HeapWord* lost) {
7776   // Remember the least grey address discarded
7777   HeapWord* ra = (HeapWord*)_markStack->least_value(lost);
7778   _collector->lower_restart_addr(ra);
7779   _markStack->reset();  // discard stack contents
7780   _markStack->expand(); // expand the stack if possible
7781 }
7782 
7783 // Upon stack overflow, we discard (part of) the stack,
7784 // remembering the least address amongst those discarded
7785 // in CMSCollector's _restart_address.
7786 void Par_PushOrMarkClosure::handle_stack_overflow(HeapWord* lost) {
7787   // We need to do this under a mutex to prevent other
7788   // workers from interfering with the work done below.
7789   MutexLockerEx ml(_overflow_stack->par_lock(),
7790                    Mutex::_no_safepoint_check_flag);
7791   // Remember the least grey address discarded
7792   HeapWord* ra = (HeapWord*)_overflow_stack->least_value(lost);
7793   _collector->lower_restart_addr(ra);
7794   _overflow_stack->reset();  // discard stack contents
7795   _overflow_stack->expand(); // expand the stack if possible
7796 }
7797 
7798 void CMKlassClosure::do_klass(Klass* k) {
7799   assert(_oop_closure != NULL, "Not initialized?");
7800   k->oops_do(_oop_closure);
7801 }
7802 
7803 void PushOrMarkClosure::do_oop(oop obj) {
7804   // Ignore mark word because we are running concurrent with mutators.
7805   assert(obj->is_oop_or_null(true), "expected an oop or NULL");
7806   HeapWord* addr = (HeapWord*)obj;
7807   if (_span.contains(addr) && !_bitMap->isMarked(addr)) {
7808     // Oop lies in _span and isn't yet grey or black
7809     _bitMap->mark(addr);            // now grey
7810     if (addr < _finger) {
7811       // the bit map iteration has already either passed, or
7812       // sampled, this bit in the bit map; we'll need to
7813       // use the marking stack to scan this oop's oops.
7814       bool simulate_overflow = false;
7815       NOT_PRODUCT(
7816         if (CMSMarkStackOverflowALot &&
7817             _collector->simulate_overflow()) {
7818           // simulate a stack overflow
7819           simulate_overflow = true;
7820         }
7821       )
7822       if (simulate_overflow || !_markStack->push(obj)) { // stack overflow
7823         if (PrintCMSStatistics != 0) {
7824           gclog_or_tty->print_cr("CMS marking stack overflow (benign) at "
7825                                  SIZE_FORMAT, _markStack->capacity());
7826         }
7827         assert(simulate_overflow || _markStack->isFull(), "Else push should have succeeded");
7828         handle_stack_overflow(addr);
7829       }
7830     }
7831     // anything including and to the right of _finger
7832     // will be scanned as we iterate over the remainder of the
7833     // bit map
7834     do_yield_check();
7835   }
7836 }
7837 
7838 void PushOrMarkClosure::do_oop(oop* p)       { PushOrMarkClosure::do_oop_work(p); }
7839 void PushOrMarkClosure::do_oop(narrowOop* p) { PushOrMarkClosure::do_oop_work(p); }
7840 
7841 void Par_PushOrMarkClosure::do_oop(oop obj) {
7842   // Ignore mark word because we are running concurrent with mutators.
7843   assert(obj->is_oop_or_null(true), "expected an oop or NULL");
7844   HeapWord* addr = (HeapWord*)obj;
7845   if (_whole_span.contains(addr) && !_bit_map->isMarked(addr)) {
7846     // Oop lies in _span and isn't yet grey or black
7847     // We read the global_finger (volatile read) strictly after marking oop
7848     bool res = _bit_map->par_mark(addr);    // now grey
7849     volatile HeapWord** gfa = (volatile HeapWord**)_global_finger_addr;
7850     // Should we push this marked oop on our stack?
7851     // -- if someone else marked it, nothing to do
7852     // -- if target oop is above global finger nothing to do
7853     // -- if target oop is in chunk and above local finger
7854     //      then nothing to do
7855     // -- else push on work queue
7856     if (   !res       // someone else marked it, they will deal with it
7857         || (addr >= *gfa)  // will be scanned in a later task
7858         || (_span.contains(addr) && addr >= _finger)) { // later in this chunk
7859       return;
7860     }
7861     // the bit map iteration has already either passed, or
7862     // sampled, this bit in the bit map; we'll need to
7863     // use the marking stack to scan this oop's oops.
7864     bool simulate_overflow = false;
7865     NOT_PRODUCT(
7866       if (CMSMarkStackOverflowALot &&
7867           _collector->simulate_overflow()) {
7868         // simulate a stack overflow
7869         simulate_overflow = true;
7870       }
7871     )
7872     if (simulate_overflow ||
7873         !(_work_queue->push(obj) || _overflow_stack->par_push(obj))) {
7874       // stack overflow
7875       if (PrintCMSStatistics != 0) {
7876         gclog_or_tty->print_cr("CMS marking stack overflow (benign) at "
7877                                SIZE_FORMAT, _overflow_stack->capacity());
7878       }
7879       // We cannot assert that the overflow stack is full because
7880       // it may have been emptied since.
7881       assert(simulate_overflow ||
7882              _work_queue->size() == _work_queue->max_elems(),
7883             "Else push should have succeeded");
7884       handle_stack_overflow(addr);
7885     }
7886     do_yield_check();
7887   }
7888 }
7889 
7890 void Par_PushOrMarkClosure::do_oop(oop* p)       { Par_PushOrMarkClosure::do_oop_work(p); }
7891 void Par_PushOrMarkClosure::do_oop(narrowOop* p) { Par_PushOrMarkClosure::do_oop_work(p); }
7892 
7893 PushAndMarkClosure::PushAndMarkClosure(CMSCollector* collector,
7894                                        MemRegion span,
7895                                        ReferenceProcessor* rp,
7896                                        CMSBitMap* bit_map,
7897                                        CMSBitMap* mod_union_table,
7898                                        CMSMarkStack*  mark_stack,
7899                                        bool           concurrent_precleaning):
7900   CMSOopClosure(rp),
7901   _collector(collector),
7902   _span(span),
7903   _bit_map(bit_map),
7904   _mod_union_table(mod_union_table),
7905   _mark_stack(mark_stack),
7906   _concurrent_precleaning(concurrent_precleaning)
7907 {
7908   assert(_ref_processor != NULL, "_ref_processor shouldn't be NULL");
7909 }
7910 
7911 // Grey object rescan during pre-cleaning and second checkpoint phases --
7912 // the non-parallel version (the parallel version appears further below.)
7913 void PushAndMarkClosure::do_oop(oop obj) {
7914   // Ignore mark word verification. If during concurrent precleaning,
7915   // the object monitor may be locked. If during the checkpoint
7916   // phases, the object may already have been reached by a  different
7917   // path and may be at the end of the global overflow list (so
7918   // the mark word may be NULL).
7919   assert(obj->is_oop_or_null(true /* ignore mark word */),
7920          "expected an oop or NULL");
7921   HeapWord* addr = (HeapWord*)obj;
7922   // Check if oop points into the CMS generation
7923   // and is not marked
7924   if (_span.contains(addr) && !_bit_map->isMarked(addr)) {
7925     // a white object ...
7926     _bit_map->mark(addr);         // ... now grey
7927     // push on the marking stack (grey set)
7928     bool simulate_overflow = false;
7929     NOT_PRODUCT(
7930       if (CMSMarkStackOverflowALot &&
7931           _collector->simulate_overflow()) {
7932         // simulate a stack overflow
7933         simulate_overflow = true;
7934       }
7935     )
7936     if (simulate_overflow || !_mark_stack->push(obj)) {
7937       if (_concurrent_precleaning) {
7938          // During precleaning we can just dirty the appropriate card(s)
7939          // in the mod union table, thus ensuring that the object remains
         // in the grey set, and continue. In the case of object arrays
7941          // we need to dirty all of the cards that the object spans,
7942          // since the rescan of object arrays will be limited to the
7943          // dirty cards.
         // Note that no one can be interfering with us in this action
7945          // of dirtying the mod union table, so no locking or atomics
7946          // are required.
7947          if (obj->is_objArray()) {
7948            size_t sz = obj->size();
7949            HeapWord* end_card_addr = (HeapWord*)round_to(
7950                                         (intptr_t)(addr+sz), CardTableModRefBS::card_size);
7951            MemRegion redirty_range = MemRegion(addr, end_card_addr);
7952            assert(!redirty_range.is_empty(), "Arithmetical tautology");
7953            _mod_union_table->mark_range(redirty_range);
7954          } else {
7955            _mod_union_table->mark(addr);
7956          }
7957          _collector->_ser_pmc_preclean_ovflw++;
7958       } else {
7959          // During the remark phase, we need to remember this oop
7960          // in the overflow list.
7961          _collector->push_on_overflow_list(obj);
7962          _collector->_ser_pmc_remark_ovflw++;
7963       }
7964     }
7965   }
7966 }
7967 
7968 Par_PushAndMarkClosure::Par_PushAndMarkClosure(CMSCollector* collector,
7969                                                MemRegion span,
7970                                                ReferenceProcessor* rp,
7971                                                CMSBitMap* bit_map,
7972                                                OopTaskQueue* work_queue):
7973   CMSOopClosure(rp),
7974   _collector(collector),
7975   _span(span),
7976   _bit_map(bit_map),
7977   _work_queue(work_queue)
7978 {
7979   assert(_ref_processor != NULL, "_ref_processor shouldn't be NULL");
7980 }
7981 
7982 void PushAndMarkClosure::do_oop(oop* p)       { PushAndMarkClosure::do_oop_work(p); }
7983 void PushAndMarkClosure::do_oop(narrowOop* p) { PushAndMarkClosure::do_oop_work(p); }
7984 
7985 // Grey object rescan during second checkpoint phase --
7986 // the parallel version.
7987 void Par_PushAndMarkClosure::do_oop(oop obj) {
7988   // In the assert below, we ignore the mark word because
7989   // this oop may point to an already visited object that is
7990   // on the overflow stack (in which case the mark word has
7991   // been hijacked for chaining into the overflow stack --
7992   // if this is the last object in the overflow stack then
7993   // its mark word will be NULL). Because this object may
7994   // have been subsequently popped off the global overflow
7995   // stack, and the mark word possibly restored to the prototypical
  // value, by the time we get to examine this failing assert in
7997   // the debugger, is_oop_or_null(false) may subsequently start
7998   // to hold.
7999   assert(obj->is_oop_or_null(true),
8000          "expected an oop or NULL");
8001   HeapWord* addr = (HeapWord*)obj;
8002   // Check if oop points into the CMS generation
8003   // and is not marked
8004   if (_span.contains(addr) && !_bit_map->isMarked(addr)) {
8005     // a white object ...
8006     // If we manage to "claim" the object, by being the
8007     // first thread to mark it, then we push it on our
8008     // marking stack
8009     if (_bit_map->par_mark(addr)) {     // ... now grey
8010       // push on work queue (grey set)
8011       bool simulate_overflow = false;
8012       NOT_PRODUCT(
8013         if (CMSMarkStackOverflowALot &&
8014             _collector->par_simulate_overflow()) {
8015           // simulate a stack overflow
8016           simulate_overflow = true;
8017         }
8018       )
8019       if (simulate_overflow || !_work_queue->push(obj)) {
8020         _collector->par_push_on_overflow_list(obj);
8021         _collector->_par_pmc_remark_ovflw++; //  imprecise OK: no need to CAS
8022       }
8023     } // Else, some other thread got there first
8024   }
8025 }
8026 
8027 void Par_PushAndMarkClosure::do_oop(oop* p)       { Par_PushAndMarkClosure::do_oop_work(p); }
8028 void Par_PushAndMarkClosure::do_oop(narrowOop* p) { Par_PushAndMarkClosure::do_oop_work(p); }
8029 
8030 void CMSPrecleanRefsYieldClosure::do_yield_work() {
8031   Mutex* bml = _collector->bitMapLock();
8032   assert_lock_strong(bml);
8033   assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
8034          "CMS thread should hold CMS token");
8035 
8036   bml->unlock();
8037   ConcurrentMarkSweepThread::desynchronize(true);
8038 
8039   ConcurrentMarkSweepThread::acknowledge_yield_request();
8040 
8041   _collector->stopTimer();
8042   GCPauseTimer p(_collector->size_policy()->concurrent_timer_ptr());
8043   if (PrintCMSStatistics != 0) {
8044     _collector->incrementYields();
8045   }
8046   _collector->icms_wait();
8047 
8048   // See the comment in coordinator_yield()
8049   for (unsigned i = 0; i < CMSYieldSleepCount &&
8050                        ConcurrentMarkSweepThread::should_yield() &&
8051                        !CMSCollector::foregroundGCIsActive(); ++i) {
8052     os::sleep(Thread::current(), 1, false);
8053     ConcurrentMarkSweepThread::acknowledge_yield_request();
8054   }
8055 
8056   ConcurrentMarkSweepThread::synchronize(true);
8057   bml->lock();
8058 
8059   _collector->startTimer();
8060 }
8061 
8062 bool CMSPrecleanRefsYieldClosure::should_return() {
8063   if (ConcurrentMarkSweepThread::should_yield()) {
8064     do_yield_work();
8065   }
8066   return _collector->foregroundGCIsActive();
8067 }
8068 
8069 void MarkFromDirtyCardsClosure::do_MemRegion(MemRegion mr) {
8070   assert(((size_t)mr.start())%CardTableModRefBS::card_size_in_words == 0,
8071          "mr should be aligned to start at a card boundary");
8072   // We'd like to assert:
8073   // assert(mr.word_size()%CardTableModRefBS::card_size_in_words == 0,
8074   //        "mr should be a range of cards");
8075   // However, that would be too strong in one case -- the last
8076   // partition ends at _unallocated_block which, in general, can be
8077   // an arbitrary boundary, not necessarily card aligned.
8078   if (PrintCMSStatistics != 0) {
8079     _num_dirty_cards +=
8080          mr.word_size()/CardTableModRefBS::card_size_in_words;
8081   }
8082   _space->object_iterate_mem(mr, &_scan_cl);
8083 }
8084 
8085 SweepClosure::SweepClosure(CMSCollector* collector,
8086                            ConcurrentMarkSweepGeneration* g,
8087                            CMSBitMap* bitMap, bool should_yield) :
8088   _collector(collector),
8089   _g(g),
8090   _sp(g->cmsSpace()),
8091   _limit(_sp->sweep_limit()),
8092   _freelistLock(_sp->freelistLock()),
8093   _bitMap(bitMap),
8094   _yield(should_yield),
8095   _inFreeRange(false),           // No free range at beginning of sweep
8096   _freeRangeInFreeLists(false),  // No free range at beginning of sweep
8097   _lastFreeRangeCoalesced(false),
8098   _freeFinger(g->used_region().start())
8099 {
8100   NOT_PRODUCT(
8101     _numObjectsFreed = 0;
8102     _numWordsFreed   = 0;
8103     _numObjectsLive = 0;
8104     _numWordsLive = 0;
8105     _numObjectsAlreadyFree = 0;
8106     _numWordsAlreadyFree = 0;
8107     _last_fc = NULL;
8108 
8109     _sp->initializeIndexedFreeListArrayReturnedBytes();
8110     _sp->dictionary()->initialize_dict_returned_bytes();
8111   )
8112   assert(_limit >= _sp->bottom() && _limit <= _sp->end(),
8113          "sweep _limit out of bounds");
8114   if (CMSTraceSweeper) {
8115     gclog_or_tty->print_cr("\n====================\nStarting new sweep with limit " PTR_FORMAT,
8116                         _limit);
8117   }
8118 }
8119 
8120 void SweepClosure::print_on(outputStream* st) const {
8121   tty->print_cr("_sp = [" PTR_FORMAT "," PTR_FORMAT ")",
8122                 _sp->bottom(), _sp->end());
8123   tty->print_cr("_limit = " PTR_FORMAT, _limit);
8124   tty->print_cr("_freeFinger = " PTR_FORMAT, _freeFinger);
8125   NOT_PRODUCT(tty->print_cr("_last_fc = " PTR_FORMAT, _last_fc);)
8126   tty->print_cr("_inFreeRange = %d, _freeRangeInFreeLists = %d, _lastFreeRangeCoalesced = %d",
8127                 _inFreeRange, _freeRangeInFreeLists, _lastFreeRangeCoalesced);
8128 }
8129 
8130 #ifndef PRODUCT
8131 // Assertion checking only:  no useful work in product mode --
8132 // however, if any of the flags below become product flags,
8133 // you may need to review this code to see if it needs to be
8134 // enabled in product mode.
8135 SweepClosure::~SweepClosure() {
8136   assert_lock_strong(_freelistLock);
8137   assert(_limit >= _sp->bottom() && _limit <= _sp->end(),
8138          "sweep _limit out of bounds");
8139   if (inFreeRange()) {
8140     warning("inFreeRange() should have been reset; dumping state of SweepClosure");
8141     print();
8142     ShouldNotReachHere();
8143   }
8144   if (Verbose && PrintGC) {
8145     gclog_or_tty->print("Collected "SIZE_FORMAT" objects, " SIZE_FORMAT " bytes",
8146                         _numObjectsFreed, _numWordsFreed*sizeof(HeapWord));
8147     gclog_or_tty->print_cr("\nLive "SIZE_FORMAT" objects,  "
8148                            SIZE_FORMAT" bytes  "
8149       "Already free "SIZE_FORMAT" objects, "SIZE_FORMAT" bytes",
8150       _numObjectsLive, _numWordsLive*sizeof(HeapWord),
8151       _numObjectsAlreadyFree, _numWordsAlreadyFree*sizeof(HeapWord));
8152     size_t totalBytes = (_numWordsFreed + _numWordsLive + _numWordsAlreadyFree)
8153                         * sizeof(HeapWord);
8154     gclog_or_tty->print_cr("Total sweep: "SIZE_FORMAT" bytes", totalBytes);
8155 
8156     if (PrintCMSStatistics && CMSVerifyReturnedBytes) {
8157       size_t indexListReturnedBytes = _sp->sumIndexedFreeListArrayReturnedBytes();
8158       size_t dict_returned_bytes = _sp->dictionary()->sum_dict_returned_bytes();
8159       size_t returned_bytes = indexListReturnedBytes + dict_returned_bytes;
8160       gclog_or_tty->print("Returned "SIZE_FORMAT" bytes", returned_bytes);
8161       gclog_or_tty->print("   Indexed List Returned "SIZE_FORMAT" bytes",
8162         indexListReturnedBytes);
8163       gclog_or_tty->print_cr("        Dictionary Returned "SIZE_FORMAT" bytes",
8164         dict_returned_bytes);
8165     }
8166   }
8167   if (CMSTraceSweeper) {
8168     gclog_or_tty->print_cr("end of sweep with _limit = " PTR_FORMAT "\n================",
8169                            _limit);
8170   }
8171 }
8172 #endif  // PRODUCT
8173 
8174 void SweepClosure::initialize_free_range(HeapWord* freeFinger,
8175     bool freeRangeInFreeLists) {
8176   if (CMSTraceSweeper) {
8177     gclog_or_tty->print("---- Start free range at 0x%x with free block (%d)\n",
8178                freeFinger, freeRangeInFreeLists);
8179   }
8180   assert(!inFreeRange(), "Trampling existing free range");
8181   set_inFreeRange(true);
8182   set_lastFreeRangeCoalesced(false);
8183 
8184   set_freeFinger(freeFinger);
8185   set_freeRangeInFreeLists(freeRangeInFreeLists);
8186   if (CMSTestInFreeList) {
8187     if (freeRangeInFreeLists) {
8188       FreeChunk* fc = (FreeChunk*) freeFinger;
8189       assert(fc->is_free(), "A chunk on the free list should be free.");
8190       assert(fc->size() > 0, "Free range should have a size");
8191       assert(_sp->verify_chunk_in_free_list(fc), "Chunk is not in free lists");
8192     }
8193   }
8194 }
8195 
8196 // Note that the sweeper runs concurrently with mutators. Thus,
8197 // it is possible for direct allocation in this generation to happen
8198 // in the middle of the sweep. Note that the sweeper also coalesces
8199 // contiguous free blocks. Thus, unless the sweeper and the allocator
// synchronize appropriately, freshly allocated blocks may get swept up.
8201 // This is accomplished by the sweeper locking the free lists while
8202 // it is sweeping. Thus blocks that are determined to be free are
8203 // indeed free. There is however one additional complication:
8204 // blocks that have been allocated since the final checkpoint and
8205 // mark, will not have been marked and so would be treated as
8206 // unreachable and swept up. To prevent this, the allocator marks
8207 // the bit map when allocating during the sweep phase. This leads,
8208 // however, to a further complication -- objects may have been allocated
8209 // but not yet initialized -- in the sense that the header isn't yet
// installed. The sweeper cannot then determine the size of the block
8211 // in order to skip over it. To deal with this case, we use a technique
8212 // (due to Printezis) to encode such uninitialized block sizes in the
8213 // bit map. Since the bit map uses a bit per every HeapWord, but the
8214 // CMS generation has a minimum object size of 3 HeapWords, it follows
8215 // that "normal marks" won't be adjacent in the bit map (there will
8216 // always be at least two 0 bits between successive 1 bits). We make use
8217 // of these "unused" bits to represent uninitialized blocks -- the bit
8218 // corresponding to the start of the uninitialized object and the next
8219 // bit are both set. Finally, a 1 bit marks the end of the object that
8220 // started with the two consecutive 1 bits to indicate its potentially
8221 // uninitialized state.
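//
// As a purely illustrative sketch of the encoding (bit-map word indices
// and sizes here are hypothetical): an ordinary, initialized object of
// size 4 starting at word k is marked with the single bit pattern
// 1 0 0 0 over [k, k+4). An uninitialized block of size 5 starting at
// word k is instead encoded as 1 1 0 0 1 -- the pair of bits at k and
// k+1 flags "size is recorded in the bit map", and the bit at k+4 (the
// block's last word) lets the sweeper recover
// size = (k+4) + 1 - k = 5 without reading the missing header
// (see do_live_chunk() below).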
8222 
8223 size_t SweepClosure::do_blk_careful(HeapWord* addr) {
8224   FreeChunk* fc = (FreeChunk*)addr;
8225   size_t res;
8226 
8227   // Check if we are done sweeping. Below we check "addr >= _limit" rather
8228   // than "addr == _limit" because although _limit was a block boundary when
8229   // we started the sweep, it may no longer be one because heap expansion
8230   // may have caused us to coalesce the block ending at the address _limit
8231   // with a newly expanded chunk (this happens when _limit was set to the
8232   // previous _end of the space), so we may have stepped past _limit:
8233   // see the following Zeno-like trail of CRs 6977970, 7008136, 7042740.
8234   if (addr >= _limit) { // we have swept up to or past the limit: finish up
8235     assert(_limit >= _sp->bottom() && _limit <= _sp->end(),
8236            "sweep _limit out of bounds");
8237     assert(addr < _sp->end(), "addr out of bounds");
8238     // Flush any free range we might be holding as a single
8239     // coalesced chunk to the appropriate free list.
8240     if (inFreeRange()) {
8241       assert(freeFinger() >= _sp->bottom() && freeFinger() < _limit,
8242              err_msg("freeFinger() " PTR_FORMAT" is out-of-bounds", freeFinger()));
8243       flush_cur_free_chunk(freeFinger(),
8244                            pointer_delta(addr, freeFinger()));
8245       if (CMSTraceSweeper) {
8246         gclog_or_tty->print("Sweep: last chunk: ");
8247         gclog_or_tty->print("put_free_blk 0x%x ("SIZE_FORMAT") "
8248                    "[coalesced:"SIZE_FORMAT"]\n",
8249                    freeFinger(), pointer_delta(addr, freeFinger()),
8250                    lastFreeRangeCoalesced());
8251       }
8252     }
8253 
8254     // help the iterator loop finish
8255     return pointer_delta(_sp->end(), addr);
8256   }
8257 
8258   assert(addr < _limit, "sweep invariant");
8259   // check if we should yield
8260   do_yield_check(addr);
8261   if (fc->is_free()) {
8262     // Chunk that is already free
8263     res = fc->size();
8264     do_already_free_chunk(fc);
8265     debug_only(_sp->verifyFreeLists());
8266     // If we flush the chunk at hand in lookahead_and_flush()
8267     // and it's coalesced with a preceding chunk, then the
8268     // process of "mangling" the payload of the coalesced block
8269     // will cause erasure of the size information from the
8270     // (erstwhile) header of all the coalesced blocks but the
8271     // first, so the first disjunct in the assert will not hold
8272     // in that specific case (in which case the second disjunct
8273     // will hold).
8274     assert(res == fc->size() || ((HeapWord*)fc) + res >= _limit,
8275            "Otherwise the size info doesn't change at this step");
8276     NOT_PRODUCT(
8277       _numObjectsAlreadyFree++;
8278       _numWordsAlreadyFree += res;
8279     )
8280     NOT_PRODUCT(_last_fc = fc;)
8281   } else if (!_bitMap->isMarked(addr)) {
8282     // Chunk is fresh garbage
8283     res = do_garbage_chunk(fc);
8284     debug_only(_sp->verifyFreeLists());
8285     NOT_PRODUCT(
8286       _numObjectsFreed++;
8287       _numWordsFreed += res;
8288     )
8289   } else {
8290     // Chunk that is alive.
8291     res = do_live_chunk(fc);
8292     debug_only(_sp->verifyFreeLists());
8293     NOT_PRODUCT(
8294         _numObjectsLive++;
8295         _numWordsLive += res;
8296     )
8297   }
8298   return res;
8299 }
8300 
8301 // For the smart allocation, record following
8302 //  split deaths - a free chunk is removed from its free list because
8303 //      it is being split into two or more chunks.
8304 //  split birth - a free chunk is being added to its free list because
8305 //      a larger free chunk has been split and resulted in this free chunk.
8306 //  coal death - a free chunk is being removed from its free list because
8307 //      it is being coalesced into a large free chunk.
8308 //  coal birth - a free chunk is being added to its free list because
//      it was created when two or more free chunks were coalesced into
8310 //      this free chunk.
8311 //
8312 // These statistics are used to determine the desired number of free
8313 // chunks of a given size.  The desired number is chosen to be relative
8314 // to the end of a CMS sweep.  The desired number at the end of a sweep
8315 // is the
8316 //      count-at-end-of-previous-sweep (an amount that was enough)
8317 //              - count-at-beginning-of-current-sweep  (the excess)
8318 //              + split-births  (gains in this size during interval)
8319 //              - split-deaths  (demands on this size during interval)
8320 // where the interval is from the end of one sweep to the end of the
8321 // next.
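//
// For example (hypothetical numbers): if a given size class ended the
// previous sweep with 40 chunks, begins the current sweep with 10, and
// the interval saw 25 split births and 5 split deaths, the desired
// count at the end of this sweep is 40 - 10 + 25 - 5 = 50.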
8322 //
8323 // When sweeping the sweeper maintains an accumulated chunk which is
8324 // the chunk that is made up of chunks that have been coalesced.  That
8325 // will be termed the left-hand chunk.  A new chunk of garbage that
8326 // is being considered for coalescing will be referred to as the
8327 // right-hand chunk.
8328 //
8329 // When making a decision on whether to coalesce a right-hand chunk with
8330 // the current left-hand chunk, the current count vs. the desired count
8331 // of the left-hand chunk is considered.  Also if the right-hand chunk
8332 // is near the large chunk at the end of the heap (see
8333 // ConcurrentMarkSweepGeneration::isNearLargestChunk()), then the
8334 // left-hand chunk is coalesced.
8335 //
8336 // When making a decision about whether to split a chunk, the desired count
8337 // vs. the current count of the candidate to be split is also considered.
8338 // If the candidate is underpopulated (currently fewer chunks than desired)
8339 // a chunk of an overpopulated (currently more chunks than desired) size may
8340 // be chosen.  The "hint" associated with a free list, if non-null, points
8341 // to a free list which may be overpopulated.
8342 //
8343 
8344 void SweepClosure::do_already_free_chunk(FreeChunk* fc) {
8345   const size_t size = fc->size();
8346   // Chunks that cannot be coalesced are not in the
8347   // free lists.
8348   if (CMSTestInFreeList && !fc->cantCoalesce()) {
8349     assert(_sp->verify_chunk_in_free_list(fc),
8350       "free chunk should be in free lists");
8351   }
8352   // a chunk that is already free, should not have been
8353   // marked in the bit map
8354   HeapWord* const addr = (HeapWord*) fc;
8355   assert(!_bitMap->isMarked(addr), "free chunk should be unmarked");
8356   // Verify that the bit map has no bits marked between
8357   // addr and purported end of this block.
8358   _bitMap->verifyNoOneBitsInRange(addr + 1, addr + size);
8359 
8360   // Some chunks cannot be coalesced under any circumstances.
8361   // See the definition of cantCoalesce().
8362   if (!fc->cantCoalesce()) {
8363     // This chunk can potentially be coalesced.
8364     if (_sp->adaptive_freelists()) {
      // All the work is done in do_post_free_or_garbage_chunk().
8366       do_post_free_or_garbage_chunk(fc, size);
8367     } else {  // Not adaptive free lists
8368       // this is a free chunk that can potentially be coalesced by the sweeper;
8369       if (!inFreeRange()) {
8370         // if the next chunk is a free block that can't be coalesced
8371         // it doesn't make sense to remove this chunk from the free lists
8372         FreeChunk* nextChunk = (FreeChunk*)(addr + size);
8373         assert((HeapWord*)nextChunk <= _sp->end(), "Chunk size out of bounds?");
8374         if ((HeapWord*)nextChunk < _sp->end() &&     // There is another free chunk to the right ...
8375             nextChunk->is_free()               &&     // ... which is free...
8376             nextChunk->cantCoalesce()) {             // ... but can't be coalesced
8377           // nothing to do
8378         } else {
8379           // Potentially the start of a new free range:
8380           // Don't eagerly remove it from the free lists.
8381           // No need to remove it if it will just be put
8382           // back again.  (Also from a pragmatic point of view
8383           // if it is a free block in a region that is beyond
8384           // any allocated blocks, an assertion will fail)
8385           // Remember the start of a free run.
8386           initialize_free_range(addr, true);
8387           // end - can coalesce with next chunk
8388         }
8389       } else {
8390         // the midst of a free range, we are coalescing
8391         print_free_block_coalesced(fc);
8392         if (CMSTraceSweeper) {
8393           gclog_or_tty->print("  -- pick up free block 0x%x (%d)\n", fc, size);
8394         }
8395         // remove it from the free lists
8396         _sp->removeFreeChunkFromFreeLists(fc);
8397         set_lastFreeRangeCoalesced(true);
8398         // If the chunk is being coalesced and the current free range is
8399         // in the free lists, remove the current free range so that it
8400         // will be returned to the free lists in its entirety - all
8401         // the coalesced pieces included.
8402         if (freeRangeInFreeLists()) {
8403           FreeChunk* ffc = (FreeChunk*) freeFinger();
8404           assert(ffc->size() == pointer_delta(addr, freeFinger()),
8405             "Size of free range is inconsistent with chunk size.");
8406           if (CMSTestInFreeList) {
8407             assert(_sp->verify_chunk_in_free_list(ffc),
8408               "free range is not in free lists");
8409           }
8410           _sp->removeFreeChunkFromFreeLists(ffc);
8411           set_freeRangeInFreeLists(false);
8412         }
8413       }
8414     }
8415     // Note that if the chunk is not coalescable (the else arm
8416     // below), we unconditionally flush, without needing to do
8417     // a "lookahead," as we do below.
8418     if (inFreeRange()) lookahead_and_flush(fc, size);
8419   } else {
8420     // Code path common to both original and adaptive free lists.
8421 
    // can't coalesce with previous block; this should be treated
    // as the end of a free run, if any
8424     if (inFreeRange()) {
8425       // we kicked some butt; time to pick up the garbage
8426       assert(freeFinger() < addr, "freeFinger points too high");
8427       flush_cur_free_chunk(freeFinger(), pointer_delta(addr, freeFinger()));
8428     }
8429     // else, nothing to do, just continue
8430   }
8431 }
8432 
8433 size_t SweepClosure::do_garbage_chunk(FreeChunk* fc) {
8434   // This is a chunk of garbage.  It is not in any free list.
8435   // Add it to a free list or let it possibly be coalesced into
8436   // a larger chunk.
8437   HeapWord* const addr = (HeapWord*) fc;
8438   const size_t size = CompactibleFreeListSpace::adjustObjectSize(oop(addr)->size());
8439 
8440   if (_sp->adaptive_freelists()) {
8441     // Verify that the bit map has no bits marked between
8442     // addr and purported end of just dead object.
8443     _bitMap->verifyNoOneBitsInRange(addr + 1, addr + size);
8444 
8445     do_post_free_or_garbage_chunk(fc, size);
8446   } else {
8447     if (!inFreeRange()) {
8448       // start of a new free range
8449       assert(size > 0, "A free range should have a size");
8450       initialize_free_range(addr, false);
8451     } else {
8452       // this will be swept up when we hit the end of the
8453       // free range
8454       if (CMSTraceSweeper) {
8455         gclog_or_tty->print("  -- pick up garbage 0x%x (%d) \n", fc, size);
8456       }
8457       // If the chunk is being coalesced and the current free range is
8458       // in the free lists, remove the current free range so that it
8459       // will be returned to the free lists in its entirety - all
8460       // the coalesced pieces included.
8461       if (freeRangeInFreeLists()) {
8462         FreeChunk* ffc = (FreeChunk*)freeFinger();
8463         assert(ffc->size() == pointer_delta(addr, freeFinger()),
8464           "Size of free range is inconsistent with chunk size.");
8465         if (CMSTestInFreeList) {
8466           assert(_sp->verify_chunk_in_free_list(ffc),
8467             "free range is not in free lists");
8468         }
8469         _sp->removeFreeChunkFromFreeLists(ffc);
8470         set_freeRangeInFreeLists(false);
8471       }
8472       set_lastFreeRangeCoalesced(true);
8473     }
8474     // this will be swept up when we hit the end of the free range
8475 
8476     // Verify that the bit map has no bits marked between
8477     // addr and purported end of just dead object.
8478     _bitMap->verifyNoOneBitsInRange(addr + 1, addr + size);
8479   }
8480   assert(_limit >= addr + size,
8481          "A freshly garbage chunk can't possibly straddle over _limit");
8482   if (inFreeRange()) lookahead_and_flush(fc, size);
8483   return size;
8484 }
8485 
8486 size_t SweepClosure::do_live_chunk(FreeChunk* fc) {
8487   HeapWord* addr = (HeapWord*) fc;
8488   // The sweeper has just found a live object. Return any accumulated
8489   // left hand chunk to the free lists.
8490   if (inFreeRange()) {
8491     assert(freeFinger() < addr, "freeFinger points too high");
8492     flush_cur_free_chunk(freeFinger(), pointer_delta(addr, freeFinger()));
8493   }
8494 
8495   // This object is live: we'd normally expect this to be
8496   // an oop, and like to assert the following:
8497   // assert(oop(addr)->is_oop(), "live block should be an oop");
8498   // However, as we commented above, this may be an object whose
8499   // header hasn't yet been initialized.
8500   size_t size;
8501   assert(_bitMap->isMarked(addr), "Tautology for this control point");
8502   if (_bitMap->isMarked(addr + 1)) {
8503     // Determine the size from the bit map, rather than trying to
8504     // compute it from the object header.
8505     HeapWord* nextOneAddr = _bitMap->getNextMarkedWordAddress(addr + 2);
8506     size = pointer_delta(nextOneAddr + 1, addr);
8507     assert(size == CompactibleFreeListSpace::adjustObjectSize(size),
8508            "alignment problem");
8509 
8510 #ifdef ASSERT
8511       if (oop(addr)->klass_or_null() != NULL) {
8512         // Ignore mark word because we are running concurrent with mutators
8513         assert(oop(addr)->is_oop(true), "live block should be an oop");
8514         assert(size ==
8515                CompactibleFreeListSpace::adjustObjectSize(oop(addr)->size()),
8516                "P-mark and computed size do not agree");
8517       }
8518 #endif
8519 
8520   } else {
8521     // This should be an initialized object that's alive.
8522     assert(oop(addr)->klass_or_null() != NULL,
8523            "Should be an initialized object");
8524     // Ignore mark word because we are running concurrent with mutators
8525     assert(oop(addr)->is_oop(true), "live block should be an oop");
8526     // Verify that the bit map has no bits marked between
8527     // addr and purported end of this block.
8528     size = CompactibleFreeListSpace::adjustObjectSize(oop(addr)->size());
8529     assert(size >= 3, "Necessary for Printezis marks to work");
8530     assert(!_bitMap->isMarked(addr+1), "Tautology for this control point");
8531     DEBUG_ONLY(_bitMap->verifyNoOneBitsInRange(addr+2, addr+size);)
8532   }
8533   return size;
8534 }
8535 
8536 void SweepClosure::do_post_free_or_garbage_chunk(FreeChunk* fc,
8537                                                  size_t chunkSize) {
8538   // do_post_free_or_garbage_chunk() should only be called in the case
8539   // of the adaptive free list allocator.
8540   const bool fcInFreeLists = fc->is_free();
8541   assert(_sp->adaptive_freelists(), "Should only be used in this case.");
8542   assert((HeapWord*)fc <= _limit, "sweep invariant");
8543   if (CMSTestInFreeList && fcInFreeLists) {
8544     assert(_sp->verify_chunk_in_free_list(fc), "free chunk is not in free lists");
8545   }
8546 
8547   if (CMSTraceSweeper) {
8548     gclog_or_tty->print_cr("  -- pick up another chunk at 0x%x (%d)", fc, chunkSize);
8549   }
8550 
8551   HeapWord* const fc_addr = (HeapWord*) fc;
8552 
8553   bool coalesce;
8554   const size_t left  = pointer_delta(fc_addr, freeFinger());
8555   const size_t right = chunkSize;
8556   switch (FLSCoalescePolicy) {
    // numeric value forms a coalescing aggressiveness metric
8558     case 0:  { // never coalesce
8559       coalesce = false;
8560       break;
8561     }
8562     case 1: { // coalesce if left & right chunks on overpopulated lists
8563       coalesce = _sp->coalOverPopulated(left) &&
8564                  _sp->coalOverPopulated(right);
8565       break;
8566     }
8567     case 2: { // coalesce if left chunk on overpopulated list (default)
8568       coalesce = _sp->coalOverPopulated(left);
8569       break;
8570     }
8571     case 3: { // coalesce if left OR right chunk on overpopulated list
8572       coalesce = _sp->coalOverPopulated(left) ||
8573                  _sp->coalOverPopulated(right);
8574       break;
8575     }
8576     case 4: { // always coalesce
8577       coalesce = true;
8578       break;
8579     }
8580     default:
8581      ShouldNotReachHere();
8582   }
8583 
8584   // Should the current free range be coalesced?
8585   // If the chunk is in a free range and either we decided to coalesce above
8586   // or the chunk is near the large block at the end of the heap
8587   // (isNearLargestChunk() returns true), then coalesce this chunk.
8588   const bool doCoalesce = inFreeRange()
8589                           && (coalesce || _g->isNearLargestChunk(fc_addr));
8590   if (doCoalesce) {
8591     // Coalesce the current free range on the left with the new
8592     // chunk on the right.  If either is on a free list,
8593     // it must be removed from the list and stashed in the closure.
8594     if (freeRangeInFreeLists()) {
8595       FreeChunk* const ffc = (FreeChunk*)freeFinger();
8596       assert(ffc->size() == pointer_delta(fc_addr, freeFinger()),
8597         "Size of free range is inconsistent with chunk size.");
8598       if (CMSTestInFreeList) {
8599         assert(_sp->verify_chunk_in_free_list(ffc),
8600           "Chunk is not in free lists");
8601       }
8602       _sp->coalDeath(ffc->size());
8603       _sp->removeFreeChunkFromFreeLists(ffc);
8604       set_freeRangeInFreeLists(false);
8605     }
8606     if (fcInFreeLists) {
8607       _sp->coalDeath(chunkSize);
8608       assert(fc->size() == chunkSize,
8609         "The chunk has the wrong size or is not in the free lists");
8610       _sp->removeFreeChunkFromFreeLists(fc);
8611     }
8612     set_lastFreeRangeCoalesced(true);
8613     print_free_block_coalesced(fc);
8614   } else {  // not in a free range and/or should not coalesce
8615     // Return the current free range and start a new one.
8616     if (inFreeRange()) {
8617       // In a free range but cannot coalesce with the right hand chunk.
8618       // Put the current free range into the free lists.
8619       flush_cur_free_chunk(freeFinger(),
8620                            pointer_delta(fc_addr, freeFinger()));
8621     }
8622     // Set up for new free range.  Pass along whether the right hand
8623     // chunk is in the free lists.
8624     initialize_free_range((HeapWord*)fc, fcInFreeLists);
8625   }
8626 }
8627 
8628 // Lookahead flush:
8629 // If we are tracking a free range, and this is the last chunk that
8630 // we'll look at because its end crosses past _limit, we'll preemptively
8631 // flush it along with any free range we may be holding on to. Note that
8632 // this can be the case only for an already free or freshly garbage
8633 // chunk. If this block is an object, it can never straddle
8634 // over _limit. The "straddling" occurs when _limit is set at
8635 // the previous end of the space when this cycle started, and
8636 // a subsequent heap expansion caused the previously co-terminal
8637 // free block to be coalesced with the newly expanded portion,
8638 // thus rendering _limit a non-block-boundary making it dangerous
8639 // for the sweeper to step over and examine.
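// For illustration (hypothetical addresses): suppose _limit was set to the
// old end of the space, 0x9000, when this cycle started, the heap then
// expanded to 0xA000, and the free block that used to end at 0x9000 was
// coalesced with the expansion into a single free chunk [0x8800, 0xA000).
// When the sweeper reaches 0x8800, eob == 0xA000 >= _limit, so it flushes
// that chunk together with any free range it is tracking instead of
// stepping to 0xA000, which lies past _limit.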
8640 void SweepClosure::lookahead_and_flush(FreeChunk* fc, size_t chunk_size) {
8641   assert(inFreeRange(), "Should only be called if currently in a free range.");
8642   HeapWord* const eob = ((HeapWord*)fc) + chunk_size;
8643   assert(_sp->used_region().contains(eob - 1),
8644          err_msg("eob = " PTR_FORMAT " out of bounds wrt _sp = [" PTR_FORMAT "," PTR_FORMAT ")"
8645                  " when examining fc = " PTR_FORMAT "(" SIZE_FORMAT ")",
                 eob, _sp->bottom(), _sp->end(), fc, chunk_size));
8647   if (eob >= _limit) {
8648     assert(eob == _limit || fc->is_free(), "Only a free chunk should allow us to cross over the limit");
8649     if (CMSTraceSweeper) {
8650       gclog_or_tty->print_cr("_limit " PTR_FORMAT " reached or crossed by block "
8651                              "[" PTR_FORMAT "," PTR_FORMAT ") in space "
8652                              "[" PTR_FORMAT "," PTR_FORMAT ")",
8653                              _limit, fc, eob, _sp->bottom(), _sp->end());
8654     }
8655     // Return the storage we are tracking back into the free lists.
8656     if (CMSTraceSweeper) {
8657       gclog_or_tty->print_cr("Flushing ... ");
8658     }
8659     assert(freeFinger() < eob, "Error");
8660     flush_cur_free_chunk( freeFinger(), pointer_delta(eob, freeFinger()));
8661   }
8662 }
8663 
8664 void SweepClosure::flush_cur_free_chunk(HeapWord* chunk, size_t size) {
8665   assert(inFreeRange(), "Should only be called if currently in a free range.");
8666   assert(size > 0,
8667     "A zero sized chunk cannot be added to the free lists.");
8668   if (!freeRangeInFreeLists()) {
8669     if (CMSTestInFreeList) {
8670       FreeChunk* fc = (FreeChunk*) chunk;
8671       fc->set_size(size);
8672       assert(!_sp->verify_chunk_in_free_list(fc),
8673         "chunk should not be in free lists yet");
8674     }
8675     if (CMSTraceSweeper) {
8676       gclog_or_tty->print_cr(" -- add free block 0x%x (%d) to free lists",
8677                     chunk, size);
8678     }
8679     // A new free range is going to be starting.  The current
8680     // free range has not been added to the free lists yet or
8681     // was removed so add it back.
8682     // If the current free range was coalesced, then the death
8683     // of the free range was recorded.  Record a birth now.
8684     if (lastFreeRangeCoalesced()) {
8685       _sp->coalBirth(size);
8686     }
8687     _sp->addChunkAndRepairOffsetTable(chunk, size,
8688             lastFreeRangeCoalesced());
8689   } else if (CMSTraceSweeper) {
8690     gclog_or_tty->print_cr("Already in free list: nothing to flush");
8691   }
8692   set_inFreeRange(false);
8693   set_freeRangeInFreeLists(false);
8694 }
8695 
8696 // We take a break if we've been at this for a while,
8697 // so as to avoid monopolizing the locks involved.
8698 void SweepClosure::do_yield_work(HeapWord* addr) {
8699   // Return current free chunk being used for coalescing (if any)
8700   // to the appropriate freelist.  After yielding, the next
8701   // free block encountered will start a coalescing range of
8702   // free blocks.  If the next free block is adjacent to the
8703   // chunk just flushed, they will need to wait for the next
8704   // sweep to be coalesced.
8705   if (inFreeRange()) {
8706     flush_cur_free_chunk(freeFinger(), pointer_delta(addr, freeFinger()));
8707   }
8708 
8709   // First give up the locks, then yield, then re-lock.
8710   // We should probably use a constructor/destructor idiom to
8711   // do this unlock/lock or modify the MutexUnlocker class to
8712   // serve our purpose. XXX
8713   assert_lock_strong(_bitMap->lock());
8714   assert_lock_strong(_freelistLock);
8715   assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
8716          "CMS thread should hold CMS token");
8717   _bitMap->lock()->unlock();
8718   _freelistLock->unlock();
8719   ConcurrentMarkSweepThread::desynchronize(true);
8720   ConcurrentMarkSweepThread::acknowledge_yield_request();
8721   _collector->stopTimer();
8722   GCPauseTimer p(_collector->size_policy()->concurrent_timer_ptr());
8723   if (PrintCMSStatistics != 0) {
8724     _collector->incrementYields();
8725   }
8726   _collector->icms_wait();
8727 
8728   // See the comment in coordinator_yield()
8729   for (unsigned i = 0; i < CMSYieldSleepCount &&
8730                        ConcurrentMarkSweepThread::should_yield() &&
8731                        !CMSCollector::foregroundGCIsActive(); ++i) {
8732     os::sleep(Thread::current(), 1, false);
8733     ConcurrentMarkSweepThread::acknowledge_yield_request();
8734   }
8735 
8736   ConcurrentMarkSweepThread::synchronize(true);
8737   _freelistLock->lock();
8738   _bitMap->lock()->lock_without_safepoint_check();
8739   _collector->startTimer();
8740 }
8741 
8742 #ifndef PRODUCT
8743 // This is actually very useful in a product build if it can
8744 // be called from the debugger.  Compile it into the product
8745 // as needed.
8746 bool debug_verify_chunk_in_free_list(FreeChunk* fc) {
8747   return debug_cms_space->verify_chunk_in_free_list(fc);
8748 }
8749 #endif
8750 
8751 void SweepClosure::print_free_block_coalesced(FreeChunk* fc) const {
8752   if (CMSTraceSweeper) {
8753     gclog_or_tty->print_cr("Sweep:coal_free_blk " PTR_FORMAT " (" SIZE_FORMAT ")",
8754                            fc, fc->size());
8755   }
8756 }
8757 
8758 // CMSIsAliveClosure
8759 bool CMSIsAliveClosure::do_object_b(oop obj) {
8760   HeapWord* addr = (HeapWord*)obj;
8761   return addr != NULL &&
8762          (!_span.contains(addr) || _bit_map->isMarked(addr));
8763 }
8764 
8765 
8766 CMSKeepAliveClosure::CMSKeepAliveClosure( CMSCollector* collector,
8767                       MemRegion span,
8768                       CMSBitMap* bit_map, CMSMarkStack* mark_stack,
8769                       bool cpc):
8770   _collector(collector),
8771   _span(span),
8772   _bit_map(bit_map),
8773   _mark_stack(mark_stack),
8774   _concurrent_precleaning(cpc) {
8775   assert(!_span.is_empty(), "Empty span could spell trouble");
8776 }
8777 
8778 
8779 // CMSKeepAliveClosure: the serial version
8780 void CMSKeepAliveClosure::do_oop(oop obj) {
8781   HeapWord* addr = (HeapWord*)obj;
8782   if (_span.contains(addr) &&
8783       !_bit_map->isMarked(addr)) {
8784     _bit_map->mark(addr);
8785     bool simulate_overflow = false;
8786     NOT_PRODUCT(
8787       if (CMSMarkStackOverflowALot &&
8788           _collector->simulate_overflow()) {
8789         // simulate a stack overflow
8790         simulate_overflow = true;
8791       }
8792     )
8793     if (simulate_overflow || !_mark_stack->push(obj)) {
8794       if (_concurrent_precleaning) {
8795         // We dirty the overflown object and let the remark
8796         // phase deal with it.
8797         assert(_collector->overflow_list_is_empty(), "Error");
8798         // In the case of object arrays, we need to dirty all of
8799         // the cards that the object spans. No locking or atomics
8800         // are needed since no one else can be mutating the mod union
8801         // table.
8802         if (obj->is_objArray()) {
8803           size_t sz = obj->size();
8804           HeapWord* end_card_addr =
8805             (HeapWord*)round_to((intptr_t)(addr+sz), CardTableModRefBS::card_size);
8806           MemRegion redirty_range = MemRegion(addr, end_card_addr);
8807           assert(!redirty_range.is_empty(), "Arithmetical tautology");
8808           _collector->_modUnionTable.mark_range(redirty_range);
8809         } else {
8810           _collector->_modUnionTable.mark(addr);
8811         }
8812         _collector->_ser_kac_preclean_ovflw++;
8813       } else {
8814         _collector->push_on_overflow_list(obj);
8815         _collector->_ser_kac_ovflw++;
8816       }
8817     }
8818   }
8819 }
8820 
8821 void CMSKeepAliveClosure::do_oop(oop* p)       { CMSKeepAliveClosure::do_oop_work(p); }
8822 void CMSKeepAliveClosure::do_oop(narrowOop* p) { CMSKeepAliveClosure::do_oop_work(p); }
8823 
8824 // CMSParKeepAliveClosure: a parallel version of the above.
8825 // The work queues are private to each closure (thread),
8826 // but (may be) available for stealing by other threads.
8827 void CMSParKeepAliveClosure::do_oop(oop obj) {
8828   HeapWord* addr = (HeapWord*)obj;
8829   if (_span.contains(addr) &&
8830       !_bit_map->isMarked(addr)) {
8831     // In general, during recursive tracing, several threads
8832     // may be concurrently getting here; the first one to
8833     // "tag" it, claims it.
8834     if (_bit_map->par_mark(addr)) {
8835       bool res = _work_queue->push(obj);
8836       assert(res, "Low water mark should be much less than capacity");
8837       // Do a recursive trim in the hope that this will keep
8838       // stack usage lower, but leave some oops for potential stealers
8839       trim_queue(_low_water_mark);
8840     } // Else, another thread got there first
8841   }
8842 }
8843 
8844 void CMSParKeepAliveClosure::do_oop(oop* p)       { CMSParKeepAliveClosure::do_oop_work(p); }
8845 void CMSParKeepAliveClosure::do_oop(narrowOop* p) { CMSParKeepAliveClosure::do_oop_work(p); }
8846 
8847 void CMSParKeepAliveClosure::trim_queue(uint max) {
8848   while (_work_queue->size() > max) {
8849     oop new_oop;
8850     if (_work_queue->pop_local(new_oop)) {
8851       assert(new_oop != NULL && new_oop->is_oop(), "Expected an oop");
8852       assert(_bit_map->isMarked((HeapWord*)new_oop),
8853              "no white objects on this stack!");
8854       assert(_span.contains((HeapWord*)new_oop), "Out of bounds oop");
8855       // iterate over the oops in this oop, marking and pushing
8856       // the ones in CMS heap (i.e. in _span).
8857       new_oop->oop_iterate(&_mark_and_push);
8858     }
8859   }
8860 }
8861 
8862 CMSInnerParMarkAndPushClosure::CMSInnerParMarkAndPushClosure(
8863                                 CMSCollector* collector,
8864                                 MemRegion span, CMSBitMap* bit_map,
8865                                 OopTaskQueue* work_queue):
8866   _collector(collector),
8867   _span(span),
8868   _bit_map(bit_map),
8869   _work_queue(work_queue) { }
8870 
8871 void CMSInnerParMarkAndPushClosure::do_oop(oop obj) {
8872   HeapWord* addr = (HeapWord*)obj;
8873   if (_span.contains(addr) &&
8874       !_bit_map->isMarked(addr)) {
8875     if (_bit_map->par_mark(addr)) {
8876       bool simulate_overflow = false;
8877       NOT_PRODUCT(
8878         if (CMSMarkStackOverflowALot &&
8879             _collector->par_simulate_overflow()) {
8880           // simulate a stack overflow
8881           simulate_overflow = true;
8882         }
8883       )
8884       if (simulate_overflow || !_work_queue->push(obj)) {
8885         _collector->par_push_on_overflow_list(obj);
8886         _collector->_par_kac_ovflw++;
8887       }
8888     } // Else another thread got there already
8889   }
8890 }
8891 
8892 void CMSInnerParMarkAndPushClosure::do_oop(oop* p)       { CMSInnerParMarkAndPushClosure::do_oop_work(p); }
8893 void CMSInnerParMarkAndPushClosure::do_oop(narrowOop* p) { CMSInnerParMarkAndPushClosure::do_oop_work(p); }
8894 
8895 //////////////////////////////////////////////////////////////////
8896 //  CMSExpansionCause                /////////////////////////////
8897 //////////////////////////////////////////////////////////////////
8898 const char* CMSExpansionCause::to_string(CMSExpansionCause::Cause cause) {
8899   switch (cause) {
8900     case _no_expansion:
8901       return "No expansion";
8902     case _satisfy_free_ratio:
8903       return "Free ratio";
8904     case _satisfy_promotion:
8905       return "Satisfy promotion";
8906     case _satisfy_allocation:
8907       return "allocation";
8908     case _allocate_par_lab:
8909       return "Par LAB";
8910     case _allocate_par_spooling_space:
8911       return "Par Spooling Space";
8912     case _adaptive_size_policy:
8913       return "Ergonomics";
8914     default:
8915       return "unknown";
8916   }
8917 }
8918 
8919 void CMSDrainMarkingStackClosure::do_void() {
8920   // the max number to take from overflow list at a time
8921   const size_t num = _mark_stack->capacity()/4;
8922   assert(!_concurrent_precleaning || _collector->overflow_list_is_empty(),
8923          "Overflow list should be NULL during concurrent phases");
8924   while (!_mark_stack->isEmpty() ||
8925          // if stack is empty, check the overflow list
8926          _collector->take_from_overflow_list(num, _mark_stack)) {
8927     oop obj = _mark_stack->pop();
8928     HeapWord* addr = (HeapWord*)obj;
8929     assert(_span.contains(addr), "Should be within span");
8930     assert(_bit_map->isMarked(addr), "Should be marked");
8931     assert(obj->is_oop(), "Should be an oop");
8932     obj->oop_iterate(_keep_alive);
8933   }
8934 }
8935 
8936 void CMSParDrainMarkingStackClosure::do_void() {
8937   // drain queue
8938   trim_queue(0);
8939 }
8940 
8941 // Trim our work_queue so its length is below max at return
8942 void CMSParDrainMarkingStackClosure::trim_queue(uint max) {
8943   while (_work_queue->size() > max) {
8944     oop new_oop;
8945     if (_work_queue->pop_local(new_oop)) {
8946       assert(new_oop->is_oop(), "Expected an oop");
8947       assert(_bit_map->isMarked((HeapWord*)new_oop),
8948              "no white objects on this stack!");
8949       assert(_span.contains((HeapWord*)new_oop), "Out of bounds oop");
8950       // iterate over the oops in this oop, marking and pushing
8951       // the ones in CMS heap (i.e. in _span).
8952       new_oop->oop_iterate(&_mark_and_push);
8953     }
8954   }
8955 }
8956 
8957 ////////////////////////////////////////////////////////////////////
8958 // Support for Marking Stack Overflow list handling and related code
8959 ////////////////////////////////////////////////////////////////////
8960 // Much of the following code is similar in shape and spirit to the
8961 // code used in ParNewGC. We should try to share that code
8962 // as much as possible in the future.
8963 
8964 #ifndef PRODUCT
8965 // Debugging support for CMSStackOverflowALot
8966 
8967 // It's OK to call this multi-threaded; the worst thing
8968 // that can happen is that we'll get a bunch of closely
8969 // spaced simulated overflows, but that's OK, in fact
8970 // probably good as it would exercise the overflow code
8971 // under contention.
8972 bool CMSCollector::simulate_overflow() {
8973   if (_overflow_counter-- <= 0) { // just being defensive
8974     _overflow_counter = CMSMarkStackOverflowInterval;
8975     return true;
8976   } else {
8977     return false;
8978   }
8979 }
8980 
8981 bool CMSCollector::par_simulate_overflow() {
8982   return simulate_overflow();
8983 }
8984 #endif
8985 
8986 // Single-threaded
8987 bool CMSCollector::take_from_overflow_list(size_t num, CMSMarkStack* stack) {
8988   assert(stack->isEmpty(), "Expected precondition");
8989   assert(stack->capacity() > num, "Shouldn't bite more than can chew");
8990   size_t i = num;
8991   oop  cur = _overflow_list;
8992   const markOop proto = markOopDesc::prototype();
8993   NOT_PRODUCT(ssize_t n = 0;)
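  // The overflow list is threaded through the mark words of its elements:
  // follow up to "num" links, restoring each taken object's mark word to the
  // default prototype before pushing it onto the mark stack.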
8994   for (oop next; i > 0 && cur != NULL; cur = next, i--) {
8995     next = oop(cur->mark());
8996     cur->set_mark(proto);   // until proven otherwise
8997     assert(cur->is_oop(), "Should be an oop");
8998     bool res = stack->push(cur);
8999     assert(res, "Bit off more than can chew?");
9000     NOT_PRODUCT(n++;)
9001   }
9002   _overflow_list = cur;
9003 #ifndef PRODUCT
9004   assert(_num_par_pushes >= n, "Too many pops?");
9005   _num_par_pushes -= n;
9006 #endif
9007   return !stack->isEmpty();
9008 }
9009 
9010 #define BUSY  (oop(0x1aff1aff))
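// BUSY is a sentinel value, distinguishable from any real oop, that a thread
// swaps into _overflow_list to claim the entire list while it detaches a prefix.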
9011 // (MT-safe) Get a prefix of at most "num" from the list.
9012 // The overflow list is chained through the mark word of
9013 // each object in the list. We fetch the entire list,
9014 // break off a prefix of the right size and return the
9015 // remainder. If other threads try to take objects from
9016 // the overflow list at that time, they will wait for
9017 // some time to see if data becomes available. If (and
9018 // only if) another thread places one or more object(s)
9019 // on the global list before we have returned the suffix
9020 // to the global list, we will walk down our local list
9021 // to find its end and append the global list to
9022 // our suffix before returning it. This suffix walk can
9023 // prove to be expensive (quadratic in the amount of traffic)
9024 // when there are many objects in the overflow list and
9025 // there is much producer-consumer contention on the list.
9026 // *NOTE*: The overflow list manipulation code here and
9027 // in ParNewGeneration:: are very similar in shape,
9028 // except that in the ParNew case we use the old (from/eden)
9029 // copy of the object to thread the list via its klass word.
9030 // Because of the common code, if you make any changes in
9031 // the code below, please check the ParNew version to see if
9032 // similar changes might be needed.
9033 // CR 6797058 has been filed to consolidate the common code.
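// Illustrative example (editorial sketch, not part of the algorithm): with
// three overflowed objects A, B and C, _overflow_list points at A, A's mark
// word holds B, B's holds C, and C's mark word is NULL. A thread asking for a
// prefix of two exchanges BUSY into _overflow_list, keeps [A, B] for its work
// queue, and then tries to install C as the new head; if other threads have
// pushed objects in the meantime, the suffix starting at C is instead spliced
// onto the front of the list observed at that point, as the code below does.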
9034 bool CMSCollector::par_take_from_overflow_list(size_t num,
9035                                                OopTaskQueue* work_q,
9036                                                int no_of_gc_threads) {
9037   assert(work_q->size() == 0, "First empty local work queue");
9038   assert(num < work_q->max_elems(), "Can't bite more than we can chew");
9039   if (_overflow_list == NULL) {
9040     return false;
9041   }
9042   // Grab the entire list; we'll put back a suffix
9043   oop prefix = (oop)Atomic::xchg_ptr(BUSY, &_overflow_list);
9044   Thread* tid = Thread::current();
9045   // Before "no_of_gc_threads" was introduced, CMSOverflowSpinCount was
9046   // set to ParallelGCThreads.
9047   size_t CMSOverflowSpinCount = (size_t) no_of_gc_threads; // was ParallelGCThreads;
9048   size_t sleep_time_millis = MAX2((size_t)1, num/100);
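  // Sleep time scales with the number of objects requested (num/100 ms),
  // but is never less than one millisecond per attempt.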
9049   // If the list is busy, we spin for a short while,
9050   // sleeping between attempts to get the list.
9051   for (size_t spin = 0; prefix == BUSY && spin < CMSOverflowSpinCount; spin++) {
9052     os::sleep(tid, sleep_time_millis, false);
9053     if (_overflow_list == NULL) {
9054       // Nothing left to take
9055       return false;
9056     } else if (_overflow_list != BUSY) {
9057       // Try and grab the prefix
9058       prefix = (oop)Atomic::xchg_ptr(BUSY, &_overflow_list);
9059     }
9060   }
9061   // If the list was found to be empty, or we spun long
9062   // enough, we give up and return empty-handed. If we leave
9063   // the list in the BUSY state below, it must be the case that
9064   // some other thread holds the overflow list and will set it
9065   // to a non-BUSY state in the future.
9066   if (prefix == NULL || prefix == BUSY) {
9067      // Nothing to take or waited long enough
9068      if (prefix == NULL) {
9069        // Write back the NULL in case we overwrote it with BUSY above
9070        // and it is still the same value.
9071        (void) Atomic::cmpxchg_ptr(NULL, &_overflow_list, BUSY);
9072      }
9073      return false;
9074   }
9075   assert(prefix != NULL && prefix != BUSY, "Error");
9076   size_t i = num;
9077   oop cur = prefix;
9078   // Walk down the first "num" objects, unless we reach the end.
9079   for (; i > 1 && cur->mark() != NULL; cur = oop(cur->mark()), i--);
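  // (The loop body above is intentionally empty: the iteration expression
  // advances cur and decrements i.)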
9080   if (cur->mark() == NULL) {
9081     // We have "num" or fewer elements in the list, so there
9082     // is nothing to return to the global list.
9083     // Write back the NULL in lieu of the BUSY we wrote
9084     // above, if it is still the same value.
9085     if (_overflow_list == BUSY) {
9086       (void) Atomic::cmpxchg_ptr(NULL, &_overflow_list, BUSY);
9087     }
9088   } else {
9089     // Chop off the suffix and return it to the global list.
9090     assert(cur->mark() != BUSY, "Error");
9091     oop suffix_head = cur->mark(); // suffix will be put back on global list
9092     cur->set_mark(NULL);           // break off suffix
9093     // It's possible that the list is still in the empty (i.e. BUSY) state
9094     // we left it in a short while ago; in that case we may be
9095     // able to place back the suffix without incurring the cost
9096     // of a walk down the list.
9097     oop observed_overflow_list = _overflow_list;
9098     oop cur_overflow_list = observed_overflow_list;
9099     bool attached = false;
9100     while (observed_overflow_list == BUSY || observed_overflow_list == NULL) {
9101       observed_overflow_list =
9102         (oop) Atomic::cmpxchg_ptr(suffix_head, &_overflow_list, cur_overflow_list);
9103       if (cur_overflow_list == observed_overflow_list) {
9104         attached = true;
9105         break;
9106       } else cur_overflow_list = observed_overflow_list;
9107     }
9108     if (!attached) {
9109       // Too bad, someone else sneaked in (at least) an element; we'll need
9110       // to do a splice. Find tail of suffix so we can prepend suffix to global
9111       // list.
9112       for (cur = suffix_head; cur->mark() != NULL; cur = (oop)(cur->mark()));
9113       oop suffix_tail = cur;
9114       assert(suffix_tail != NULL && suffix_tail->mark() == NULL,
9115              "Tautology");
9116       observed_overflow_list = _overflow_list;
9117       do {
9118         cur_overflow_list = observed_overflow_list;
9119         if (cur_overflow_list != BUSY) {
9120           // Do the splice ...
9121           suffix_tail->set_mark(markOop(cur_overflow_list));
9122         } else { // cur_overflow_list == BUSY
9123           suffix_tail->set_mark(NULL);
9124         }
9125         // ... and try to place spliced list back on overflow_list ...
9126         observed_overflow_list =
9127           (oop) Atomic::cmpxchg_ptr(suffix_head, &_overflow_list, cur_overflow_list);
9128       } while (cur_overflow_list != observed_overflow_list);
9129       // ... until we have succeeded in doing so.
9130     }
9131   }
9132 
9133   // Push the prefix elements on work_q
9134   assert(prefix != NULL, "control point invariant");
9135   const markOop proto = markOopDesc::prototype();
9136   oop next;
9137   NOT_PRODUCT(ssize_t n = 0;)
9138   for (cur = prefix; cur != NULL; cur = next) {
9139     next = oop(cur->mark());
9140     cur->set_mark(proto);   // until proven otherwise
9141     assert(cur->is_oop(), "Should be an oop");
9142     bool res = work_q->push(cur);
9143     assert(res, "Bit off more than we can chew?");
9144     NOT_PRODUCT(n++;)
9145   }
9146 #ifndef PRODUCT
9147   assert(_num_par_pushes >= n, "Too many pops?");
9148   Atomic::add_ptr(-(intptr_t)n, &_num_par_pushes);
9149 #endif
9150   return true;
9151 }
9152 
9153 // Single-threaded
9154 void CMSCollector::push_on_overflow_list(oop p) {
9155   NOT_PRODUCT(_num_par_pushes++;)
9156   assert(p->is_oop(), "Not an oop");
9157   preserve_mark_if_necessary(p);
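  // Link p at the head of the list by threading the current head through its
  // mark word; the original mark, if it needed preserving, was saved just above.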
9158   p->set_mark((markOop)_overflow_list);
9159   _overflow_list = p;
9160 }
9161 
9162 // Multi-threaded; use CAS to prepend to overflow list
9163 void CMSCollector::par_push_on_overflow_list(oop p) {
9164   NOT_PRODUCT(Atomic::inc_ptr(&_num_par_pushes);)
9165   assert(p->is_oop(), "Not an oop");
9166   par_preserve_mark_if_necessary(p);
9167   oop observed_overflow_list = _overflow_list;
9168   oop cur_overflow_list;
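  // Standard CAS loop: thread the observed head through p's mark word and try
  // to publish p as the new head. BUSY is only a sentinel, never a real oop,
  // so it is not threaded into p's mark; in that case p is published with a
  // NULL mark and temporarily forms a list of one.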
9169   do {
9170     cur_overflow_list = observed_overflow_list;
9171     if (cur_overflow_list != BUSY) {
9172       p->set_mark(markOop(cur_overflow_list));
9173     } else {
9174       p->set_mark(NULL);
9175     }
9176     observed_overflow_list =
9177       (oop) Atomic::cmpxchg_ptr(p, &_overflow_list, cur_overflow_list);
9178   } while (cur_overflow_list != observed_overflow_list);
9179 }
9180 #undef BUSY
9181 
9182 // Single threaded
9183 // General Note on GrowableArray: pushes may silently fail
9184 // because we are (temporarily) out of C-heap for expanding
9185 // the stack. The problem is quite ubiquitous and affects
9186 // a lot of code in the JVM. The prudent thing for GrowableArray
9187 // to do (for now) is to exit with an error. However, that may
9188 // be too draconian in some cases because the caller may be
9189 // able to recover without much harm. For such cases, we
9190 // should probably introduce a "soft_push" method which returns
9191 // an indication of success or failure with the assumption that
9192 // the caller may be able to recover from a failure; code in
9193 // the VM can then be changed, incrementally, to deal with such
9194 // failures where possible, thus incrementally hardening the VM
9195 // in such low-resource situations.
9196 void CMSCollector::preserve_mark_work(oop p, markOop m) {
9197   _preserved_oop_stack.push(p);
9198   _preserved_mark_stack.push(m);
9199   assert(m == p->mark(), "Mark word changed");
9200   assert(_preserved_oop_stack.size() == _preserved_mark_stack.size(),
9201          "bijection");
9202 }
9203 
9204 // Single threaded
9205 void CMSCollector::preserve_mark_if_necessary(oop p) {
9206   markOop m = p->mark();
9207   if (m->must_be_preserved(p)) {
9208     preserve_mark_work(p, m);
9209   }
9210 }
9211 
9212 void CMSCollector::par_preserve_mark_if_necessary(oop p) {
9213   markOop m = p->mark();
9214   if (m->must_be_preserved(p)) {
9215     MutexLockerEx x(ParGCRareEvent_lock, Mutex::_no_safepoint_check_flag);
9216     // Even though we read the mark word without holding
9217     // the lock, we are assured that it will not change
9218     // because we "own" this oop, so no other thread can
9219     // be trying to push it on the overflow list; see
9220     // the assertion in preserve_mark_work() that checks
9221     // that m == p->mark().
9222     preserve_mark_work(p, m);
9223   }
9224 }
9225 
9226 // We should be able to do this multi-threaded,
9227 // a chunk of stack being a task (this is
9228 // correct because each oop only ever appears
9229 // once in the overflow list). However, it's
9230 // not very easy to completely overlap this with
9231 // other operations, so will generally not be done
9232 // until all work's been completed. Because we
9233 // expect the preserved oop stack (set) to be small,
9234 // it's probably fine to do this single-threaded.
9235 // We can explore cleverer concurrent/overlapped/parallel
9236 // processing of preserved marks if we feel the
9237 // need for this in the future. Stack overflow should
9238 // be so rare in practice and, when it happens, its
9239 // effect on performance so great that this will
9240 // likely just be in the noise anyway.
9241 void CMSCollector::restore_preserved_marks_if_any() {
9242   assert(SafepointSynchronize::is_at_safepoint(),
9243          "world should be stopped");
9244   assert(Thread::current()->is_ConcurrentGC_thread() ||
9245          Thread::current()->is_VM_thread(),
9246          "should be single-threaded");
9247   assert(_preserved_oop_stack.size() == _preserved_mark_stack.size(),
9248          "bijection");
9249 
9250   while (!_preserved_oop_stack.is_empty()) {
9251     oop p = _preserved_oop_stack.pop();
9252     assert(p->is_oop(), "Should be an oop");
9253     assert(_span.contains(p), "oop should be in _span");
9254     assert(p->mark() == markOopDesc::prototype(),
9255            "Set when taken from overflow list");
9256     markOop m = _preserved_mark_stack.pop();
9257     p->set_mark(m);
9258   }
9259   assert(_preserved_mark_stack.is_empty() && _preserved_oop_stack.is_empty(),
9260          "stacks were cleared above");
9261 }
9262 
9263 #ifndef PRODUCT
9264 bool CMSCollector::no_preserved_marks() const {
9265   return _preserved_mark_stack.is_empty() && _preserved_oop_stack.is_empty();
9266 }
9267 #endif
9268 
9269 CMSAdaptiveSizePolicy* ASConcurrentMarkSweepGeneration::cms_size_policy() const
9270 {
9271   GenCollectedHeap* gch = (GenCollectedHeap*) GenCollectedHeap::heap();
9272   CMSAdaptiveSizePolicy* size_policy =
9273     (CMSAdaptiveSizePolicy*) gch->gen_policy()->size_policy();
9274   assert(size_policy->is_gc_cms_adaptive_size_policy(),
9275     "Wrong type for size policy");
9276   return size_policy;
9277 }
9278 
9279 void ASConcurrentMarkSweepGeneration::resize(size_t cur_promo_size,
9280                                            size_t desired_promo_size) {
9281   if (cur_promo_size < desired_promo_size) {
9282     size_t expand_bytes = desired_promo_size - cur_promo_size;
9283     if (PrintAdaptiveSizePolicy && Verbose) {
9284       gclog_or_tty->print_cr(" ASConcurrentMarkSweepGeneration::resize "
9285         "Expanding tenured generation by " SIZE_FORMAT " (bytes)",
9286         expand_bytes);
9287     }
9288     expand(expand_bytes,
9289            MinHeapDeltaBytes,
9290            CMSExpansionCause::_adaptive_size_policy);
9291   } else if (desired_promo_size < cur_promo_size) {
9292     size_t shrink_bytes = cur_promo_size - desired_promo_size;
9293     if (PrintAdaptiveSizePolicy && Verbose) {
9294       gclog_or_tty->print_cr(" ASConcurrentMarkSweepGeneration::resize "
9295         "Shrinking tenured generation by " SIZE_FORMAT " (bytes)",
9296         shrink_bytes);
9297     }
9298     shrink(shrink_bytes);
9299   }
9300 }
9301 
9302 CMSGCAdaptivePolicyCounters* ASConcurrentMarkSweepGeneration::gc_adaptive_policy_counters() {
9303   GenCollectedHeap* gch = GenCollectedHeap::heap();
9304   CMSGCAdaptivePolicyCounters* counters =
9305     (CMSGCAdaptivePolicyCounters*) gch->collector_policy()->counters();
9306   assert(counters->kind() == GCPolicyCounters::CMSGCAdaptivePolicyCountersKind,
9307     "Wrong kind of counters");
9308   return counters;
9309 }
9310 
9311 
9312 void ASConcurrentMarkSweepGeneration::update_counters() {
9313   if (UsePerfData) {
9314     _space_counters->update_all();
9315     _gen_counters->update_all();
9316     CMSGCAdaptivePolicyCounters* counters = gc_adaptive_policy_counters();
9317     GenCollectedHeap* gch = GenCollectedHeap::heap();
9318     CMSGCStats* gc_stats_l = (CMSGCStats*) gc_stats();
9319     assert(gc_stats_l->kind() == GCStats::CMSGCStatsKind,
9320       "Wrong gc statistics type");
9321     counters->update_counters(gc_stats_l);
9322   }
9323 }
9324 
9325 void ASConcurrentMarkSweepGeneration::update_counters(size_t used) {
9326   if (UsePerfData) {
9327     _space_counters->update_used(used);
9328     _space_counters->update_capacity();
9329     _gen_counters->update_all();
9330 
9331     CMSGCAdaptivePolicyCounters* counters = gc_adaptive_policy_counters();
9332     GenCollectedHeap* gch = GenCollectedHeap::heap();
9333     CMSGCStats* gc_stats_l = (CMSGCStats*) gc_stats();
9334     assert(gc_stats_l->kind() == GCStats::CMSGCStatsKind,
9335       "Wrong gc statistics type");
9336     counters->update_counters(gc_stats_l);
9337   }
9338 }
9339 
9340 void ASConcurrentMarkSweepGeneration::shrink_by(size_t desired_bytes) {
9341   assert_locked_or_safepoint(Heap_lock);
9342   assert_lock_strong(freelistLock());
9343   HeapWord* old_end = _cmsSpace->end();
9344   HeapWord* unallocated_start = _cmsSpace->unallocated_block();
9345   assert(old_end >= unallocated_start, "Miscalculation of unallocated_start");
9346   FreeChunk* chunk_at_end = find_chunk_at_end();
9347   if (chunk_at_end == NULL) {
9348     // No room to shrink
9349     if (PrintGCDetails && Verbose) {
9350       gclog_or_tty->print_cr("No room to shrink: old_end  "
9351         PTR_FORMAT "  unallocated_start  " PTR_FORMAT
9352         " chunk_at_end  " PTR_FORMAT,
9353         old_end, unallocated_start, chunk_at_end);
9354     }
9355     return;
9356   } else {
9357 
9358     // Find the chunk at the end of the space and determine
9359     // how much it can be shrunk.
9360     size_t shrinkable_size_in_bytes = chunk_at_end->size();
9361     size_t aligned_shrinkable_size_in_bytes =
9362       align_size_down(shrinkable_size_in_bytes, os::vm_page_size());
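    // Shrinking ultimately uncommits memory, which can only be done in whole
    // pages, so round the shrinkable size down to a page boundary.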
9363     assert(unallocated_start <= (HeapWord*) chunk_at_end->end(),
9364       "Inconsistent chunk at end of space");
9365     size_t bytes = MIN2(desired_bytes, aligned_shrinkable_size_in_bytes);
9366     size_t word_size_before = heap_word_size(_virtual_space.committed_size());
9367 
9368     // Shrink the underlying space
9369     _virtual_space.shrink_by(bytes);
9370     if (PrintGCDetails && Verbose) {
9371       gclog_or_tty->print_cr("ConcurrentMarkSweepGeneration::shrink_by:"
9372         " desired_bytes " SIZE_FORMAT
9373         " shrinkable_size_in_bytes " SIZE_FORMAT
9374         " aligned_shrinkable_size_in_bytes " SIZE_FORMAT
9375         "  bytes  " SIZE_FORMAT,
9376         desired_bytes, shrinkable_size_in_bytes,
9377         aligned_shrinkable_size_in_bytes, bytes);
9378       gclog_or_tty->print_cr("          old_end  " SIZE_FORMAT
9379         "  unallocated_start  " SIZE_FORMAT,
9380         old_end, unallocated_start);
9381     }
9382 
9383     // If the space did shrink (shrinking is not guaranteed),
9384     // shrink the chunk at the end by the appropriate amount.
9385     if (((HeapWord*)_virtual_space.high()) < old_end) {
9386       size_t new_word_size =
9387         heap_word_size(_virtual_space.committed_size());
9388 
9389       // Have to remove the chunk from the dictionary because it is changing
9390       // size and might be somewhere else in the dictionary.
9391 
9392       // Get the chunk at end, shrink it, and put it
9393       // back.
9394       _cmsSpace->removeChunkFromDictionary(chunk_at_end);
9395       size_t word_size_change = word_size_before - new_word_size;
9396       size_t chunk_at_end_old_size = chunk_at_end->size();
9397       assert(chunk_at_end_old_size >= word_size_change,
9398         "Shrink is too large");
9399       chunk_at_end->set_size(chunk_at_end_old_size -
9400                           word_size_change);
9401       _cmsSpace->freed((HeapWord*) chunk_at_end->end(),
9402         word_size_change);
9403 
9404       _cmsSpace->returnChunkToDictionary(chunk_at_end);
9405 
9406       MemRegion mr(_cmsSpace->bottom(), new_word_size);
9407       _bts->resize(new_word_size);  // resize the block offset shared array
9408       Universe::heap()->barrier_set()->resize_covered_region(mr);
9409       _cmsSpace->assert_locked();
9410       _cmsSpace->set_end((HeapWord*)_virtual_space.high());
9411 
9412       NOT_PRODUCT(_cmsSpace->dictionary()->verify());
9413 
9414       // update the space and generation capacity counters
9415       if (UsePerfData) {
9416         _space_counters->update_capacity();
9417         _gen_counters->update_all();
9418       }
9419 
9420       if (Verbose && PrintGCDetails) {
9421         size_t new_mem_size = _virtual_space.committed_size();
9422         size_t old_mem_size = new_mem_size + bytes;
9423         gclog_or_tty->print_cr("Shrinking %s from " SIZE_FORMAT "K by " SIZE_FORMAT "K to " SIZE_FORMAT "K",
9424                       name(), old_mem_size/K, bytes/K, new_mem_size/K);
9425       }
9426     }
9427 
9428     assert(_cmsSpace->unallocated_block() <= _cmsSpace->end(),
9429       "Inconsistency at end of space");
9430     assert(chunk_at_end->end() == (uintptr_t*) _cmsSpace->end(),
9431       "Shrinking is inconsistent");
9432     return;
9433   }
9434 }
9435 
9436 // Transfer some number of overflown objects to the usual marking
9437 // stack. Return true if some objects were transferred.
9438 bool MarkRefsIntoAndScanClosure::take_from_overflow_list() {
9439   size_t num = MIN2((size_t)(_mark_stack->capacity() - _mark_stack->length())/4,
9440                     (size_t)ParGCDesiredObjsFromOverflowList);
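  // That is, take at most a quarter of the remaining stack capacity, further
  // capped by ParGCDesiredObjsFromOverflowList.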
9441 
9442   bool res = _collector->take_from_overflow_list(num, _mark_stack);
9443   assert(_collector->overflow_list_is_empty() || res,
9444          "If list is not empty, we should have taken something");
9445   assert(!res || !_mark_stack->isEmpty(),
9446          "If we took something, it should now be on our stack");
9447   return res;
9448 }
9449 
9450 size_t MarkDeadObjectsClosure::do_blk(HeapWord* addr) {
9451   size_t res = _sp->block_size_no_stall(addr, _collector);
9452   if (_sp->block_is_obj(addr)) {
9453     if (_live_bit_map->isMarked(addr)) {
9454       // It can't have been dead in a previous cycle
9455       guarantee(!_dead_bit_map->isMarked(addr), "No resurrection!");
9456     } else {
9457       _dead_bit_map->mark(addr);      // mark the dead object
9458     }
9459   }
9460   // Could be 0, if the block size could not be computed without stalling.
9461   return res;
9462 }
9463 
9464 TraceCMSMemoryManagerStats::TraceCMSMemoryManagerStats(CMSCollector::CollectorState phase, GCCause::Cause cause): TraceMemoryManagerStats() {
9465 
9466   switch (phase) {
9467     case CMSCollector::InitialMarking:
9468       initialize(true  /* fullGC */ ,
9469                  cause /* cause of the GC */,
9470                  true  /* recordGCBeginTime */,
9471                  true  /* recordPreGCUsage */,
9472                  false /* recordPeakUsage */,
9473                  false /* recordPostGCusage */,
9474                  true  /* recordAccumulatedGCTime */,
9475                  false /* recordGCEndTime */,
9476                  false /* countCollection */  );
9477       break;
9478 
9479     case CMSCollector::FinalMarking:
9480       initialize(true  /* fullGC */ ,
9481                  cause /* cause of the GC */,
9482                  false /* recordGCBeginTime */,
9483                  false /* recordPreGCUsage */,
9484                  false /* recordPeakUsage */,
9485                  false /* recordPostGCusage */,
9486                  true  /* recordAccumulatedGCTime */,
9487                  false /* recordGCEndTime */,
9488                  false /* countCollection */  );
9489       break;
9490 
9491     case CMSCollector::Sweeping:
9492       initialize(true  /* fullGC */ ,
9493                  cause /* cause of the GC */,
9494                  false /* recordGCBeginTime */,
9495                  false /* recordPreGCUsage */,
9496                  true  /* recordPeakUsage */,
9497                  true  /* recordPostGCusage */,
9498                  false /* recordAccumulatedGCTime */,
9499                  true  /* recordGCEndTime */,
9500                  true  /* countCollection */  );
9501       break;
9502 
9503     default:
9504       ShouldNotReachHere();
9505   }
9506 }