/*
 * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "classfile/symbolTable.hpp"
#include "classfile/systemDictionary.hpp"
#include "code/codeCache.hpp"
#include "gc_implementation/concurrentMarkSweep/cmsAdaptiveSizePolicy.hpp"
#include "gc_implementation/concurrentMarkSweep/cmsCollectorPolicy.hpp"
#include "gc_implementation/concurrentMarkSweep/cmsGCAdaptivePolicyCounters.hpp"
#include "gc_implementation/concurrentMarkSweep/cmsOopClosures.inline.hpp"
#include "gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.hpp"
#include "gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.inline.hpp"
#include "gc_implementation/concurrentMarkSweep/concurrentMarkSweepThread.hpp"
#include "gc_implementation/concurrentMarkSweep/vmCMSOperations.hpp"
#include "gc_implementation/parNew/parNewGeneration.hpp"
#include "gc_implementation/shared/collectorCounters.hpp"
#include "gc_implementation/shared/gcTimer.hpp"
#include "gc_implementation/shared/gcTrace.hpp"
#include "gc_implementation/shared/gcTraceTime.hpp"
#include "gc_implementation/shared/isGCActiveMark.hpp"
#include "gc_interface/collectedHeap.inline.hpp"
#include "memory/allocation.hpp"
#include "memory/cardTableRS.hpp"
#include "memory/collectorPolicy.hpp"
#include "memory/gcLocker.inline.hpp"
#include "memory/genCollectedHeap.hpp"
#include "memory/genMarkSweep.hpp"
#include "memory/genOopClosures.inline.hpp"
#include "memory/iterator.hpp"
#include "memory/referencePolicy.hpp"
#include "memory/resourceArea.hpp"
#include "oops/oop.inline.hpp"
#include "prims/jvmtiExport.hpp"
#include "runtime/globals_extension.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/java.hpp"
#include "runtime/vmThread.hpp"
#include "services/memoryService.hpp"
#include "services/runtimeService.hpp"

// statics
CMSCollector* ConcurrentMarkSweepGeneration::_collector = NULL;
bool          CMSCollector::_full_gc_requested          = false;

//////////////////////////////////////////////////////////////////
// In support of CMS/VM thread synchronization
//////////////////////////////////////////////////////////////////
// We split use of the CGC_lock into 2 "levels".
// The low-level locking is of the usual CGC_lock monitor. We introduce
// a higher level "token" (hereafter "CMS token") built on top of the
// low level monitor (hereafter "CGC lock").
// The token-passing protocol gives priority to the VM thread. The
// CMS-lock doesn't provide any fairness guarantees, but clients
// should ensure that it is only held for very short, bounded
// durations.
//
// When either of the CMS thread or the VM thread is involved in
// collection operations during which it does not want the other
// thread to interfere, it obtains the CMS token.
//
// If either thread tries to get the token while the other has
// it, that thread waits. However, if the VM thread and CMS thread
// both want the token, then the VM thread gets priority while the
// CMS thread waits. This ensures, for instance, that the "concurrent"
// phases of the CMS thread's work do not block out the VM thread
// for long periods of time as the CMS thread continues to hog
// the token. (See bug 4616232).
//
// The baton-passing functions are, however, controlled by the
// flags _foregroundGCShouldWait and _foregroundGCIsActive,
// and here the low-level CMS lock, not the high level token,
// ensures mutual exclusion.
//
// Two important conditions that we have to satisfy:
// 1. if a thread does a low-level wait on the CMS lock, then it
//    relinquishes the CMS token if it were holding that token
//    when it acquired the low-level CMS lock.
// 2. any low-level notifications on the low-level lock
//    should only be sent when a thread has relinquished the token.
//
// In the absence of either property, we'd have potential deadlock.
//
// We protect each of the CMS (concurrent and sequential) phases
// with the CMS _token_, not the CMS _lock_.
//
// The only code protected by CMS lock is the token acquisition code
// itself, see ConcurrentMarkSweepThread::[de]synchronize(), and the
// baton-passing code.
//
// Unfortunately, I couldn't come up with a good abstraction to factor and
// hide the naked CGC_lock manipulation in the baton-passing code
// further below. That's something we should try to do. Also, the proof
// of correctness of this 2-level locking scheme is far from obvious,
// and potentially quite slippery. We have an uneasy suspicion, for instance,
// that there may be a theoretical possibility of delay/starvation in the
// low-level lock/wait/notify scheme used for the baton-passing because of
// potential interference with the priority scheme embodied in the
// CMS-token-passing protocol. See related comments at a CGC_lock->wait()
// invocation further below and marked with "XXX 20011219YSR".
// Indeed, as we note elsewhere, this may become yet more slippery
// in the presence of multiple CMS and/or multiple VM threads. XXX

class CMSTokenSync: public StackObj {
 private:
  bool _is_cms_thread;
 public:
  CMSTokenSync(bool is_cms_thread):
    _is_cms_thread(is_cms_thread) {
    assert(is_cms_thread == Thread::current()->is_ConcurrentGC_thread(),
           "Incorrect argument to constructor");
    ConcurrentMarkSweepThread::synchronize(_is_cms_thread);
  }

  ~CMSTokenSync() {
    assert(_is_cms_thread ?
             ConcurrentMarkSweepThread::cms_thread_has_cms_token() :
             ConcurrentMarkSweepThread::vm_thread_has_cms_token(),
          "Incorrect state");
    ConcurrentMarkSweepThread::desynchronize(_is_cms_thread);
  }
};

// Convenience class that does a CMSTokenSync, and then acquires
// up to three locks.
class CMSTokenSyncWithLocks: public CMSTokenSync {
 private:
  // Note: locks are acquired in textual declaration order
  // and released in the opposite order
  MutexLockerEx _locker1, _locker2, _locker3;
 public:
  CMSTokenSyncWithLocks(bool is_cms_thread, Mutex* mutex1,
                        Mutex* mutex2 = NULL, Mutex* mutex3 = NULL):
    CMSTokenSync(is_cms_thread),
    _locker1(mutex1, Mutex::_no_safepoint_check_flag),
    _locker2(mutex2, Mutex::_no_safepoint_check_flag),
    _locker3(mutex3, Mutex::_no_safepoint_check_flag)
  { }
};
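//
// Illustrative sketch (not part of the collector): a CMS phase that must
// hold the CMS token as well as, say, the free list lock of the cms
// generation would typically be bracketed as follows, with both the token
// and the lock(s) released automatically on scope exit:
//
//   void CMSCollector::some_concurrent_phase() {
//     CMSTokenSyncWithLocks ts(true /* is_cms_thread */,
//                              _cmsGen->freelistLock());
//     // ... phase work, protected by the token and the lock ...
//   }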


// Wrapper class to temporarily disable icms during a foreground cms collection.
class ICMSDisabler: public StackObj {
 public:
  // The ctor disables icms and wakes up the thread so it notices the change;
  // the dtor re-enables icms.  Note that the CMSCollector methods will check
  // CMSIncrementalMode.
  ICMSDisabler()  { CMSCollector::disable_icms(); CMSCollector::start_icms(); }
  ~ICMSDisabler() { CMSCollector::enable_icms(); }
};
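//
// Typical use (sketch): declare an ICMSDisabler on the stack around the
// foreground collection work, e.g.
//
//   {
//     ICMSDisabler icms_dis;  // icms off for the duration of this scope
//     // ... do the foreground (stop-world) collection work ...
//   }                         // dtor re-enables icms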

//////////////////////////////////////////////////////////////////
//  Concurrent Mark-Sweep Generation /////////////////////////////
//////////////////////////////////////////////////////////////////

NOT_PRODUCT(CompactibleFreeListSpace* debug_cms_space;)

// This struct contains per-thread things necessary to support parallel
// young-gen collection.
class CMSParGCThreadState: public CHeapObj<mtGC> {
 public:
  CFLS_LAB lab;
  PromotionInfo promo;

  // Constructor.
  CMSParGCThreadState(CompactibleFreeListSpace* cfls) : lab(cfls) {
    promo.setSpace(cfls);
  }
};

ConcurrentMarkSweepGeneration::ConcurrentMarkSweepGeneration(
     ReservedSpace rs, size_t initial_byte_size, int level,
     CardTableRS* ct, bool use_adaptive_freelists,
     FreeBlockDictionary<FreeChunk>::DictionaryChoice dictionaryChoice) :
  CardGeneration(rs, initial_byte_size, level, ct),
  _dilatation_factor(((double)MinChunkSize)/((double)(CollectedHeap::min_fill_size()))),
  _debug_collection_type(Concurrent_collection_type)
{
  HeapWord* bottom = (HeapWord*) _virtual_space.low();
  HeapWord* end    = (HeapWord*) _virtual_space.high();

  _direct_allocated_words = 0;
  NOT_PRODUCT(
    _numObjectsPromoted = 0;
    _numWordsPromoted = 0;
    _numObjectsAllocated = 0;
    _numWordsAllocated = 0;
  )

  _cmsSpace = new CompactibleFreeListSpace(_bts, MemRegion(bottom, end),
                                           use_adaptive_freelists,
                                           dictionaryChoice);
  NOT_PRODUCT(debug_cms_space = _cmsSpace;)
  if (_cmsSpace == NULL) {
    vm_exit_during_initialization(
      "CompactibleFreeListSpace allocation failure");
  }
  _cmsSpace->_gen = this;

  _gc_stats = new CMSGCStats();

  // Verify the assumption that FreeChunk::_prev and OopDesc::_klass
  // offsets match. The ability to tell free chunks from objects
  // depends on this property.
  debug_only(
    FreeChunk* junk = NULL;
    assert(UseCompressedOops ||
           junk->prev_addr() == (void*)(oop(junk)->klass_addr()),
           "Offset of FreeChunk::_prev within FreeChunk must match"
           "  that of OopDesc::_klass within OopDesc");
  )
  if (CollectedHeap::use_parallel_gc_threads()) {
    typedef CMSParGCThreadState* CMSParGCThreadStatePtr;
    _par_gc_thread_states =
      NEW_C_HEAP_ARRAY(CMSParGCThreadStatePtr, ParallelGCThreads, mtGC);
    if (_par_gc_thread_states == NULL) {
      vm_exit_during_initialization("Could not allocate par gc structs");
    }
    for (uint i = 0; i < ParallelGCThreads; i++) {
      _par_gc_thread_states[i] = new CMSParGCThreadState(cmsSpace());
      if (_par_gc_thread_states[i] == NULL) {
        vm_exit_during_initialization("Could not allocate par gc structs");
      }
    }
  } else {
    _par_gc_thread_states = NULL;
  }
  _incremental_collection_failed = false;
  // The "dilatation_factor" is the expansion that can occur on
  // account of the fact that the minimum object size in the CMS
  // generation may be larger than that in, say, a contiguous young
  // generation.
  // Ideally, in the calculation below, we'd compute the dilatation
  // factor as: MinChunkSize/(promoting_gen's min object size)
  // Since we do not have such a general query interface for the
  // promoting generation, we'll instead just use the minimum
  // object size (which today is a header's worth of space);
  // note that all arithmetic is in units of HeapWords.
  assert(MinChunkSize >= CollectedHeap::min_fill_size(), "just checking");
  assert(_dilatation_factor >= 1.0, "from previous assert");
}


// The field "_initiating_occupancy" represents the occupancy percentage
// at which we trigger a new collection cycle.  Unless explicitly specified
// via CMSInitiating[Perm]OccupancyFraction (argument "io" below), it
// is calculated by:
//
//   Let "f" be MinHeapFreeRatio in
//
//    _initiating_occupancy = 100-f +
//                            f * (CMSTrigger[Perm]Ratio/100)
//   where CMSTrigger[Perm]Ratio is the argument "tr" below.
//
// That is, if we assume the heap is at its desired maximum occupancy at the
// end of a collection, we let CMSTrigger[Perm]Ratio of the (purported) free
// space be allocated before initiating a new collection cycle.
//
void ConcurrentMarkSweepGeneration::init_initiating_occupancy(intx io, intx tr) {
  assert(io <= 100 && tr >= 0 && tr <= 100, "Check the arguments");
  if (io >= 0) {
    _initiating_occupancy = (double)io / 100.0;
  } else {
    _initiating_occupancy = ((100 - MinHeapFreeRatio) +
                             (double)(tr * MinHeapFreeRatio) / 100.0)
                            / 100.0;
  }
}
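// Worked example (illustrative numbers only): with "io" unset (io < 0),
// MinHeapFreeRatio = 40 and tr = 80, the expression above gives
//   _initiating_occupancy = ((100 - 40) + (80 * 40) / 100.0) / 100.0
//                         = (60 + 32) / 100.0 = 0.92,
// i.e. a new collection cycle is initiated at 92% occupancy.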

void ConcurrentMarkSweepGeneration::ref_processor_init() {
  assert(collector() != NULL, "no collector");
  collector()->ref_processor_init();
}

void CMSCollector::ref_processor_init() {
  if (_ref_processor == NULL) {
    // Allocate and initialize a reference processor
    _ref_processor =
      new ReferenceProcessor(_span,                               // span
                             (ParallelGCThreads > 1) && ParallelRefProcEnabled, // mt processing
                             (int) ParallelGCThreads,             // mt processing degree
                             _cmsGen->refs_discovery_is_mt(),     // mt discovery
                             (int) MAX2(ConcGCThreads, ParallelGCThreads), // mt discovery degree
                             _cmsGen->refs_discovery_is_atomic(), // discovery is not atomic
                             &_is_alive_closure,                  // closure for liveness info
                             false);                              // next field updates do not need write barrier
    // Initialize the _ref_processor field of CMSGen
    _cmsGen->set_ref_processor(_ref_processor);

    // Allocate a dummy ref processor for perm gen.
    ReferenceProcessor* rp2 = new ReferenceProcessor();
    if (rp2 == NULL) {
      vm_exit_during_initialization("Could not allocate ReferenceProcessor object");
    }
    _permGen->set_ref_processor(rp2);
  }
}

CMSAdaptiveSizePolicy* CMSCollector::size_policy() {
  GenCollectedHeap* gch = GenCollectedHeap::heap();
  assert(gch->kind() == CollectedHeap::GenCollectedHeap,
    "Wrong type of heap");
  CMSAdaptiveSizePolicy* sp = (CMSAdaptiveSizePolicy*)
    gch->gen_policy()->size_policy();
  assert(sp->is_gc_cms_adaptive_size_policy(),
    "Wrong type of size policy");
  return sp;
}

CMSGCAdaptivePolicyCounters* CMSCollector::gc_adaptive_policy_counters() {
  CMSGCAdaptivePolicyCounters* results =
    (CMSGCAdaptivePolicyCounters*) collector_policy()->counters();
  assert(
    results->kind() == GCPolicyCounters::CMSGCAdaptivePolicyCountersKind,
    "Wrong gc policy counter kind");
  return results;
}


void ConcurrentMarkSweepGeneration::initialize_performance_counters() {

  const char* gen_name = "old";

  // Generation Counters - generation 1, 1 subspace
  _gen_counters = new GenerationCounters(gen_name, 1, 1, &_virtual_space);

  _space_counters = new GSpaceCounters(gen_name, 0,
                                       _virtual_space.reserved_size(),
                                       this, _gen_counters);
}

CMSStats::CMSStats(ConcurrentMarkSweepGeneration* cms_gen, unsigned int alpha):
  _cms_gen(cms_gen)
{
  assert(alpha <= 100, "bad value");
  _saved_alpha = alpha;

  // Initialize the alphas to the bootstrap value of 100.
  _gc0_alpha = _cms_alpha = 100;

  _cms_begin_time.update();
  _cms_end_time.update();

  _gc0_duration = 0.0;
  _gc0_period = 0.0;
  _gc0_promoted = 0;

  _cms_duration = 0.0;
  _cms_period = 0.0;
  _cms_allocated = 0;

  _cms_used_at_gc0_begin = 0;
  _cms_used_at_gc0_end = 0;
  _allow_duty_cycle_reduction = false;
  _valid_bits = 0;
  _icms_duty_cycle = CMSIncrementalDutyCycle;
}

double CMSStats::cms_free_adjustment_factor(size_t free) const {
  // TBD: CR 6909490
  return 1.0;
}

void CMSStats::adjust_cms_free_adjustment_factor(bool fail, size_t free) {
}

// If promotion failure handling is on, use
// the padded average size of the promotion for each
// young generation collection.
double CMSStats::time_until_cms_gen_full() const {
  size_t cms_free = _cms_gen->cmsSpace()->free();
  GenCollectedHeap* gch = GenCollectedHeap::heap();
  size_t expected_promotion = MIN2(gch->get_gen(0)->capacity(),
                                   (size_t) _cms_gen->gc_stats()->avg_promoted()->padded_average());
  if (cms_free > expected_promotion) {
    // Start a cms collection if there isn't enough space to promote
    // for the next minor collection.  Use the padded average as
    // a safety factor.
    cms_free -= expected_promotion;

    // Adjust by the safety factor.
    double cms_free_dbl = (double)cms_free;
    double cms_adjustment = (100.0 - CMSIncrementalSafetyFactor)/100.0;
    // Apply a further correction factor which tries to adjust
    // for recent occurrence of concurrent mode failures.
    cms_adjustment = cms_adjustment * cms_free_adjustment_factor(cms_free);
    cms_free_dbl = cms_free_dbl * cms_adjustment;

    if (PrintGCDetails && Verbose) {
      gclog_or_tty->print_cr("CMSStats::time_until_cms_gen_full: cms_free "
        SIZE_FORMAT " expected_promotion " SIZE_FORMAT,
        cms_free, expected_promotion);
      gclog_or_tty->print_cr("  cms_free_dbl %f cms_consumption_rate %f",
        cms_free_dbl, cms_consumption_rate() + 1.0);
    }
    // Add 1 in case the consumption rate goes to zero.
    return cms_free_dbl / (cms_consumption_rate() + 1.0);
  }
  return 0.0;
}
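// Worked example (illustrative numbers only): with cms_free = 200M,
// expected_promotion = 40M and CMSIncrementalSafetyFactor = 10, the
// adjusted free space is (200M - 40M) * 0.90 = 144M (the further
// cms_free_adjustment_factor() correction is currently 1.0); at a
// consumption rate of 12M per second this returns roughly
// 144M / (12M + 1) ~= 12 seconds until the cms generation is full.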

// Compare the duration of the cms collection to the
// time remaining before the cms generation is full
// (i.e. before its free space is exhausted).
// Note that the time from the start of the cms collection
// to the start of the cms sweep (less than the total
// duration of the cms collection) could be used instead.
// This was tried, but some applications then experienced
// promotion failures early in execution, possibly because
// the averages were not accurate enough at the beginning.
double CMSStats::time_until_cms_start() const {
  // We add "gc0_period" to the "work" calculation
  // below because this query is done (mostly) at the
  // end of a scavenge, so we need to conservatively
  // account for that much possible delay
  // in the query so as to avoid concurrent mode failures
  // due to starting the collection just a wee bit too
  // late.
  double work = cms_duration() + gc0_period();
  double deadline = time_until_cms_gen_full();
  // If a concurrent mode failure occurred recently, we want to be
  // more conservative and halve our expected time_until_cms_gen_full()
  if (work > deadline) {
    if (Verbose && PrintGCDetails) {
      gclog_or_tty->print(
        " CMSCollector: collect because of anticipated promotion "
        "before full %3.7f + %3.7f > %3.7f ", cms_duration(),
        gc0_period(), time_until_cms_gen_full());
    }
    return 0.0;
  }
  return deadline - work;
}

// Return a duty cycle based on old_duty_cycle and new_duty_cycle, limiting the
// amount of change to prevent wild oscillation.
unsigned int CMSStats::icms_damped_duty_cycle(unsigned int old_duty_cycle,
                                              unsigned int new_duty_cycle) {
  assert(old_duty_cycle <= 100, "bad input value");
  assert(new_duty_cycle <= 100, "bad input value");

  // Note:  use subtraction with caution since it may underflow (values are
  // unsigned).  Addition is safe since we're in the range 0-100.
  unsigned int damped_duty_cycle = new_duty_cycle;
  if (new_duty_cycle < old_duty_cycle) {
    const unsigned int largest_delta = MAX2(old_duty_cycle / 4, 5U);
    if (new_duty_cycle + largest_delta < old_duty_cycle) {
      damped_duty_cycle = old_duty_cycle - largest_delta;
    }
  } else if (new_duty_cycle > old_duty_cycle) {
    const unsigned int largest_delta = MAX2(old_duty_cycle / 4, 15U);
    if (new_duty_cycle > old_duty_cycle + largest_delta) {
      damped_duty_cycle = MIN2(old_duty_cycle + largest_delta, 100U);
    }
  }
  assert(damped_duty_cycle <= 100, "invalid duty cycle computed");

  if (CMSTraceIncrementalPacing) {
    gclog_or_tty->print(" [icms_damped_duty_cycle(%d,%d) = %d] ",
                           old_duty_cycle, new_duty_cycle, damped_duty_cycle);
  }
  return damped_duty_cycle;
}
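// Worked example: a requested drop from old_duty_cycle = 80 to
// new_duty_cycle = 20 gives largest_delta = MAX2(80 / 4, 5U) = 20, and
// since 20 + 20 < 80 the result is clipped to 80 - 20 = 60. A requested
// rise from 20 to 80 gives largest_delta = MAX2(20 / 4, 15U) = 15, and
// since 80 > 20 + 15 the result is clipped to 20 + 15 = 35.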

unsigned int CMSStats::icms_update_duty_cycle_impl() {
  assert(CMSIncrementalPacing && valid(),
         "should be handled in icms_update_duty_cycle()");

  double cms_time_so_far = cms_timer().seconds();
  double scaled_duration = cms_duration_per_mb() * _cms_used_at_gc0_end / M;
  double scaled_duration_remaining = fabsd(scaled_duration - cms_time_so_far);

  // Avoid division by 0.
  double time_until_full = MAX2(time_until_cms_gen_full(), 0.01);
  double duty_cycle_dbl = 100.0 * scaled_duration_remaining / time_until_full;

  unsigned int new_duty_cycle = MIN2((unsigned int)duty_cycle_dbl, 100U);
  if (new_duty_cycle > _icms_duty_cycle) {
    // Avoid very small duty cycles (1 or 2); 0 is allowed.
    if (new_duty_cycle > 2) {
      _icms_duty_cycle = icms_damped_duty_cycle(_icms_duty_cycle,
                                                new_duty_cycle);
    }
  } else if (_allow_duty_cycle_reduction) {
    // The duty cycle is reduced only once per cms cycle (see record_cms_end()).
    new_duty_cycle = icms_damped_duty_cycle(_icms_duty_cycle, new_duty_cycle);
    // Respect the minimum duty cycle.
    unsigned int min_duty_cycle = (unsigned int)CMSIncrementalDutyCycleMin;
    _icms_duty_cycle = MAX2(new_duty_cycle, min_duty_cycle);
  }

  if (PrintGCDetails || CMSTraceIncrementalPacing) {
    gclog_or_tty->print(" icms_dc=%d ", _icms_duty_cycle);
  }

  _allow_duty_cycle_reduction = false;
  return _icms_duty_cycle;
}

#ifndef PRODUCT
void CMSStats::print_on(outputStream *st) const {
  st->print(" gc0_alpha=%d,cms_alpha=%d", _gc0_alpha, _cms_alpha);
  st->print(",gc0_dur=%g,gc0_per=%g,gc0_promo=" SIZE_FORMAT,
               gc0_duration(), gc0_period(), gc0_promoted());
  st->print(",cms_dur=%g,cms_dur_per_mb=%g,cms_per=%g,cms_alloc=" SIZE_FORMAT,
            cms_duration(), cms_duration_per_mb(),
            cms_period(), cms_allocated());
  st->print(",cms_since_beg=%g,cms_since_end=%g",
            cms_time_since_begin(), cms_time_since_end());
  st->print(",cms_used_beg=" SIZE_FORMAT ",cms_used_end=" SIZE_FORMAT,
            _cms_used_at_gc0_begin, _cms_used_at_gc0_end);
  if (CMSIncrementalMode) {
    st->print(",dc=%d", icms_duty_cycle());
  }

  if (valid()) {
    st->print(",promo_rate=%g,cms_alloc_rate=%g",
              promotion_rate(), cms_allocation_rate());
    st->print(",cms_consumption_rate=%g,time_until_full=%g",
              cms_consumption_rate(), time_until_cms_gen_full());
  }
  st->print(" ");
}
#endif // #ifndef PRODUCT

CMSCollector::CollectorState CMSCollector::_collectorState =
                             CMSCollector::Idling;
bool CMSCollector::_foregroundGCIsActive = false;
bool CMSCollector::_foregroundGCShouldWait = false;

CMSCollector::CMSCollector(ConcurrentMarkSweepGeneration* cmsGen,
                           ConcurrentMarkSweepGeneration* permGen,
                           CardTableRS*                   ct,
                           ConcurrentMarkSweepPolicy*     cp):
  _cmsGen(cmsGen),
  _permGen(permGen),
  _ct(ct),
  _ref_processor(NULL),    // will be set later
  _conc_workers(NULL),     // may be set later
  _abort_preclean(false),
  _start_sampling(false),
  _between_prologue_and_epilogue(false),
  _markBitMap(0, Mutex::leaf + 1, "CMS_markBitMap_lock"),
  _perm_gen_verify_bit_map(0, -1 /* no mutex */, "No_lock"),
  _modUnionTable((CardTableModRefBS::card_shift - LogHeapWordSize),
                 -1 /* lock-free */, "No_lock" /* dummy */),
  _modUnionClosure(&_modUnionTable),
  _modUnionClosurePar(&_modUnionTable),
  // Adjust my span to cover old (cms) gen and perm gen
  _span(cmsGen->reserved()._union(permGen->reserved())),
  // Construct the is_alive_closure with _span & markBitMap
  _is_alive_closure(_span, &_markBitMap),
  _restart_addr(NULL),
  _overflow_list(NULL),
  _stats(cmsGen),
  _eden_chunk_array(NULL),     // may be set in ctor body
  _eden_chunk_capacity(0),     // -- ditto --
  _eden_chunk_index(0),        // -- ditto --
  _survivor_plab_array(NULL),  // -- ditto --
  _survivor_chunk_array(NULL), // -- ditto --
  _survivor_chunk_capacity(0), // -- ditto --
  _survivor_chunk_index(0),    // -- ditto --
  _ser_pmc_preclean_ovflw(0),
  _ser_kac_preclean_ovflw(0),
  _ser_pmc_remark_ovflw(0),
  _par_pmc_remark_ovflw(0),
  _ser_kac_ovflw(0),
  _par_kac_ovflw(0),
#ifndef PRODUCT
  _num_par_pushes(0),
#endif
  _collection_count_start(0),
  _verifying(false),
  _icms_start_limit(NULL),
  _icms_stop_limit(NULL),
  _verification_mark_bm(0, Mutex::leaf + 1, "CMS_verification_mark_bm_lock"),
  _completed_initialization(false),
  _collector_policy(cp),
  _should_unload_classes(false),
  _concurrent_cycles_since_last_unload(0),
  _roots_scanning_options(0),
  _inter_sweep_estimate(CMS_SweepWeight, CMS_SweepPadding),
  _intra_sweep_estimate(CMS_SweepWeight, CMS_SweepPadding),
  _gc_tracer_cm(new (ResourceObj::C_HEAP, mtGC) CMSTracer()),
  _gc_timer_cm(new (ResourceObj::C_HEAP, mtGC) ConcurrentGCTimer()),
  _cms_start_registered(false)
{
  if (ExplicitGCInvokesConcurrentAndUnloadsClasses) {
    ExplicitGCInvokesConcurrent = true;
  }
  // Now expand the span and allocate the collection support structures
  // (MUT, marking bit map etc.) to cover both generations subject to
  // collection.

  // First check that _permGen is adjacent to _cmsGen and above it.
  assert(   _cmsGen->reserved().word_size()  > 0
         && _permGen->reserved().word_size() > 0,
         "generations should not be of zero size");
  assert(_cmsGen->reserved().intersection(_permGen->reserved()).is_empty(),
         "_cmsGen and _permGen should not overlap");
  assert(_cmsGen->reserved().end() == _permGen->reserved().start(),
         "_cmsGen->end() different from _permGen->start()");

  // For use by dirty card to oop closures.
  _cmsGen->cmsSpace()->set_collector(this);
  _permGen->cmsSpace()->set_collector(this);

  // Allocate MUT and marking bit map
  {
    MutexLockerEx x(_markBitMap.lock(), Mutex::_no_safepoint_check_flag);
    if (!_markBitMap.allocate(_span)) {
      warning("Failed to allocate CMS Bit Map");
      return;
    }
    assert(_markBitMap.covers(_span), "_markBitMap inconsistency?");
  }
  {
    _modUnionTable.allocate(_span);
    assert(_modUnionTable.covers(_span), "_modUnionTable inconsistency?");
  }

  if (!_markStack.allocate(MarkStackSize)) {
    warning("Failed to allocate CMS Marking Stack");
    return;
  }
  if (!_revisitStack.allocate(CMSRevisitStackSize)) {
    warning("Failed to allocate CMS Revisit Stack");
    return;
  }

  // Support for multi-threaded concurrent phases
  if (CMSConcurrentMTEnabled) {
    if (FLAG_IS_DEFAULT(ConcGCThreads)) {
      // just for now
      FLAG_SET_DEFAULT(ConcGCThreads, (ParallelGCThreads + 3)/4);
    }
    if (ConcGCThreads > 1) {
      _conc_workers = new YieldingFlexibleWorkGang("Parallel CMS Threads",
                                 ConcGCThreads, true);
      if (_conc_workers == NULL) {
        warning("GC/CMS: _conc_workers allocation failure: "
              "forcing -CMSConcurrentMTEnabled");
        CMSConcurrentMTEnabled = false;
      } else {
        _conc_workers->initialize_workers();
      }
    } else {
      CMSConcurrentMTEnabled = false;
    }
  }
  if (!CMSConcurrentMTEnabled) {
    ConcGCThreads = 0;
  } else {
    // Turn off CMSCleanOnEnter optimization temporarily for
    // the MT case where it's not fixed yet; see 6178663.
    CMSCleanOnEnter = false;
  }
  assert((_conc_workers != NULL) == (ConcGCThreads > 1),
         "Inconsistency");

  // Parallel task queues; these are shared for the
  // concurrent and stop-world phases of CMS, but
  // are not shared with parallel scavenge (ParNew).
  {
    uint i;
    uint num_queues = (uint) MAX2(ParallelGCThreads, ConcGCThreads);

    if ((CMSParallelRemarkEnabled || CMSConcurrentMTEnabled
         || ParallelRefProcEnabled)
        && num_queues > 0) {
      _task_queues = new OopTaskQueueSet(num_queues);
      if (_task_queues == NULL) {
        warning("task_queues allocation failure.");
        return;
      }
      _hash_seed = NEW_C_HEAP_ARRAY(int, num_queues, mtGC);
      if (_hash_seed == NULL) {
        warning("_hash_seed array allocation failure");
        return;
      }

      typedef Padded<OopTaskQueue> PaddedOopTaskQueue;
      for (i = 0; i < num_queues; i++) {
        PaddedOopTaskQueue *q = new PaddedOopTaskQueue();
        if (q == NULL) {
          warning("work_queue allocation failure.");
          return;
        }
        _task_queues->register_queue(i, q);
      }
      for (i = 0; i < num_queues; i++) {
        _task_queues->queue(i)->initialize();
        _hash_seed[i] = 17;  // copied from ParNew
      }
    }
  }

  _cmsGen ->init_initiating_occupancy(CMSInitiatingOccupancyFraction, CMSTriggerRatio);
  _permGen->init_initiating_occupancy(CMSInitiatingPermOccupancyFraction, CMSTriggerPermRatio);

  // Clip CMSBootstrapOccupancy between 0 and 100.
  _bootstrap_occupancy = ((double)MIN2((uintx)100, MAX2((uintx)0, CMSBootstrapOccupancy)))
                         /(double)100;

  _full_gcs_since_conc_gc = 0;

  // Now tell CMS generations the identity of their collector
  ConcurrentMarkSweepGeneration::set_collector(this);

  // Create & start a CMS thread for this CMS collector
  _cmsThread = ConcurrentMarkSweepThread::start(this);
  assert(cmsThread() != NULL, "CMS Thread should have been created");
  assert(cmsThread()->collector() == this,
         "CMS Thread should refer to this gen");
  assert(CGC_lock != NULL, "Where's the CGC_lock?");

  // Support for parallelizing young gen rescan
  GenCollectedHeap* gch = GenCollectedHeap::heap();
  _young_gen = gch->prev_gen(_cmsGen);
  if (gch->supports_inline_contig_alloc()) {
    _top_addr = gch->top_addr();
    _end_addr = gch->end_addr();
    assert(_young_gen != NULL, "no _young_gen");
    _eden_chunk_index = 0;
    _eden_chunk_capacity = (_young_gen->max_capacity()+CMSSamplingGrain)/CMSSamplingGrain;
    _eden_chunk_array = NEW_C_HEAP_ARRAY(HeapWord*, _eden_chunk_capacity, mtGC);
    if (_eden_chunk_array == NULL) {
      _eden_chunk_capacity = 0;
      warning("GC/CMS: _eden_chunk_array allocation failure");
    }
  }
  assert(_eden_chunk_array != NULL || _eden_chunk_capacity == 0, "Error");

  // Support for parallelizing survivor space rescan
  if (CMSParallelRemarkEnabled && CMSParallelSurvivorRemarkEnabled) {
    const size_t max_plab_samples =
      ((DefNewGeneration*)_young_gen)->max_survivor_size()/MinTLABSize;

    _survivor_plab_array  = NEW_C_HEAP_ARRAY(ChunkArray, ParallelGCThreads, mtGC);
    _survivor_chunk_array = NEW_C_HEAP_ARRAY(HeapWord*, 2*max_plab_samples, mtGC);
    _cursor               = NEW_C_HEAP_ARRAY(size_t, ParallelGCThreads, mtGC);
    if (_survivor_plab_array == NULL || _survivor_chunk_array == NULL
        || _cursor == NULL) {
      warning("Failed to allocate survivor plab/chunk array");
      if (_survivor_plab_array  != NULL) {
        FREE_C_HEAP_ARRAY(ChunkArray, _survivor_plab_array, mtGC);
        _survivor_plab_array = NULL;
      }
      if (_survivor_chunk_array != NULL) {
        FREE_C_HEAP_ARRAY(HeapWord*, _survivor_chunk_array, mtGC);
        _survivor_chunk_array = NULL;
      }
      if (_cursor != NULL) {
        FREE_C_HEAP_ARRAY(size_t, _cursor, mtGC);
        _cursor = NULL;
      }
    } else {
      _survivor_chunk_capacity = 2*max_plab_samples;
      for (uint i = 0; i < ParallelGCThreads; i++) {
        HeapWord** vec = NEW_C_HEAP_ARRAY(HeapWord*, max_plab_samples, mtGC);
        if (vec == NULL) {
          warning("Failed to allocate survivor plab array");
          for (int j = i; j > 0; j--) {
            FREE_C_HEAP_ARRAY(HeapWord*, _survivor_plab_array[j-1].array(), mtGC);
          }
          FREE_C_HEAP_ARRAY(ChunkArray, _survivor_plab_array, mtGC);
          FREE_C_HEAP_ARRAY(HeapWord*, _survivor_chunk_array, mtGC);
          _survivor_plab_array = NULL;
          _survivor_chunk_array = NULL;
          _survivor_chunk_capacity = 0;
          break;
        } else {
          ChunkArray* cur =
            ::new (&_survivor_plab_array[i]) ChunkArray(vec,
                                                        max_plab_samples);
          assert(cur->end() == 0, "Should be 0");
          assert(cur->array() == vec, "Should be vec");
          assert(cur->capacity() == max_plab_samples, "Error");
        }
      }
    }
  }
  assert(   (   _survivor_plab_array  != NULL
             && _survivor_chunk_array != NULL)
         || (   _survivor_chunk_capacity == 0
             && _survivor_chunk_index == 0),
         "Error");

  // Choose what strong roots should be scanned depending on verification options
  // and perm gen collection mode.
  if (!CMSClassUnloadingEnabled) {
    // If class unloading is disabled we want to include all classes into the root set.
    add_root_scanning_option(SharedHeap::SO_AllClasses);
  } else {
    add_root_scanning_option(SharedHeap::SO_SystemClasses);
  }

  NOT_PRODUCT(_overflow_counter = CMSMarkStackOverflowInterval;)
  _gc_counters = new CollectorCounters("CMS", 1);
  _completed_initialization = true;
  _inter_sweep_timer.start();  // start of time
}

const char* ConcurrentMarkSweepGeneration::name() const {
  return "concurrent mark-sweep generation";
}
void ConcurrentMarkSweepGeneration::update_counters() {
  if (UsePerfData) {
    _space_counters->update_all();
    _gen_counters->update_all();
  }
}

// This is an optimized version of update_counters(). It takes the
// used value as a parameter rather than computing it.
//
void ConcurrentMarkSweepGeneration::update_counters(size_t used) {
  if (UsePerfData) {
    _space_counters->update_used(used);
    _space_counters->update_capacity();
    _gen_counters->update_all();
  }
}

void ConcurrentMarkSweepGeneration::print() const {
  Generation::print();
  cmsSpace()->print();
}

#ifndef PRODUCT
void ConcurrentMarkSweepGeneration::print_statistics() {
  cmsSpace()->printFLCensus(0);
}
#endif

void ConcurrentMarkSweepGeneration::printOccupancy(const char *s) {
  GenCollectedHeap* gch = GenCollectedHeap::heap();
  if (PrintGCDetails) {
    if (Verbose) {
      gclog_or_tty->print(" [%d %s-%s: "SIZE_FORMAT"("SIZE_FORMAT")]",
        level(), short_name(), s, used(), capacity());
    } else {
      gclog_or_tty->print(" [%d %s-%s: "SIZE_FORMAT"K("SIZE_FORMAT"K)]",
        level(), short_name(), s, used() / K, capacity() / K);
    }
  }
  if (Verbose) {
    gclog_or_tty->print(" "SIZE_FORMAT"("SIZE_FORMAT")",
              gch->used(), gch->capacity());
  } else {
    gclog_or_tty->print(" "SIZE_FORMAT"K("SIZE_FORMAT"K)",
              gch->used() / K, gch->capacity() / K);
  }
}

size_t
ConcurrentMarkSweepGeneration::contiguous_available() const {
  // dld proposes an improvement in precision here. If the committed
  // part of the space ends in a free block we should add that to
  // uncommitted size in the calculation below. Will make this
  // change later, staying with the approximation below for the
  // time being. -- ysr.
  return MAX2(_virtual_space.uncommitted_size(), unsafe_max_alloc_nogc());
}

size_t
ConcurrentMarkSweepGeneration::unsafe_max_alloc_nogc() const {
  return _cmsSpace->max_alloc_in_words() * HeapWordSize;
}

size_t ConcurrentMarkSweepGeneration::max_available() const {
  return free() + _virtual_space.uncommitted_size();
}

bool ConcurrentMarkSweepGeneration::promotion_attempt_is_safe(size_t max_promotion_in_bytes) const {
  size_t available = max_available();
  size_t av_promo  = (size_t)gc_stats()->avg_promoted()->padded_average();
  bool   res = (available >= av_promo) || (available >= max_promotion_in_bytes);
  if (Verbose && PrintGCDetails) {
    gclog_or_tty->print_cr(
      "CMS: promo attempt is%s safe: available("SIZE_FORMAT") %s av_promo("SIZE_FORMAT"),"
      "max_promo("SIZE_FORMAT")",
      res? "":" not", available, res? ">=":"<",
      av_promo, max_promotion_in_bytes);
  }
  return res;
}

// At a promotion failure, dump information on block layout in the heap
// (cms old generation).
void ConcurrentMarkSweepGeneration::promotion_failure_occurred() {
  if (CMSDumpAtPromotionFailure) {
    cmsSpace()->dump_at_safepoint_with_locks(collector(), gclog_or_tty);
  }
}

CompactibleSpace*
ConcurrentMarkSweepGeneration::first_compaction_space() const {
  return _cmsSpace;
}

void ConcurrentMarkSweepGeneration::reset_after_compaction() {
  // Clear the promotion information.  These pointers can be adjusted
  // along with all the other pointers into the heap, but
  // compaction is expected to be a rare event for
  // a heap using cms, so don't do it without seeing the need.
  if (CollectedHeap::use_parallel_gc_threads()) {
    for (uint i = 0; i < ParallelGCThreads; i++) {
      _par_gc_thread_states[i]->promo.reset();
    }
  }
}

void ConcurrentMarkSweepGeneration::space_iterate(SpaceClosure* blk, bool usedOnly) {
  blk->do_space(_cmsSpace);
}

void ConcurrentMarkSweepGeneration::compute_new_size() {
  assert_locked_or_safepoint(Heap_lock);

  // If incremental collection failed, we just want to expand
  // to the limit.
  if (incremental_collection_failed()) {
    clear_incremental_collection_failed();
    grow_to_reserved();
    return;
  }

  size_t expand_bytes = 0;
  double free_percentage = ((double) free()) / capacity();
  double desired_free_percentage = (double) MinHeapFreeRatio / 100;
  double maximum_free_percentage = (double) MaxHeapFreeRatio / 100;

  // compute expansion delta needed for reaching desired free percentage
  if (free_percentage < desired_free_percentage) {
    size_t desired_capacity = (size_t)(used() / ((double) 1 - desired_free_percentage));
    assert(desired_capacity >= capacity(), "invalid expansion size");
    expand_bytes = MAX2(desired_capacity - capacity(), MinHeapDeltaBytes);
  }
  if (expand_bytes > 0) {
    if (PrintGCDetails && Verbose) {
      size_t desired_capacity = (size_t)(used() / ((double) 1 - desired_free_percentage));
      gclog_or_tty->print_cr("\nFrom compute_new_size: ");
      gclog_or_tty->print_cr("  Free fraction %f", free_percentage);
      gclog_or_tty->print_cr("  Desired free fraction %f",
        desired_free_percentage);
      gclog_or_tty->print_cr("  Maximum free fraction %f",
        maximum_free_percentage);
      gclog_or_tty->print_cr("  Capacity "SIZE_FORMAT, capacity()/1000);
      gclog_or_tty->print_cr("  Desired capacity "SIZE_FORMAT,
        desired_capacity/1000);
      int prev_level = level() - 1;
      if (prev_level >= 0) {
        size_t prev_size = 0;
        GenCollectedHeap* gch = GenCollectedHeap::heap();
        Generation* prev_gen = gch->_gens[prev_level];
        prev_size = prev_gen->capacity();
        gclog_or_tty->print_cr("  Younger gen size "SIZE_FORMAT,
                               prev_size/1000);
      }
      gclog_or_tty->print_cr("  unsafe_max_alloc_nogc "SIZE_FORMAT,
        unsafe_max_alloc_nogc()/1000);
      gclog_or_tty->print_cr("  contiguous available "SIZE_FORMAT,
        contiguous_available()/1000);
      gclog_or_tty->print_cr("  Expand by "SIZE_FORMAT" (bytes)",
        expand_bytes);
    }
    // safe if expansion fails
    expand(expand_bytes, 0, CMSExpansionCause::_satisfy_free_ratio);
    if (PrintGCDetails && Verbose) {
      gclog_or_tty->print_cr("  Expanded free fraction %f",
        ((double) free()) / capacity());
    }
  }
}

Mutex* ConcurrentMarkSweepGeneration::freelistLock() const {
  return cmsSpace()->freelistLock();
}

HeapWord* ConcurrentMarkSweepGeneration::allocate(size_t size,
                                                  bool   tlab) {
  CMSSynchronousYieldRequest yr;
  MutexLockerEx x(freelistLock(),
                  Mutex::_no_safepoint_check_flag);
  return have_lock_and_allocate(size, tlab);
}

HeapWord* ConcurrentMarkSweepGeneration::have_lock_and_allocate(size_t size,
                                                  bool   tlab /* ignored */) {
  assert_lock_strong(freelistLock());
  size_t adjustedSize = CompactibleFreeListSpace::adjustObjectSize(size);
  HeapWord* res = cmsSpace()->allocate(adjustedSize);
  // Allocate the object live (grey) if the background collector has
  // started marking. This is necessary because the marker may
  // have passed this address and consequently this object will
  // not otherwise be greyed and would be incorrectly swept up.
  // Note that if this object contains references, the writing
  // of those references will dirty the card containing this object
  // allowing the object to be blackened (and its references scanned)
  // either during a preclean phase or at the final checkpoint.
  if (res != NULL) {
    // We may block here with an uninitialized object with
    // its mark-bit or P-bits not yet set. Such objects need
    // to be safely navigable by block_start().
    assert(oop(res)->klass_or_null() == NULL, "Object should be uninitialized here.");
    assert(!((FreeChunk*)res)->is_free(), "Error, block will look free but show wrong size");
    collector()->direct_allocated(res, adjustedSize);
    _direct_allocated_words += adjustedSize;
    // allocation counters
    NOT_PRODUCT(
      _numObjectsAllocated++;
      _numWordsAllocated += (int)adjustedSize;
    )
  }
  return res;
}

// In the case of direct allocation by mutators in a generation that
// is being concurrently collected, the object must be allocated
// live (grey) if the background collector has started marking.
// This is necessary because the marker may
// have passed this address and consequently this object will
// not otherwise be greyed and would be incorrectly swept up.
// Note that if this object contains references, the writing
// of those references will dirty the card containing this object
// allowing the object to be blackened (and its references scanned)
// either during a preclean phase or at the final checkpoint.
void CMSCollector::direct_allocated(HeapWord* start, size_t size) {
  assert(_markBitMap.covers(start, size), "Out of bounds");
  if (_collectorState >= Marking) {
    MutexLockerEx y(_markBitMap.lock(),
                    Mutex::_no_safepoint_check_flag);
    // [see comments preceding SweepClosure::do_blk() below for details]
    // 1. need to mark the object as live so it isn't collected
    // 2. need to mark the 2nd bit to indicate the object may be uninitialized
    // 3. need to mark the end of the object so marking, precleaning or sweeping
    //    can skip over uninitialized or unparsable objects. An allocated
    //    object is considered uninitialized for our purposes as long as
    //    its klass word is NULL. (Unparsable objects are those which are
    //    initialized in the sense just described, but whose sizes can still
    //    not be correctly determined. Note that the class of unparsable objects
    //    can only occur in the perm gen. All old gen objects are parsable
    //    as soon as they are initialized.)
    _markBitMap.mark(start);          // object is live
    _markBitMap.mark(start + 1);      // object is potentially uninitialized?
    _markBitMap.mark(start + size - 1);
                                      // mark end of object
  }
  // check that oop looks uninitialized
  assert(oop(start)->klass_or_null() == NULL, "_klass should be NULL");
}
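// For example (sketch): a direct allocation of "size" words at address A
// leaves the mark bit map with
//   bit(A)            == 1   (object is live)
//   bit(A + 1)        == 1   (possibly uninitialized: klass word still NULL)
//   bit(A + size - 1) == 1   (last word, so the block can be skipped over)
// until the mutator publishes the object's klass word.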

void CMSCollector::promoted(bool par, HeapWord* start,
                            bool is_obj_array, size_t obj_size) {
  assert(_markBitMap.covers(start), "Out of bounds");
  // See comment in direct_allocated() about when objects should
  // be allocated live.
  if (_collectorState >= Marking) {
    // we already hold the marking bit map lock, taken in
    // the prologue
    if (par) {
      _markBitMap.par_mark(start);
    } else {
      _markBitMap.mark(start);
    }
    // We don't need to mark the object as uninitialized (as
    // in direct_allocated above) because this is being done with the
    // world stopped and the object will be initialized by the
    // time the marking, precleaning or sweeping get to look at it.
    // But see the code for copying objects into the CMS generation,
    // where we need to ensure that concurrent readers of the
    // block offset table are able to safely navigate a block that
    // is in flux from being free to being allocated (and in
    // transition while being copied into) and subsequently
    // becoming a bona-fide object when the copy/promotion is complete.
    assert(SafepointSynchronize::is_at_safepoint(),
           "expect promotion only at safepoints");

    if (_collectorState < Sweeping) {
      // Mark the appropriate cards in the modUnionTable, so that
      // this object gets scanned before the sweep. If this is
      // not done, CMS generation references in the object might
      // not get marked.
      // For the case of arrays, which are otherwise precisely
      // marked, we need to dirty the entire array, not just its head.
      if (is_obj_array) {
        // The [par_]mark_range() method expects mr.end() below to
        // be aligned to the granularity of a bit's representation
        // in the heap. In the case of the MUT below, that's a
        // card size.
        MemRegion mr(start,
                     (HeapWord*)round_to((intptr_t)(start + obj_size),
                        CardTableModRefBS::card_size /* bytes */));
        if (par) {
          _modUnionTable.par_mark_range(mr);
        } else {
          _modUnionTable.mark_range(mr);
        }
      } else {  // not an obj array; we can just mark the head
        if (par) {
          _modUnionTable.par_mark(start);
        } else {
          _modUnionTable.mark(start);
        }
      }
    }
  }
}

static inline size_t percent_of_space(Space* space, HeapWord* addr)
{
  size_t delta = pointer_delta(addr, space->bottom());
  return (size_t)(delta * 100.0 / (space->capacity() / HeapWordSize));
}

void CMSCollector::icms_update_allocation_limits()
{
  Generation* gen0 = GenCollectedHeap::heap()->get_gen(0);
  EdenSpace* eden = gen0->as_DefNewGeneration()->eden();

  const unsigned int duty_cycle = stats().icms_update_duty_cycle();
  if (CMSTraceIncrementalPacing) {
    stats().print();
  }

  assert(duty_cycle <= 100, "invalid duty cycle");
  if (duty_cycle != 0) {
    // The duty_cycle is a percentage between 0 and 100; convert to words and
    // then compute the offset from the endpoints of the space.
    size_t free_words = eden->free() / HeapWordSize;
    double free_words_dbl = (double)free_words;
    size_t duty_cycle_words = (size_t)(free_words_dbl * duty_cycle / 100.0);
    size_t offset_words = (free_words - duty_cycle_words) / 2;

    _icms_start_limit = eden->top() + offset_words;
    _icms_stop_limit = eden->end() - offset_words;

    // The limits may be adjusted (shifted to the right) by
    // CMSIncrementalOffset, to allow the application more mutator time after a
    // young gen gc (when all mutators were stopped) and before CMS starts and
    // takes away one or more cpus.
    if (CMSIncrementalOffset != 0) {
      double adjustment_dbl = free_words_dbl * CMSIncrementalOffset / 100.0;
      size_t adjustment = (size_t)adjustment_dbl;
      HeapWord* tmp_stop = _icms_stop_limit + adjustment;
      if (tmp_stop > _icms_stop_limit && tmp_stop < eden->end()) {
        _icms_start_limit += adjustment;
        _icms_stop_limit = tmp_stop;
      }
    }
  }
  if (duty_cycle == 0 || (_icms_start_limit == _icms_stop_limit)) {
    _icms_start_limit = _icms_stop_limit = eden->end();
  }

  // Install the new start limit.
  eden->set_soft_end(_icms_start_limit);

  if (CMSTraceIncrementalMode) {
    gclog_or_tty->print(" icms alloc limits:  "
                           PTR_FORMAT "," PTR_FORMAT
                           " (" SIZE_FORMAT "%%," SIZE_FORMAT "%%) ",
                           _icms_start_limit, _icms_stop_limit,
                           percent_of_space(eden, _icms_start_limit),
                           percent_of_space(eden, _icms_stop_limit));
    if (Verbose) {
      gclog_or_tty->print("eden:  ");
      eden->print_on(gclog_or_tty);
    }
  }
}
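// Worked example (illustrative numbers only): with 100M words free in eden
// and duty_cycle = 20, duty_cycle_words = 20M and offset_words =
// (100M - 20M) / 2 = 40M, so _icms_start_limit = top() + 40M and
// _icms_stop_limit = end() - 40M; icms runs while mutator allocation
// proceeds through the middle 20M-word band of the free space (shifted
// to the right if CMSIncrementalOffset != 0).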

// Any changes here should try to maintain the invariant
// that if this method is called with _icms_start_limit
// and _icms_stop_limit both NULL, then it should return NULL
// and not notify the icms thread.
HeapWord*
CMSCollector::allocation_limit_reached(Space* space, HeapWord* top,
                                       size_t word_size)
{
  // A start_limit equal to end() means the duty cycle is 0, so treat that as a
  // nop.
  if (CMSIncrementalMode && _icms_start_limit != space->end()) {
    if (top <= _icms_start_limit) {
      if (CMSTraceIncrementalMode) {
        space->print_on(gclog_or_tty);
        gclog_or_tty->stamp();
        gclog_or_tty->print_cr(" start limit top=" PTR_FORMAT
                               ", new limit=" PTR_FORMAT
                               " (" SIZE_FORMAT "%%)",
                               top, _icms_stop_limit,
                               percent_of_space(space, _icms_stop_limit));
      }
      ConcurrentMarkSweepThread::start_icms();
      assert(top < _icms_stop_limit, "Tautology");
      if (word_size < pointer_delta(_icms_stop_limit, top)) {
        return _icms_stop_limit;
      }

      // The allocation will cross both the _start and _stop limits, so do the
      // stop notification also and return end().
      if (CMSTraceIncrementalMode) {
        space->print_on(gclog_or_tty);
        gclog_or_tty->stamp();
        gclog_or_tty->print_cr(" +stop limit top=" PTR_FORMAT
                               ", new limit=" PTR_FORMAT
                               " (" SIZE_FORMAT "%%)",
                               top, space->end(),
                               percent_of_space(space, space->end()));
      }
      ConcurrentMarkSweepThread::stop_icms();
      return space->end();
    }

    if (top <= _icms_stop_limit) {
      if (CMSTraceIncrementalMode) {
        space->print_on(gclog_or_tty);
        gclog_or_tty->stamp();
        gclog_or_tty->print_cr(" stop limit top=" PTR_FORMAT
                               ", new limit=" PTR_FORMAT
                               " (" SIZE_FORMAT "%%)",
                               top, space->end(),
                               percent_of_space(space, space->end()));
      }
      ConcurrentMarkSweepThread::stop_icms();
      return space->end();
    }

    if (CMSTraceIncrementalMode) {
      space->print_on(gclog_or_tty);
      gclog_or_tty->stamp();
      gclog_or_tty->print_cr(" end limit top=" PTR_FORMAT
                             ", new limit=" PTR_FORMAT,
                             top, NULL);
    }
  }

  return NULL;
}

oop ConcurrentMarkSweepGeneration::promote(oop obj, size_t obj_size) {
  assert(obj_size == (size_t)obj->size(), "bad obj_size passed in");
  // allocate, copy and if necessary update promoinfo --
  // delegate to underlying space.
  assert_lock_strong(freelistLock());

#ifndef PRODUCT
  if (Universe::heap()->promotion_should_fail()) {
    return NULL;
  }
#endif  // #ifndef PRODUCT

  oop res = _cmsSpace->promote(obj, obj_size);
  if (res == NULL) {
    // expand and retry
    size_t s = _cmsSpace->expansionSpaceRequired(obj_size);  // HeapWords
    expand(s*HeapWordSize, MinHeapDeltaBytes,
      CMSExpansionCause::_satisfy_promotion);
    // Since there's currently no next generation, we don't try to promote
    // into a more senior generation.
    assert(next_gen() == NULL, "assumption, based upon which no attempt "
                               "is made to pass on a possibly failing "
                               "promotion to next generation");
    res = _cmsSpace->promote(obj, obj_size);
  }
  if (res != NULL) {
    // See comment in allocate() about when objects should
    // be allocated live.
    assert(obj->is_oop(), "Will dereference klass pointer below");
    collector()->promoted(false,           // Not parallel
                          (HeapWord*)res, obj->is_objArray(), obj_size);
    // promotion counters
    NOT_PRODUCT(
      _numObjectsPromoted++;
      _numWordsPromoted +=
        (int)(CompactibleFreeListSpace::adjustObjectSize(obj->size()));
    )
  }
  return res;
}


HeapWord*
ConcurrentMarkSweepGeneration::allocation_limit_reached(Space* space,
                                             HeapWord* top,
                                             size_t word_sz)
{
  return collector()->allocation_limit_reached(space, top, word_sz);
}

1313 // IMPORTANT: Notes on object size recognition in CMS.
1314 // ---------------------------------------------------
1315 // A block of storage in the CMS generation is always in
1316 // one of three states. A free block (FREE), an allocated
1317 // object (OBJECT) whose size() method reports the correct size,
1318 // and an intermediate state (TRANSIENT) in which its size cannot
1319 // be accurately determined.
1320 // STATE IDENTIFICATION:   (32 bit and 64 bit w/o COOPS)
1321 // -----------------------------------------------------
1322 // FREE:      klass_word & 1 == 1; mark_word holds block size
1323 //
1324 // OBJECT:    klass_word installed; klass_word != 0 && klass_word & 1 == 0;
1325 //            obj->size() computes correct size
1326 //            [Perm Gen objects need to be "parsable" before they can be navigated]
1327 //
1328 // TRANSIENT: klass_word == 0; size is indeterminate until we become an OBJECT
1329 //
1330 // STATE IDENTIFICATION: (64 bit+COOPS)
1331 // ------------------------------------
1332 // FREE:      mark_word & CMS_FREE_BIT == 1; mark_word & ~CMS_FREE_BIT gives block_size
1333 //
1334 // OBJECT:    klass_word installed; klass_word != 0;
1335 //            obj->size() computes correct size
1336 //            [Perm Gen comment above continues to hold]
1337 //
1338 // TRANSIENT: klass_word == 0; size is indeterminate until we become an OBJECT
1339 //
1340 //
1341 // STATE TRANSITION DIAGRAM
1342 //
1343 //        mut / parnew                     mut  /  parnew
1344 // FREE --------------------> TRANSIENT ---------------------> OBJECT --|
1345 //  ^                                                                   |
1346 //  |------------------------ DEAD <------------------------------------|
1347 //         sweep                            mut
1348 //
1349 // While a block is in TRANSIENT state its size cannot be determined
1350 // so readers will either need to come back later or stall until
1351 // the size can be determined. Note that for the case of direct
1352 // allocation, P-bits, when available, may be used to determine the
1353 // size of an object that may not yet have been initialized.
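     //
     // Illustrative sketch (a hypothetical helper, not one the collector
     // uses): a reader following the rules above could classify a block as
     // below, relying on FreeChunk::is_free() to abstract over the two
     // encodings of the FREE state (klass-word low bit vs. mark-word
     // CMS_FREE_BIT):
     //
     //   enum BlockState { BLOCK_FREE, BLOCK_TRANSIENT, BLOCK_OBJECT };
     //   static BlockState classify_block(HeapWord* addr) {
     //     if (((FreeChunk*)addr)->is_free()) {
     //       return BLOCK_FREE;      // header word holds the block size
     //     }
     //     return oop(addr)->klass_or_null() == NULL
     //            ? BLOCK_TRANSIENT  // come back later or stall
     //            : BLOCK_OBJECT;    // obj->size() is now reliable
     //   }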
1354 
1355 // Things to support parallel young-gen collection.
1356 oop
1357 ConcurrentMarkSweepGeneration::par_promote(int thread_num,
1358                                            oop old, markOop m,
1359                                            size_t word_sz) {
1360 #ifndef PRODUCT
1361   if (Universe::heap()->promotion_should_fail()) {
1362     return NULL;
1363   }
1364 #endif  // #ifndef PRODUCT
1365 
1366   CMSParGCThreadState* ps = _par_gc_thread_states[thread_num];
1367   PromotionInfo* promoInfo = &ps->promo;
1368   // if we are tracking promotions, then first ensure space for
1369   // promotion (including spooling space for saving header if necessary).
1370   // then allocate and copy, then track promoted info if needed.
1371   // When tracking (see PromotionInfo::track()), the mark word may
1372   // be displaced and in this case restoration of the mark word
1373   // occurs in the (oop_since_save_marks_)iterate phase.
1374   if (promoInfo->tracking() && !promoInfo->ensure_spooling_space()) {
1375     // Out of space for allocating spooling buffers;
1376     // try expanding and allocating spooling buffers.
1377     if (!expand_and_ensure_spooling_space(promoInfo)) {
1378       return NULL;
1379     }
1380   }
1381   assert(promoInfo->has_spooling_space(), "Control point invariant");
1382   const size_t alloc_sz = CompactibleFreeListSpace::adjustObjectSize(word_sz);
1383   HeapWord* obj_ptr = ps->lab.alloc(alloc_sz);
1384   if (obj_ptr == NULL) {
1385      obj_ptr = expand_and_par_lab_allocate(ps, alloc_sz);
1386      if (obj_ptr == NULL) {
1387        return NULL;
1388      }
1389   }
1390   oop obj = oop(obj_ptr);
1391   OrderAccess::storestore();
1392   assert(obj->klass_or_null() == NULL, "Object should be uninitialized here.");
1393   assert(!((FreeChunk*)obj_ptr)->is_free(), "Error, block will look free but show wrong size");
1394   // IMPORTANT: See note on object initialization for CMS above.
1395   // Now copy the object.  Here we must be careful to insert the
1396   // klass pointer last, since this marks the block as an allocated object
1397   // (under compressed oops it is the mark word's CMS_FREE_BIT that does so).
1398   HeapWord* old_ptr = (HeapWord*)old;
1399   // Restore the mark word copied above.
1400   obj->set_mark(m);
1401   assert(obj->klass_or_null() == NULL, "Object should be uninitialized here.");
1402   assert(!((FreeChunk*)obj_ptr)->is_free(), "Error, block will look free but show wrong size");
1403   OrderAccess::storestore();
1404 
1405   if (UseCompressedOops) {
1406     // Copy gap missed by (aligned) header size calculation below
1407     obj->set_klass_gap(old->klass_gap());
1408   }
1409   if (word_sz > (size_t)oopDesc::header_size()) {
1410     Copy::aligned_disjoint_words(old_ptr + oopDesc::header_size(),
1411                                  obj_ptr + oopDesc::header_size(),
1412                                  word_sz - oopDesc::header_size());
1413   }
1414 
1415   // Now we can track the promoted object, if necessary.  We take care
1416   // to delay the transition from uninitialized to full object
1417   // (i.e., insertion of klass pointer) until after, so that it
1418   // atomically becomes a promoted object.
1419   if (promoInfo->tracking()) {
1420     promoInfo->track((PromotedObject*)obj, old->klass());
1421   }
1422   assert(obj->klass_or_null() == NULL, "Object should be uninitialized here.");
1423   assert(!((FreeChunk*)obj_ptr)->is_free(), "Error, block will look free but show wrong size");
1424   assert(old->is_oop(), "Will use and dereference old klass ptr below");
1425 
1426   // Finally, install the klass pointer (this should be volatile).
1427   OrderAccess::storestore();
1428   obj->set_klass(old->klass());
1429   // We should now be able to calculate the right size for this object
1430   assert(obj->is_oop() && obj->size() == (int)word_sz, "Error, incorrect size computed for promoted object");
1431 
1432   collector()->promoted(true,          // parallel
1433                         obj_ptr, old->is_objArray(), word_sz);
1434 
1435   NOT_PRODUCT(
1436     Atomic::inc_ptr(&_numObjectsPromoted);
1437     Atomic::add_ptr(alloc_sz, &_numWordsPromoted);
1438   )
1439 
1440   return obj;
1441 }
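
     // In outline, the publication protocol used by par_promote() above is
     // (a sketch of the ordering only; the fences and asserts in the method
     // itself are authoritative):
     //
     //   obj->set_mark(m);               // restore the header first
     //   ...copy the object body...      // contents of the promoted object
     //   OrderAccess::storestore();      // body must be visible before klass
     //   obj->set_klass(old->klass());   // publish: block becomes an OBJECT
     //
     // Until that final store, concurrent readers see klass_word == 0 and
     // treat the block as TRANSIENT, per the size recognition notes above.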
1442 
1443 void
1444 ConcurrentMarkSweepGeneration::
1445 par_promote_alloc_undo(int thread_num,
1446                        HeapWord* obj, size_t word_sz) {
1447   // CMS does not support promotion undo.
1448   ShouldNotReachHere();
1449 }
1450 
1451 void
1452 ConcurrentMarkSweepGeneration::
1453 par_promote_alloc_done(int thread_num) {
1454   CMSParGCThreadState* ps = _par_gc_thread_states[thread_num];
1455   ps->lab.retire(thread_num);
1456 }
1457 
1458 void
1459 ConcurrentMarkSweepGeneration::
1460 par_oop_since_save_marks_iterate_done(int thread_num) {
1461   CMSParGCThreadState* ps = _par_gc_thread_states[thread_num];
1462   ParScanWithoutBarrierClosure* dummy_cl = NULL;
1463   ps->promo.promoted_oops_iterate_nv(dummy_cl);
1464 }
1465 
1466 // XXXPERM
1467 bool ConcurrentMarkSweepGeneration::should_collect(bool   full,
1468                                                    size_t size,
1469                                                    bool   tlab)
1470 {
1471   // We allow a STW collection only if a full
1472   // collection was requested.
1473   return full || should_allocate(size, tlab); // FIX ME !!!
1474   // This and promotion failure handling are connected at the
1475   // hip and should be fixed by untying them.
1476 }
1477 
1478 bool CMSCollector::shouldConcurrentCollect() {
1479   if (_full_gc_requested) {
1480     if (Verbose && PrintGCDetails) {
1481       gclog_or_tty->print_cr("CMSCollector: collect because of explicit "
1482                              "gc request (or gc_locker)");
1483     }
1484     return true;
1485   }
1486 
1487   // For debugging purposes, change the type of collection.
1488   // If the rotation is not on the concurrent collection
1489   // type, don't start a concurrent collection.
1490   NOT_PRODUCT(
1491     if (RotateCMSCollectionTypes &&
1492         (_cmsGen->debug_collection_type() !=
1493           ConcurrentMarkSweepGeneration::Concurrent_collection_type)) {
1494       assert(_cmsGen->debug_collection_type() !=
1495         ConcurrentMarkSweepGeneration::Unknown_collection_type,
1496         "Bad cms collection type");
1497       return false;
1498     }
1499   )
1500 
1501   FreelistLocker x(this);
1502   // ------------------------------------------------------------------
1503   // Print out lots of information which affects the initiation of
1504   // a collection.
1505   if (PrintCMSInitiationStatistics && stats().valid()) {
1506     gclog_or_tty->print("CMSCollector shouldConcurrentCollect: ");
1507     gclog_or_tty->stamp();
1508     gclog_or_tty->print_cr("");
1509     stats().print_on(gclog_or_tty);
1510     gclog_or_tty->print_cr("time_until_cms_gen_full %3.7f",
1511       stats().time_until_cms_gen_full());
1512     gclog_or_tty->print_cr("free="SIZE_FORMAT, _cmsGen->free());
1513     gclog_or_tty->print_cr("contiguous_available="SIZE_FORMAT,
1514                            _cmsGen->contiguous_available());
1515     gclog_or_tty->print_cr("promotion_rate=%g", stats().promotion_rate());
1516     gclog_or_tty->print_cr("cms_allocation_rate=%g", stats().cms_allocation_rate());
1517     gclog_or_tty->print_cr("occupancy=%3.7f", _cmsGen->occupancy());
1518     gclog_or_tty->print_cr("initiatingOccupancy=%3.7f", _cmsGen->initiating_occupancy());
1519     gclog_or_tty->print_cr("initiatingPermOccupancy=%3.7f", _permGen->initiating_occupancy());
1520   }
1521   // ------------------------------------------------------------------
1522 
1523   // If the estimated time to complete a cms collection (cms_duration())
1524   // is less than the estimated time remaining until the cms generation
1525   // is full, start a collection.
1526   if (!UseCMSInitiatingOccupancyOnly) {
1527     if (stats().valid()) {
1528       if (stats().time_until_cms_start() == 0.0) {
1529         return true;
1530       }
1531     } else {
1532       // We want to conservatively collect somewhat early in order
1533       // to try and "bootstrap" our CMS/promotion statistics;
1534       // this branch will not fire after the first successful CMS
1535       // collection because the stats should then be valid.
1536       if (_cmsGen->occupancy() >= _bootstrap_occupancy) {
1537         if (Verbose && PrintGCDetails) {
1538           gclog_or_tty->print_cr(
1539             " CMSCollector: collect for bootstrapping statistics:"
1540             " occupancy = %f, boot occupancy = %f", _cmsGen->occupancy(),
1541             _bootstrap_occupancy);
1542         }
1543         return true;
1544       }
1545     }
1546   }
1547 
1548   // Otherwise, we start a collection cycle if either the perm gen or
1549   // old gen want a collection cycle started. Each may use
1550   // an appropriate criterion for making this decision.
1551   // XXX We need to make sure that the gen expansion
1552   // criterion dovetails well with this. XXX NEED TO FIX THIS
1553   if (_cmsGen->should_concurrent_collect()) {
1554     if (Verbose && PrintGCDetails) {
1555       gclog_or_tty->print_cr("CMS old gen initiated");
1556     }
1557     return true;
1558   }
1559 
1560   // We start a collection if we believe an incremental collection may fail;
1561   // this is not likely to be productive in practice because it's probably too
1562   // late anyway.
1563   GenCollectedHeap* gch = GenCollectedHeap::heap();
1564   assert(gch->collector_policy()->is_two_generation_policy(),
1565          "You may want to check the correctness of the following");
1566   if (gch->incremental_collection_will_fail(true /* consult_young */)) {
1567     if (Verbose && PrintGCDetails) {
1568       gclog_or_tty->print("CMSCollector: collect because incremental collection will fail ");
1569     }
1570     return true;
1571   }
1572 
1573   if (CMSClassUnloadingEnabled && _permGen->should_concurrent_collect()) {
1574     bool res = update_should_unload_classes();
1575     if (res) {
1576       if (Verbose && PrintGCDetails) {
1577         gclog_or_tty->print_cr("CMS perm gen initiated");
1578       }
1579       return true;
1580     }
1581   }
1582   return false;
1583 }
1584 
1585 // Clear _expansion_cause fields of constituent generations
1586 void CMSCollector::clear_expansion_cause() {
1587   _cmsGen->clear_expansion_cause();
1588   _permGen->clear_expansion_cause();
1589 }
1590 
1591 // We should be conservative in starting a collection cycle.  Starting
1592 // too eagerly runs the risk of collecting far too often in the extreme.
1593 // Collecting too rarely falls back on full collections, which works,
1594 // even if it is not optimal in terms of concurrent work.
1595 // As a workaround for collecting too eagerly, use the flag
1596 // UseCMSInitiatingOccupancyOnly.  This also has the advantage of
1597 // giving the user an easily understandable way of controlling the
1598 // collections.
1599 // We want to start a new collection cycle if any of the following
1600 // conditions hold:
1601 // . our current occupancy exceeds the configured initiating occupancy
1602 //   for this generation, or
1603 // . we recently needed to expand this space and have not, since that
1604 //   expansion, done a collection of this generation, or
1605 // . the underlying space believes that it may be a good idea to initiate
1606 //   a concurrent collection (this may be based on criteria such as the
1607 //   following: the space uses linear allocation and linear allocation is
1608 //   going to fail, or there is believed to be excessive fragmentation in
1609 //   the generation, etc... or ...
1610 // [.(currently done by CMSCollector::shouldConcurrentCollect() only for
1611 //   the case of the old generation, not the perm generation; see CR 6543076):
1612 //   we may be approaching a point at which allocation requests may fail because
1613 //   we will be out of sufficient free space given allocation rate estimates.]
1614 bool ConcurrentMarkSweepGeneration::should_concurrent_collect() const {
1615 
1616   assert_lock_strong(freelistLock());
1617   if (occupancy() > initiating_occupancy()) {
1618     if (PrintGCDetails && Verbose) {
1619       gclog_or_tty->print(" %s: collect because of occupancy %f / %f  ",
1620         short_name(), occupancy(), initiating_occupancy());
1621     }
1622     return true;
1623   }
1624   if (UseCMSInitiatingOccupancyOnly) {
1625     return false;
1626   }
1627   if (expansion_cause() == CMSExpansionCause::_satisfy_allocation) {
1628     if (PrintGCDetails && Verbose) {
1629       gclog_or_tty->print(" %s: collect because expanded for allocation ",
1630         short_name());
1631     }
1632     return true;
1633   }
1634   if (_cmsSpace->should_concurrent_collect()) {
1635     if (PrintGCDetails && Verbose) {
1636       gclog_or_tty->print(" %s: collect because cmsSpace says so ",
1637         short_name());
1638     }
1639     return true;
1640   }
1641   return false;
1642 }
1643 
1644 void ConcurrentMarkSweepGeneration::collect(bool   full,
1645                                             bool   clear_all_soft_refs,
1646                                             size_t size,
1647                                             bool   tlab)
1648 {
1649   collector()->collect(full, clear_all_soft_refs, size, tlab);
1650 }
1651 
1652 void CMSCollector::collect(bool   full,
1653                            bool   clear_all_soft_refs,
1654                            size_t size,
1655                            bool   tlab)
1656 {
1657   if (!UseCMSCollectionPassing && _collectorState > Idling) {
1658     // For debugging purposes skip the collection if the state
1659     // is not currently idle
1660     if (TraceCMSState) {
1661       gclog_or_tty->print_cr("Thread " INTPTR_FORMAT " skipped full:%d CMS state %d",
1662         Thread::current(), full, _collectorState);
1663     }
1664     return;
1665   }
1666 
1667   // The following "if" branch is present for defensive reasons.
1668   // In the current uses of this interface, it can be replaced with:
1669   // assert(!GC_locker::is_active(), "Can't be called otherwise");
1670   // But I am not placing that assert here to allow future
1671   // generality in invoking this interface.
1672   if (GC_locker::is_active()) {
1673     // A consistency test for GC_locker
1674     assert(GC_locker::needs_gc(), "Should have been set already");
1675     // Skip this foreground collection, instead
1676     // expanding the heap if necessary.
1677     // Need the free list locks for the call to free() in compute_new_size()
1678     compute_new_size();
1679     return;
1680   }
1681   acquire_control_and_collect(full, clear_all_soft_refs);
1682   _full_gcs_since_conc_gc++;
1683 
1684 }
1685 
1686 void CMSCollector::request_full_gc(unsigned int full_gc_count) {
1687   GenCollectedHeap* gch = GenCollectedHeap::heap();
1688   unsigned int gc_count = gch->total_full_collections();
1689   if (gc_count == full_gc_count) {
1690     MutexLockerEx y(CGC_lock, Mutex::_no_safepoint_check_flag);
1691     _full_gc_requested = true;
1692     CGC_lock->notify();   // nudge CMS thread
1693   } else {
1694     assert(gc_count > full_gc_count, "Error: causal loop");
1695   }
1696 }
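
     // Illustrative requester-side sketch (hypothetical and simplified; a
     // real requester would wait on a monitor rather than spin): sample the
     // full-collection count, request, then wait for the count to advance,
     // which proves that a full gc completed at or after the request.
     //
     //   unsigned int count = gch->total_full_collections();
     //   collector->request_full_gc(count);
     //   while (gch->total_full_collections() == count) { /* wait */ }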
1697 
1698 
1699 // The foreground and background collectors need to coordinate in order
1700 // to make sure that they do not mutually interfere with CMS collections.
1701 // When a background collection is active,
1702 // the foreground collector may need to take over (preempt) and
1703 // synchronously complete an ongoing collection. Depending on the
1704 // frequency of the background collections and the heap usage
1705 // of the application, this preemption can be rare or frequent.
1706 // There are only certain
1707 // points in the background collection that the "collection-baton"
1708 // can be passed to the foreground collector.
1709 //
1710 // The foreground collector will wait for the baton before
1711 // starting any part of the collection.  The foreground collector
1712 // will only wait at one location.
1713 //
1714 // The background collector will yield the baton before starting a new
1715 // phase of the collection (e.g., before initial marking, marking from roots,
1716 // precleaning, final re-mark, sweep etc.)  This is normally done at the head
1717 // of the loop which switches the phases. The background collector does some
1718 // of the phases (initial mark, final re-mark) with the world stopped.
1719 // Because of locking involved in stopping the world,
1720 // the foreground collector should not block waiting for the background
1721 // collector when it is doing a stop-the-world phase.  The background
1722 // collector will yield the baton at an additional point just before
1723 // it enters a stop-the-world phase.  Once the world is stopped, the
1724 // background collector checks the phase of the collection.  If the
1725 // phase has not changed, it proceeds with the collection.  If the
1726 // phase has changed, it skips that phase of the collection.  See
1727 // the comments on the use of the Heap_lock in collect_in_background().
1728 //
1729 // Variables used in baton passing.
1730 //   _foregroundGCIsActive - Set to true by the foreground collector when
1731 //      it wants the baton.  The foreground clears it when it has finished
1732 //      the collection.
1733 //   _foregroundGCShouldWait - Set to true by the background collector
1734 //      when it is running.  The foreground collector waits while
1735 //      _foregroundGCShouldWait is true.
1736 //  CGC_lock - monitor used to protect access to the above variables
1737 //      and to notify the foreground and background collectors.
1738 //  _collectorState - current state of the CMS collection.
1739 //
1740 // The foreground collector
1741 //   acquires the CGC_lock
1742 //   sets _foregroundGCIsActive
1743 //   waits on the CGC_lock for _foregroundGCShouldWait to be false
1744 //     various locks acquired in preparation for the collection
1745 //     are released so as not to block the background collector
1746 //     that is in the midst of a collection
1747 //   proceeds with the collection
1748 //   clears _foregroundGCIsActive
1749 //   returns
1750 //
1751 // The background collector in a loop iterating on the phases of the
1752 //      collection
1753 //   acquires the CGC_lock
1754 //   sets _foregroundGCShouldWait
1755 //   if _foregroundGCIsActive is set
1756 //     clears _foregroundGCShouldWait, notifies CGC_lock
1757 //     waits on CGC_lock for _foregroundGCIsActive to become false
1758 //     and exits the loop.
1759 //   otherwise
1760 //     proceed with that phase of the collection
1761 //     if the phase is a stop-the-world phase,
1762 //       yield the baton once more just before enqueueing
1763 //       the stop-world CMS operation (executed by the VM thread).
1764 //   returns after all phases of the collection are done
1765 //
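     //
     // In miniature, the handshake over CGC_lock is a standard monitor
     // hand-off (a sketch, not the exact code; both sides take CGC_lock
     // via MutexLockerEx with _no_safepoint_check_flag):
     //
     //   // foreground (VM thread)           // background (CMS thread)
     //   _foregroundGCIsActive = true;       _foregroundGCShouldWait = false;
     //   while (_foregroundGCShouldWait)     if (_foregroundGCIsActive)
     //     CGC_lock->wait(...);                CGC_lock->notify();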
1766 
1767 void CMSCollector::acquire_control_and_collect(bool full,
1768         bool clear_all_soft_refs) {
1769   assert(SafepointSynchronize::is_at_safepoint(), "should be at safepoint");
1770   assert(!Thread::current()->is_ConcurrentGC_thread(),
1771          "shouldn't try to acquire control from self!");
1772 
1773   // Start the protocol for acquiring control of the
1774   // collection from the background collector (aka CMS thread).
1775   assert(ConcurrentMarkSweepThread::vm_thread_has_cms_token(),
1776          "VM thread should have CMS token");
1777   // Remember the possibly interrupted state of an ongoing
1778   // concurrent collection
1779   CollectorState first_state = _collectorState;
1780 
1781   // Signal to a possibly ongoing concurrent collection that
1782   // we want to do a foreground collection.
1783   _foregroundGCIsActive = true;
1784 
1785   // Disable incremental mode during a foreground collection.
1786   ICMSDisabler icms_disabler;
1787 
1788   // Release locks and wait for a notify from the background collector.
1789   // Releasing the locks is only necessary for phases which
1790   // do yields to improve the granularity of the collection.
1791   assert_lock_strong(bitMapLock());
1792   // We need to lock the Free list lock for the space that we are
1793   // currently collecting.
1794   assert(haveFreelistLocks(), "Must be holding free list locks");
1795   bitMapLock()->unlock();
1796   releaseFreelistLocks();
1797   {
1798     MutexLockerEx x(CGC_lock, Mutex::_no_safepoint_check_flag);
1799     if (_foregroundGCShouldWait) {
1800       // We are going to be waiting for action for the CMS thread;
1801       // it had better not be gone (for instance at shutdown)!
1802       assert(ConcurrentMarkSweepThread::cmst() != NULL,
1803              "CMS thread must be running");
1804       // Wait here until the background collector gives us the go-ahead
1805       ConcurrentMarkSweepThread::clear_CMS_flag(
1806         ConcurrentMarkSweepThread::CMS_vm_has_token);  // release token
1807       // Get a possibly blocked CMS thread going:
1808       //   Note that we set _foregroundGCIsActive true above,
1809       //   without protection of the CGC_lock.
1810       CGC_lock->notify();
1811       assert(!ConcurrentMarkSweepThread::vm_thread_wants_cms_token(),
1812              "Possible deadlock");
1813       while (_foregroundGCShouldWait) {
1814         // wait for notification
1815         CGC_lock->wait(Mutex::_no_safepoint_check_flag);
1816         // Possibility of delay/starvation here, since CMS token does
1817         // not know to give priority to VM thread? Actually, I think
1818         // there wouldn't be any delay/starvation, but the proof of
1819         // that "fact" (?) appears non-trivial. XXX 20011219YSR
1820       }
1821       ConcurrentMarkSweepThread::set_CMS_flag(
1822         ConcurrentMarkSweepThread::CMS_vm_has_token);
1823     }
1824   }
1825   // The CMS_token is already held.  Get back the other locks.
1826   assert(ConcurrentMarkSweepThread::vm_thread_has_cms_token(),
1827          "VM thread should have CMS token");
1828   getFreelistLocks();
1829   bitMapLock()->lock_without_safepoint_check();
1830   if (TraceCMSState) {
1831     gclog_or_tty->print_cr("CMS foreground collector has asked for control "
1832       INTPTR_FORMAT " with first state %d", Thread::current(), first_state);
1833     gclog_or_tty->print_cr("    gets control with state %d", _collectorState);
1834   }
1835 
1836   // Check if we need to do a compaction, or if not, whether
1837   // we need to start the mark-sweep from scratch.
1838   bool should_compact    = false;
1839   bool should_start_over = false;
1840   decide_foreground_collection_type(clear_all_soft_refs,
1841     &should_compact, &should_start_over);
1842 
1843   NOT_PRODUCT(
1844     if (RotateCMSCollectionTypes) {
1845       if (_cmsGen->debug_collection_type() ==
1846           ConcurrentMarkSweepGeneration::MSC_foreground_collection_type) {
1847         should_compact = true;
1848       } else if (_cmsGen->debug_collection_type() ==
1849                  ConcurrentMarkSweepGeneration::MS_foreground_collection_type) {
1850         should_compact = false;
1851       }
1852     }
1853   )
1854 
1855   if (PrintGCDetails && first_state > Idling) {
1856     GCCause::Cause cause = GenCollectedHeap::heap()->gc_cause();
1857     if (GCCause::is_user_requested_gc(cause) ||
1858         GCCause::is_serviceability_requested_gc(cause)) {
1859       gclog_or_tty->print(" (concurrent mode interrupted)");
1860     } else {
1861       gclog_or_tty->print(" (concurrent mode failure)");
1862     }
1863   }
1864 
1865   if (should_compact) {
1866     // If the collection is being acquired from the background
1867     // collector, there may be references on the discovered
1868     // references lists that have NULL referents (being those
1869     // that were concurrently cleared by a mutator) or
1870     // that are no longer active (having been enqueued concurrently
1871     // by the mutator).
1872     // Scrub the list of those references because Mark-Sweep-Compact
1873     // code assumes referents are not NULL and that all discovered
1874     // Reference objects are active.
1875     ref_processor()->clean_up_discovered_references();
1876 
1877     do_compaction_work(clear_all_soft_refs);
1878 
1879     // Has the GC time limit been exceeded?
1880     DefNewGeneration* young_gen = _young_gen->as_DefNewGeneration();
1881     size_t max_eden_size = young_gen->max_capacity() -
1882                            young_gen->to()->capacity() -
1883                            young_gen->from()->capacity();
1884     GenCollectedHeap* gch = GenCollectedHeap::heap();
1885     GCCause::Cause gc_cause = gch->gc_cause();
1886     size_policy()->check_gc_overhead_limit(_young_gen->used(),
1887                                            young_gen->eden()->used(),
1888                                            _cmsGen->max_capacity(),
1889                                            max_eden_size,
1890                                            full,
1891                                            gc_cause,
1892                                            gch->collector_policy());
1893   } else {
1894     do_mark_sweep_work(clear_all_soft_refs, first_state,
1895       should_start_over);
1896   }
1897   // Reset the expansion cause, now that we just completed
1898   // a collection cycle.
1899   clear_expansion_cause();
1900   _foregroundGCIsActive = false;
1901   return;
1902 }
1903 
1904 // Resize the perm generation and the tenured generation
1905 // after obtaining the free list locks for the
1906 // two generations.
1907 void CMSCollector::compute_new_size() {
1908   assert_locked_or_safepoint(Heap_lock);
1909   FreelistLocker z(this);
1910   _permGen->compute_new_size();
1911   _cmsGen->compute_new_size();
1912 }
1913 
1914 // A work method used by foreground collection to determine
1915 // what type of collection (compacting or not, continuing or fresh)
1916 // it should do.
1917 // NOTE: the intent is to make UseCMSCompactAtFullCollection
1918 // and CMSCompactWhenClearAllSoftRefs the default in the future
1919 // and do away with the flags after a suitable period.
1920 void CMSCollector::decide_foreground_collection_type(
1921   bool clear_all_soft_refs, bool* should_compact,
1922   bool* should_start_over) {
1923   // Normally, we'll compact only if the UseCMSCompactAtFullCollection
1924   // flag is set, and we have either requested a System.gc() or
1925   // the number of full gc's since the last concurrent cycle
1926   // has exceeded the threshold set by CMSFullGCsBeforeCompaction,
1927   // or if an incremental collection has failed
1928   GenCollectedHeap* gch = GenCollectedHeap::heap();
1929   assert(gch->collector_policy()->is_two_generation_policy(),
1930          "You may want to check the correctness of the following");
1931   // Inform cms gen if this was due to partial collection failing.
1932   // The CMS gen may use this fact to determine its expansion policy.
1933   if (gch->incremental_collection_will_fail(false /* don't consult_young */)) {
1934     assert(!_cmsGen->incremental_collection_failed(),
1935            "Should have been noticed, reacted to and cleared");
1936     _cmsGen->set_incremental_collection_failed();
1937   }
1938   *should_compact =
1939     UseCMSCompactAtFullCollection &&
1940     ((_full_gcs_since_conc_gc >= CMSFullGCsBeforeCompaction) ||
1941      GCCause::is_user_requested_gc(gch->gc_cause()) ||
1942      gch->incremental_collection_will_fail(true /* consult_young */));
1943   *should_start_over = false;
1944   if (clear_all_soft_refs && !*should_compact) {
1945     // We are about to do a last ditch collection attempt
1946     // so it would normally make sense to do a compaction
1947     // to reclaim as much space as possible.
1948     if (CMSCompactWhenClearAllSoftRefs) {
1949       // Default: The rationale is that in this case either
1950       // we are past the final marking phase, in which case
1951       // we'd have to start over, or so little has been done
1952       // that there's little point in saving that work. Compaction
1953       // appears to be the sensible choice in either case.
1954       *should_compact = true;
1955     } else {
1956       // We have been asked to clear all soft refs, but not to
1957       // compact. Make sure that we aren't past the final checkpoint
1958       // phase, for that is where we process soft refs. If we are already
1959       // past that phase, we'll need to redo the refs discovery phase and
1960       // if necessary clear soft refs that weren't previously
1961       // cleared. We do so by remembering the phase in which
1962       // we came in, and if we are past the refs processing
1963       // phase, we'll choose to just redo the mark-sweep
1964       // collection from scratch.
1965       if (_collectorState > FinalMarking) {
1966         // We are past the refs processing phase;
1967         // start over and do a fresh synchronous CMS cycle
1968         _collectorState = Resetting; // skip to reset to start new cycle
1969         reset(false /* == !asynch */);
1970         *should_start_over = true;
1971       } // else we can continue a possibly ongoing current cycle
1972     }
1973   }
1974 }
1975 
1976 // A work method used by the foreground collector to do
1977 // a mark-sweep-compact.
1978 void CMSCollector::do_compaction_work(bool clear_all_soft_refs) {
1979   GenCollectedHeap* gch = GenCollectedHeap::heap();
1980 
1981   STWGCTimer* gc_timer = GenMarkSweep::gc_timer();
1982   gc_timer->register_gc_start(os::elapsed_counter());
1983 
1984   SerialOldTracer* gc_tracer = GenMarkSweep::gc_tracer();
1985   gc_tracer->report_gc_start(gch->gc_cause(), gc_timer->gc_start());
1986 
1987   GCTraceTime t("CMS:MSC ", PrintGCDetails && Verbose, true, NULL);
1988   if (PrintGC && Verbose && !(GCCause::is_user_requested_gc(gch->gc_cause()))) {
1989     gclog_or_tty->print_cr("Compact ConcurrentMarkSweepGeneration after %d "
1990       "collections passed to foreground collector", _full_gcs_since_conc_gc);
1991   }
1992 
1993   // Sample collection interval time and reset for collection pause.
1994   if (UseAdaptiveSizePolicy) {
1995     size_policy()->msc_collection_begin();
1996   }
1997 
1998   // Temporarily widen the span of the weak reference processing to
1999   // the entire heap.
2000   MemRegion new_span(GenCollectedHeap::heap()->reserved_region());
2001   ReferenceProcessorSpanMutator rp_mut_span(ref_processor(), new_span);
2002   // Temporarily, clear the "is_alive_non_header" field of the
2003   // reference processor.
2004   ReferenceProcessorIsAliveMutator rp_mut_closure(ref_processor(), NULL);
2005   // Temporarily make reference _processing_ single threaded (non-MT).
2006   ReferenceProcessorMTProcMutator rp_mut_mt_processing(ref_processor(), false);
2007   // Temporarily make refs discovery atomic
2008   ReferenceProcessorAtomicMutator rp_mut_atomic(ref_processor(), true);
2009   // Temporarily make reference _discovery_ single threaded (non-MT)
2010   ReferenceProcessorMTDiscoveryMutator rp_mut_discovery(ref_processor(), false);
2011 
2012   ref_processor()->set_enqueuing_is_done(false);
2013   ref_processor()->enable_discovery(false /*verify_disabled*/, false /*check_no_refs*/);
2014   ref_processor()->setup_policy(clear_all_soft_refs);
2015   // If an asynchronous collection finishes, the _modUnionTable is
2016   // all clear.  If we are taking over the collection from an asynchronous
2017   // collection, clear the _modUnionTable.
2018   assert(_collectorState != Idling || _modUnionTable.isAllClear(),
2019     "_modUnionTable should be clear if the baton was not passed");
2020   _modUnionTable.clear_all();
2021 
2022   // We must adjust the allocation statistics being maintained
2023   // in the free list space. We do so by reading and clearing
2024   // the sweep timer and updating the block flux rate estimates below.
2025   assert(!_intra_sweep_timer.is_active(), "_intra_sweep_timer should be inactive");
2026   if (_inter_sweep_timer.is_active()) {
2027     _inter_sweep_timer.stop();
2028     // Note that we do not use this sample to update the _inter_sweep_estimate.
2029     _cmsGen->cmsSpace()->beginSweepFLCensus((float)(_inter_sweep_timer.seconds()),
2030                                             _inter_sweep_estimate.padded_average(),
2031                                             _intra_sweep_estimate.padded_average());
2032   }
2033 
2034   GenMarkSweep::invoke_at_safepoint(_cmsGen->level(),
2035     ref_processor(), clear_all_soft_refs);
2036   #ifdef ASSERT
2037     CompactibleFreeListSpace* cms_space = _cmsGen->cmsSpace();
2038     size_t free_size = cms_space->free();
2039     assert(free_size ==
2040            pointer_delta(cms_space->end(), cms_space->compaction_top())
2041            * HeapWordSize,
2042       "All the free space should be compacted into one chunk at top");
2043     assert(cms_space->dictionary()->total_chunk_size(
2044                                       debug_only(cms_space->freelistLock())) == 0 ||
2045            cms_space->totalSizeInIndexedFreeLists() == 0,
2046       "All the free space should be in a single chunk");
2047     size_t num = cms_space->totalCount();
2048     assert((free_size == 0 && num == 0) ||
2049            (free_size > 0  && (num == 1 || num == 2)),
2050          "There should be at most 2 free chunks after compaction");
2051   #endif // ASSERT
2052   _collectorState = Resetting;
2053   assert(_restart_addr == NULL,
2054          "Should have been NULL'd before baton was passed");
2055   reset(false /* == !asynch */);
2056   _cmsGen->reset_after_compaction();
2057   _concurrent_cycles_since_last_unload = 0;
2058 
2059   if (verifying() && !should_unload_classes()) {
2060     perm_gen_verify_bit_map()->clear_all();
2061   }
2062 
2063   // Clear any data recorded in the PLAB chunk arrays.
2064   if (_survivor_plab_array != NULL) {
2065     reset_survivor_plab_arrays();
2066   }
2067 
2068   // Adjust the per-size allocation stats for the next epoch.
2069   _cmsGen->cmsSpace()->endSweepFLCensus(sweep_count() /* fake */);
2070   // Restart the "inter sweep timer" for the next epoch.
2071   _inter_sweep_timer.reset();
2072   _inter_sweep_timer.start();
2073 
2074   // Sample collection pause time and reset for collection interval.
2075   if (UseAdaptiveSizePolicy) {
2076     size_policy()->msc_collection_end(gch->gc_cause());
2077   }
2078 
2079   gc_timer->register_gc_end(os::elapsed_counter());
2080 
2081   gc_tracer->report_gc_end(gc_timer->gc_end(), gc_timer->time_partitions());
2082 
2083   // For a mark-sweep-compact, compute_new_size() will be called
2084   // in the heap's do_collection() method.
2085 }
2086 
2087 // A work method used by the foreground collector to do
2088 // a mark-sweep, after taking over from a possibly on-going
2089 // concurrent mark-sweep collection.
2090 void CMSCollector::do_mark_sweep_work(bool clear_all_soft_refs,
2091   CollectorState first_state, bool should_start_over) {
2092   if (PrintGC && Verbose) {
2093     gclog_or_tty->print_cr("Pass concurrent collection to foreground "
2094       "collector with count %d",
2095       _full_gcs_since_conc_gc);
2096   }
2097   switch (_collectorState) {
2098     case Idling:
2099       if (first_state == Idling || should_start_over) {
2100         // The background GC was not active, or should be
2101         // restarted from scratch; start the cycle.
2102         _collectorState = InitialMarking;
2103       }
2104       // If first_state was not Idling, then a background GC
2105       // was in progress and has now finished.  No need to do it
2106       // again.  Leave the state as Idling.
2107       break;
2108     case Precleaning:
2109       // In the foreground case don't do the precleaning since
2110       // it is not done concurrently and there is extra work
2111       // required.
2112       _collectorState = FinalMarking;
2113   }
2114   if (PrintGCDetails &&
2115       (_collectorState > Idling ||
2116        !GCCause::is_user_requested_gc(GenCollectedHeap::heap()->gc_cause()))) {
2117     gclog_or_tty->print(" (concurrent mode failure)");
2118   }
2119   collect_in_foreground(clear_all_soft_refs);
2120 
2121   // For a mark-sweep, compute_new_size() will be called
2122   // in the heap's do_collection() method.
2123 }
2124 
2125 
2126 void CMSCollector::getFreelistLocks() const {
2127   // Get locks for all free lists in all generations that this
2128   // collector is responsible for
2129   _cmsGen->freelistLock()->lock_without_safepoint_check();
2130   _permGen->freelistLock()->lock_without_safepoint_check();
2131 }
2132 
2133 void CMSCollector::releaseFreelistLocks() const {
2134   // Release locks for all free lists in all generations that this
2135   // collector is responsible for
2136   _cmsGen->freelistLock()->unlock();
2137   _permGen->freelistLock()->unlock();
2138 }
2139 
2140 bool CMSCollector::haveFreelistLocks() const {
2141   // Check locks for all free lists in all generations that this
2142   // collector is responsible for
2143   assert_lock_strong(_cmsGen->freelistLock());
2144   assert_lock_strong(_permGen->freelistLock());
2145   PRODUCT_ONLY(ShouldNotReachHere());
2146   return true;
2147 }
2148 
2149 // A utility class that is used by the CMS collector to
2150 // temporarily "release" the foreground collector from its
2151 // usual obligation to wait for the background collector to
2152 // complete an ongoing phase before proceeding.
2153 class ReleaseForegroundGC: public StackObj {
2154  private:
2155   CMSCollector* _c;
2156  public:
2157   ReleaseForegroundGC(CMSCollector* c) : _c(c) {
2158     assert(_c->_foregroundGCShouldWait, "Else should not need to call");
2159     MutexLockerEx x(CGC_lock, Mutex::_no_safepoint_check_flag);
2160     // allow a potentially blocked foreground collector to proceed
2161     _c->_foregroundGCShouldWait = false;
2162     if (_c->_foregroundGCIsActive) {
2163       CGC_lock->notify();
2164     }
2165     assert(!ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
2166            "Possible deadlock");
2167   }
2168 
2169   ~ReleaseForegroundGC() {
2170     assert(!_c->_foregroundGCShouldWait, "Usage protocol violation?");
2171     MutexLockerEx x(CGC_lock, Mutex::_no_safepoint_check_flag);
2172     _c->_foregroundGCShouldWait = true;
2173   }
2174 };
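
     // Typical use (see collect_in_background() below): bracket an operation
     // during which a waiting foreground collector may safely run (some_op is
     // a placeholder):
     //
     //   {
     //     ReleaseForegroundGC x(this);  // clears _foregroundGCShouldWait, notifies
     //     VMThread::execute(&some_op);  // may block; a FG collection can proceed
     //   }                               // destructor re-asserts _foregroundGCShouldWait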
2175 
2176 // There are separate collect_in_background and collect_in_foreground because of
2177 // the different locking requirements of the background collector and the
2178 // foreground collector.  There was originally an attempt to share
2179 // one "collect" method between the background collector and the foreground
2180 // collector but the if-then-else required made it cleaner to have
2181 // separate methods.
2182 void CMSCollector::collect_in_background(bool clear_all_soft_refs) {
2183   assert(Thread::current()->is_ConcurrentGC_thread(),
2184     "A CMS asynchronous collection is only allowed on a CMS thread.");
2185 
2186   GenCollectedHeap* gch = GenCollectedHeap::heap();
2187   {
2188     bool safepoint_check = Mutex::_no_safepoint_check_flag;
2189     MutexLockerEx hl(Heap_lock, safepoint_check);
2190     FreelistLocker fll(this);
2191     MutexLockerEx x(CGC_lock, safepoint_check);
2192     if (_foregroundGCIsActive || !UseAsyncConcMarkSweepGC) {
2193       // The foreground collector is active or we're
2194       // not using asynchronous collections.  Skip this
2195       // background collection.
2196       assert(!_foregroundGCShouldWait, "Should be clear");
2197       return;
2198     } else {
2199       assert(_collectorState == Idling, "Should be idling before start.");
2200       _collectorState = InitialMarking;
2201       // Reset the expansion cause, now that we are about to begin
2202       // a new cycle.
2203       clear_expansion_cause();
2204     }
2205     // Decide if we want to enable class unloading as part of the
2206     // ensuing concurrent GC cycle.
2207     update_should_unload_classes();
2208     _full_gc_requested = false;           // acks all outstanding full gc requests
2209     // Signal that we are about to start a collection
2210     gch->increment_total_full_collections();  // ... starting a collection cycle
2211     _collection_count_start = gch->total_full_collections();
2212   }
2213 
2214   // Used for PrintGC
2215   size_t prev_used;
2216   if (PrintGC && Verbose) {
2217     prev_used = _cmsGen->used(); // XXXPERM
2218   }
2219 
2220   // The change of the collection state is normally done at this level;
2221   // the exceptions are phases that are executed while the world is
2222   // stopped.  For those phases the change of state is done while the
2223   // world is stopped.  For baton passing purposes this allows the
2224   // background collector to finish the phase and change state atomically.
2225   // The foreground collector cannot wait on a phase that is done
2226   // while the world is stopped because the foreground collector already
2227   // has the world stopped and would deadlock.
2228   while (_collectorState != Idling) {
2229     if (TraceCMSState) {
2230       gclog_or_tty->print_cr("Thread " INTPTR_FORMAT " in CMS state %d",
2231         Thread::current(), _collectorState);
2232     }
2233     // The foreground collector
2234     //   holds the Heap_lock throughout its collection.
2235     //   holds the CMS token (but not the lock)
2236     //     except while it is waiting for the background collector to yield.
2237     //
2238     // The foreground collector should be blocked (not for long)
2239     //   if the background collector is about to start a phase
2240     //   executed with world stopped.  If the background
2241     //   collector has already started such a phase, the
2242     //   foreground collector is blocked waiting for the
2243     //   Heap_lock.  The stop-world phases (InitialMarking and FinalMarking)
2244     //   are executed in the VM thread.
2245     //
2246     // The locking order is
2247     //   PendingListLock (PLL)  -- if applicable (FinalMarking)
2248     //   Heap_lock  (both this & PLL locked in VM_CMS_Operation::prologue())
2249     //   CMS token  (claimed in
2250     //                stop_world_and_do() -->
2251     //                  safepoint_synchronize() -->
2252     //                    CMSThread::synchronize())
2253 
2254     {
2255       // Check if the FG collector wants us to yield.
2256       CMSTokenSync x(true); // is cms thread
2257       if (waitForForegroundGC()) {
2258         // We yielded to a foreground GC, nothing more to be
2259         // done this round.
2260         assert(_foregroundGCShouldWait == false, "We set it to false in "
2261                "waitForForegroundGC()");
2262         if (TraceCMSState) {
2263           gclog_or_tty->print_cr("CMS Thread " INTPTR_FORMAT
2264             " exiting collection CMS state %d",
2265             Thread::current(), _collectorState);
2266         }
2267         return;
2268       } else {
2269         // The background collector can run but check to see if the
2270         // foreground collector has done a collection while the
2271         // background collector was waiting to get the CGC_lock
2272         // above.  If yes, break so that _foregroundGCShouldWait
2273         // is cleared before returning.
2274         if (_collectorState == Idling) {
2275           break;
2276         }
2277       }
2278     }
2279 
2280     assert(_foregroundGCShouldWait, "Foreground collector, if active, "
2281       "should be waiting");
2282 
2283     switch (_collectorState) {
2284       case InitialMarking:
2285         {
2286           ReleaseForegroundGC x(this);
2287           stats().record_cms_begin();
2288           register_gc_start(GCCause::_cms_concurrent_mark);
2289 
2290           VM_CMS_Initial_Mark initial_mark_op(this);
2291           VMThread::execute(&initial_mark_op);
2292         }
2293         // The collector state may be any legal state at this point
2294         // since the background collector may have yielded to the
2295         // foreground collector.
2296         break;
2297       case Marking:
2298         // initial marking in checkpointRootsInitialWork has been completed
2299         if (markFromRoots(true)) { // we were successful
2300           assert(_collectorState == Precleaning, "Collector state should "
2301             "have changed");
2302         } else {
2303           assert(_foregroundGCIsActive, "Internal state inconsistency");
2304         }
2305         break;
2306       case Precleaning:
2307         if (UseAdaptiveSizePolicy) {
2308           size_policy()->concurrent_precleaning_begin();
2309         }
2310         // marking from roots in markFromRoots has been completed
2311         preclean();
2312         if (UseAdaptiveSizePolicy) {
2313           size_policy()->concurrent_precleaning_end();
2314         }
2315         assert(_collectorState == AbortablePreclean ||
2316                _collectorState == FinalMarking,
2317                "Collector state should have changed");
2318         break;
2319       case AbortablePreclean:
2320         if (UseAdaptiveSizePolicy) {
2321           size_policy()->concurrent_phases_resume();
2322         }
2323         abortable_preclean();
2324         if (UseAdaptiveSizePolicy) {
2325           size_policy()->concurrent_precleaning_end();
2326         }
2327         assert(_collectorState == FinalMarking, "Collector state should "
2328           "have changed");
2329         break;
2330       case FinalMarking:
2331         {
2332           ReleaseForegroundGC x(this);
2333 
2334           VM_CMS_Final_Remark final_remark_op(this);
2335           VMThread::execute(&final_remark_op);
2336         }
2337         assert(_foregroundGCShouldWait, "block post-condition");
2338         break;
2339       case Sweeping:
2340         if (UseAdaptiveSizePolicy) {
2341           size_policy()->concurrent_sweeping_begin();
2342         }
2343         // final marking in checkpointRootsFinal has been completed
2344         sweep(true);
2345         assert(_collectorState == Resizing, "Collector state change "
2346           "to Resizing must be done under the free_list_lock");
2347         _full_gcs_since_conc_gc = 0;
2348 
2349         // Stop the timers for adaptive size policy for the concurrent phases
2350         if (UseAdaptiveSizePolicy) {
2351           size_policy()->concurrent_sweeping_end();
2352           size_policy()->concurrent_phases_end(gch->gc_cause(),
2353                                              gch->prev_gen(_cmsGen)->capacity(),
2354                                              _cmsGen->free());
2355         }
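         // Note: there is deliberately no break here.  sweep(true) has just
         // set _collectorState to Resizing, so we fall through directly into
         // the Resizing case.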
2356 
2357       case Resizing: {
2358         // Sweeping has been completed...
2359         // At this point the background collection has completed.
2360         // Don't move the call to compute_new_size() down
2361         // into code that might be executed if the background
2362         // collection was preempted.
2363         {
2364           ReleaseForegroundGC x(this);   // unblock FG collection
2365           MutexLockerEx       y(Heap_lock, Mutex::_no_safepoint_check_flag);
2366           CMSTokenSync        z(true);   // not strictly needed.
2367           if (_collectorState == Resizing) {
2368             compute_new_size();
2369             _collectorState = Resetting;
2370           } else {
2371             assert(_collectorState == Idling, "The state should only change"
2372                    " because the foreground collector has finished the collection");
2373           }
2374         }
2375         break;
2376       }
2377       case Resetting:
2378         // CMS heap resizing has been completed
2379         reset(true);
2380         assert(_collectorState == Idling, "Collector state should "
2381           "have changed");
2382         stats().record_cms_end();
2383         // Don't move the concurrent_phases_end() and compute_new_size()
2384         // calls to here because a preempted background collection
2385         // has its state set to "Resetting".
2386         break;
2387       case Idling:
2388       default:
2389         ShouldNotReachHere();
2390         break;
2391     }
2392     if (TraceCMSState) {
2393       gclog_or_tty->print_cr("  Thread " INTPTR_FORMAT " done - next CMS state %d",
2394         Thread::current(), _collectorState);
2395     }
2396     assert(_foregroundGCShouldWait, "block post-condition");
2397   }
2398 
2399   // Should this be in gc_epilogue?
2400   collector_policy()->counters()->update_counters();
2401 
2402   {
2403     // Clear _foregroundGCShouldWait and, in the event that the
2404     // foreground collector is waiting, notify it, before
2405     // returning.
2406     MutexLockerEx x(CGC_lock, Mutex::_no_safepoint_check_flag);
2407     _foregroundGCShouldWait = false;
2408     if (_foregroundGCIsActive) {
2409       CGC_lock->notify();
2410     }
2411     assert(!ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
2412            "Possible deadlock");
2413   }
2414   if (TraceCMSState) {
2415     gclog_or_tty->print_cr("CMS Thread " INTPTR_FORMAT
2416       " exiting collection CMS state %d",
2417       Thread::current(), _collectorState);
2418   }
2419   if (PrintGC && Verbose) {
2420     _cmsGen->print_heap_change(prev_used);
2421   }
2422 }
2423 
2424 void CMSCollector::register_gc_start(GCCause::Cause cause) {
2425   _cms_start_registered = true;
2426   CollectedHeap* heap = GenCollectedHeap::heap();
2427   _gc_timer_cm->register_gc_start(os::elapsed_counter());
2428   _gc_tracer_cm->report_gc_start(cause, _gc_timer_cm->gc_start());
2429 }
2430 
2431 void CMSCollector::register_gc_end() {
2432   if (_cms_start_registered) {
2433     _gc_timer_cm->register_gc_end(os::elapsed_counter());
2434     _gc_tracer_cm->report_gc_end(_gc_timer_cm->gc_end(), _gc_timer_cm->time_partitions());
2435     _cms_start_registered = false;
2436   }
2437 }
2438 
2439 void CMSCollector::collect_in_foreground(bool clear_all_soft_refs) {
2440   assert(_foregroundGCIsActive && !_foregroundGCShouldWait,
2441          "Foreground collector should be executing, not waiting");
2442   assert(Thread::current()->is_VM_thread(), "A foreground collection "
2443     "may only be done by the VM Thread with the world stopped");
2444   assert(ConcurrentMarkSweepThread::vm_thread_has_cms_token(),
2445          "VM thread should have CMS token");
2446 
2447   NOT_PRODUCT(GCTraceTime t("CMS:MS (foreground) ", PrintGCDetails && Verbose,
2448     true, NULL);)
2449   if (UseAdaptiveSizePolicy) {
2450     size_policy()->ms_collection_begin();
2451   }
2452   COMPILER2_PRESENT(DerivedPointerTableDeactivate dpt_deact);
2453 
2454   HandleMark hm;  // Discard invalid handles created during verification
2455 
2456   if (VerifyBeforeGC &&
2457       GenCollectedHeap::heap()->total_collections() >= VerifyGCStartAt) {
2458     Universe::verify();
2459   }
2460 
2461   // Snapshot the soft reference policy to be used in this collection cycle.
2462   ref_processor()->setup_policy(clear_all_soft_refs);
2463 
2464   bool init_mark_was_synchronous = false; // until proven otherwise
2465   while (_collectorState != Idling) {
2466     if (TraceCMSState) {
2467       gclog_or_tty->print_cr("Thread " INTPTR_FORMAT " in CMS state %d",
2468         Thread::current(), _collectorState);
2469     }
2470     switch (_collectorState) {
2471       case InitialMarking:
2472         register_gc_start(GenCollectedHeap::heap()->gc_cause());
2473         init_mark_was_synchronous = true;  // fact to be exploited in re-mark
2474         checkpointRootsInitial(false);
2475         assert(_collectorState == Marking, "Collector state should have changed"
2476           " within checkpointRootsInitial()");
2477         break;
2478       case Marking:
2479         // initial marking in checkpointRootsInitialWork has been completed
2480         if (VerifyDuringGC &&
2481             GenCollectedHeap::heap()->total_collections() >= VerifyGCStartAt) {
2482           gclog_or_tty->print("Verify before initial mark: ");
2483           Universe::verify();
2484         }
2485         {
2486           bool res = markFromRoots(false);
2487           assert(res && _collectorState == FinalMarking, "Collector state should "
2488             "have changed");
2489           break;
2490         }
2491       case FinalMarking:
2492         if (VerifyDuringGC &&
2493             GenCollectedHeap::heap()->total_collections() >= VerifyGCStartAt) {
2494           gclog_or_tty->print("Verify before re-mark: ");
2495           Universe::verify();
2496         }
2497         checkpointRootsFinal(false, clear_all_soft_refs,
2498                              init_mark_was_synchronous);
2499         assert(_collectorState == Sweeping, "Collector state should "
2500           "have changed within checkpointRootsFinal()");
2501         break;
2502       case Sweeping:
2503         // final marking in checkpointRootsFinal has been completed
2504         if (VerifyDuringGC &&
2505             GenCollectedHeap::heap()->total_collections() >= VerifyGCStartAt) {
2506           gclog_or_tty->print("Verify before sweep: ");
2507           Universe::verify();
2508         }
2509         sweep(false);
2510         assert(_collectorState == Resizing, "Incorrect state");
2511         break;
2512       case Resizing: {
2513         // Sweeping has been completed; the actual resize in this case
2514         // is done separately; nothing to be done in this state.
2515         _collectorState = Resetting;
2516         break;
2517       }
2518       case Resetting:
2519         // The heap has been resized.
2520         if (VerifyDuringGC &&
2521             GenCollectedHeap::heap()->total_collections() >= VerifyGCStartAt) {
2522           gclog_or_tty->print("Verify before reset: ");
2523           Universe::verify();
2524         }
2525         reset(false);
2526         assert(_collectorState == Idling, "Collector state should "
2527           "have changed");
2528         break;
2529       case Precleaning:
2530       case AbortablePreclean:
2531         // Elide the preclean phase
2532         _collectorState = FinalMarking;
2533         break;
2534       default:
2535         ShouldNotReachHere();
2536     }
2537     if (TraceCMSState) {
2538       gclog_or_tty->print_cr("  Thread " INTPTR_FORMAT " done - next CMS state %d",
2539         Thread::current(), _collectorState);
2540     }
2541   }
2542 
2543   if (UseAdaptiveSizePolicy) {
2544     GenCollectedHeap* gch = GenCollectedHeap::heap();
2545     size_policy()->ms_collection_end(gch->gc_cause());
2546   }
2547 
2548   if (VerifyAfterGC &&
2549       GenCollectedHeap::heap()->total_collections() >= VerifyGCStartAt) {
2550     Universe::verify();
2551   }
2552   if (TraceCMSState) {
2553     gclog_or_tty->print_cr("CMS Thread " INTPTR_FORMAT
2554       " exiting collection CMS state %d",
2555       Thread::current(), _collectorState);
2556   }
2557 }
2558 
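     // A summary of the protocol below (no new mechanism, just its shape):
     // under CGC_lock the background collector normally asserts
     // _foregroundGCShouldWait; but if a foreground collection is pending
     // (_foregroundGCIsActive), it instead clears that flag, gives up the
     // CMS token, notifies the possibly-blocked foreground thread, and
     // waits until the foreground collection completes before re-acquiring
     // the token and returning true (i.e. "I yielded").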
2559 bool CMSCollector::waitForForegroundGC() {
2560   bool res = false;
2561   assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
2562          "CMS thread should have CMS token");
2563   // Block the foreground collector until the
2564   // background collector decides whether to
2565   // yield.
2566   MutexLockerEx x(CGC_lock, Mutex::_no_safepoint_check_flag);
2567   _foregroundGCShouldWait = true;
2568   if (_foregroundGCIsActive) {
2569     // The background collector yields to the
2570     // foreground collector and returns a value
2571     // indicating that it has yielded.  The foreground
2572     // collector can proceed.
2573     res = true;
2574     _foregroundGCShouldWait = false;
2575     ConcurrentMarkSweepThread::clear_CMS_flag(
2576       ConcurrentMarkSweepThread::CMS_cms_has_token);
2577     ConcurrentMarkSweepThread::set_CMS_flag(
2578       ConcurrentMarkSweepThread::CMS_cms_wants_token);
2579     // Get a possibly blocked foreground thread going
2580     CGC_lock->notify();
2581     if (TraceCMSState) {
2582       gclog_or_tty->print_cr("CMS Thread " INTPTR_FORMAT " waiting at CMS state %d",
2583         Thread::current(), _collectorState);
2584     }
2585     while (_foregroundGCIsActive) {
2586       CGC_lock->wait(Mutex::_no_safepoint_check_flag);
2587     }
2588     ConcurrentMarkSweepThread::set_CMS_flag(
2589       ConcurrentMarkSweepThread::CMS_cms_has_token);
2590     ConcurrentMarkSweepThread::clear_CMS_flag(
2591       ConcurrentMarkSweepThread::CMS_cms_wants_token);
2592   }
2593   if (TraceCMSState) {
2594     gclog_or_tty->print_cr("CMS Thread " INTPTR_FORMAT " continuing at CMS state %d",
2595       Thread::current(), _collectorState);
2596   }
2597   return res;
2598 }
2599 
2600 // Because of the need to lock the free lists and other structures in
2601 // the collector, common to all the generations that the collector is
2602 // collecting, we need the gc_prologues of individual CMS generations
2603 // to delegate to their collector. It may have been simpler had the
2604 // current infrastructure allowed one to call a prologue on a
2605 // collector. In the absence of that we have the generation's
2606 // prologue delegate to the collector, which delegates back
2607 // some "local" work to a worker method in the individual generations
2608 // that it's responsible for collecting, while itself doing any
2609 // work common to all generations it's responsible for. A similar
2610 // comment applies to the gc_epilogue()s.
2611 // The role of the variable _between_prologue_and_epilogue is to
2612 // enforce the invocation protocol.
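     // Illustratively, the delegation described above makes a prologue call
     // unfold roughly as follows:
     //   ConcurrentMarkSweepGeneration::gc_prologue(full)
     //     -> CMSCollector::gc_prologue(full)           // common work, locks
     //        -> _cmsGen->gc_prologue_work(full, ...)   // per-generation work
     //        -> _permGen->gc_prologue_work(full, ...)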
2613 void CMSCollector::gc_prologue(bool full) {
2614   // Call gc_prologue_work() for each CMSGen and PermGen that
2615   // we are responsible for.
2616 
2617   // The following locking discipline assumes that we are only called
2618   // when the world is stopped.
2619   assert(SafepointSynchronize::is_at_safepoint(), "world is stopped assumption");
2620 
2621   // The CMSCollector prologue must call the gc_prologues for the
2622   // "generations" (including PermGen if any) that it's responsible
2623   // for.
2624 
2625   assert(   Thread::current()->is_VM_thread()
2626          || (   CMSScavengeBeforeRemark
2627              && Thread::current()->is_ConcurrentGC_thread()),
2628          "Incorrect thread type for prologue execution");
2629 
2630   if (_between_prologue_and_epilogue) {
2631     // We have already been invoked; this is a gc_prologue delegation
2632     // from yet another CMS generation that we are responsible for, just
2633     // ignore it since all relevant work has already been done.
2634     return;
2635   }
2636 
2637   // set a bit saying prologue has been called; cleared in epilogue
2638   _between_prologue_and_epilogue = true;
2639   // Claim locks for common data structures, then call gc_prologue_work()
2640   // for each CMSGen and PermGen that we are responsible for.
2641 
2642   getFreelistLocks();   // gets free list locks on constituent spaces
2643   bitMapLock()->lock_without_safepoint_check();
2644 
2645   // Should call gc_prologue_work() for all cms gens we are responsible for
2646   bool registerClosure =    _collectorState >= Marking
2647                          && _collectorState < Sweeping;
2648   ModUnionClosure* muc = CollectedHeap::use_parallel_gc_threads() ?
2649                                                &_modUnionClosurePar
2650                                                : &_modUnionClosure;
2651   _cmsGen->gc_prologue_work(full, registerClosure, muc);
2652   _permGen->gc_prologue_work(full, registerClosure, muc);
2653 
2654   if (!full) {
2655     stats().record_gc0_begin();
2656   }
2657 }
2658 
2659 void ConcurrentMarkSweepGeneration::gc_prologue(bool full) {
2660   // Delegate to CMScollector which knows how to coordinate between
2661   // this and any other CMS generations that it is responsible for
2662   // collecting.
2663   collector()->gc_prologue(full);
2664 }
2665 
2666 // This is a "private" interface for use by this generation's CMSCollector.
2667 // Not to be called directly by any other entity (for instance,
2668 // GenCollectedHeap, which calls the "public" gc_prologue method above).
2669 void ConcurrentMarkSweepGeneration::gc_prologue_work(bool full,
2670   bool registerClosure, ModUnionClosure* modUnionClosure) {
2671   assert(!incremental_collection_failed(), "Shouldn't be set yet");
2672   assert(cmsSpace()->preconsumptionDirtyCardClosure() == NULL,
2673     "Should be NULL");
2674   if (registerClosure) {
2675     cmsSpace()->setPreconsumptionDirtyCardClosure(modUnionClosure);
2676   }
2677   cmsSpace()->gc_prologue();
2678   // Clear stat counters
2679   NOT_PRODUCT(
2680     assert(_numObjectsPromoted == 0, "check");
2681     assert(_numWordsPromoted   == 0, "check");
2682     if (Verbose && PrintGC) {
2683       gclog_or_tty->print("Allocated "SIZE_FORMAT" objects, "
2684                           SIZE_FORMAT" bytes concurrently",
2685       _numObjectsAllocated, _numWordsAllocated*sizeof(HeapWord));
2686     }
2687     _numObjectsAllocated = 0;
2688     _numWordsAllocated   = 0;
2689   )
2690 }
2691 
2692 void CMSCollector::gc_epilogue(bool full) {
2693   // The following locking discipline assumes that we are only called
2694   // when the world is stopped.
2695   assert(SafepointSynchronize::is_at_safepoint(),
2696          "world is stopped assumption");
2697 
2698   // Currently the CMS epilogue (see CompactibleFreeListSpace) merely checks
2699   // if linear allocation blocks need to be appropriately marked to allow
2700   // the blocks to be parsable. We also check here whether we need to nudge the
2701   // CMS collector thread to start a new cycle (if it's not already active).
2702   assert(   Thread::current()->is_VM_thread()
2703          || (   CMSScavengeBeforeRemark
2704              && Thread::current()->is_ConcurrentGC_thread()),
2705          "Incorrect thread type for epilogue execution");
2706 
2707   if (!_between_prologue_and_epilogue) {
2708     // We have already been invoked; this is a gc_epilogue delegation
2709     // from yet another CMS generation that we are responsible for, just
2710     // ignore it since all relevant work has already been done.
2711     return;
2712   }
2713   assert(haveFreelistLocks(), "must have freelist locks");
2714   assert_lock_strong(bitMapLock());
2715 
2716   _cmsGen->gc_epilogue_work(full);
2717   _permGen->gc_epilogue_work(full);
2718 
2719   if (_collectorState == AbortablePreclean || _collectorState == Precleaning) {
2720     // in case sampling was not already enabled, enable it
2721     _start_sampling = true;
2722   }
2723   // reset _eden_chunk_array so sampling starts afresh
2724   _eden_chunk_index = 0;
2725 
2726   size_t cms_used   = _cmsGen->cmsSpace()->used();
2727   size_t perm_used  = _permGen->cmsSpace()->used();
2728 
2729   // update performance counters - this uses a special version of
2730   // update_counters() that allows the utilization to be passed as a
2731   // parameter, avoiding multiple calls to used().
2732   //
2733   _cmsGen->update_counters(cms_used);
2734   _permGen->update_counters(perm_used);
2735 
2736   if (CMSIncrementalMode) {
2737     icms_update_allocation_limits();
2738   }
2739 
2740   bitMapLock()->unlock();
2741   releaseFreelistLocks();
2742 
2743   if (!CleanChunkPoolAsync) {
2744     Chunk::clean_chunk_pool();
2745   }
2746 
2747   _between_prologue_and_epilogue = false;  // ready for next cycle
2748 }
2749 
2750 void ConcurrentMarkSweepGeneration::gc_epilogue(bool full) {
2751   collector()->gc_epilogue(full);
2752 
2753   // Also reset promotion tracking in par gc thread states.
2754   if (CollectedHeap::use_parallel_gc_threads()) {
2755     for (uint i = 0; i < ParallelGCThreads; i++) {
2756       _par_gc_thread_states[i]->promo.stopTrackingPromotions(i);
2757     }
2758   }
2759 }
2760 
2761 void ConcurrentMarkSweepGeneration::gc_epilogue_work(bool full) {
2762   assert(!incremental_collection_failed(), "Should have been cleared");
2763   cmsSpace()->setPreconsumptionDirtyCardClosure(NULL);
2764   cmsSpace()->gc_epilogue();
2765     // Print stat counters
2766   NOT_PRODUCT(
2767     assert(_numObjectsAllocated == 0, "check");
2768     assert(_numWordsAllocated == 0, "check");
2769     if (Verbose && PrintGC) {
2770       gclog_or_tty->print("Promoted "SIZE_FORMAT" objects, "
2771                           SIZE_FORMAT" bytes",
2772                  _numObjectsPromoted, _numWordsPromoted*sizeof(HeapWord));
2773     }
2774     _numObjectsPromoted = 0;
2775     _numWordsPromoted   = 0;
2776   )
2777 
2778   if (PrintGC && Verbose) {
2779     // Call down the chain in contiguous_available needs the freelistLock
2780     // so print this out before releasing the freeListLock.
2781     gclog_or_tty->print(" Contiguous available "SIZE_FORMAT" bytes ",
2782                         contiguous_available());
2783   }
2784 }
2785 
2786 #ifndef PRODUCT
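     // Debug-only helper: answers whether the calling thread may legitimately
     // touch CMS data structures -- either the VM thread or the CMS thread
     // holding the CMS token, or a GC worker thread operating on behalf of
     // the VM thread (which then holds the token) while serialized under
     // ParGCRareEvent_lock.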
2787 bool CMSCollector::have_cms_token() {
2788   Thread* thr = Thread::current();
2789   if (thr->is_VM_thread()) {
2790     return ConcurrentMarkSweepThread::vm_thread_has_cms_token();
2791   } else if (thr->is_ConcurrentGC_thread()) {
2792     return ConcurrentMarkSweepThread::cms_thread_has_cms_token();
2793   } else if (thr->is_GC_task_thread()) {
2794     return ConcurrentMarkSweepThread::vm_thread_has_cms_token() &&
2795            ParGCRareEvent_lock->owned_by_self();
2796   }
2797   return false;
2798 }
2799 #endif
2800 
2801 // Check reachability of the given heap address in CMS generation,
2802 // treating all other generations as roots.
2803 bool CMSCollector::is_cms_reachable(HeapWord* addr) {
2804   // We could "guarantee" below, rather than assert, but I'll
2805   // leave these as "asserts" so that an adventurous debugger
2806   // could try this in the product build provided some subset of
2807   // the conditions were met, provided they were interested in the
2808   // results and knew that the computation below wouldn't interfere
2809   // with other concurrent computations mutating the structures
2810   // being read or written.
2811   assert(SafepointSynchronize::is_at_safepoint(),
2812          "Else mutations in object graph will make answer suspect");
2813   assert(have_cms_token(), "Should hold cms token");
2814   assert(haveFreelistLocks(), "must hold free list locks");
2815   assert_lock_strong(bitMapLock());
2816 
2817   // The verification bit map is cleared and rebuilt below; but, just
2818   // for kicks, first report if the given address is already marked
2819   gclog_or_tty->print_cr("Start: Address "PTR_FORMAT" is%s marked", addr,
2820                 _markBitMap.isMarked(addr) ? "" : " not");
2821 
2822   if (verify_after_remark()) {
2823     MutexLockerEx x(verification_mark_bm()->lock(), Mutex::_no_safepoint_check_flag);
2824     bool result = verification_mark_bm()->isMarked(addr);
2825     gclog_or_tty->print_cr("TransitiveMark: Address "PTR_FORMAT" %s marked", addr,
2826                            result ? "IS" : "is NOT");
2827     return result;
2828   } else {
2829     gclog_or_tty->print_cr("Could not compute result");
2830     return false;
2831   }
2832 }
2833 
2834 ////////////////////////////////////////////////////////
2835 // CMS Verification Support
2836 ////////////////////////////////////////////////////////
2837 // Following the remark phase, the following invariant
2838 // should hold -- each object in the CMS heap which is
2839 // marked in markBitMap() should be marked in the verification_mark_bm().
2840 
2841 class VerifyMarkedClosure: public BitMapClosure {
2842   CMSBitMap* _marks;
2843   bool       _failed;
2844 
2845  public:
2846   VerifyMarkedClosure(CMSBitMap* bm): _marks(bm), _failed(false) {}
2847 
2848   bool do_bit(size_t offset) {
2849     HeapWord* addr = _marks->offsetToHeapWord(offset);
2850     if (!_marks->isMarked(addr)) {
2851       oop(addr)->print_on(gclog_or_tty);
2852       gclog_or_tty->print_cr(" ("INTPTR_FORMAT" should have been marked)", addr);
2853       _failed = true;
2854     }
2855     return true;
2856   }
2857 
2858   bool failed() { return _failed; }
2859 };
2860 
2861 bool CMSCollector::verify_after_remark() {
2862   gclog_or_tty->print(" [Verifying CMS Marking... ");
2863   MutexLockerEx ml(verification_mark_bm()->lock(), Mutex::_no_safepoint_check_flag);
2864   static bool init = false;
2865 
2866   assert(SafepointSynchronize::is_at_safepoint(),
2867          "Else mutations in object graph will make answer suspect");
2868   assert(have_cms_token(),
2869          "Else there may be mutual interference in use of "
2870          " verification data structures");
2871   assert(_collectorState > Marking && _collectorState <= Sweeping,
2872          "Else marking info checked here may be obsolete");
2873   assert(haveFreelistLocks(), "must hold free list locks");
2874   assert_lock_strong(bitMapLock());
2875 
2876 
2877   // Allocate marking bit map if not already allocated
2878   if (!init) { // first time
2879     if (!verification_mark_bm()->allocate(_span)) {
2880       return false;
2881     }
2882     init = true;
2883   }
2884 
2885   assert(verification_mark_stack()->isEmpty(), "Should be empty");
2886 
2887   // Turn off refs discovery -- so we will be tracing through refs.
2888   // This is as intended, because by this time
2889   // GC must already have cleared any refs that need to be cleared,
2890   // and traced those that need to be marked; moreover,
2891   // the marking done here is not going to interfere in any
2892   // way with the marking information used by GC.
2893   NoRefDiscovery no_discovery(ref_processor());
2894 
2895   COMPILER2_PRESENT(DerivedPointerTableDeactivate dpt_deact;)
2896 
2897   // Clear any marks from a previous round
2898   verification_mark_bm()->clear_all();
2899   assert(verification_mark_stack()->isEmpty(), "markStack should be empty");
2900   verify_work_stacks_empty();
2901 
2902   GenCollectedHeap* gch = GenCollectedHeap::heap();
2903   gch->ensure_parsability(false);  // fill TLABs, but no need to retire them
2904   // Update the saved marks which may affect the root scans.
2905   gch->save_marks();
2906 
2907   if (CMSRemarkVerifyVariant == 1) {
2908     // In this first variant of verification, we complete
2909     // all marking, then check if the new marks-vector is
2910     // a subset of the CMS marks-vector.
2911     verify_after_remark_work_1();
2912   } else if (CMSRemarkVerifyVariant == 2) {
2913     // In this second variant of verification, we flag an error
2914     // (i.e. an object reachable in the new marks-vector not reachable
2915     // in the CMS marks-vector) immediately, also indicating the
2916     // identity of an object (A) that references the unmarked object (B) --
2917     // presumably, a mutation to A failed to be picked up by preclean/remark?
2918     verify_after_remark_work_2();
2919   } else {
2920     warning("Unrecognized value %d for CMSRemarkVerifyVariant",
2921             CMSRemarkVerifyVariant);
2922   }
2923   gclog_or_tty->print(" done] ");
2924   return true;
2925 }
2926 
2927 void CMSCollector::verify_after_remark_work_1() {
2928   ResourceMark rm;
2929   HandleMark  hm;
2930   GenCollectedHeap* gch = GenCollectedHeap::heap();
2931 
2932   // Mark from roots one level into CMS
2933   MarkRefsIntoClosure notOlder(_span, verification_mark_bm());
2934   gch->rem_set()->prepare_for_younger_refs_iterate(false); // Not parallel.
2935 
2936   gch->gen_process_strong_roots(_cmsGen->level(),
2937                                 true,   // younger gens are roots
2938                                 true,   // activate StrongRootsScope
2939                                 true,   // collecting perm gen
2940                                 SharedHeap::ScanningOption(roots_scanning_options()),
2941                                 &notOlder,
2942                                 true,   // walk code active on stacks
2943                                 NULL);
2944 
2945   // Now mark from the roots
2946   assert(_revisitStack.isEmpty(), "Should be empty");
2947   MarkFromRootsClosure markFromRootsClosure(this, _span,
2948     verification_mark_bm(), verification_mark_stack(), &_revisitStack,
2949     false /* don't yield */, true /* verifying */);
2950   assert(_restart_addr == NULL, "Expected pre-condition");
2951   verification_mark_bm()->iterate(&markFromRootsClosure);
2952   while (_restart_addr != NULL) {
2953     // Deal with stack overflow: by restarting at the indicated
2954     // address.
2955     HeapWord* ra = _restart_addr;
2956     markFromRootsClosure.reset(ra);
2957     _restart_addr = NULL;
2958     verification_mark_bm()->iterate(&markFromRootsClosure, ra, _span.end());
2959   }
2960   assert(verification_mark_stack()->isEmpty(), "Should have been drained");
2961   verify_work_stacks_empty();
2962   // Should reset the revisit stack above, since no class tree
2963   // surgery is forthcoming.
2964   _revisitStack.reset(); // throwing away all contents
2965 
2966   // Marking completed -- now verify that each bit marked in
2967   // verification_mark_bm() is also marked in markBitMap(); flag all
2968   // errors by printing corresponding objects.
2969   VerifyMarkedClosure vcl(markBitMap());
2970   verification_mark_bm()->iterate(&vcl);
2971   if (vcl.failed()) {
2972     gclog_or_tty->print("Verification failed");
2973     Universe::heap()->print_on(gclog_or_tty);
2974     fatal("CMS: failed marking verification after remark");
2975   }
2976 }
2977 
2978 void CMSCollector::verify_after_remark_work_2() {
2979   ResourceMark rm;
2980   HandleMark  hm;
2981   GenCollectedHeap* gch = GenCollectedHeap::heap();
2982 
2983   // Mark from roots one level into CMS
2984   MarkRefsIntoVerifyClosure notOlder(_span, verification_mark_bm(),
2985                                      markBitMap());
2986   gch->rem_set()->prepare_for_younger_refs_iterate(false); // Not parallel.
2987   gch->gen_process_strong_roots(_cmsGen->level(),
2988                                 true,   // younger gens are roots
2989                                 true,   // activate StrongRootsScope
2990                                 true,   // collecting perm gen
2991                                 SharedHeap::ScanningOption(roots_scanning_options()),
2992                                 &notOlder,
2993                                 true,   // walk code active on stacks
2994                                 NULL);
2995 
2996   // Now mark from the roots
2997   assert(_revisitStack.isEmpty(), "Should be empty");
2998   MarkFromRootsVerifyClosure markFromRootsClosure(this, _span,
2999     verification_mark_bm(), markBitMap(), verification_mark_stack());
3000   assert(_restart_addr == NULL, "Expected pre-condition");
3001   verification_mark_bm()->iterate(&markFromRootsClosure);
3002   while (_restart_addr != NULL) {
3003     // Deal with stack overflow: by restarting at the indicated
3004     // address.
3005     HeapWord* ra = _restart_addr;
3006     markFromRootsClosure.reset(ra);
3007     _restart_addr = NULL;
3008     verification_mark_bm()->iterate(&markFromRootsClosure, ra, _span.end());
3009   }
3010   assert(verification_mark_stack()->isEmpty(), "Should have been drained");
3011   verify_work_stacks_empty();
3012   // Should reset the revisit stack above, since no class tree
3013   // surgery is forthcoming.
3014   _revisitStack.reset(); // throwing away all contents
3015 
3016   // Marking completed -- now verify that each bit marked in
3017   // verification_mark_bm() is also marked in markBitMap(); flag all
3018   // errors by printing corresponding objects.
3019   VerifyMarkedClosure vcl(markBitMap());
3020   verification_mark_bm()->iterate(&vcl);
3021   assert(!vcl.failed(), "Else verification above should not have succeeded");
3022 }
3023 
3024 void ConcurrentMarkSweepGeneration::save_marks() {
3025   // delegate to CMS space
3026   cmsSpace()->save_marks();
3027   for (uint i = 0; i < ParallelGCThreads; i++) {
3028     _par_gc_thread_states[i]->promo.startTrackingPromotions();
3029   }
3030 }
3031 
3032 bool ConcurrentMarkSweepGeneration::no_allocs_since_save_marks() {
3033   return cmsSpace()->no_allocs_since_save_marks();
3034 }
3035 
3036 #define CMS_SINCE_SAVE_MARKS_DEFN(OopClosureType, nv_suffix)    \
3037                                                                 \
3038 void ConcurrentMarkSweepGeneration::                            \
3039 oop_since_save_marks_iterate##nv_suffix(OopClosureType* cl) {   \
3040   cl->set_generation(this);                                     \
3041   cmsSpace()->oop_since_save_marks_iterate##nv_suffix(cl);      \
3042   cl->reset_generation();                                       \
3043   save_marks();                                                 \
3044 }
3045 
3046 ALL_SINCE_SAVE_MARKS_CLOSURES(CMS_SINCE_SAVE_MARKS_DEFN)
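     // For illustration, instantiating the macro above with, say,
     // (OopsInGenClosure, _v) yields -- modulo formatting -- a definition
     // of the form:
     //   void ConcurrentMarkSweepGeneration::
     //   oop_since_save_marks_iterate_v(OopsInGenClosure* cl) {
     //     cl->set_generation(this);
     //     cmsSpace()->oop_since_save_marks_iterate_v(cl);
     //     cl->reset_generation();
     //     save_marks();
     //   }
     // ALL_SINCE_SAVE_MARKS_CLOSURES stamps out one such definition per
     // (closure type, suffix) pair.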
3047 
3048 void
3049 ConcurrentMarkSweepGeneration::object_iterate_since_last_GC(ObjectClosure* blk)
3050 {
3051   // Not currently implemented; need to do the following. -- ysr.
3052   // dld -- I think that is used for some sort of allocation profiler.  So it
3053   // really means the objects allocated by the mutator since the last
3054   // GC.  We could potentially implement this cheaply by recording only
3055   // the direct allocations in a side data structure.
3056   //
3057   // I think we probably ought not to be required to support these
3058   // iterations at any arbitrary point; I think there ought to be some
3059   // call to enable/disable allocation profiling in a generation/space,
3060   // and the iterator ought to return the objects allocated in the
3061   // gen/space since the enable call, or the last iterator call (which
3062   // will probably be at a GC.)  That way, for gens like CM&S that would
3063   // require some extra data structure to support this, we only pay the
3064   // cost when it's in use...
3065   cmsSpace()->object_iterate_since_last_GC(blk);
3066 }
3067 
3068 void
3069 ConcurrentMarkSweepGeneration::younger_refs_iterate(OopsInGenClosure* cl) {
3070   cl->set_generation(this);
3071   younger_refs_in_space_iterate(_cmsSpace, cl);
3072   cl->reset_generation();
3073 }
3074 
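     // The iterators below share a simple locking idiom: if the caller
     // already holds the free list lock (e.g. a CMS-internal caller), just
     // iterate; otherwise take the lock -- without a safepoint check, since
     // these may run in the VM thread at a safepoint -- for the duration of
     // the iteration.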
3075 void
3076 ConcurrentMarkSweepGeneration::oop_iterate(MemRegion mr, OopClosure* cl) {
3077   if (freelistLock()->owned_by_self()) {
3078     Generation::oop_iterate(mr, cl);
3079   } else {
3080     MutexLockerEx x(freelistLock(), Mutex::_no_safepoint_check_flag);
3081     Generation::oop_iterate(mr, cl);
3082   }
3083 }
3084 
3085 void
3086 ConcurrentMarkSweepGeneration::oop_iterate(OopClosure* cl) {
3087   if (freelistLock()->owned_by_self()) {
3088     Generation::oop_iterate(cl);
3089   } else {
3090     MutexLockerEx x(freelistLock(), Mutex::_no_safepoint_check_flag);
3091     Generation::oop_iterate(cl);
3092   }
3093 }
3094 
3095 void
3096 ConcurrentMarkSweepGeneration::object_iterate(ObjectClosure* cl) {
3097   if (freelistLock()->owned_by_self()) {
3098     Generation::object_iterate(cl);
3099   } else {
3100     MutexLockerEx x(freelistLock(), Mutex::_no_safepoint_check_flag);
3101     Generation::object_iterate(cl);
3102   }
3103 }
3104 
3105 void
3106 ConcurrentMarkSweepGeneration::safe_object_iterate(ObjectClosure* cl) {
3107   if (freelistLock()->owned_by_self()) {
3108     Generation::safe_object_iterate(cl);
3109   } else {
3110     MutexLockerEx x(freelistLock(), Mutex::_no_safepoint_check_flag);
3111     Generation::safe_object_iterate(cl);
3112   }
3113 }
3114 
3115 void
3116 ConcurrentMarkSweepGeneration::pre_adjust_pointers() {
3117 }
3118 
3119 void
3120 ConcurrentMarkSweepGeneration::post_compact() {
3121 }
3122 
3123 void
3124 ConcurrentMarkSweepGeneration::prepare_for_verify() {
3125   // Fix the linear allocation blocks to look like free blocks.
3126 
3127   // Locks are normally acquired/released in gc_prologue/gc_epilogue, but those
3128   // are not called when the heap is verified during universe initialization and
3129   // at vm shutdown.
3130   if (freelistLock()->owned_by_self()) {
3131     cmsSpace()->prepare_for_verify();
3132   } else {
3133     MutexLockerEx fll(freelistLock(), Mutex::_no_safepoint_check_flag);
3134     cmsSpace()->prepare_for_verify();
3135   }
3136 }
3137 
3138 void
3139 ConcurrentMarkSweepGeneration::verify() {
3140   // Locks are normally acquired/released in gc_prologue/gc_epilogue, but those
3141   // are not called when the heap is verified during universe initialization and
3142   // at vm shutdown.
3143   if (freelistLock()->owned_by_self()) {
3144     cmsSpace()->verify();
3145   } else {
3146     MutexLockerEx fll(freelistLock(), Mutex::_no_safepoint_check_flag);
3147     cmsSpace()->verify();
3148   }
3149 }
3150 
3151 void CMSCollector::verify() {
3152   _cmsGen->verify();
3153   _permGen->verify();
3154 }
3155 
3156 #ifndef PRODUCT
3157 bool CMSCollector::overflow_list_is_empty() const {
3158   assert(_num_par_pushes >= 0, "Inconsistency");
3159   if (_overflow_list == NULL) {
3160     assert(_num_par_pushes == 0, "Inconsistency");
3161   }
3162   return _overflow_list == NULL;
3163 }
3164 
3165 // The methods verify_work_stacks_empty() and verify_overflow_empty()
3166 // merely consolidate assertion checks that appear to occur together frequently.
3167 void CMSCollector::verify_work_stacks_empty() const {
3168   assert(_markStack.isEmpty(), "Marking stack should be empty");
3169   assert(overflow_list_is_empty(), "Overflow list should be empty");
3170 }
3171 
3172 void CMSCollector::verify_overflow_empty() const {
3173   assert(overflow_list_is_empty(), "Overflow list should be empty");
3174   assert(no_preserved_marks(), "No preserved marks");
3175 }
3176 #endif // PRODUCT
3177 
3178 // Decide if we want to enable class unloading as part of the
3179 // ensuing concurrent GC cycle. We will collect the perm gen and
3180 // unload classes if it's the case that:
3181 // (1) an explicit gc request has been made and the flag
3182 //     ExplicitGCInvokesConcurrentAndUnloadsClasses is set, OR
3183 // (2) (a) class unloading is enabled at the command line, and
3184 //     (b) (i)   perm gen threshold has been crossed, or
3185 //         (ii)  old gen is getting really full, or
3186 //         (iii) the previous N CMS collections did not collect the
3187 //               perm gen
3188 // NOTE: Provided there is no change in the state of the heap between
3189 // calls to this method, it should have idempotent results. Moreover,
3190 // its results should be monotonically increasing (i.e. going from 0 to 1,
3191 // but not 1 to 0) between successive calls between which the heap was
3192 // not collected. The implementation below thus relies on
3193 // the property that concurrent_cycles_since_last_unload()
3194 // will not decrease unless a collection cycle happened, and that
3195 // _permGen->should_concurrent_collect() and _cmsGen->is_too_full() are
3196 // themselves also monotonic in that sense. See check_monotonicity()
3197 // below.
3198 bool CMSCollector::update_should_unload_classes() {
3199   _should_unload_classes = false;
3200   // Condition 1 above
3201   if (_full_gc_requested && ExplicitGCInvokesConcurrentAndUnloadsClasses) {
3202     _should_unload_classes = true;
3203   } else if (CMSClassUnloadingEnabled) { // Condition 2.a above
3204     // Disjuncts 2.b.(i,ii,iii) above
3205     _should_unload_classes = (concurrent_cycles_since_last_unload() >=
3206                               CMSClassUnloadingMaxInterval)
3207                            || _permGen->should_concurrent_collect()
3208                            || _cmsGen->is_too_full();
3209   }
3210   return _should_unload_classes;
3211 }
3212 
3213 bool ConcurrentMarkSweepGeneration::is_too_full() const {
3214   bool res = should_concurrent_collect();
3215   res = res && (occupancy() > (double)CMSIsTooFullPercentage/100.0);
3216   return res;
3217 }
3218 
3219 void CMSCollector::setup_cms_unloading_and_verification_state() {
3220   const  bool should_verify =   VerifyBeforeGC || VerifyAfterGC || VerifyDuringGC
3221                              || VerifyBeforeExit;
3222   const  int  rso           =   SharedHeap::SO_Strings | SharedHeap::SO_CodeCache;
3223 
3224   if (should_unload_classes()) {   // Should unload classes this cycle
3225     remove_root_scanning_option(rso);  // Shrink the root set appropriately
3226     set_verifying(should_verify);    // Set verification state for this cycle
3227     return;                            // Nothing else needs to be done at this time
3228   }
3229 
3230   // Not unloading classes this cycle
3231   assert(!should_unload_classes(), "Inconsistency!");
3232   if ((!verifying() || unloaded_classes_last_cycle()) && should_verify) {
3233     // We were not verifying, or we _were_ unloading classes in the last cycle,
3234     // AND some verification options are enabled this cycle; in this case,
3235     // we must make sure that the deadness map is allocated if not already so,
3236     // and cleared (if already allocated previously --
3237     // CMSBitMap::sizeInBits() is used to determine if it's allocated).
3238     if (perm_gen_verify_bit_map()->sizeInBits() == 0) {
3239       if (!perm_gen_verify_bit_map()->allocate(_permGen->reserved())) {
3240         warning("Failed to allocate permanent generation verification CMS Bit Map;\n"
3241                 "permanent generation verification disabled");
3242         return;  // Note that we leave verification disabled, so we'll retry this
3243                  // allocation next cycle. We _could_ remember this failure
3244                  // and skip further attempts and permanently disable verification
3245                  // attempts if that is considered more desirable.
3246       }
3247       assert(perm_gen_verify_bit_map()->covers(_permGen->reserved()),
3248               "_perm_gen_ver_bit_map inconsistency?");
3249     } else {
3250       perm_gen_verify_bit_map()->clear_all();
3251     }
3252     // Include symbols, strings and code cache elements to prevent their resurrection.
3253     add_root_scanning_option(rso);
3254     set_verifying(true);
3255   } else if (verifying() && !should_verify) {
3256     // We were verifying, but some verification flags got disabled.
3257     set_verifying(false);
3258     // Exclude symbols, strings and code cache elements from root scanning to
3259     // reduce IM and RM pauses.
3260     remove_root_scanning_option(rso);
3261   }
3262 }
3263 
3264 
3265 #ifndef PRODUCT
3266 HeapWord* CMSCollector::block_start(const void* p) const {
3267   const HeapWord* addr = (HeapWord*)p;
3268   if (_span.contains(p)) {
3269     if (_cmsGen->cmsSpace()->is_in_reserved(addr)) {
3270       return _cmsGen->cmsSpace()->block_start(p);
3271     } else {
3272       assert(_permGen->cmsSpace()->is_in_reserved(addr),
3273              "Inconsistent _span?");
3274       return _permGen->cmsSpace()->block_start(p);
3275     }
3276   }
3277   return NULL;
3278 }
3279 #endif
3280 
3281 HeapWord*
3282 ConcurrentMarkSweepGeneration::expand_and_allocate(size_t word_size,
3283                                                    bool   tlab,
3284                                                    bool   parallel) {
3285   CMSSynchronousYieldRequest yr;
3286   assert(!tlab, "Can't deal with TLAB allocation");
3287   MutexLockerEx x(freelistLock(), Mutex::_no_safepoint_check_flag);
3288   expand(word_size*HeapWordSize, MinHeapDeltaBytes,
3289     CMSExpansionCause::_satisfy_allocation);
3290   if (GCExpandToAllocateDelayMillis > 0) {
3291     os::sleep(Thread::current(), GCExpandToAllocateDelayMillis, false);
3292   }
3293   return have_lock_and_allocate(word_size, tlab);
3294 }
3295 
3296 // YSR: All of this generation expansion/shrinking stuff is an exact copy of
3297 // OneContigSpaceCardGeneration, which makes me wonder if we should move this
3298 // to CardGeneration and share it...
3299 bool ConcurrentMarkSweepGeneration::expand(size_t bytes, size_t expand_bytes) {
3300   return CardGeneration::expand(bytes, expand_bytes);
3301 }
3302 
3303 void ConcurrentMarkSweepGeneration::expand(size_t bytes, size_t expand_bytes,
3304   CMSExpansionCause::Cause cause)
3305 {
3306 
3307   bool success = expand(bytes, expand_bytes);
3308 
3309   // remember why we expanded; this information is used
3310   // by shouldConcurrentCollect() when making decisions on whether to start
3311   // a new CMS cycle.
3312   if (success) {
3313     set_expansion_cause(cause);
3314     if (PrintGCDetails && Verbose) {
3315       gclog_or_tty->print_cr("Expanded CMS gen for %s",
3316         CMSExpansionCause::to_string(cause));
3317     }
3318   }
3319 }
3320 
3321 HeapWord* ConcurrentMarkSweepGeneration::expand_and_par_lab_allocate(CMSParGCThreadState* ps, size_t word_sz) {
3322   HeapWord* res = NULL;
3323   MutexLocker x(ParGCRareEvent_lock);
3324   while (true) {
3325     // Expansion by some other thread might make alloc OK now:
3326     res = ps->lab.alloc(word_sz);
3327     if (res != NULL) return res;
3328     // If there's not enough expansion space available, give up.
3329     if (_virtual_space.uncommitted_size() < (word_sz * HeapWordSize)) {
3330       return NULL;
3331     }
3332     // Otherwise, we try expansion.
3333     expand(word_sz*HeapWordSize, MinHeapDeltaBytes,
3334       CMSExpansionCause::_allocate_par_lab);
3335     // Now go around the loop and try alloc again;
3336     // A competing par_promote might beat us to the expansion space,
3337     // so we may go around the loop again if promotion fails again.
3338     if (GCExpandToAllocateDelayMillis > 0) {
3339       os::sleep(Thread::current(), GCExpandToAllocateDelayMillis, false);
3340     }
3341   }
3342 }
3343 
3344 
3345 bool ConcurrentMarkSweepGeneration::expand_and_ensure_spooling_space(
3346   PromotionInfo* promo) {
3347   MutexLocker x(ParGCRareEvent_lock);
3348   size_t refill_size_bytes = promo->refillSize() * HeapWordSize;
3349   while (true) {
3350     // Expansion by some other thread might make alloc OK now:
3351     if (promo->ensure_spooling_space()) {
3352       assert(promo->has_spooling_space(),
3353              "Post-condition of successful ensure_spooling_space()");
3354       return true;
3355     }
3356     // If there's not enough expansion space available, give up.
3357     if (_virtual_space.uncommitted_size() < refill_size_bytes) {
3358       return false;
3359     }
3360     // Otherwise, we try expansion.
3361     expand(refill_size_bytes, MinHeapDeltaBytes,
3362       CMSExpansionCause::_allocate_par_spooling_space);
3363     // Now go around the loop and try alloc again;
3364     // A competing allocation might beat us to the expansion space,
3365     // so we may go around the loop again if allocation fails again.
3366     if (GCExpandToAllocateDelayMillis > 0) {
3367       os::sleep(Thread::current(), GCExpandToAllocateDelayMillis, false);
3368     }
3369   }
3370 }
3371 
3372 
3373 
3374 void ConcurrentMarkSweepGeneration::shrink(size_t bytes) {
3375   assert_locked_or_safepoint(Heap_lock);
3376   size_t size = ReservedSpace::page_align_size_down(bytes);
3377   if (size > 0) {
3378     shrink_by(size);
3379   }
3380 }
3381 
3382 bool ConcurrentMarkSweepGeneration::grow_by(size_t bytes) {
3383   assert_locked_or_safepoint(Heap_lock);
3384   bool result = _virtual_space.expand_by(bytes);
3385   if (result) {
3386     HeapWord* old_end = _cmsSpace->end();
3387     size_t new_word_size =
3388       heap_word_size(_virtual_space.committed_size());
3389     MemRegion mr(_cmsSpace->bottom(), new_word_size);
3390     _bts->resize(new_word_size);  // resize the block offset shared array
3391     Universe::heap()->barrier_set()->resize_covered_region(mr);
3392     // Hmmmm... why doesn't CFLS::set_end verify locking?
3393     // This is quite ugly; FIX ME XXX
3394     _cmsSpace->assert_locked(freelistLock());
3395     _cmsSpace->set_end((HeapWord*)_virtual_space.high());
3396 
3397     // update the space and generation capacity counters
3398     if (UsePerfData) {
3399       _space_counters->update_capacity();
3400       _gen_counters->update_all();
3401     }
3402 
3403     if (Verbose && PrintGC) {
3404       size_t new_mem_size = _virtual_space.committed_size();
3405       size_t old_mem_size = new_mem_size - bytes;
3406       gclog_or_tty->print_cr("Expanding %s from "SIZE_FORMAT"K by "SIZE_FORMAT"K to "SIZE_FORMAT"K",
3407                     name(), old_mem_size/K, bytes/K, new_mem_size/K);
3408     }
3409   }
3410   return result;
3411 }
3412 
3413 bool ConcurrentMarkSweepGeneration::grow_to_reserved() {
3414   assert_locked_or_safepoint(Heap_lock);
3415   bool success = true;
3416   const size_t remaining_bytes = _virtual_space.uncommitted_size();
3417   if (remaining_bytes > 0) {
3418     success = grow_by(remaining_bytes);
3419     DEBUG_ONLY(if (!success) warning("grow to reserved failed");)
3420   }
3421   return success;
3422 }
3423 
3424 void ConcurrentMarkSweepGeneration::shrink_by(size_t bytes) {
3425   assert_locked_or_safepoint(Heap_lock);
3426   assert_lock_strong(freelistLock());
3427   // XXX Fix when compaction is implemented.
3428   warning("Shrinking of CMS not yet implemented");
3429   return;
3430 }
3431 
3432 
3433 // Simple ctor/dtor wrapper for accounting & timer chores around concurrent
3434 // phases.
3435 class CMSPhaseAccounting: public StackObj {
3436  public:
3437   CMSPhaseAccounting(CMSCollector *collector,
3438                      const char *phase,
3439                      bool print_cr = true);
3440   ~CMSPhaseAccounting();
3441 
3442  private:
3443   CMSCollector *_collector;
3444   const char *_phase;
3445   elapsedTimer _wallclock;
3446   bool _print_cr;
3447 
3448  public:
3449   // Not MT-safe; so do not pass around these StackObj's
3450   // where they may be accessed by other threads.
3451   jlong wallclock_millis() {
3452     assert(_wallclock.is_active(), "Wall clock should not stop");
3453     _wallclock.stop();  // to record time
3454     jlong ret = _wallclock.milliseconds();
3455     _wallclock.start(); // restart
3456     return ret;
3457   }
3458 };
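     // Typical use, as in the concurrent phases further below:
     //   CMSTokenSyncWithLocks ts(true, bitMapLock());
     //   CMSPhaseAccounting pa(this, "mark", !PrintGCDetails);
     //   ... do the phase's work ...
     // The constructor logs the "-start" banner and starts the timers; the
     // destructor stops them and logs the summary however the scope is left.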
3459 
3460 CMSPhaseAccounting::CMSPhaseAccounting(CMSCollector *collector,
3461                                        const char *phase,
3462                                        bool print_cr) :
3463   _collector(collector), _phase(phase), _print_cr(print_cr) {
3464 
3465   if (PrintCMSStatistics != 0) {
3466     _collector->resetYields();
3467   }
3468   if (PrintGCDetails && PrintGCTimeStamps) {
3469     gclog_or_tty->date_stamp(PrintGCDateStamps);
3470     gclog_or_tty->stamp();
3471     gclog_or_tty->print_cr(": [%s-concurrent-%s-start]",
3472       _collector->cmsGen()->short_name(), _phase);
3473   }
3474   _collector->resetTimer();
3475   _wallclock.start();
3476   _collector->startTimer();
3477 }
3478 
3479 CMSPhaseAccounting::~CMSPhaseAccounting() {
3480   assert(_wallclock.is_active(), "Wall clock should not have stopped");
3481   _collector->stopTimer();
3482   _wallclock.stop();
3483   if (PrintGCDetails) {
3484     gclog_or_tty->date_stamp(PrintGCDateStamps);
3485     gclog_or_tty->stamp(PrintGCTimeStamps);
3486     gclog_or_tty->print("[%s-concurrent-%s: %3.3f/%3.3f secs]",
3487                  _collector->cmsGen()->short_name(),
3488                  _phase, _collector->timerValue(), _wallclock.seconds());
3489     if (_print_cr) {
3490       gclog_or_tty->print_cr("");
3491     }
3492     if (PrintCMSStatistics != 0) {
3493       gclog_or_tty->print_cr(" (CMS-concurrent-%s yielded %d times)", _phase,
3494                     _collector->yields());
3495     }
3496   }
3497 }
3498 
3499 // CMS work
3500 
3501 // Checkpoint the roots into this generation from outside
3502 // this generation. [Note this initial checkpoint need only
3503 // be approximate -- we'll do a catch up phase subsequently.]
3504 void CMSCollector::checkpointRootsInitial(bool asynch) {
3505   assert(_collectorState == InitialMarking, "Wrong collector state");
3506   check_correct_thread_executing();
3507   TraceCMSMemoryManagerStats tms(_collectorState, GenCollectedHeap::heap()->gc_cause());
3508 
3509   ReferenceProcessor* rp = ref_processor();
3510   SpecializationStats::clear();
3511   assert(_restart_addr == NULL, "Control point invariant");
3512   if (asynch) {
3513     // acquire locks for subsequent manipulations
3514     MutexLockerEx x(bitMapLock(),
3515                     Mutex::_no_safepoint_check_flag);
3516     checkpointRootsInitialWork(asynch);
3517     // enable ("weak") refs discovery
3518     rp->enable_discovery(true /*verify_disabled*/, true /*check_no_refs*/);
3519     _collectorState = Marking;
3520   } else {
3521     // (Weak) Refs discovery: this is controlled from GenCollectedHeap::do_collection,
3522     // which recognizes if we are a CMS generation and doesn't try to turn on
3523     // discovery; verify that it isn't meddling.
3524     assert(!rp->discovery_is_atomic(),
3525            "incorrect setting of discovery predicate");
3526     assert(!rp->discovery_enabled(), "genCollectedHeap shouldn't control "
3527            "ref discovery for this generation kind");
3528     // already have locks
3529     checkpointRootsInitialWork(asynch);
3530     // now enable ("weak") refs discovery
3531     rp->enable_discovery(true /*verify_disabled*/, false /*check_no_refs*/);
3532     _collectorState = Marking;
3533   }
3534   SpecializationStats::print();
3535 }
3536 
3537 void CMSCollector::checkpointRootsInitialWork(bool asynch) {
3538   assert(SafepointSynchronize::is_at_safepoint(), "world should be stopped");
3539   assert(_collectorState == InitialMarking, "just checking");
3540 
3541   // If there has not been a GC[n-1] since last GC[n] cycle completed,
3542   // precede our marking with a collection of all
3543   // younger generations to keep floating garbage to a minimum.
3544   // XXX: we won't do this for now -- it's an optimization to be done later.
3545 
3546   // already have locks
3547   assert_lock_strong(bitMapLock());
3548   assert(_markBitMap.isAllClear(), "was reset at end of previous cycle");
3549 
3550   // Setup the verification and class unloading state for this
3551   // CMS collection cycle.
3552   setup_cms_unloading_and_verification_state();
3553 
3554   NOT_PRODUCT(GCTraceTime t("\ncheckpointRootsInitialWork",
3555     PrintGCDetails && Verbose, true, _gc_timer_cm);)
3556   if (UseAdaptiveSizePolicy) {
3557     size_policy()->checkpoint_roots_initial_begin();
3558   }
3559 
3560   // Reset all the PLAB chunk arrays if necessary.
3561   if (_survivor_plab_array != NULL && !CMSPLABRecordAlways) {
3562     reset_survivor_plab_arrays();
3563   }
3564 
3565   ResourceMark rm;
3566   HandleMark  hm;
3567 
3569   // In the case of a synchronous collection, we will elide the
3570   // remark step, so it's important to catch all the nmethod oops
3571   // in this step.
3572   // The final 'true' flag to gen_process_strong_roots will ensure this.
3573   // If 'asynch' is true, we can relax the nmethod tracing.
3574   MarkRefsIntoClosure notOlder(_span, &_markBitMap);
3575   GenCollectedHeap* gch = GenCollectedHeap::heap();
3576 
3577   verify_work_stacks_empty();
3578   verify_overflow_empty();
3579 
3580   gch->ensure_parsability(false);  // fill TLABs, but no need to retire them
3581   // Update the saved marks which may affect the root scans.
3582   gch->save_marks();
3583 
3584   // weak reference processing has not started yet.
3585   ref_processor()->set_enqueuing_is_done(false);
3586 
3587   {
3588     // This is not needed. DEBUG_ONLY(RememberKlassesChecker imx(true);)
3589     COMPILER2_PRESENT(DerivedPointerTableDeactivate dpt_deact;)
3590     gch->rem_set()->prepare_for_younger_refs_iterate(false); // Not parallel.
3591     gch->gen_process_strong_roots(_cmsGen->level(),
3592                                   true,   // younger gens are roots
3593                                   true,   // activate StrongRootsScope
3594                                   true,   // collecting perm gen
3595                                   SharedHeap::ScanningOption(roots_scanning_options()),
3596                                   &notOlder,
3597                                   true,   // walk all of code cache if (so & SO_CodeCache)
3598                                   NULL);
3599   }
3600 
3601   // Clear mod-union table; it will be dirtied in the prologue of
3602   // CMS generation per each younger generation collection.
3603 
3604   assert(_modUnionTable.isAllClear(),
3605        "Was cleared in most recent final checkpoint phase"
3606        " or no bits are set in the gc_prologue before the start of the next "
3607        "subsequent marking phase.");
3608 
3609   // Save the end of the used_region of the constituent generations
3610   // to be used to limit the extent of sweep in each generation.
3611   save_sweep_limits();
3612   if (UseAdaptiveSizePolicy) {
3613     size_policy()->checkpoint_roots_initial_end(gch->gc_cause());
3614   }
3615   verify_overflow_empty();
3616 }
3617 
3618 bool CMSCollector::markFromRoots(bool asynch) {
3619   // we might be tempted to assert that:
3620   // assert(asynch == !SafepointSynchronize::is_at_safepoint(),
3621   //        "inconsistent argument?");
3622   // However that wouldn't be right, because it's possible that
3623   // a safepoint is indeed in progress as a younger generation
3624   // stop-the-world GC happens even as we mark in this generation.
3625   assert(_collectorState == Marking, "inconsistent state?");
3626   check_correct_thread_executing();
3627   verify_overflow_empty();
3628 
3629   bool res;
3630   if (asynch) {
3631 
3632     // Start the timers for adaptive size policy for the concurrent phases
3633     // Do it here so that the foreground MS can use the concurrent
3634     // timer, since a foreground MS might have the sweep done concurrently
3635     // or STW.
3636     if (UseAdaptiveSizePolicy) {
3637       size_policy()->concurrent_marking_begin();
3638     }
3639 
3640     // Weak ref discovery note: We may be discovering weak
3641     // refs in this generation concurrently (but interleaved) with
3642     // weak ref discovery by a younger generation collector.
3643 
3644     CMSTokenSyncWithLocks ts(true, bitMapLock());
3645     TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
3646     CMSPhaseAccounting pa(this, "mark", !PrintGCDetails);
3647     res = markFromRootsWork(asynch);
3648     if (res) {
3649       _collectorState = Precleaning;
3650     } else { // We failed and a foreground collection wants to take over
3651       assert(_foregroundGCIsActive, "internal state inconsistency");
3652       assert(_restart_addr == NULL,  "foreground will restart from scratch");
3653       if (PrintGCDetails) {
3654         gclog_or_tty->print_cr("bailing out to foreground collection");
3655       }
3656     }
3657     if (UseAdaptiveSizePolicy) {
3658       size_policy()->concurrent_marking_end();
3659     }
3660   } else {
3661     assert(SafepointSynchronize::is_at_safepoint(),
3662            "inconsistent with asynch == false");
3663     if (UseAdaptiveSizePolicy) {
3664       size_policy()->ms_collection_marking_begin();
3665     }
3666     // already have locks
3667     res = markFromRootsWork(asynch);
3668     _collectorState = FinalMarking;
3669     if (UseAdaptiveSizePolicy) {
3670       GenCollectedHeap* gch = GenCollectedHeap::heap();
3671       size_policy()->ms_collection_marking_end(gch->gc_cause());
3672     }
3673   }
3674   verify_overflow_empty();
3675   return res;
3676 }
3677 
3678 bool CMSCollector::markFromRootsWork(bool asynch) {
3679   // iterate over marked bits in bit map, doing a full scan and mark
3680   // from these roots using the following algorithm:
3681   // . if oop is to the right of the current scan pointer,
3682   //   mark corresponding bit (we'll process it later)
3683   // . else (oop is to left of current scan pointer)
3684   //   push oop on marking stack
3685   // . drain the marking stack
3686 
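       // A minimal sketch of what the single-threaded case amounts to,
       // eliding yielding, overflow handling and verification (the real
       // work is in do_marking_st()/do_marking_mt() below):
       //
       //   MarkFromRootsClosure cl(this, _span, &_markBitMap, &_markStack, ...);
       //   _markBitMap.iterate(&cl);  // cl pushes/drains as described above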
3687   // Note that when we do a marking step we need to hold the
3688   // bit map lock -- recall that direct allocation (by mutators)
3689   // and promotion (by younger generation collectors) is also
3690   // marking the bit map. [the so-called allocate live policy.]
3691   // Because the implementation of bit map marking is not
3692   // robust wrt simultaneous marking of bits in the same word,
3693   // we need to make sure that there is no such interference
3694   // between concurrent such updates.
3695 
3696   // already have locks
3697   assert_lock_strong(bitMapLock());
3698 
3699   // Clear the revisit stack, just in case there are any
3700   // obsolete contents from a short-circuited previous CMS cycle.
3701   _revisitStack.reset();
3702   verify_work_stacks_empty();
3703   verify_overflow_empty();
3704   assert(_revisitStack.isEmpty(), "tabula rasa");
3705   DEBUG_ONLY(RememberKlassesChecker cmx(should_unload_classes());)
3706   bool result = false;
3707   if (CMSConcurrentMTEnabled && ConcGCThreads > 0) {
3708     result = do_marking_mt(asynch);
3709   } else {
3710     result = do_marking_st(asynch);
3711   }
3712   return result;
3713 }
3714 
3715 // Forward decl
3716 class CMSConcMarkingTask;
3717 
3718 class CMSConcMarkingTerminator: public ParallelTaskTerminator {
3719   CMSCollector*       _collector;
3720   CMSConcMarkingTask* _task;
3721  public:
3722   virtual void yield();
3723 
3724   // "n_threads" is the number of threads to be terminated.
3725   // "queue_set" is a set of work queues of other threads.
3726   // "collector" is the CMS collector associated with this task terminator.
3727   // "yield" indicates whether we need the gang as a whole to yield.
3728   CMSConcMarkingTerminator(int n_threads, TaskQueueSetSuper* queue_set, CMSCollector* collector) :
3729     ParallelTaskTerminator(n_threads, queue_set),
3730     _collector(collector) { }
3731 
3732   void set_task(CMSConcMarkingTask* task) {
3733     _task = task;
3734   }
3735 };
3736 
3737 class CMSConcMarkingTerminatorTerminator: public TerminatorTerminator {
3738   CMSConcMarkingTask* _task;
3739  public:
3740   bool should_exit_termination();
3741   void set_task(CMSConcMarkingTask* task) {
3742     _task = task;
3743   }
3744 };
3745 
3746 // MT Concurrent Marking Task
3747 class CMSConcMarkingTask: public YieldingFlexibleGangTask {
3748   CMSCollector* _collector;
3749   int           _n_workers;                  // requested/desired # workers
3750   bool          _asynch;
3751   bool          _result;
3752   CompactibleFreeListSpace*  _cms_space;
3753   CompactibleFreeListSpace* _perm_space;
3754   char          _pad_front[64];   // padding to ...
3755   HeapWord*     _global_finger;   // ... avoid sharing cache line
3756   char          _pad_back[64];
3757   HeapWord*     _restart_addr;
3758 
3759   //  Exposed here for yielding support
3760   Mutex* const _bit_map_lock;
3761 
3762   // The per thread work queues, available here for stealing
3763   OopTaskQueueSet*  _task_queues;
3764 
3765   // Termination (and yielding) support
3766   CMSConcMarkingTerminator _term;
3767   CMSConcMarkingTerminatorTerminator _term_term;
3768 
3769  public:
3770   CMSConcMarkingTask(CMSCollector* collector,
3771                  CompactibleFreeListSpace* cms_space,
3772                  CompactibleFreeListSpace* perm_space,
3773                  bool asynch,
3774                  YieldingFlexibleWorkGang* workers,
3775                  OopTaskQueueSet* task_queues):
3776     YieldingFlexibleGangTask("Concurrent marking done multi-threaded"),
3777     _collector(collector),
3778     _cms_space(cms_space),
3779     _perm_space(perm_space),
3780     _asynch(asynch), _n_workers(0), _result(true),
3781     _task_queues(task_queues),
3782     _term(_n_workers, task_queues, _collector),
3783     _bit_map_lock(collector->bitMapLock())
3784   {
3785     _requested_size = _n_workers;
3786     _term.set_task(this);
3787     _term_term.set_task(this);
3788     assert(_cms_space->bottom() < _perm_space->bottom(),
3789            "Finger incorrectly initialized below");
3790     _restart_addr = _global_finger = _cms_space->bottom();
3791   }
3792 
3793 
3794   OopTaskQueueSet* task_queues()  { return _task_queues; }
3795 
3796   OopTaskQueue* work_queue(int i) { return task_queues()->queue(i); }
3797 
3798   HeapWord** global_finger_addr() { return &_global_finger; }
3799 
3800   CMSConcMarkingTerminator* terminator() { return &_term; }
3801 
3802   virtual void set_for_termination(int active_workers) {
3803     terminator()->reset_for_reuse(active_workers);
3804   }
3805 
3806   void work(uint worker_id);
3807   bool should_yield() {
3808     return    ConcurrentMarkSweepThread::should_yield()
3809            && !_collector->foregroundGCIsActive()
3810            && _asynch;
3811   }
3812 
3813   virtual void coordinator_yield();  // stuff done by coordinator
3814   bool result() { return _result; }
3815 
3816   void reset(HeapWord* ra) {
3817     assert(_global_finger >= _cms_space->end(),  "Postcondition of ::work(i)");
3818     assert(_global_finger >= _perm_space->end(), "Postcondition of ::work(i)");
3819     assert(ra             <  _perm_space->end(), "ra too large");
3820     _restart_addr = _global_finger = ra;
3821     _term.reset_for_reuse();
3822   }
3823 
3824   static bool get_work_from_overflow_stack(CMSMarkStack* ovflw_stk,
3825                                            OopTaskQueue* work_q);
3826 
3827  private:
3828   void do_scan_and_mark(int i, CompactibleFreeListSpace* sp);
3829   void do_work_steal(int i);
3830   void bump_global_finger(HeapWord* f);
3831 };
3832 
3833 bool CMSConcMarkingTerminatorTerminator::should_exit_termination() {
3834   assert(_task != NULL, "Error");
3835   return _task->yielding();
3836   // Note that we do not need the disjunct || _task->should_yield() above
3837   // because we want terminating threads to yield only if the task
3838   // is already in the midst of yielding, which happens only after at least one
3839   // thread has yielded.
3840 }
3841 
3842 void CMSConcMarkingTerminator::yield() {
3843   if (_task->should_yield()) {
3844     _task->yield();
3845   } else {
3846     ParallelTaskTerminator::yield();
3847   }
3848 }
3849 
3850 ////////////////////////////////////////////////////////////////
3851 // Concurrent Marking Algorithm Sketch
3852 ////////////////////////////////////////////////////////////////
3853 // Until all tasks exhausted (both spaces):
3854 // -- claim next available chunk
3855 // -- bump global finger via CAS
3856 // -- find first object that starts in this chunk
3857 //    and start scanning bitmap from that position
3858 // -- scan marked objects for oops
3859 // -- CAS-mark target, and if successful:
3860 //    . if target oop is above global finger (volatile read)
3861 //      nothing to do
3862 //    . if target oop is in chunk and above local finger
3863 //        then nothing to do
3864 //    . else push on work-queue
3865 // -- Deal with possible overflow issues:
3866 //    . local work-queue overflow causes stuff to be pushed on
3867 //      global (common) overflow queue
3868 //    . always first empty local work queue
3869 //    . then get a batch of oops from global work queue if any
3870 //    . then do work stealing
3871 // -- When all tasks claimed (both spaces)
3872 //    and local work queue empty,
3873 //    then in a loop do:
3874 //    . check global overflow stack; steal a batch of oops and trace
3875 //    . try to steal from other threads if the global overflow stack is empty
3876 //    . if neither is available, offer termination
3877 // -- Terminate and return result
3878 //
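     // As an illustrative (uncompiled) sketch, the per-oop marking step that
     // the closures below perform reduces to roughly:
     //
     //   if (span.contains(addr) && bit_map->par_mark(addr)) { // white->grey
     //     if (!work_q->push(obj)) {
     //       overflow_stack->par_push(obj);  // may overflow too; see
     //     }                                 // handle_stack_overflow()
     //   }
     //
     // (Hypothetical local names; the real protocol, including overflow
     // simulation and yield checks, lives in Par_ConcMarkingClosure::do_oop().)
     //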
3879 void CMSConcMarkingTask::work(uint worker_id) {
3880   elapsedTimer _timer;
3881   ResourceMark rm;
3882   HandleMark hm;
3883 
3884   DEBUG_ONLY(_collector->verify_overflow_empty();)
3885 
3886   // Before we begin work, our work queue should be empty
3887   assert(work_queue(worker_id)->size() == 0, "Expected to be empty");
3888   // Scan the bitmap covering _cms_space, tracing through grey objects.
3889   _timer.start();
3890   do_scan_and_mark(worker_id, _cms_space);
3891   _timer.stop();
3892   if (PrintCMSStatistics != 0) {
3893     gclog_or_tty->print_cr("Finished cms space scanning in %dth thread: %3.3f sec",
3894       worker_id, _timer.seconds());
3895       // XXX: need xxx/xxx type of notation, two timers
3896   }
3897 
3898   // ... do the same for the _perm_space
3899   _timer.reset();
3900   _timer.start();
3901   do_scan_and_mark(worker_id, _perm_space);
3902   _timer.stop();
3903   if (PrintCMSStatistics != 0) {
3904     gclog_or_tty->print_cr("Finished perm space scanning in %dth thread: %3.3f sec",
3905       worker_id, _timer.seconds());
3906       // XXX: need xxx/xxx type of notation, two timers
3907   }
3908 
3909   // ... do work stealing
3910   _timer.reset();
3911   _timer.start();
3912   do_work_steal(worker_id);
3913   _timer.stop();
3914   if (PrintCMSStatistics != 0) {
3915     gclog_or_tty->print_cr("Finished work stealing in %dth thread: %3.3f sec",
3916       worker_id, _timer.seconds());
3917       // XXX: need xxx/xxx type of notation, two timers
3918   }
3919   assert(_collector->_markStack.isEmpty(), "Should have been emptied");
3920   assert(work_queue(worker_id)->size() == 0, "Should have been emptied");
3921   // Note that under the current task protocol, the
3922   // following assertion is true even if the spaces have
3923   // expanded since the completion of the concurrent
3924   // marking. XXX This will likely change under a strict
3925   // ABORT semantics.
3926   assert(_global_finger >  _cms_space->end() &&
3927          _global_finger >= _perm_space->end(),
3928          "All tasks have been completed");
3929   DEBUG_ONLY(_collector->verify_overflow_empty();)
3930 }
3931 
3932 void CMSConcMarkingTask::bump_global_finger(HeapWord* f) {
3933   HeapWord* read = _global_finger;
3934   HeapWord* cur  = read;
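       // Monotonically advance _global_finger to at least f: retry the CAS
       // until either our update succeeds or we observe that another thread
       // has already pushed the finger to f or beyond.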
3935   while (f > read) {
3936     cur = read;
3937     read = (HeapWord*) Atomic::cmpxchg_ptr(f, &_global_finger, cur);
3938     if (cur == read) {
3939       // our cas succeeded
3940       assert(_global_finger >= f, "protocol consistency");
3941       break;
3942     }
3943   }
3944 }
3945 
3946 // This is really inefficient, and should be redone by
3947 // using (not yet available) block-read and -write interfaces to the
3948 // stack and the work_queue. XXX FIX ME !!!
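     // (Transferring up to ParGCDesiredObjsFromOverflowList oops per
     // acquisition of the stack's par_lock at least amortizes the locking
     // overhead over a batch of pops.)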
3949 bool CMSConcMarkingTask::get_work_from_overflow_stack(CMSMarkStack* ovflw_stk,
3950                                                       OopTaskQueue* work_q) {
3951   // Fast lock-free check
3952   if (ovflw_stk->length() == 0) {
3953     return false;
3954   }
3955   assert(work_q->size() == 0, "Shouldn't steal");
3956   MutexLockerEx ml(ovflw_stk->par_lock(),
3957                    Mutex::_no_safepoint_check_flag);
3958   // Grab up to 1/4 of the work queue's free capacity
3959   size_t num = MIN2((size_t)(work_q->max_elems() - work_q->size())/4,
3960                     (size_t)ParGCDesiredObjsFromOverflowList);
3961   num = MIN2(num, ovflw_stk->length());
3962   for (int i = (int) num; i > 0; i--) {
3963     oop cur = ovflw_stk->pop();
3964     assert(cur != NULL, "Counted wrong?");
3965     work_q->push(cur);
3966   }
3967   return num > 0;
3968 }
3969 
3970 void CMSConcMarkingTask::do_scan_and_mark(int i, CompactibleFreeListSpace* sp) {
3971   SequentialSubTasksDone* pst = sp->conc_par_seq_tasks();
3972   int n_tasks = pst->n_tasks();
3973   // We allow that there may be no tasks to do here because
3974   // we are restarting after a stack overflow.
3975   assert(pst->valid() || n_tasks == 0, "Uninitialized use?");
3976   uint nth_task = 0;
3977 
3978   HeapWord* aligned_start = sp->bottom();
3979   if (sp->used_region().contains(_restart_addr)) {
3980     // Align down to a card boundary for the start of 0th task
3981     // for this space.
3982     aligned_start =
3983       (HeapWord*)align_size_down((uintptr_t)_restart_addr,
3984                                  CardTableModRefBS::card_size);
3985   }
3986 
3987   size_t chunk_size = sp->marking_task_size();
3988   while (!pst->is_task_claimed(/* reference */ nth_task)) {
3989     // Having claimed the nth task in this space,
3990     // compute the chunk that it corresponds to:
3991     MemRegion span = MemRegion(aligned_start + nth_task*chunk_size,
3992                                aligned_start + (nth_task+1)*chunk_size);
3993     // Try and bump the global finger via a CAS;
3994     // note that we need to do the global finger bump
3995     // _before_ taking the intersection below, because
3996     // the task corresponding to that region will be
3997     // deemed done even if the used_region() expands
3998     // because of allocation -- as it almost certainly will
3999     // during start-up while the threads yield in the
4000     // closure below.
4001     HeapWord* finger = span.end();
4002     bump_global_finger(finger);   // atomically
4003     // There are null tasks here corresponding to chunks
4004     // beyond the "top" address of the space.
4005     span = span.intersection(sp->used_region());
4006     if (!span.is_empty()) {  // Non-null task
4007       HeapWord* prev_obj;
4008       assert(!span.contains(_restart_addr) || nth_task == 0,
4009              "Inconsistency");
4010       if (nth_task == 0) {
4011         // For the 0th task, we'll not need to compute a block_start.
4012         if (span.contains(_restart_addr)) {
4013           // In the case of a restart because of stack overflow,
4014           // we might additionally skip a chunk prefix.
4015           prev_obj = _restart_addr;
4016         } else {
4017           prev_obj = span.start();
4018         }
4019       } else {
4020         // We want to skip the first object because
4021         // the protocol is to scan any object in its entirety
4022         // that _starts_ in this span; a fortiori, any
4023         // object starting in an earlier span is scanned
4024         // as part of an earlier claimed task.
4025         // Below we use the "careful" version of block_start
4026         // so we do not try to navigate uninitialized objects.
4027         prev_obj = sp->block_start_careful(span.start());
4028         // Below we use a variant of block_size that uses the
4029         // Printezis bits to avoid waiting for allocated
4030         // objects to become initialized/parsable.
4031         while (prev_obj < span.start()) {
4032           size_t sz = sp->block_size_no_stall(prev_obj, _collector);
4033           if (sz > 0) {
4034             prev_obj += sz;
4035           } else {
4036             // In this case we may end up doing a bit of redundant
4037             // scanning, but that appears unavoidable, short of
4038             // locking the free list locks; see bug 6324141.
4039             break;
4040           }
4041         }
4042       }
4043       if (prev_obj < span.end()) {
4044         MemRegion my_span = MemRegion(prev_obj, span.end());
4045         // Do the marking work within a non-empty span --
4046         // the last argument to the constructor indicates whether the
4047         // iteration should be incremental with periodic yields.
4048         Par_MarkFromRootsClosure cl(this, _collector, my_span,
4049                                     &_collector->_markBitMap,
4050                                     work_queue(i),
4051                                     &_collector->_markStack,
4052                                     &_collector->_revisitStack,
4053                                     _asynch);
4054         _collector->_markBitMap.iterate(&cl, my_span.start(), my_span.end());
4055       } // else nothing to do for this task
4056     }   // else nothing to do for this task
4057   }
4058   // We'd be tempted to assert here that since there are no
4059   // more tasks left to claim in this space, the global_finger
4060   // must exceed space->top() and a fortiori space->end(). However,
4061   // that would not quite be correct because the bumping of
4062   // global_finger occurs strictly after the claiming of a task,
4063   // so by the time we reach here the global finger may not yet
4064   // have been bumped up by the thread that claimed the last
4065   // task.
4066   pst->all_tasks_completed();
4067 }
4068 
4069 class Par_ConcMarkingClosure: public Par_KlassRememberingOopClosure {
4070  private:
4071   CMSConcMarkingTask* _task;
4072   MemRegion     _span;
4073   CMSBitMap*    _bit_map;
4074   CMSMarkStack* _overflow_stack;
4075   OopTaskQueue* _work_queue;
4076  protected:
4077   DO_OOP_WORK_DEFN
4078  public:
4079   Par_ConcMarkingClosure(CMSCollector* collector, CMSConcMarkingTask* task, OopTaskQueue* work_queue,
4080                          CMSBitMap* bit_map, CMSMarkStack* overflow_stack,
4081                          CMSMarkStack* revisit_stack):
4082     Par_KlassRememberingOopClosure(collector, collector->ref_processor(), revisit_stack),
4083     _task(task),
4084     _span(collector->_span),
4085     _work_queue(work_queue),
4086     _bit_map(bit_map),
4087     _overflow_stack(overflow_stack)
4088   { }
4089   virtual void do_oop(oop* p);
4090   virtual void do_oop(narrowOop* p);
4091   void trim_queue(size_t max);
4092   void handle_stack_overflow(HeapWord* lost);
4093   void do_yield_check() {
4094     if (_task->should_yield()) {
4095       _task->yield();
4096     }
4097   }
4098 };
4099 
4100 // Grey object scanning during work stealing phase --
4101 // the salient assumption here is that any references
4102 // that are in these stolen objects being scanned must
4103 // already have been initialized (else they would not have
4104 // been published), so we do not need to check for
4105 // uninitialized objects before pushing here.
4106 void Par_ConcMarkingClosure::do_oop(oop obj) {
4107   assert(obj->is_oop_or_null(true), "expected an oop or NULL");
4108   HeapWord* addr = (HeapWord*)obj;
4109   // Check if oop points into the CMS generation
4110   // and is not marked
4111   if (_span.contains(addr) && !_bit_map->isMarked(addr)) {
4112     // a white object ...
4113     // If we manage to "claim" the object, by being the
4114     // first thread to mark it, then we push it on our
4115     // marking stack
4116     if (_bit_map->par_mark(addr)) {     // ... now grey
4117       // push on work queue (grey set)
4118       bool simulate_overflow = false;
4119       NOT_PRODUCT(
4120         if (CMSMarkStackOverflowALot &&
4121             _collector->simulate_overflow()) {
4122           // simulate a stack overflow
4123           simulate_overflow = true;
4124         }
4125       )
4126       if (simulate_overflow ||
4127           !(_work_queue->push(obj) || _overflow_stack->par_push(obj))) {
4128         // stack overflow
4129         if (PrintCMSStatistics != 0) {
4130           gclog_or_tty->print_cr("CMS marking stack overflow (benign) at "
4131                                  SIZE_FORMAT, _overflow_stack->capacity());
4132         }
4133         // We cannot assert that the overflow stack is full because
4134         // it may have been emptied since.
4135         assert(simulate_overflow ||
4136                _work_queue->size() == _work_queue->max_elems(),
4137               "Else push should have succeeded");
4138         handle_stack_overflow(addr);
4139       }
4140     } // Else, some other thread got there first
4141     do_yield_check();
4142   }
4143 }
4144 
4145 void Par_ConcMarkingClosure::do_oop(oop* p)       { Par_ConcMarkingClosure::do_oop_work(p); }
4146 void Par_ConcMarkingClosure::do_oop(narrowOop* p) { Par_ConcMarkingClosure::do_oop_work(p); }
4147 
4148 void Par_ConcMarkingClosure::trim_queue(size_t max) {
4149   while (_work_queue->size() > max) {
4150     oop new_oop;
4151     if (_work_queue->pop_local(new_oop)) {
4152       assert(new_oop->is_oop(), "Should be an oop");
4153       assert(_bit_map->isMarked((HeapWord*)new_oop), "Grey object");
4154       assert(_span.contains((HeapWord*)new_oop), "Not in span");
4155       assert(new_oop->is_parsable(), "Should be parsable");
4156       new_oop->oop_iterate(this);  // do_oop() above
4157       do_yield_check();
4158     }
4159   }
4160 }
4161 
4162 // Upon stack overflow, we discard (part of) the stack,
4163 // remembering the least address amongst those discarded
4164 // in CMSCollector's _restart_addr.
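     // The discarded work is recovered later: once the current pass
     // completes, do_marking_mt() re-seeds the per-space task lists from
     // _restart_addr and restarts the marking task from that address.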
4165 void Par_ConcMarkingClosure::handle_stack_overflow(HeapWord* lost) {
4166   // We need to do this under a mutex to prevent other
4167   // workers from interfering with the work done below.
4168   MutexLockerEx ml(_overflow_stack->par_lock(),
4169                    Mutex::_no_safepoint_check_flag);
4170   // Remember the least grey address discarded
4171   HeapWord* ra = (HeapWord*)_overflow_stack->least_value(lost);
4172   _collector->lower_restart_addr(ra);
4173   _overflow_stack->reset();  // discard stack contents
4174   _overflow_stack->expand(); // expand the stack if possible
4175 }
4176 
4177 
4178 void CMSConcMarkingTask::do_work_steal(int i) {
4179   OopTaskQueue* work_q = work_queue(i);
4180   oop obj_to_scan;
4181   CMSBitMap* bm = &(_collector->_markBitMap);
4182   CMSMarkStack* ovflw = &(_collector->_markStack);
4183   CMSMarkStack* revisit = &(_collector->_revisitStack);
4184   int* seed = _collector->hash_seed(i);
4185   Par_ConcMarkingClosure cl(_collector, this, work_q, bm, ovflw, revisit);
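       // The loop below follows the algorithm sketch above: drain the local
       // queue first, then refill it in batches from the global overflow
       // stack, then try to steal from other threads, and only then offer
       // termination.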
4186   while (true) {
4187     cl.trim_queue(0);
4188     assert(work_q->size() == 0, "Should have been emptied above");
4189     if (get_work_from_overflow_stack(ovflw, work_q)) {
4190       // Can't assert below because the work obtained from the
4191       // overflow stack may already have been stolen from us.
4192       // assert(work_q->size() > 0, "Work from overflow stack");
4193       continue;
4194     } else if (task_queues()->steal(i, seed, /* reference */ obj_to_scan)) {
4195       assert(obj_to_scan->is_oop(), "Should be an oop");
4196       assert(bm->isMarked((HeapWord*)obj_to_scan), "Grey object");
4197       obj_to_scan->oop_iterate(&cl);
4198     } else if (terminator()->offer_termination(&_term_term)) {
4199       assert(work_q->size() == 0, "Impossible!");
4200       break;
4201     } else if (yielding() || should_yield()) {
4202       yield();
4203     }
4204   }
4205 }
4206 
4207 // This is run by the CMS (coordinator) thread.
4208 void CMSConcMarkingTask::coordinator_yield() {
4209   assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
4210          "CMS thread should hold CMS token");
4211   DEBUG_ONLY(RememberKlassesChecker mux(false);)
4212   // First give up the locks, then yield, then re-lock
4213   // We should probably use a constructor/destructor idiom to
4214   // do this unlock/lock or modify the MutexUnlocker class to
4215   // serve our purpose. XXX
4216   assert_lock_strong(_bit_map_lock);
4217   _bit_map_lock->unlock();
4218   ConcurrentMarkSweepThread::desynchronize(true);
4219   ConcurrentMarkSweepThread::acknowledge_yield_request();
4220   _collector->stopTimer();
4221   if (PrintCMSStatistics != 0) {
4222     _collector->incrementYields();
4223   }
4224   _collector->icms_wait();
4225 
4226   // It is possible for whichever thread initiated the yield request
4227   // not to get a chance to wake up and take the bitmap lock between
4228   // this thread releasing it and reacquiring it. So, while the
4229   // should_yield() flag is on, let's sleep for a bit to give the
4230   // other thread a chance to wake up. The limit imposed on the number
4231   // of iterations is defensive, to avoid any unforeseen circumstances
4232   // putting us into an infinite loop. Since it's always been this
4233   // (coordinator_yield()) method that was observed to cause the
4234   // problem, we are using a parameter (CMSCoordinatorYieldSleepCount)
4235   // which is by default non-zero. For the other seven methods that
4236   // also perform the yield operation, we are using a different
4237   // parameter (CMSYieldSleepCount), which is by default zero. This way we
4238   // can enable the sleeping for those methods too, if necessary.
4239   // See 6442774.
4240   //
4241   // We really need to reconsider the synchronization between the GC
4242   // thread and the yield-requesting threads in the future and we
4243   // should really use wait/notify, which is the recommended
4244   // way of doing this type of interaction. Additionally, we should
4245   // consolidate the eight methods that do the yield operation, which
4246   // are almost identical, into one for better maintainability and
4247   // readability. See 6445193.
4248   //
4249   // Tony 2006.06.29
4250   for (unsigned i = 0; i < CMSCoordinatorYieldSleepCount &&
4251                    ConcurrentMarkSweepThread::should_yield() &&
4252                    !CMSCollector::foregroundGCIsActive(); ++i) {
4253     os::sleep(Thread::current(), 1, false);
4254     ConcurrentMarkSweepThread::acknowledge_yield_request();
4255   }
4256 
4257   ConcurrentMarkSweepThread::synchronize(true);
4258   _bit_map_lock->lock_without_safepoint_check();
4259   _collector->startTimer();
4260 }
4261 
4262 bool CMSCollector::do_marking_mt(bool asynch) {
4263   assert(ConcGCThreads > 0 && conc_workers() != NULL, "precondition");
4264   int num_workers = AdaptiveSizePolicy::calc_active_conc_workers(
4265                                        conc_workers()->total_workers(),
4266                                        conc_workers()->active_workers(),
4267                                        Threads::number_of_non_daemon_threads());
4268   conc_workers()->set_active_workers(num_workers);
4269 
4270   CompactibleFreeListSpace* cms_space  = _cmsGen->cmsSpace();
4271   CompactibleFreeListSpace* perm_space = _permGen->cmsSpace();
4272 
4273   CMSConcMarkingTask tsk(this,
4274                          cms_space,
4275                          perm_space,
4276                          asynch,
4277                          conc_workers(),
4278                          task_queues());
4279 
4280   // Since the actual number of workers we get may be different
4281   // from the number we requested above, do we need to do anything different
4282   // below? In particular, maybe we need to subclass the SequentialSubTasksDone
4283   // class? XXX
4284   cms_space ->initialize_sequential_subtasks_for_marking(num_workers);
4285   perm_space->initialize_sequential_subtasks_for_marking(num_workers);
4286 
4287   // Refs discovery is already non-atomic.
4288   assert(!ref_processor()->discovery_is_atomic(), "Should be non-atomic");
4289   assert(ref_processor()->discovery_is_mt(), "Discovery should be MT");
4290   DEBUG_ONLY(RememberKlassesChecker cmx(should_unload_classes());)
4291   conc_workers()->start_task(&tsk);
4292   while (tsk.yielded()) {
4293     tsk.coordinator_yield();
4294     conc_workers()->continue_task(&tsk);
4295   }
4296   // If the task was aborted, _restart_addr will be non-NULL
4297   assert(tsk.completed() || _restart_addr != NULL, "Inconsistency");
4298   while (_restart_addr != NULL) {
4299     // XXX For now we do not make use of ABORTED state and have not
4300     // yet implemented the right abort semantics (even in the original
4301     // single-threaded CMS case). That needs some more investigation
4302     // and is deferred for now; see CR# TBF. 07252005YSR. XXX
4303     assert(!CMSAbortSemantics || tsk.aborted(), "Inconsistency");
4304     // If _restart_addr is non-NULL, a marking stack overflow
4305     // occurred; we need to do a fresh marking iteration from the
4306     // indicated restart address.
4307     if (_foregroundGCIsActive && asynch) {
4308       // We may be running into repeated stack overflows, having
4309       // reached the limit of the stack size, while making very
4310       // slow forward progress. It may be best to bail out and
4311       // let the foreground collector do its job.
4312       // Clear _restart_addr, so that foreground GC
4313       // works from scratch. This avoids the headache of
4314       // a "rescan" which would otherwise be needed because
4315       // of the dirty mod union table & card table.
4316       _restart_addr = NULL;
4317       return false;
4318     }
4319     // Adjust the task to restart from _restart_addr
4320     tsk.reset(_restart_addr);
4321     cms_space ->initialize_sequential_subtasks_for_marking(num_workers,
4322                   _restart_addr);
4323     perm_space->initialize_sequential_subtasks_for_marking(num_workers,
4324                   _restart_addr);
4325     _restart_addr = NULL;
4326     // Get the workers going again
4327     conc_workers()->start_task(&tsk);
4328     while (tsk.yielded()) {
4329       tsk.coordinator_yield();
4330       conc_workers()->continue_task(&tsk);
4331     }
4332   }
4333   assert(tsk.completed(), "Inconsistency");
4334   assert(tsk.result() == true, "Inconsistency");
4335   return true;
4336 }
4337 
4338 bool CMSCollector::do_marking_st(bool asynch) {
4339   ResourceMark rm;
4340   HandleMark   hm;
4341 
4342   // Temporarily make refs discovery single threaded (non-MT)
4343   ReferenceProcessorMTDiscoveryMutator rp_mut_discovery(ref_processor(), false);
4344   MarkFromRootsClosure markFromRootsClosure(this, _span, &_markBitMap,
4345     &_markStack, &_revisitStack, CMSYield && asynch);
4346   // the last argument to iterate indicates whether the iteration
4347   // should be incremental with periodic yields.
4348   _markBitMap.iterate(&markFromRootsClosure);
4349   // If _restart_addr is non-NULL, a marking stack overflow
4350   // occurred; we need to do a fresh iteration from the
4351   // indicated restart address.
4352   while (_restart_addr != NULL) {
4353     if (_foregroundGCIsActive && asynch) {
4354       // We may be running into repeated stack overflows, having
4355       // reached the limit of the stack size, while making very
4356       // slow forward progress. It may be best to bail out and
4357       // let the foreground collector do its job.
4358       // Clear _restart_addr, so that foreground GC
4359       // works from scratch. This avoids the headache of
4360       // a "rescan" which would otherwise be needed because
4361       // of the dirty mod union table & card table.
4362       _restart_addr = NULL;
4363       return false;  // indicating failure to complete marking
4364     }
4365     // Deal with stack overflow:
4366     // we restart marking from _restart_addr
4367     HeapWord* ra = _restart_addr;
4368     markFromRootsClosure.reset(ra);
4369     _restart_addr = NULL;
4370     _markBitMap.iterate(&markFromRootsClosure, ra, _span.end());
4371   }
4372   return true;
4373 }
4374 
4375 void CMSCollector::preclean() {
4376   check_correct_thread_executing();
4377   assert(Thread::current()->is_ConcurrentGC_thread(), "Wrong thread");
4378   verify_work_stacks_empty();
4379   verify_overflow_empty();
4380   _abort_preclean = false;
4381   if (CMSPrecleaningEnabled) {
4382     _eden_chunk_index = 0;
4383     size_t used = get_eden_used();
4384     size_t capacity = get_eden_capacity();
4385     // Don't start sampling unless we will get sufficiently
4386     // many samples.
4387     if (used < (capacity/(CMSScheduleRemarkSamplingRatio * 100)
4388                 * CMSScheduleRemarkEdenPenetration)) {
4389       _start_sampling = true;
4390     } else {
4391       _start_sampling = false;
4392     }
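         // For instance, assuming the standard flag defaults
         // (CMSScheduleRemarkSamplingRatio = 5 and
         // CMSScheduleRemarkEdenPenetration = 50), sampling starts only if
         // Eden is currently below 50/(5*100) = 10% full, leaving room for
         // several samples before the abort threshold is crossed.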
4393     TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
4394     CMSPhaseAccounting pa(this, "preclean", !PrintGCDetails);
4395     preclean_work(CMSPrecleanRefLists1, CMSPrecleanSurvivors1);
4396   }
4397   CMSTokenSync x(true); // is cms thread
4398   if (CMSPrecleaningEnabled) {
4399     sample_eden();
4400     _collectorState = AbortablePreclean;
4401   } else {
4402     _collectorState = FinalMarking;
4403   }
4404   verify_work_stacks_empty();
4405   verify_overflow_empty();
4406 }
4407 
4408 // Try and schedule the remark such that young gen
4409 // occupancy is CMSScheduleRemarkEdenPenetration %.
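     // For example, assuming the standard default of 50 for
     // CMSScheduleRemarkEdenPenetration, sample_eden() arms _abort_preclean
     // once Eden is about half full, so the remark pause tends to fall
     // roughly mid-way between two scavenges.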
4410 void CMSCollector::abortable_preclean() {
4411   check_correct_thread_executing();
4412   assert(CMSPrecleaningEnabled,  "Inconsistent control state");
4413   assert(_collectorState == AbortablePreclean, "Inconsistent control state");
4414 
4415   // If Eden's current occupancy is below this threshold,
4416   // immediately schedule the remark; else preclean
4417   // past the next scavenge in an effort to
4418   // schedule the pause as described above. By choosing
4419   // CMSScheduleRemarkEdenSizeThreshold >= max eden size
4420   // we will never do an actual abortable preclean cycle.
4421   if (get_eden_used() > CMSScheduleRemarkEdenSizeThreshold) {
4422     TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
4423     CMSPhaseAccounting pa(this, "abortable-preclean", !PrintGCDetails);
4424     // We need more smarts in the abortable preclean
4425     // loop below to deal with cases where allocation
4426     // in young gen is very very slow, and our precleaning
4427     // is running a losing race against a horde of
4428     // mutators intent on flooding us with CMS updates
4429     // (dirty cards).
4430     // One, admittedly dumb, strategy is to give up
4431     // after a certain number of abortable precleaning loops
4432     // or after a certain maximum time. We want to make
4433     // this smarter in the next iteration.
4434     // XXX FIX ME!!! YSR
4435     size_t loops = 0, workdone = 0, cumworkdone = 0, waited = 0;
4436     while (!(should_abort_preclean() ||
4437              ConcurrentMarkSweepThread::should_terminate())) {
4438       workdone = preclean_work(CMSPrecleanRefLists2, CMSPrecleanSurvivors2);
4439       cumworkdone += workdone;
4440       loops++;
4441       // Voluntarily terminate abortable preclean phase if we have
4442       // been at it for too long.
4443       if ((CMSMaxAbortablePrecleanLoops != 0) &&
4444           loops >= CMSMaxAbortablePrecleanLoops) {
4445         if (PrintGCDetails) {
4446           gclog_or_tty->print(" CMS: abort preclean due to loops ");
4447         }
4448         break;
4449       }
4450       if (pa.wallclock_millis() > CMSMaxAbortablePrecleanTime) {
4451         if (PrintGCDetails) {
4452           gclog_or_tty->print(" CMS: abort preclean due to time ");
4453         }
4454         break;
4455       }
4456       // If we are doing little work each iteration, we should
4457       // take a short break.
4458       if (workdone < CMSAbortablePrecleanMinWorkPerIteration) {
4459         // Sleep for some time, waiting for work to accumulate
4460         stopTimer();
4461         cmsThread()->wait_on_cms_lock(CMSAbortablePrecleanWaitMillis);
4462         startTimer();
4463         waited++;
4464       }
4465     }
4466     if (PrintCMSStatistics > 0) {
4467       gclog_or_tty->print(" [%d iterations, %d waits, %d cards] ",
4468                           loops, waited, cumworkdone);
4469     }
4470   }
4471   CMSTokenSync x(true); // is cms thread
4472   if (_collectorState != Idling) {
4473     assert(_collectorState == AbortablePreclean,
4474            "Spontaneous state transition?");
4475     _collectorState = FinalMarking;
4476   } // Else, a foreground collection completed this CMS cycle.
4477   return;
4478 }
4479 
4480 // Respond to an Eden sampling opportunity
4481 void CMSCollector::sample_eden() {
4482   // Make sure a young gc cannot sneak in between our
4483   // reading and recording of a sample.
4484   assert(Thread::current()->is_ConcurrentGC_thread(),
4485          "Only the cms thread may collect Eden samples");
4486   assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
4487          "Should collect samples while holding CMS token");
4488   if (!_start_sampling) {
4489     return;
4490   }
4491   if (_eden_chunk_array) {
4492     if (_eden_chunk_index < _eden_chunk_capacity) {
4493       _eden_chunk_array[_eden_chunk_index] = *_top_addr;   // take sample
4494       assert(_eden_chunk_array[_eden_chunk_index] <= *_end_addr,
4495              "Unexpected state of Eden");
4496       // We'd like to check that what we just sampled is an oop-start address;
4497       // however, we cannot do that here since the object may not yet have been
4498       // initialized. So we'll instead do the check when we _use_ this sample
4499       // later.
4500       if (_eden_chunk_index == 0 ||
4501           (pointer_delta(_eden_chunk_array[_eden_chunk_index],
4502                          _eden_chunk_array[_eden_chunk_index-1])
4503            >= CMSSamplingGrain)) {
4504         _eden_chunk_index++;  // commit sample
4505       }
4506     }
4507   }
4508   if ((_collectorState == AbortablePreclean) && !_abort_preclean) {
4509     size_t used = get_eden_used();
4510     size_t capacity = get_eden_capacity();
4511     assert(used <= capacity, "Unexpected state of Eden");
4512     if (used >  (capacity/100 * CMSScheduleRemarkEdenPenetration)) {
4513       _abort_preclean = true;
4514     }
4515   }
4516 }
4517 
4518 
4519 size_t CMSCollector::preclean_work(bool clean_refs, bool clean_survivor) {
4520   assert(_collectorState == Precleaning ||
4521          _collectorState == AbortablePreclean, "incorrect state");
4522   ResourceMark rm;
4523   HandleMark   hm;
4524 
4525   // Precleaning is currently not MT but the reference processor
4526   // may be set for MT.  Disable it temporarily here.
4527   ReferenceProcessor* rp = ref_processor();
4528   ReferenceProcessorMTDiscoveryMutator rp_mut_discovery(rp, false);
4529 
4530   // Do one pass of scrubbing the discovered reference lists
4531   // to remove any reference objects with strongly-reachable
4532   // referents.
4533   if (clean_refs) {
4534     CMSPrecleanRefsYieldClosure yield_cl(this);
4535     assert(rp->span().equals(_span), "Spans should be equal");
4536     CMSKeepAliveClosure keep_alive(this, _span, &_markBitMap,
4537                                    &_markStack, &_revisitStack,
4538                                    true /* preclean */);
4539     CMSDrainMarkingStackClosure complete_trace(this,
4540                                    _span, &_markBitMap, &_markStack,
4541                                    &keep_alive, true /* preclean */);
4542 
4543     // We don't want this step to interfere with a young
4544     // collection because we don't want to take CPU
4545     // or memory bandwidth away from the young GC threads
4546     // (which may be as many as there are CPUs).
4547     // Note that we don't need to protect ourselves from
4548     // interference with mutators because they can't
4549     // manipulate the discovered reference lists nor affect
4550     // the computed reachability of the referents, the
4551     // only properties manipulated by the precleaning
4552     // of these reference lists.
4553     stopTimer();
4554     CMSTokenSyncWithLocks x(true /* is cms thread */,
4555                             bitMapLock());
4556     startTimer();
4557     sample_eden();
4558 
4559     // The following will yield to allow foreground
4560     // collection to proceed promptly. XXX YSR:
4561     // The code in this method may need further
4562     // tweaking for better performance and some restructuring
4563     // for cleaner interfaces.
4564     GCTimer *gc_timer = NULL; // Currently not tracing concurrent phases
4565     rp->preclean_discovered_references(
4566           rp->is_alive_non_header(), &keep_alive, &complete_trace,
4567           &yield_cl, should_unload_classes(), gc_timer);
4568   }
4569 
4570   if (clean_survivor) {  // preclean the active survivor space(s)
4571     assert(_young_gen->kind() == Generation::DefNew ||
4572            _young_gen->kind() == Generation::ParNew ||
4573            _young_gen->kind() == Generation::ASParNew,
4574          "incorrect type for cast");
4575     DefNewGeneration* dng = (DefNewGeneration*)_young_gen;
4576     PushAndMarkClosure pam_cl(this, _span, ref_processor(),
4577                              &_markBitMap, &_modUnionTable,
4578                              &_markStack, &_revisitStack,
4579                              true /* precleaning phase */);
4580     stopTimer();
4581     CMSTokenSyncWithLocks ts(true /* is cms thread */,
4582                              bitMapLock());
4583     startTimer();
4584     unsigned int before_count =
4585       GenCollectedHeap::heap()->total_collections();
4586     SurvivorSpacePrecleanClosure
4587       sss_cl(this, _span, &_markBitMap, &_markStack,
4588              &pam_cl, before_count, CMSYield);
4589     DEBUG_ONLY(RememberKlassesChecker mx(should_unload_classes());)
4590     dng->from()->object_iterate_careful(&sss_cl);
4591     dng->to()->object_iterate_careful(&sss_cl);
4592   }
4593   MarkRefsIntoAndScanClosure
4594     mrias_cl(_span, ref_processor(), &_markBitMap, &_modUnionTable,
4595              &_markStack, &_revisitStack, this, CMSYield,
4596              true /* precleaning phase */);
4597   // CAUTION: The following closure has persistent state that may need to
4598   // be reset upon a decrease in the sequence of addresses it
4599   // processes.
4600   ScanMarkedObjectsAgainCarefullyClosure
4601     smoac_cl(this, _span,
4602       &_markBitMap, &_markStack, &_revisitStack, &mrias_cl, CMSYield);
4603 
4604   // Preclean dirty cards in ModUnionTable and CardTable using
4605   // appropriate convergence criterion;
4606   // repeat CMSPrecleanIter times unless we find that
4607   // we are losing.
4608   assert(CMSPrecleanIter < 10, "CMSPrecleanIter is too large");
4609   assert(CMSPrecleanNumerator < CMSPrecleanDenominator,
4610          "Bad convergence multiplier");
4611   assert(CMSPrecleanThreshold >= 100,
4612          "Unreasonably low CMSPrecleanThreshold");
4613 
4614   size_t numIter, cumNumCards, lastNumCards, curNumCards;
4615   for (numIter = 0, cumNumCards = lastNumCards = curNumCards = 0;
4616        numIter < CMSPrecleanIter;
4617        numIter++, lastNumCards = curNumCards, cumNumCards += curNumCards) {
4618     curNumCards  = preclean_mod_union_table(_cmsGen, &smoac_cl);
4619     if (CMSPermGenPrecleaningEnabled) {
4620       curNumCards  += preclean_mod_union_table(_permGen, &smoac_cl);
4621     }
4622     if (Verbose && PrintGCDetails) {
4623       gclog_or_tty->print(" (modUnionTable: %d cards)", curNumCards);
4624     }
4625     // Either there are very few dirty cards, so re-mark
4626     // pause will be small anyway, or our pre-cleaning isn't
4627     // that much faster than the rate at which cards are being
4628     // dirtied, so we might as well stop and re-mark since
4629     // precleaning won't improve our re-mark time by much.
4630     if (curNumCards <= CMSPrecleanThreshold ||
4631         (numIter > 0 &&
4632          (curNumCards * CMSPrecleanDenominator >
4633          lastNumCards * CMSPrecleanNumerator))) {
4634       numIter++;
4635       cumNumCards += curNumCards;
4636       break;
4637     }
4638   }
4639   curNumCards = preclean_card_table(_cmsGen, &smoac_cl);
4640   if (CMSPermGenPrecleaningEnabled) {
4641     curNumCards += preclean_card_table(_permGen, &smoac_cl);
4642   }
4643   cumNumCards += curNumCards;
4644   if (PrintGCDetails && PrintCMSStatistics != 0) {
4645     gclog_or_tty->print_cr(" (cardTable: %d cards, re-scanned %d cards, %d iterations)",
4646                   curNumCards, cumNumCards, numIter);
4647   }
4648   return cumNumCards;   // as a measure of useful work done
4649 }
4650 
4651 // PRECLEANING NOTES:
4652 // Precleaning involves:
4653 // . reading the bits of the modUnionTable and clearing the set bits.
4654 // . For the cards corresponding to the set bits, we scan the
4655 //   objects on those cards. This means we need the free_list_lock
4656 //   so that we can safely iterate over the CMS space when scanning
4657 //   for oops.
4658 // . When we scan the objects, we'll be both reading and setting
4659 //   marks in the marking bit map, so we'll need the marking bit map.
4660 // . For protecting _collector_state transitions, we take the CGC_lock.
4661 //   Note that any races in the reading of card table entries by the
4662 //   CMS thread on the one hand and the clearing of those entries by the
4663 //   VM thread or the setting of those entries by the mutator threads on the
4664 //   other are quite benign. However, for efficiency it makes sense to keep
4665 //   the VM thread from racing with the CMS thread while the latter is
4666 //   transferring dirty card info to the modUnionTable. We therefore also use
4667 //   the CGC_lock to protect the reading of the card table and the mod union
4668 //   table by the CMS thread.
4669 // . We run concurrently with mutator updates, so scanning
4670 //   needs to be done carefully  -- we should not try to scan
4671 //   potentially uninitialized objects.
4672 //
4673 // Locking strategy: While holding the CGC_lock, we scan over and
4674 // reset a maximal dirty range of the mod union / card tables, then lock
4675 // the free_list_lock and bitmap lock to do a full marking, then
4676 // release these locks; and repeat the cycle. This allows for a
4677 // certain amount of fairness in the sharing of these locks between
4678 // the CMS collector on the one hand, and the VM thread and the
4679 // mutators on the other.
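     //
     // As an illustrative (uncompiled) sketch, one iteration of this cycle
     // in preclean_mod_union_table() below has roughly the following shape:
     //
     //   { CMSTokenSync ts(true);  // CGC_lock only
     //     dirty = _modUnionTable.getAndClearMarkedRegion(nextAddr, endAddr);
     //   }
     //   { CMSTokenSyncWithLocks ts(true, freelistLock, bitMapLock);
     //     space->object_iterate_careful_m(dirty, cl);  // full marking
     //   }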
4680 
4681 // NOTE: preclean_mod_union_table() and preclean_card_table()
4682 // further below are largely identical; if you need to modify
4683 // one of these methods, please check the other method too.
4684 
4685 size_t CMSCollector::preclean_mod_union_table(
4686   ConcurrentMarkSweepGeneration* gen,
4687   ScanMarkedObjectsAgainCarefullyClosure* cl) {
4688   verify_work_stacks_empty();
4689   verify_overflow_empty();
4690 
4691   // Turn off checking for this method but turn it back on
4692   // selectively.  There are yield points in this method
4693   // but it is difficult to turn the checking off just around
4694   // the yield points.  It is simpler to selectively turn
4695   // it on.
4696   DEBUG_ONLY(RememberKlassesChecker mux(false);)
4697 
4698   // strategy: starting with the first card, accumulate contiguous
4699   // ranges of dirty cards; clear these cards, then scan the region
4700   // covered by these cards.
4701 
4702   // Since all of the MUT is committed ahead, we can just use
4703   // that, in case the generations expand while we are precleaning.
4704   // It might also be fine to just use the committed part of the
4705   // generation, but we might potentially miss cards when the
4706   // generation is rapidly expanding while we are in the midst
4707   // of precleaning.
4708   HeapWord* startAddr = gen->reserved().start();
4709   HeapWord* endAddr   = gen->reserved().end();
4710 
4711   cl->setFreelistLock(gen->freelistLock());   // needed for yielding
4712 
4713   size_t numDirtyCards, cumNumDirtyCards;
4714   HeapWord *nextAddr, *lastAddr;
4715   for (cumNumDirtyCards = numDirtyCards = 0,
4716        nextAddr = lastAddr = startAddr;
4717        nextAddr < endAddr;
4718        nextAddr = lastAddr, cumNumDirtyCards += numDirtyCards) {
4719 
4720     ResourceMark rm;
4721     HandleMark   hm;
4722 
4723     MemRegion dirtyRegion;
4724     {
4725       stopTimer();
4726       // Potential yield point
4727       CMSTokenSync ts(true);
4728       startTimer();
4729       sample_eden();
4730       // Get dirty region starting at nextAddr (inclusive),
4731       // simultaneously clearing it.
4732       dirtyRegion =
4733         _modUnionTable.getAndClearMarkedRegion(nextAddr, endAddr);
4734       assert(dirtyRegion.start() >= nextAddr,
4735              "returned region inconsistent?");
4736     }
4737     // Remember where the next search should begin.
4738     // The returned region (if non-empty) is a right open interval,
4739     // so lastAddr is obtained from the right end of that
4740     // interval.
4741     lastAddr = dirtyRegion.end();
4742     // Should do something more transparent and less hacky XXX
4743     numDirtyCards =
4744       _modUnionTable.heapWordDiffToOffsetDiff(dirtyRegion.word_size());
4745 
4746     // We'll scan the cards in the dirty region (with periodic
4747     // yields for foreground GC as needed).
4748     if (!dirtyRegion.is_empty()) {
4749       assert(numDirtyCards > 0, "consistency check");
4750       HeapWord* stop_point = NULL;
4751       stopTimer();
4752       // Potential yield point
4753       CMSTokenSyncWithLocks ts(true, gen->freelistLock(),
4754                                bitMapLock());
4755       startTimer();
4756       {
4757         verify_work_stacks_empty();
4758         verify_overflow_empty();
4759         sample_eden();
4760         DEBUG_ONLY(RememberKlassesChecker mx(should_unload_classes());)
4761         stop_point =
4762           gen->cmsSpace()->object_iterate_careful_m(dirtyRegion, cl);
4763       }
4764       if (stop_point != NULL) {
4765         // The careful iteration stopped early either because it found an
4766         // uninitialized object, or because we were in the midst of an
4767         // "abortable preclean", which should now be aborted. Redirty
4768         // the bits corresponding to the partially-scanned or unscanned
4769         // cards. We'll either restart at the next block boundary or
4770         // abort the preclean.
4771         assert((CMSPermGenPrecleaningEnabled && (gen == _permGen)) ||
4772                (_collectorState == AbortablePreclean && should_abort_preclean()),
4773                "Unparsable objects should only be in perm gen.");
4774         _modUnionTable.mark_range(MemRegion(stop_point, dirtyRegion.end()));
4775         if (should_abort_preclean()) {
4776           break; // out of preclean loop
4777         } else {
4778           // Compute the next address at which preclean should pick up;
4779           // might need bitMapLock in order to read P-bits.
4780           lastAddr = next_card_start_after_block(stop_point);
4781         }
4782       }
4783     } else {
4784       assert(lastAddr == endAddr, "consistency check");
4785       assert(numDirtyCards == 0, "consistency check");
4786       break;
4787     }
4788   }
4789   verify_work_stacks_empty();
4790   verify_overflow_empty();
4791   return cumNumDirtyCards;
4792 }
4793 
4794 // NOTE: preclean_mod_union_table() above and preclean_card_table()
4795 // below are largely identical; if you need to modify
4796 // one of these methods, please check the other method too.
4797 
4798 size_t CMSCollector::preclean_card_table(ConcurrentMarkSweepGeneration* gen,
4799   ScanMarkedObjectsAgainCarefullyClosure* cl) {
4800   // strategy: it's similar to preclean_mod_union_table() above, in that
4801   // we accumulate contiguous ranges of dirty cards, mark these cards
4802   // precleaned, then scan the region covered by these cards.
4803   HeapWord* endAddr   = (HeapWord*)(gen->_virtual_space.high());
4804   HeapWord* startAddr = (HeapWord*)(gen->_virtual_space.low());
4805 
4806   cl->setFreelistLock(gen->freelistLock());   // needed for yielding
4807 
4808   size_t numDirtyCards, cumNumDirtyCards;
4809   HeapWord *lastAddr, *nextAddr;
4810 
4811   for (cumNumDirtyCards = numDirtyCards = 0,
4812        nextAddr = lastAddr = startAddr;
4813        nextAddr < endAddr;
4814        nextAddr = lastAddr, cumNumDirtyCards += numDirtyCards) {
4815 
4816     ResourceMark rm;
4817     HandleMark   hm;
4818 
4819     MemRegion dirtyRegion;
4820     {
4821       // See comments in "Precleaning notes" above on why we
4822       // do this locking. XXX Could the locking overheads be
4823       // too high when dirty cards are sparse? [I don't think so.]
4824       stopTimer();
4825       CMSTokenSync x(true); // is cms thread
4826       startTimer();
4827       sample_eden();
4828       // Get and clear dirty region from card table
4829       dirtyRegion = _ct->ct_bs()->dirty_card_range_after_reset(
4830                                     MemRegion(nextAddr, endAddr),
4831                                     true,
4832                                     CardTableModRefBS::precleaned_card_val());
4833 
4834       assert(dirtyRegion.start() >= nextAddr,
4835              "returned region inconsistent?");
4836     }
4837     lastAddr = dirtyRegion.end();
4838     numDirtyCards =
4839       dirtyRegion.word_size()/CardTableModRefBS::card_size_in_words;
4840 
4841     if (!dirtyRegion.is_empty()) {
4842       stopTimer();
4843       CMSTokenSyncWithLocks ts(true, gen->freelistLock(), bitMapLock());
4844       startTimer();
4845       sample_eden();
4846       verify_work_stacks_empty();
4847       verify_overflow_empty();
4848       DEBUG_ONLY(RememberKlassesChecker mx(should_unload_classes());)
4849       HeapWord* stop_point =
4850         gen->cmsSpace()->object_iterate_careful_m(dirtyRegion, cl);
4851       if (stop_point != NULL) {
4852         // The careful iteration stopped early because it found an
4853         // uninitialized object.  Redirty the bits corresponding to the
4854         // partially-scanned or unscanned cards, and start again at the
4855         // next block boundary.
4856         assert(CMSPermGenPrecleaningEnabled ||
4857                (_collectorState == AbortablePreclean && should_abort_preclean()),
4858                "Unparsable objects should only be in perm gen.");
4859         _ct->ct_bs()->invalidate(MemRegion(stop_point, dirtyRegion.end()));
4860         if (should_abort_preclean()) {
4861           break; // out of preclean loop
4862         } else {
4863           // Compute the next address at which preclean should pick up.
4864           lastAddr = next_card_start_after_block(stop_point);
4865         }
4866       }
4867     } else {
4868       break;
4869     }
4870   }
4871   verify_work_stacks_empty();
4872   verify_overflow_empty();
4873   return cumNumDirtyCards;
4874 }
4875 
4876 void CMSCollector::checkpointRootsFinal(bool asynch,
4877   bool clear_all_soft_refs, bool init_mark_was_synchronous) {
4878   assert(_collectorState == FinalMarking, "incorrect state transition?");
4879   check_correct_thread_executing();
4880   // world is stopped at this checkpoint
4881   assert(SafepointSynchronize::is_at_safepoint(),
4882          "world should be stopped");
4883   TraceCMSMemoryManagerStats tms(_collectorState,GenCollectedHeap::heap()->gc_cause());
4884 
4885   verify_work_stacks_empty();
4886   verify_overflow_empty();
4887 
4888   SpecializationStats::clear();
4889   if (PrintGCDetails) {
4890     gclog_or_tty->print("[YG occupancy: "SIZE_FORMAT" K ("SIZE_FORMAT" K)]",
4891                         _young_gen->used() / K,
4892                         _young_gen->capacity() / K);
4893   }
4894   if (asynch) {
4895     if (CMSScavengeBeforeRemark) {
4896       GenCollectedHeap* gch = GenCollectedHeap::heap();
4897       // Temporarily set flag to false, GCH->do_collection will
4898       // expect it to be false and set to true
4899       FlagSetting fl(gch->_is_gc_active, false);
4900       NOT_PRODUCT(GCTraceTime t("Scavenge-Before-Remark",
4901         PrintGCDetails && Verbose, true, _gc_timer_cm);)
4902       int level = _cmsGen->level() - 1;
4903       if (level >= 0) {
4904         gch->do_collection(true,        // full (i.e. force, see below)
4905                            false,       // !clear_all_soft_refs
4906                            0,           // size
4907                            false,       // is_tlab
4908                            level        // max_level
4909                           );
4910       }
4911     }
4912     FreelistLocker x(this);
4913     MutexLockerEx y(bitMapLock(),
4914                     Mutex::_no_safepoint_check_flag);
4915     assert(!init_mark_was_synchronous, "but that's impossible!");
4916     checkpointRootsFinalWork(asynch, clear_all_soft_refs, false);
4917   } else {
4918     // already have all the locks
4919     checkpointRootsFinalWork(asynch, clear_all_soft_refs,
4920                              init_mark_was_synchronous);
4921   }
4922   verify_work_stacks_empty();
4923   verify_overflow_empty();
4924   SpecializationStats::print();
4925 }
4926 
4927 void CMSCollector::checkpointRootsFinalWork(bool asynch,
4928   bool clear_all_soft_refs, bool init_mark_was_synchronous) {
4929 
4930   NOT_PRODUCT(GCTraceTime tr("checkpointRootsFinalWork", PrintGCDetails, false, _gc_timer_cm);)
4931 
4932   assert(haveFreelistLocks(), "must have free list locks");
4933   assert_lock_strong(bitMapLock());
4934 
4935   if (UseAdaptiveSizePolicy) {
4936     size_policy()->checkpoint_roots_final_begin();
4937   }
4938 
4939   ResourceMark rm;
4940   HandleMark   hm;
4941 
4942   GenCollectedHeap* gch = GenCollectedHeap::heap();
4943 
4944   if (should_unload_classes()) {
4945     CodeCache::gc_prologue();
4946   }
4947   assert(haveFreelistLocks(), "must have free list locks");
4948   assert_lock_strong(bitMapLock());
4949 
4950   DEBUG_ONLY(RememberKlassesChecker fmx(should_unload_classes());)
4951   if (!init_mark_was_synchronous) {
4952     // We might assume that we need not fill TLAB's when
4953     // CMSScavengeBeforeRemark is set, because we may have just done
4954     // a scavenge which would have filled all TLAB's -- and besides
4955     // Eden would be empty. This however may not always be the case --
4956     // for instance although we asked for a scavenge, it may not have
4957     // happened because of a JNI critical section. We probably need
4958     // a policy for deciding whether we can in that case wait until
4959     // the critical section releases and then do the remark following
4960     // the scavenge, and skip it here. In the absence of that policy,
4961     // or of an indication of whether the scavenge did indeed occur,
4962     // we cannot rely on TLAB's having been filled and must do
4963     // so here just in case a scavenge did not happen.
4964     gch->ensure_parsability(false);  // fill TLAB's, but no need to retire them
4965     // Update the saved marks which may affect the root scans.
4966     gch->save_marks();
4967 
4968     {
4969       COMPILER2_PRESENT(DerivedPointerTableDeactivate dpt_deact;)
4970 
4971       // Note on the role of the mod union table:
4972       // Since the marker in "markFromRoots" marks concurrently with
4973       // mutators, it is possible for some reachable objects not to have been
4974       // scanned. For instance, an only reference to an object A was
4975       // placed in object B after the marker scanned B. Unless B is rescanned,
4976       // A would be collected. Such updates to references in marked objects
4977       // are detected via the mod union table which is the set of all cards
4978       // dirtied since the first checkpoint in this GC cycle and prior to
4979       // the most recent young generation GC, minus those cleaned up by the
4980       // concurrent precleaning.
4981       if (CMSParallelRemarkEnabled && CollectedHeap::use_parallel_gc_threads()) {
4982         GCTraceTime t("Rescan (parallel) ", PrintGCDetails, false, _gc_timer_cm);
4983         do_remark_parallel();
4984       } else {
4985         GCTraceTime t("Rescan (non-parallel) ", PrintGCDetails, false,
4986                     _gc_timer_cm);
4987         do_remark_non_parallel();
4988       }
4989     }
4990   } else {
4991     assert(!asynch, "Can't have init_mark_was_synchronous in asynch mode");
4992     // The initial mark was stop-world, so there's no rescanning to
4993     // do; go straight on to the next step below.
4994   }
4995   verify_work_stacks_empty();
4996   verify_overflow_empty();
4997 
4998   {
4999     NOT_PRODUCT(GCTraceTime ts("refProcessingWork", PrintGCDetails, false, _gc_timer_cm);)
5000     refProcessingWork(asynch, clear_all_soft_refs);
5001   }
5002   verify_work_stacks_empty();
5003   verify_overflow_empty();
5004 
5005   if (should_unload_classes()) {
5006     CodeCache::gc_epilogue();
5007   }
5008   JvmtiExport::gc_epilogue();
5009 
5010   // If we encountered any (marking stack / work queue) overflow
5011   // events during the current CMS cycle, take appropriate
5012   // remedial measures, where possible, so as to try and avoid
5013   // recurrence of that condition.
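       // (At present the only such measure taken here is expanding the
       // shared mark stack, below; the per-thread work queues are fixed in
       // size, so their overflow events are merely counted and reported.)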
5014   assert(_markStack.isEmpty(), "No grey objects");
5015   size_t ser_ovflw = _ser_pmc_remark_ovflw + _ser_pmc_preclean_ovflw +
5016                      _ser_kac_ovflw        + _ser_kac_preclean_ovflw;
5017   if (ser_ovflw > 0) {
5018     if (PrintCMSStatistics != 0) {
5019       gclog_or_tty->print_cr("Marking stack overflow (benign) "
5020         "(pmc_pc="SIZE_FORMAT", pmc_rm="SIZE_FORMAT", kac="SIZE_FORMAT
5021         ", kac_preclean="SIZE_FORMAT")",
5022         _ser_pmc_preclean_ovflw, _ser_pmc_remark_ovflw,
5023         _ser_kac_ovflw, _ser_kac_preclean_ovflw);
5024     }
5025     _markStack.expand();
5026     _ser_pmc_remark_ovflw = 0;
5027     _ser_pmc_preclean_ovflw = 0;
5028     _ser_kac_preclean_ovflw = 0;
5029     _ser_kac_ovflw = 0;
5030   }
5031   if (_par_pmc_remark_ovflw > 0 || _par_kac_ovflw > 0) {
5032     if (PrintCMSStatistics != 0) {
5033       gclog_or_tty->print_cr("Work queue overflow (benign) "
5034         "(pmc_rm="SIZE_FORMAT", kac="SIZE_FORMAT")",
5035         _par_pmc_remark_ovflw, _par_kac_ovflw);
5036     }
5037     _par_pmc_remark_ovflw = 0;
5038     _par_kac_ovflw = 0;
5039   }
5040   if (PrintCMSStatistics != 0) {
5041      if (_markStack._hit_limit > 0) {
5042        gclog_or_tty->print_cr(" (benign) Hit max stack size limit ("SIZE_FORMAT")",
5043                               _markStack._hit_limit);
5044      }
5045      if (_markStack._failed_double > 0) {
5046        gclog_or_tty->print_cr(" (benign) Failed stack doubling ("SIZE_FORMAT"),"
5047                               " current capacity "SIZE_FORMAT,
5048                               _markStack._failed_double,
5049                               _markStack.capacity());
5050      }
5051   }
5052   _markStack._hit_limit = 0;
5053   _markStack._failed_double = 0;
5054 
5055   // Check that all the klasses have been revisited
5056   assert(_revisitStack.isEmpty(), "Not all klasses revisited");
5057 
5058   if ((VerifyAfterGC || VerifyDuringGC) &&
5059       GenCollectedHeap::heap()->total_collections() >= VerifyGCStartAt) {
5060     verify_after_remark();
5061   }
5062 
5063   // Change under the freelistLocks.
5064   _collectorState = Sweeping;
5065   // Call isAllClear() under bitMapLock
5066   assert(_modUnionTable.isAllClear(), "Should be clear by end of the"
5067     " final marking");
5068   if (UseAdaptiveSizePolicy) {
5069     size_policy()->checkpoint_roots_final_end(gch->gc_cause());
5070   }
5071 }
5072 
5073 // Parallel remark task
5074 class CMSParRemarkTask: public AbstractGangTask {
5075   CMSCollector* _collector;
5076   int           _n_workers;
5077   CompactibleFreeListSpace* _cms_space;
5078   CompactibleFreeListSpace* _perm_space;
5079 
5080   // The per-thread work queues, available here for stealing.
5081   OopTaskQueueSet*       _task_queues;
5082   ParallelTaskTerminator _term;
5083 
5084  public:
5085   // A value of 0 passed to n_workers will cause the number of
5086   // workers to be taken from the active workers in the work gang.
5087   CMSParRemarkTask(CMSCollector* collector,
5088                    CompactibleFreeListSpace* cms_space,
5089                    CompactibleFreeListSpace* perm_space,
5090                    int n_workers, FlexibleWorkGang* workers,
5091                    OopTaskQueueSet* task_queues):
5092     AbstractGangTask("Rescan roots and grey objects in parallel"),
5093     _collector(collector),
5094     _cms_space(cms_space), _perm_space(perm_space),
5095     _n_workers(n_workers),
5096     _task_queues(task_queues),
5097     _term(n_workers, task_queues) { }
5098 
5099   OopTaskQueueSet* task_queues() { return _task_queues; }
5100 
5101   OopTaskQueue* work_queue(int i) { return task_queues()->queue(i); }
5102 
5103   ParallelTaskTerminator* terminator() { return &_term; }
5104   int n_workers() { return _n_workers; }
5105 
5106   void work(uint worker_id);
5107 
5108  private:
5109   // Work method in support of parallel rescan ... of young gen spaces
5110   void do_young_space_rescan(int i, Par_MarkRefsIntoAndScanClosure* cl,
5111                              ContiguousSpace* space,
5112                              HeapWord** chunk_array, size_t chunk_top);
5113 
5114   // ... of dirty cards in old space
5115   void do_dirty_card_rescan_tasks(CompactibleFreeListSpace* sp, int i,
5116                                   Par_MarkRefsIntoAndScanClosure* cl);
5117 
5118   // ... work stealing for the above
5119   void do_work_steal(int i, Par_MarkRefsIntoAndScanClosure* cl, int* seed);
5120 };
5121 
5122 // work_queue(i) is passed to the closure
5123 // Par_MarkRefsIntoAndScanClosure.  The "i" parameter
5124 // is also passed to do_dirty_card_rescan_tasks() and to
5125 // do_work_steal() to select the i-th task_queue.
5126 
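     // The work method below proceeds in four timed phases: rescan of
     // the young gen spaces, rescan of the remaining (strong) roots,
     // rescan of dirty cards in the CMS and perm spaces, and finally
     // work stealing to drain any leftover grey objects.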
5127 void CMSParRemarkTask::work(uint worker_id) {
5128   elapsedTimer _timer;
5129   ResourceMark rm;
5130   HandleMark   hm;
5131 
5132   // ---------- rescan from roots --------------
5133   _timer.start();
5134   GenCollectedHeap* gch = GenCollectedHeap::heap();
5135   Par_MarkRefsIntoAndScanClosure par_mrias_cl(_collector,
5136     _collector->_span, _collector->ref_processor(),
5137     &(_collector->_markBitMap),
5138     work_queue(worker_id), &(_collector->_revisitStack));
5139 
5140   // Rescan young gen roots first since these are likely
5141   // coarsely partitioned and may, on that account, constitute
5142   // the critical path; thus, it's best to start off that
5143   // work first.
5144   // ---------- young gen roots --------------
5145   {
5146     DefNewGeneration* dng = _collector->_young_gen->as_DefNewGeneration();
5147     EdenSpace* eden_space = dng->eden();
5148     ContiguousSpace* from_space = dng->from();
5149     ContiguousSpace* to_space   = dng->to();
5150 
5151     HeapWord** eca = _collector->_eden_chunk_array;
5152     size_t     ect = _collector->_eden_chunk_index;
5153     HeapWord** sca = _collector->_survivor_chunk_array;
5154     size_t     sct = _collector->_survivor_chunk_index;
5155 
5156     assert(ect <= _collector->_eden_chunk_capacity, "out of bounds");
5157     assert(sct <= _collector->_survivor_chunk_capacity, "out of bounds");
5158 
5159     do_young_space_rescan(worker_id, &par_mrias_cl, to_space, NULL, 0);
5160     do_young_space_rescan(worker_id, &par_mrias_cl, from_space, sca, sct);
5161     do_young_space_rescan(worker_id, &par_mrias_cl, eden_space, eca, ect);
5162 
5163     _timer.stop();
5164     if (PrintCMSStatistics != 0) {
5165       gclog_or_tty->print_cr(
5166         "Finished young gen rescan work in %dth thread: %3.3f sec",
5167         worker_id, _timer.seconds());
5168     }
5169   }
5170 
5171   // ---------- remaining roots --------------
5172   _timer.reset();
5173   _timer.start();
5174   gch->gen_process_strong_roots(_collector->_cmsGen->level(),
5175                                 false,     // yg was scanned above
5176                                 false,     // this is parallel code
5177                                 true,      // collecting perm gen
5178                                 SharedHeap::ScanningOption(_collector->CMSCollector::roots_scanning_options()),
5179                                 &par_mrias_cl,
5180                                 true,   // walk all of code cache if (so & SO_CodeCache)
5181                                 NULL);
5182   assert(_collector->should_unload_classes()
5183          || (_collector->CMSCollector::roots_scanning_options() & SharedHeap::SO_CodeCache),
5184          "if we didn't scan the code cache, we have to be ready to drop nmethods with expired weak oops");
5185   _timer.stop();
5186   if (PrintCMSStatistics != 0) {
5187     gclog_or_tty->print_cr(
5188       "Finished remaining root rescan work in %dth thread: %3.3f sec",
5189       worker_id, _timer.seconds());
5190   }
5191 
5192   // ---------- rescan dirty cards ------------
5193   _timer.reset();
5194   _timer.start();
5195 
5196   // Do the rescan tasks for each of the two spaces
5197   // (cms_space and perm_space) in turn.
5198   // "worker_id" is passed to select the task_queue for "worker_id"
5199   do_dirty_card_rescan_tasks(_cms_space, worker_id, &par_mrias_cl);
5200   do_dirty_card_rescan_tasks(_perm_space, worker_id, &par_mrias_cl);
5201   _timer.stop();
5202   if (PrintCMSStatistics != 0) {
5203     gclog_or_tty->print_cr(
5204       "Finished dirty card rescan work in %dth thread: %3.3f sec",
5205       worker_id, _timer.seconds());
5206   }
5207 
5208   // ---------- steal work from other threads ...
5209   // ---------- ... and drain overflow list.
5210   _timer.reset();
5211   _timer.start();
5212   do_work_steal(worker_id, &par_mrias_cl, _collector->hash_seed(worker_id));
5213   _timer.stop();
5214   if (PrintCMSStatistics != 0) {
5215     gclog_or_tty->print_cr(
5216       "Finished work stealing in %dth thread: %3.3f sec",
5217       worker_id, _timer.seconds());
5218   }
5219 }
5220 
5221 // Note that parameter "i" is not used.
5222 void
5223 CMSParRemarkTask::do_young_space_rescan(int i,
5224   Par_MarkRefsIntoAndScanClosure* cl, ContiguousSpace* space,
5225   HeapWord** chunk_array, size_t chunk_top) {
5226   // Until all tasks completed:
5227   // . claim an unclaimed task
5228   // . compute region boundaries corresponding to task claimed
5229   //   using chunk_array
5230   // . par_oop_iterate(cl) over that region
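       //
       // Roughly, with chunk_array = {c0, c1, ..., c(k-1)}, k == chunk_top,
       // recording sampled object boundaries, the claimed tasks are:
       //
       //   task 0:             [bottom, c0)
       //   task j, 1 <= j < k: [c(j-1), cj)
       //   task k:             [c(k-1), top)
       //
       // i.e. k samples yield k+1 tasks, and a space with no samples
       // (chunk_top == 0) is scanned as a single task.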
5231 
5232   ResourceMark rm;
5233   HandleMark   hm;
5234 
5235   SequentialSubTasksDone* pst = space->par_seq_tasks();
5236   assert(pst->valid(), "Uninitialized use?");
5237 
5238   uint nth_task = 0;
5239   uint n_tasks  = pst->n_tasks();
5240 
5241   HeapWord *start, *end;
5242   while (!pst->is_task_claimed(/* reference */ nth_task)) {
5243     // We claimed task # nth_task; compute its boundaries.
5244     if (chunk_top == 0) {  // no samples were taken
5245       assert(nth_task == 0 && n_tasks == 1, "Can have only 1 task when no samples were taken");
5246       start = space->bottom();
5247       end   = space->top();
5248     } else if (nth_task == 0) {
5249       start = space->bottom();
5250       end   = chunk_array[nth_task];
5251     } else if (nth_task < (uint)chunk_top) {
5252       assert(nth_task >= 1, "Control point invariant");
5253       start = chunk_array[nth_task - 1];
5254       end   = chunk_array[nth_task];
5255     } else {
5256       assert(nth_task == (uint)chunk_top, "Control point invariant");
5257       start = chunk_array[chunk_top - 1];
5258       end   = space->top();
5259     }
5260     MemRegion mr(start, end);
5261     // Verify that mr is in space
5262     assert(mr.is_empty() || space->used_region().contains(mr),
5263            "Should be in space");
5264     // Verify that "start" is an object boundary
5265     assert(mr.is_empty() || oop(mr.start())->is_oop(),
5266            "Should be an oop");
5267     space->par_oop_iterate(mr, cl);
5268   }
5269   pst->all_tasks_completed();
5270 }
5271 
5272 void
5273 CMSParRemarkTask::do_dirty_card_rescan_tasks(
5274   CompactibleFreeListSpace* sp, int i,
5275   Par_MarkRefsIntoAndScanClosure* cl) {
5276   // Until all tasks completed:
5277   // . claim an unclaimed task
5278   // . compute region boundaries corresponding to task claimed
5279   // . transfer dirty bits ct->mut for that region
5280   // . apply rescanclosure to dirty mut bits for that region
5281 
5282   ResourceMark rm;
5283   HandleMark   hm;
5284 
5285   OopTaskQueue* work_q = work_queue(i);
5286   ModUnionClosure modUnionClosure(&(_collector->_modUnionTable));
5287   // CAUTION! CAUTION! CAUTION! CAUTION! CAUTION! CAUTION! CAUTION!
5288   // CAUTION: This closure has state that persists across calls to
5289   // the work method dirty_range_iterate_clear() in that it has
5290   // embedded in it a (subtype of) UpwardsObjectClosure. The
5291   // use of that state in the embedded UpwardsObjectClosure instance
5292   // assumes that the cards are always iterated (even if in parallel
5293   // by several threads) in monotonically increasing order per
5294   // thread. This is true of the implementation below, which picks
5295   // card ranges (chunks) in monotonically increasing order globally
5296   // and, a fortiori, in monotonically increasing order per thread
5297   // (the latter order being a subsequence of the former).
5298   // If the work code below is ever reorganized into a more chaotic
5299   // work-partitioning form than the current "sequential tasks"
5300   // paradigm, the use of that persistent state will have to be
5301   // revisited and modified appropriately. See also related
5302   // bug 4756801; work on it should examine this code to make
5303   // sure that the changes there do not run counter to the
5304   // assumptions made here that are necessary for correctness and
5305   // efficiency. Note also that this code might yield inefficient
5306   // behaviour in the case of very large objects that span one or
5307   // more work chunks. Such objects would potentially be scanned
5308   // several times redundantly. Work on 4756801 should try to
5309   // address that performance anomaly if at all possible. XXX
5310   MemRegion  full_span  = _collector->_span;
5311   CMSBitMap* bm    = &(_collector->_markBitMap);     // shared
5312   CMSMarkStack* rs = &(_collector->_revisitStack);   // shared
5313   MarkFromDirtyCardsClosure
5314     greyRescanClosure(_collector, full_span, // entire span of interest
5315                       sp, bm, work_q, rs, cl);
5316 
5317   SequentialSubTasksDone* pst = sp->conc_par_seq_tasks();
5318   assert(pst->valid(), "Uninitialized use?");
5319   uint nth_task = 0;
5320   const int alignment = CardTableModRefBS::card_size * BitsPerWord;
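       // One mod-union-table (MUT) word covers BitsPerWord cards, i.e.
       // card_size * BitsPerWord bytes of the heap; aligning chunk
       // boundaries to that granularity ensures that no two workers
       // ever set or clear bits in the same MUT word.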
5321   MemRegion span = sp->used_region();
5322   HeapWord* start_addr = span.start();
5323   HeapWord* end_addr = (HeapWord*)round_to((intptr_t)span.end(),
5324                                            alignment);
5325   const size_t chunk_size = sp->rescan_task_size(); // in HeapWord units
5326   assert((HeapWord*)round_to((intptr_t)start_addr, alignment) ==
5327          start_addr, "Check alignment");
5328   assert((size_t)round_to((intptr_t)chunk_size, alignment) ==
5329          chunk_size, "Check alignment");
5330 
5331   while (!pst->is_task_claimed(/* reference */ nth_task)) {
5332     // Having claimed the nth_task, compute the corresponding mem-region,
5333     // which is a fortiori aligned correctly (i.e. at a MUT boundary).
5334     // The alignment restriction ensures that we do not need any
5335     // synchronization with other gang-workers while setting or
5336     // clearing bits in this chunk of the MUT.
5337     MemRegion this_span = MemRegion(start_addr + nth_task*chunk_size,
5338                                     start_addr + (nth_task+1)*chunk_size);
5339     // The last chunk's end might be way beyond the end of the
5340     // used region. In that case pull back appropriately.
5341     if (this_span.end() > end_addr) {
5342       this_span.set_end(end_addr);
5343       assert(!this_span.is_empty(), "Program logic (calculation of n_tasks)");
5344     }
5345     // Iterate over the dirty cards covering this chunk, marking them
5346     // precleaned, and setting the corresponding bits in the mod union
5347     // table. Since we have been careful to partition at Card and MUT-word
5348     // boundaries no synchronization is needed between parallel threads.
5349     _collector->_ct->ct_bs()->dirty_card_iterate(this_span,
5350                                                  &modUnionClosure);
5351 
5352     // Having transferred these marks into the modUnionTable,
5353     // rescan the marked objects on the dirty cards in the modUnionTable.
5354     // Even if this is at a synchronous collection, the initial marking
5355     // may have been done during an asynchronous collection so there
5356     // may be dirty bits in the mod-union table.
5357     _collector->_modUnionTable.dirty_range_iterate_clear(
5358                   this_span, &greyRescanClosure);
5359     _collector->_modUnionTable.verifyNoOneBitsInRange(
5360                                  this_span.start(),
5361                                  this_span.end());
5362   }
5363   pst->all_tasks_completed();  // declare that this thread is done
5364 }
5365 
5366 // . see if we can share work_queues with ParNew? XXX
5367 void
5368 CMSParRemarkTask::do_work_steal(int i, Par_MarkRefsIntoAndScanClosure* cl,
5369                                 int* seed) {
5370   OopTaskQueue* work_q = work_queue(i);
5371   NOT_PRODUCT(int num_steals = 0;)
5372   oop obj_to_scan;
5373   CMSBitMap* bm = &(_collector->_markBitMap);
5374 
5375   while (true) {
5376     // Completely finish any left over work from (an) earlier round(s)
5377     cl->trim_queue(0);
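         // Refill heuristic for the computation below: take at most a
         // quarter of the queue's remaining capacity per refill, capped
         // by ParGCDesiredObjsFromOverflowList, presumably to leave
         // headroom for objects pushed during subsequent scanning.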
5378     size_t num_from_overflow_list = MIN2((size_t)(work_q->max_elems() - work_q->size())/4,
5379                                          (size_t)ParGCDesiredObjsFromOverflowList);
5380     // Now check if there's any work in the overflow list
5381     // Passing ParallelGCThreads as the third parameter, no_of_gc_threads,
5382     // only affects the number of attempts made to get work from the
5383     // overflow list and does not affect the number of workers.  Just
5384     // pass ParallelGCThreads so this behavior is unchanged.
5385     if (_collector->par_take_from_overflow_list(num_from_overflow_list,
5386                                                 work_q,
5387                                                 ParallelGCThreads)) {
5388       // found something in global overflow list;
5389       // not yet ready to go stealing work from others.
5390       // We'd like to assert(work_q->size() != 0, ...)
5391       // because we just took work from the overflow list,
5392       // but of course we can't since all of that could have
5393       // been already stolen from us.
5394       // "He giveth and He taketh away."
5395       continue;
5396     }
5397     // Verify that we have no work before we resort to stealing
5398     assert(work_q->size() == 0, "Have work, shouldn't steal");
5399     // Try to steal from other queues that have work
5400     if (task_queues()->steal(i, seed, /* reference */ obj_to_scan)) {
5401       NOT_PRODUCT(num_steals++;)
5402       assert(obj_to_scan->is_oop(), "Oops, not an oop!");
5403       assert(bm->isMarked((HeapWord*)obj_to_scan), "Stole an unmarked oop?");
5404       // Do scanning work
5405       obj_to_scan->oop_iterate(cl);
5406       // Loop around, finish this work, and try to steal some more
5407     } else if (terminator()->offer_termination()) {
5408       break;  // nirvana from the infinite cycle
5409     }
5410   }
5411   NOT_PRODUCT(
5412     if (PrintCMSStatistics != 0) {
5413       gclog_or_tty->print("\n\t(%d: stole %d oops)", i, num_steals);
5414     }
5415   )
5416   assert(work_q->size() == 0 && _collector->overflow_list_is_empty(),
5417          "Else our work is not yet done");
5418 }
5419 
5420 // Return a thread-local PLAB recording array, as appropriate.
5421 void* CMSCollector::get_data_recorder(int thr_num) {
5422   if (_survivor_plab_array != NULL &&
5423       (CMSPLABRecordAlways ||
5424        (_collectorState > Marking && _collectorState < FinalMarking))) {
5425     assert(thr_num < (int)ParallelGCThreads, "thr_num is out of bounds");
5426     ChunkArray* ca = &_survivor_plab_array[thr_num];
5427     ca->reset();   // clear it so that fresh data is recorded
5428     return (void*) ca;
5429   } else {
5430     return NULL;
5431   }
5432 }
5433 
5434 // Reset all the thread-local PLAB recording arrays
5435 void CMSCollector::reset_survivor_plab_arrays() {
5436   for (uint i = 0; i < ParallelGCThreads; i++) {
5437     _survivor_plab_array[i].reset();
5438   }
5439 }
5440 
5441 // Merge the per-thread plab arrays into the global survivor chunk
5442 // array which will provide the partitioning of the survivor space
5443 // for CMS rescan.
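     // The loop below is a standard k-way merge over the k sorted
     // per-thread arrays (a sketch, with out == _survivor_chunk_array):
     //
     //   while (some cursor is not at its array's end):
     //     pick j minimizing plab[j][cursor[j]]
     //     out[i++] = plab[j][cursor[j]++]
     //
     // top(), which lies above every recorded address, serves as the
     // sentinel that signals exhaustion of all the arrays.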
5444 void CMSCollector::merge_survivor_plab_arrays(ContiguousSpace* surv,
5445                                               int no_of_gc_threads) {
5446   assert(_survivor_plab_array  != NULL, "Error");
5447   assert(_survivor_chunk_array != NULL, "Error");
5448   assert(_collectorState == FinalMarking, "Error");
5449   for (int j = 0; j < no_of_gc_threads; j++) {
5450     _cursor[j] = 0;
5451   }
5452   HeapWord* top = surv->top();
5453   size_t i;
5454   for (i = 0; i < _survivor_chunk_capacity; i++) {  // all sca entries
5455     HeapWord* min_val = top;          // Higher than any PLAB address
5456     uint      min_tid = 0;            // position of min_val this round
5457     for (int j = 0; j < no_of_gc_threads; j++) {
5458       ChunkArray* cur_sca = &_survivor_plab_array[j];
5459       if (_cursor[j] == cur_sca->end()) {
5460         continue;
5461       }
5462       assert(_cursor[j] < cur_sca->end(), "ctl pt invariant");
5463       HeapWord* cur_val = cur_sca->nth(_cursor[j]);
5464       assert(surv->used_region().contains(cur_val), "Out of bounds value");
5465       if (cur_val < min_val) {
5466         min_tid = j;
5467         min_val = cur_val;
5468       } else {
5469         assert(cur_val < top, "All recorded addresses should be less");
5470       }
5471     }
5472     // At this point min_val and min_tid are respectively
5473     // the least address in _survivor_plab_array[j]->nth(_cursor[j])
5474     // and the thread (j) that witnesses that address.
5475     // We record this address in the _survivor_chunk_array[i]
5476     // and increment _cursor[min_tid] prior to the next round i.
5477     if (min_val == top) {
5478       break;
5479     }
5480     _survivor_chunk_array[i] = min_val;
5481     _cursor[min_tid]++;
5482   }
5483   // We are all done; record the size of the _survivor_chunk_array
5484   _survivor_chunk_index = i; // exclusive: [0, i)
5485   if (PrintCMSStatistics > 0) {
5486     gclog_or_tty->print(" (Survivor:" SIZE_FORMAT " chunks) ", i);
5487   }
5488   // Verify that we used up all the recorded entries
5489   #ifdef ASSERT
5490     size_t total = 0;
5491     for (int j = 0; j < no_of_gc_threads; j++) {
5492       assert(_cursor[j] == _survivor_plab_array[j].end(), "Ctl pt invariant");
5493       total += _cursor[j];
5494     }
5495     assert(total == _survivor_chunk_index, "Ctl Pt Invariant");
5496     // Check that the merged array is in sorted order
5497     if (total > 0) {
5498       for (size_t i = 0; i < total - 1; i++) {
5499         if (PrintCMSStatistics > 0) {
5500           gclog_or_tty->print(" (chunk" SIZE_FORMAT ":" INTPTR_FORMAT ") ",
5501                               i, _survivor_chunk_array[i]);
5502         }
5503         assert(_survivor_chunk_array[i] < _survivor_chunk_array[i+1],
5504                "Not sorted");
5505       }
5506     }
5507   #endif // ASSERT
5508 }
5509 
5510 // Set up the space's par_seq_tasks structure for work claiming
5511 // for parallel rescan of young gen.
5512 // See ParRescanTask where this is currently used.
5513 void
5514 CMSCollector::
5515 initialize_sequential_subtasks_for_young_gen_rescan(int n_threads) {
5516   assert(n_threads > 0, "Unexpected n_threads argument");
5517   DefNewGeneration* dng = (DefNewGeneration*)_young_gen;
5518 
5519   // Eden space
5520   {
5521     SequentialSubTasksDone* pst = dng->eden()->par_seq_tasks();
5522     assert(!pst->valid(), "Clobbering existing data?");
5523     // Each of the _eden_chunk_index sampled boundaries, plus the tail chunk up to top(), yields a task.
5524     size_t n_tasks = _eden_chunk_index + 1;
5525     assert(n_tasks == 1 || _eden_chunk_array != NULL, "Error");
5526     // Sets the condition for completion of the subtask (how many threads
5527     // need to finish in order to be done).
5528     pst->set_n_threads(n_threads);
5529     pst->set_n_tasks((int)n_tasks);
5530   }
5531 
5532   // Merge the survivor plab arrays into _survivor_chunk_array
5533   if (_survivor_plab_array != NULL) {
5534     merge_survivor_plab_arrays(dng->from(), n_threads);
5535   } else {
5536     assert(_survivor_chunk_index == 0, "Error");
5537   }
5538 
5539   // To space
5540   {
5541     SequentialSubTasksDone* pst = dng->to()->par_seq_tasks();
5542     assert(!pst->valid(), "Clobbering existing data?");
5543     // Sets the condition for completion of the subtask (how many threads
5544     // need to finish in order to be done).
5545     pst->set_n_threads(n_threads);
5546     pst->set_n_tasks(1);
5547     assert(pst->valid(), "Error");
5548   }
5549 
5550   // From space
5551   {
5552     SequentialSubTasksDone* pst = dng->from()->par_seq_tasks();
5553     assert(!pst->valid(), "Clobbering existing data?");
5554     size_t n_tasks = _survivor_chunk_index + 1;  // boundaries + tail chunk, as for eden
5555     assert(n_tasks == 1 || _survivor_chunk_array != NULL, "Error");
5556     // Sets the condition for completion of the subtask (how many threads
5557     // need to finish in order to be done).
5558     pst->set_n_threads(n_threads);
5559     pst->set_n_tasks((int)n_tasks);
5560     assert(pst->valid(), "Error");
5561   }
5562 }
5563 
5564 // Parallel version of remark
5565 void CMSCollector::do_remark_parallel() {
5566   GenCollectedHeap* gch = GenCollectedHeap::heap();
5567   FlexibleWorkGang* workers = gch->workers();
5568   assert(workers != NULL, "Need parallel worker threads.");
5569   // Choose to use the number of GC workers most recently set
5570   // into "active_workers".  If active_workers is not set, set it
5571   // to ParallelGCThreads.
5572   int n_workers = workers->active_workers();
5573   if (n_workers == 0) {
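         // The assert documents the expectation that active_workers was
         // set during the preceding scavenge; the fallback below keeps
         // product builds, where asserts compile away, going if not.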
5574     assert(n_workers > 0, "Should have been set during scavenge");
5575     n_workers = ParallelGCThreads;
5576     workers->set_active_workers(n_workers);
5577   }
5578   CompactibleFreeListSpace* cms_space  = _cmsGen->cmsSpace();
5579   CompactibleFreeListSpace* perm_space = _permGen->cmsSpace();
5580 
5581   CMSParRemarkTask tsk(this,
5582     cms_space, perm_space,
5583     n_workers, workers, task_queues());
5584 
5585   // Set up for parallel process_strong_roots work.
5586   gch->set_par_threads(n_workers);
5587   // We won't be iterating over the cards in the card table updating
5588   // the younger_gen cards, so we shouldn't call the following else
5589   // the verification code as well as subsequent younger_refs_iterate
5590   // code would get confused. XXX
5591   // gch->rem_set()->prepare_for_younger_refs_iterate(true); // parallel
5592 
5593   // The young gen rescan work will not be done as part of
5594   // process_strong_roots (which currently doesn't know how to
5595   // parallelize such a scan), but rather will be broken up into
5596   // a set of parallel tasks (via the sampling that the [abortable]
5597   // preclean phase did of EdenSpace, plus the [two] tasks of
5598   // scanning the [two] survivor spaces). Further fine-grained
5599   // parallelization of the scanning of the survivor spaces
5600   // themselves, and of precleaning of the younger gen itself,
5601   // is deferred to the future.
5602   initialize_sequential_subtasks_for_young_gen_rescan(n_workers);
5603 
5604   // The dirty card rescan work is broken up into a "sequence"
5605   // of parallel tasks (per constituent space) that are dynamically
5606   // claimed by the parallel threads.
5607   cms_space->initialize_sequential_subtasks_for_rescan(n_workers);
5608   perm_space->initialize_sequential_subtasks_for_rescan(n_workers);
5609 
5610   // It turns out that even when we're using 1 thread, doing the work in a
5611   // separate thread causes wide variance in run times.  We can't help this
5612   // in the multi-threaded case, but we special-case n=1 here to get
5613   // repeatable measurements of the 1-thread overhead of the parallel code.
5614   if (n_workers > 1) {
5615     // Make refs discovery MT-safe, if it isn't already: it may not
5616     // necessarily be so, since it's possible that we are doing
5617     // ST marking.
5618     ReferenceProcessorMTDiscoveryMutator mt(ref_processor(), true);
5619     GenCollectedHeap::StrongRootsScope srs(gch);
5620     workers->run_task(&tsk);
5621   } else {
5622     ReferenceProcessorMTDiscoveryMutator mt(ref_processor(), false);
5623     GenCollectedHeap::StrongRootsScope srs(gch);
5624     tsk.work(0);
5625   }
5626   gch->set_par_threads(0);  // 0 ==> non-parallel.
5627   // restore, single-threaded for now, any preserved marks
5628   // as a result of work_q overflow
5629   restore_preserved_marks_if_any();
5630 }
5631 
5632 // Non-parallel version of remark
5633 void CMSCollector::do_remark_non_parallel() {
5634   ResourceMark rm;
5635   HandleMark   hm;
5636   GenCollectedHeap* gch = GenCollectedHeap::heap();
5637   ReferenceProcessorMTDiscoveryMutator mt(ref_processor(), false);
5638 
5639   MarkRefsIntoAndScanClosure
5640     mrias_cl(_span, ref_processor(), &_markBitMap, &_modUnionTable,
5641              &_markStack, &_revisitStack, this,
5642              false /* should_yield */, false /* not precleaning */);
5643   MarkFromDirtyCardsClosure
5644     markFromDirtyCardsClosure(this, _span,
5645                               NULL,  // space is set further below
5646                               &_markBitMap, &_markStack, &_revisitStack,
5647                               &mrias_cl);
5648   {
5649     GCTraceTime t("grey object rescan", PrintGCDetails, false, _gc_timer_cm);
5650     // Iterate over the dirty cards, setting the corresponding bits in the
5651     // mod union table.
5652     {
5653       ModUnionClosure modUnionClosure(&_modUnionTable);
5654       _ct->ct_bs()->dirty_card_iterate(
5655                       _cmsGen->used_region(),
5656                       &modUnionClosure);
5657       _ct->ct_bs()->dirty_card_iterate(
5658                       _permGen->used_region(),
5659                       &modUnionClosure);
5660     }
5661     // Having transferred these marks into the modUnionTable, we just need
5662     // to rescan the marked objects on the dirty cards in the modUnionTable.
5663     // The initial marking may have been done during an asynchronous
5664     // collection so there may be dirty bits in the mod-union table.
5665     const int alignment =
5666       CardTableModRefBS::card_size * BitsPerWord;
5667     {
5668       // ... First handle dirty cards in CMS gen
5669       markFromDirtyCardsClosure.set_space(_cmsGen->cmsSpace());
5670       MemRegion ur = _cmsGen->used_region();
5671       HeapWord* lb = ur.start();
5672       HeapWord* ub = (HeapWord*)round_to((intptr_t)ur.end(), alignment);
5673       MemRegion cms_span(lb, ub);
5674       _modUnionTable.dirty_range_iterate_clear(cms_span,
5675                                                &markFromDirtyCardsClosure);
5676       verify_work_stacks_empty();
5677       if (PrintCMSStatistics != 0) {
5678         gclog_or_tty->print(" (re-scanned "SIZE_FORMAT" dirty cards in cms gen) ",
5679           markFromDirtyCardsClosure.num_dirty_cards());
5680       }
5681     }
5682     {
5683       // .. and then repeat for dirty cards in perm gen
5684       markFromDirtyCardsClosure.set_space(_permGen->cmsSpace());
5685       MemRegion ur = _permGen->used_region();
5686       HeapWord* lb = ur.start();
5687       HeapWord* ub = (HeapWord*)round_to((intptr_t)ur.end(), alignment);
5688       MemRegion perm_span(lb, ub);
5689       _modUnionTable.dirty_range_iterate_clear(perm_span,
5690                                                &markFromDirtyCardsClosure);
5691       verify_work_stacks_empty();
5692       if (PrintCMSStatistics != 0) {
5693         gclog_or_tty->print(" (re-scanned "SIZE_FORMAT" dirty cards in perm gen) ",
5694           markFromDirtyCardsClosure.num_dirty_cards());
5695       }
5696     }
5697   }
5698   if (VerifyDuringGC &&
5699       GenCollectedHeap::heap()->total_collections() >= VerifyGCStartAt) {
5700     HandleMark hm;  // Discard invalid handles created during verification
5701     Universe::verify();
5702   }
5703   {
5704     GCTraceTime t("root rescan", PrintGCDetails, false, _gc_timer_cm);
5705 
5706     verify_work_stacks_empty();
5707 
5708     gch->rem_set()->prepare_for_younger_refs_iterate(false); // Not parallel.
5709     GenCollectedHeap::StrongRootsScope srs(gch);
5710     gch->gen_process_strong_roots(_cmsGen->level(),
5711                                   true,  // younger gens as roots
5712                                   false, // use the local StrongRootsScope
5713                                   true,  // collecting perm gen
5714                                   SharedHeap::ScanningOption(roots_scanning_options()),
5715                                   &mrias_cl,
5716                                   true,   // walk code active on stacks
5717                                   NULL);
5718     assert(should_unload_classes()
5719            || (roots_scanning_options() & SharedHeap::SO_CodeCache),
5720            "if we didn't scan the code cache, we have to be ready to drop nmethods with expired weak oops");
5721   }
5722   verify_work_stacks_empty();
5723   // Restore evacuated mark words, if any, used for overflow list links
5724   if (!CMSOverflowEarlyRestoration) {
5725     restore_preserved_marks_if_any();
5726   }
5727   verify_overflow_empty();
5728 }
5729 
5730 ////////////////////////////////////////////////////////
5731 // Parallel Reference Processing Task Proxy Class
5732 ////////////////////////////////////////////////////////
5733 class CMSRefProcTaskProxy: public AbstractGangTaskWOopQueues {
5734   typedef AbstractRefProcTaskExecutor::ProcessTask ProcessTask;
5735   CMSCollector*          _collector;
5736   CMSBitMap*             _mark_bit_map;
5737   const MemRegion        _span;
5738   ProcessTask&           _task;
5739 
5740 public:
5741   CMSRefProcTaskProxy(ProcessTask&     task,
5742                       CMSCollector*    collector,
5743                       const MemRegion& span,
5744                       CMSBitMap*       mark_bit_map,
5745                       AbstractWorkGang* workers,
5746                       OopTaskQueueSet* task_queues):
5747     // XXX Should superclass AGTWOQ also know about AWG since it knows
5748     // about the task_queues used by the AWG? Then it could initialize
5749     // the terminator() object. See 6984287. The set_for_termination()
5750     // below is a temporary band-aid for the regression in 6984287.
5751     AbstractGangTaskWOopQueues("Process referents by policy in parallel",
5752       task_queues),
5753     _task(task),
5754     _collector(collector), _span(span), _mark_bit_map(mark_bit_map)
5755   {
5756     assert(_collector->_span.equals(_span) && !_span.is_empty(),
5757            "Inconsistency in _span");
5758     set_for_termination(workers->active_workers());
5759   }
5760 
5761   OopTaskQueueSet* task_queues() { return queues(); }
5762 
5763   OopTaskQueue* work_queue(int i) { return task_queues()->queue(i); }
5764 
5765   void do_work_steal(int i,
5766                      CMSParDrainMarkingStackClosure* drain,
5767                      CMSParKeepAliveClosure* keep_alive,
5768                      int* seed);
5769 
5770   virtual void work(uint worker_id);
5771 };
5772 
5773 void CMSRefProcTaskProxy::work(uint worker_id) {
5774   assert(_collector->_span.equals(_span), "Inconsistency in _span");
5775   CMSParKeepAliveClosure par_keep_alive(_collector, _span,
5776                                         _mark_bit_map,
5777                                         &_collector->_revisitStack,
5778                                         work_queue(worker_id));
5779   CMSParDrainMarkingStackClosure par_drain_stack(_collector, _span,
5780                                                  _mark_bit_map,
5781                                                  &_collector->_revisitStack,
5782                                                  work_queue(worker_id));
5783   CMSIsAliveClosure is_alive_closure(_span, _mark_bit_map);
5784   _task.work(worker_id, is_alive_closure, par_keep_alive, par_drain_stack);
5785   if (_task.marks_oops_alive()) {
5786     do_work_steal(worker_id, &par_drain_stack, &par_keep_alive,
5787                   _collector->hash_seed(worker_id));
5788   }
5789   assert(work_queue(worker_id)->size() == 0, "work_queue should be empty");
5790   assert(_collector->_overflow_list == NULL, "non-empty _overflow_list");
5791 }
5792 
5793 class CMSRefEnqueueTaskProxy: public AbstractGangTask {
5794   typedef AbstractRefProcTaskExecutor::EnqueueTask EnqueueTask;
5795   EnqueueTask& _task;
5796 
5797 public:
5798   CMSRefEnqueueTaskProxy(EnqueueTask& task)
5799     : AbstractGangTask("Enqueue reference objects in parallel"),
5800       _task(task)
5801   { }
5802 
5803   virtual void work(uint worker_id)
5804   {
5805     _task.work(worker_id);
5806   }
5807 };
5808 
5809 CMSParKeepAliveClosure::CMSParKeepAliveClosure(CMSCollector* collector,
5810   MemRegion span, CMSBitMap* bit_map, CMSMarkStack* revisit_stack,
5811   OopTaskQueue* work_queue):
5812    Par_KlassRememberingOopClosure(collector, NULL, revisit_stack),
5813    _span(span),
5814    _bit_map(bit_map),
5815    _work_queue(work_queue),
5816    _mark_and_push(collector, span, bit_map, revisit_stack, work_queue),
5817    _low_water_mark(MIN2((uint)(work_queue->max_elems()/4),
5818                         (uint)(CMSWorkQueueDrainThreshold * ParallelGCThreads)))
5819 { }
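     // Note: _low_water_mark caps how full the work queue is allowed to
     // get before it is trimmed back (see this closure's queue-trimming
     // logic); its bound (a quarter of max_elems, or the drain threshold
     // scaled by ParallelGCThreads) mirrors the refill heuristics used
     // elsewhere in this file.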
5820 
5821 // . see if we can share work_queues with ParNew? XXX
5822 void CMSRefProcTaskProxy::do_work_steal(int i,
5823   CMSParDrainMarkingStackClosure* drain,
5824   CMSParKeepAliveClosure* keep_alive,
5825   int* seed) {
5826   OopTaskQueue* work_q = work_queue(i);
5827   NOT_PRODUCT(int num_steals = 0;)
5828   oop obj_to_scan;
5829 
5830   while (true) {
5831     // Completely finish any left over work from (an) earlier round(s)
5832     drain->trim_queue(0);
5833     size_t num_from_overflow_list = MIN2((size_t)(work_q->max_elems() - work_q->size())/4,
5834                                          (size_t)ParGCDesiredObjsFromOverflowList);
5835     // Now check if there's any work in the overflow list
5836     // Passing ParallelGCThreads as the third parameter, no_of_gc_threads,
5837     // only affects the number of attempts made to get work from the
5838     // overflow list and does not affect the number of workers.  Just
5839     // pass ParallelGCThreads so this behavior is unchanged.
5840     if (_collector->par_take_from_overflow_list(num_from_overflow_list,
5841                                                 work_q,
5842                                                 ParallelGCThreads)) {
5843       // Found something in global overflow list;
5844       // not yet ready to go stealing work from others.
5845       // We'd like to assert(work_q->size() != 0, ...)
5846       // because we just took work from the overflow list,
5847       // but of course we can't, since all of that might have
5848       // been already stolen from us.
5849       continue;
5850     }
5851     // Verify that we have no work before we resort to stealing
5852     assert(work_q->size() == 0, "Have work, shouldn't steal");
5853     // Try to steal from other queues that have work
5854     if (task_queues()->steal(i, seed, /* reference */ obj_to_scan)) {
5855       NOT_PRODUCT(num_steals++;)
5856       assert(obj_to_scan->is_oop(), "Oops, not an oop!");
5857       assert(_mark_bit_map->isMarked((HeapWord*)obj_to_scan), "Stole an unmarked oop?");
5858       // Do scanning work
5859       obj_to_scan->oop_iterate(keep_alive);
5860       // Loop around, finish this work, and try to steal some more
5861     } else if (terminator()->offer_termination()) {
5862       break;  // nirvana from the infinite cycle
5863     }
5864   }
5865   NOT_PRODUCT(
5866     if (PrintCMSStatistics != 0) {
5867       gclog_or_tty->print("\n\t(%d: stole %d oops)", i, num_steals);
5868     }
5869   )
5870 }
5871 
5872 void CMSRefProcTaskExecutor::execute(ProcessTask& task)
5873 {
5874   GenCollectedHeap* gch = GenCollectedHeap::heap();
5875   FlexibleWorkGang* workers = gch->workers();
5876   assert(workers != NULL, "Need parallel worker threads.");
5877   CMSRefProcTaskProxy rp_task(task, &_collector,
5878                               _collector.ref_processor()->span(),
5879                               _collector.markBitMap(),
5880                               workers, _collector.task_queues());
5881   workers->run_task(&rp_task);
5882 }
5883 
5884 void CMSRefProcTaskExecutor::execute(EnqueueTask& task)
5885 {
5886 
5887   GenCollectedHeap* gch = GenCollectedHeap::heap();
5888   FlexibleWorkGang* workers = gch->workers();
5889   assert(workers != NULL, "Need parallel worker threads.");
5890   CMSRefEnqueueTaskProxy enq_task(task);
5891   workers->run_task(&enq_task);
5892 }
5893 
5894 void CMSCollector::refProcessingWork(bool asynch, bool clear_all_soft_refs) {
5895 
5896   ResourceMark rm;
5897   HandleMark   hm;
5898 
5899   ReferenceProcessor* rp = ref_processor();
5900   assert(rp->span().equals(_span), "Spans should be equal");
5901   assert(!rp->enqueuing_is_done(), "Enqueuing should not be complete");
5902   // Process weak references.
5903   rp->setup_policy(clear_all_soft_refs);
5904   verify_work_stacks_empty();
5905 
5906   CMSKeepAliveClosure cmsKeepAliveClosure(this, _span, &_markBitMap,
5907                                           &_markStack, &_revisitStack,
5908                                           false /* !preclean */);
5909   CMSDrainMarkingStackClosure cmsDrainMarkingStackClosure(this,
5910                                 _span, &_markBitMap, &_markStack,
5911                                 &cmsKeepAliveClosure, false /* !preclean */);
5912   {
5913     GCTraceTime t("weak refs processing", PrintGCDetails, false, _gc_timer_cm);
5914     if (rp->processing_is_mt()) {
5915       // Set the degree of MT here.  If the discovery is done MT, there
5916       // may have been a different number of threads doing the discovery
5917       // and a different number of discovered lists may have Ref objects.
5918       // That is OK as long as the Reference lists are balanced (see
5919       // balance_all_queues() and balance_queues()).
5920       GenCollectedHeap* gch = GenCollectedHeap::heap();
5921       int active_workers = ParallelGCThreads;
5922       FlexibleWorkGang* workers = gch->workers();
5923       if (workers != NULL) {
5924         active_workers = workers->active_workers();
5925         // The expectation is that active_workers will have already
5926         // been set to a reasonable value.  If it has not been set,
5927         // investigate.
5928         assert(active_workers > 0, "Should have been set during scavenge");
5929       }
5930       rp->set_active_mt_degree(active_workers);
5931       CMSRefProcTaskExecutor task_executor(*this);
5932       rp->process_discovered_references(&_is_alive_closure,
5933                                         &cmsKeepAliveClosure,
5934                                         &cmsDrainMarkingStackClosure,
5935                                         &task_executor,
5936                                         _gc_timer_cm);
5937     } else {
5938       rp->process_discovered_references(&_is_alive_closure,
5939                                         &cmsKeepAliveClosure,
5940                                         &cmsDrainMarkingStackClosure,
5941                                         NULL,
5942                                         _gc_timer_cm);
5943     }
5944     verify_work_stacks_empty();
5945   }
5946 
5947   if (should_unload_classes()) {
5948     {
5949       GCTraceTime t("class unloading", PrintGCDetails, false, _gc_timer_cm);
5950 
5951       // Follow SystemDictionary roots and unload classes
5952       bool purged_class = SystemDictionary::do_unloading(&_is_alive_closure);
5953 
5954       // Follow CodeCache roots and unload any methods marked for unloading
5955       CodeCache::do_unloading(&_is_alive_closure,
5956                               &cmsKeepAliveClosure,
5957                               purged_class);
5958 
5959       cmsDrainMarkingStackClosure.do_void();
5960       verify_work_stacks_empty();
5961 
5962       // Update subklass/sibling/implementor links in KlassKlass descendants
5963       assert(!_revisitStack.isEmpty(), "revisit stack should not be empty");
5964       oop k;
5965       while ((k = _revisitStack.pop()) != NULL) {
5966         ((Klass*)(oopDesc*)k)->follow_weak_klass_links(
5967                        &_is_alive_closure,
5968                        &cmsKeepAliveClosure);
5969       }
5970       assert(!ClassUnloading ||
5971              (_markStack.isEmpty() && overflow_list_is_empty()),
5972              "Should not have found new reachable objects");
5973       assert(_revisitStack.isEmpty(), "revisit stack should have been drained");
5974       cmsDrainMarkingStackClosure.do_void();
5975       verify_work_stacks_empty();
5976     }
5977 
5978     {
5979       GCTraceTime t("scrub symbol table", PrintGCDetails, false, _gc_timer_cm);
5980       // Clean up unreferenced symbols in symbol table.
5981       SymbolTable::unlink();
5982     }
5983   }
5984 
5985   if (should_unload_classes() || !JavaObjectsInPerm) {
5986     GCTraceTime t("scrub string table", PrintGCDetails, false, _gc_timer_cm);
5987     // Now clean up stale oops in StringTable
5988     StringTable::unlink(&_is_alive_closure);
5989   }
5990 
5991   verify_work_stacks_empty();
5992   // Restore any preserved marks as a result of mark stack or
5993   // work queue overflow
5994   restore_preserved_marks_if_any();  // done single-threaded for now
5995 
5996   rp->set_enqueuing_is_done(true);
5997   if (rp->processing_is_mt()) {
5998     rp->balance_all_queues();
5999     CMSRefProcTaskExecutor task_executor(*this);
6000     rp->enqueue_discovered_references(&task_executor);
6001   } else {
6002     rp->enqueue_discovered_references(NULL);
6003   }
6004   rp->verify_no_references_recorded();
6005   assert(!rp->discovery_enabled(), "should have been disabled");
6006 }
6007 
6008 #ifndef PRODUCT
6009 void CMSCollector::check_correct_thread_executing() {
6010   Thread* t = Thread::current();
6011   // Only the VM thread or the CMS thread should be here.
6012   assert(t->is_ConcurrentGC_thread() || t->is_VM_thread(),
6013          "Unexpected thread type");
6014   // If this is the vm thread, the foreground process
6015   // should not be waiting.  Note that _foregroundGCIsActive is
6016   // true while the foreground collector is waiting.
6017   if (_foregroundGCShouldWait) {
6018     // We cannot be the VM thread
6019     assert(t->is_ConcurrentGC_thread(),
6020            "Should be CMS thread");
6021   } else {
6022     // We can be the CMS thread only if we are in a stop-world
6023     // phase of CMS collection.
6024     if (t->is_ConcurrentGC_thread()) {
6025       assert(_collectorState == InitialMarking ||
6026              _collectorState == FinalMarking,
6027              "Should be a stop-world phase");
6028       // The CMS thread should be holding the CMS_token.
6029       assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
6030              "Potential interference with concurrently "
6031              "executing VM thread");
6032     }
6033   }
6034 }
6035 #endif
6036 
6037 void CMSCollector::sweep(bool asynch) {
6038   assert(_collectorState == Sweeping, "just checking");
6039   check_correct_thread_executing();
6040   verify_work_stacks_empty();
6041   verify_overflow_empty();
6042   increment_sweep_count();
6043   TraceCMSMemoryManagerStats tms(_collectorState, GenCollectedHeap::heap()->gc_cause());
6044 
6045   _inter_sweep_timer.stop();
6046   _inter_sweep_estimate.sample(_inter_sweep_timer.seconds());
6047   size_policy()->avg_cms_free_at_sweep()->sample(_cmsGen->free());
6048 
6049   // PermGen verification support: If perm gen sweeping is disabled in
6050   // this cycle, we preserve the perm gen object "deadness" information
6051   // in the perm_gen_verify_bit_map. In order to do that we traverse
6052   // all blocks in perm gen and mark all dead objects.
6053   if (verifying() && !should_unload_classes()) {
6054     assert(perm_gen_verify_bit_map()->sizeInBits() != 0,
6055            "Should have already been allocated");
6056     MarkDeadObjectsClosure mdo(this, _permGen->cmsSpace(),
6057                                markBitMap(), perm_gen_verify_bit_map());
6058     if (asynch) {
6059       CMSTokenSyncWithLocks ts(true, _permGen->freelistLock(),
6060                                bitMapLock());
6061       _permGen->cmsSpace()->blk_iterate(&mdo);
6062     } else {
6063       // In the case of synchronous sweep, we already have
6064       // the requisite locks/tokens.
6065       _permGen->cmsSpace()->blk_iterate(&mdo);
6066     }
6067   }
6068 
6069   assert(!_intra_sweep_timer.is_active(), "Should not be active");
6070   _intra_sweep_timer.reset();
6071   _intra_sweep_timer.start();
6072   if (asynch) {
6073     TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
6074     CMSPhaseAccounting pa(this, "sweep", !PrintGCDetails);
6075     // First sweep the old gen then the perm gen
6076     {
6077       CMSTokenSyncWithLocks ts(true, _cmsGen->freelistLock(),
6078                                bitMapLock());
6079       sweepWork(_cmsGen, asynch);
6080     }
6081 
6082     // Now repeat for perm gen
6083     if (should_unload_classes()) {
6084       CMSTokenSyncWithLocks ts(true, _permGen->freelistLock(),
6085                              bitMapLock());
6086       sweepWork(_permGen, asynch);
6087     }
6088 
6089     // Update Universe::_heap_*_at_gc figures.
6090     // We need all the free list locks to make the abstract state
6091     // transition from Sweeping to Resizing. See detailed note
6092     // further below.
6093     {
6094       CMSTokenSyncWithLocks ts(true, _cmsGen->freelistLock(),
6095                                _permGen->freelistLock());
6096       // Update heap occupancy information which is used as
6097       // input to soft ref clearing policy at the next gc.
6098       Universe::update_heap_info_at_gc();
6099       _collectorState = Resizing;
6100     }
6101   } else {
6102     // already have needed locks
6103     sweepWork(_cmsGen,  asynch);
6104 
6105     if (should_unload_classes()) {
6106       sweepWork(_permGen, asynch);
6107     }
6108     // Update heap occupancy information which is used as
6109     // input to soft ref clearing policy at the next gc.
6110     Universe::update_heap_info_at_gc();
6111     _collectorState = Resizing;
6112   }
6113   verify_work_stacks_empty();
6114   verify_overflow_empty();
6115 
6116   _intra_sweep_timer.stop();
6117   _intra_sweep_estimate.sample(_intra_sweep_timer.seconds());
6118 
6119   _inter_sweep_timer.reset();
6120   _inter_sweep_timer.start();
6121 
6122   // We need to use a monotonically non-decreasing time in ms
6123   // or we will see time-warp warnings; os::javaTimeMillis()
6124   // does not guarantee monotonicity.
6125   jlong now = os::javaTimeNanos() / NANOSECS_PER_MILLISEC;
6126   update_time_of_last_gc(now);
6127 
6128   // NOTE on abstract state transitions:
6129   // Mutators allocate-live and/or mark the mod-union table dirty
6130   // based on the state of the collection.  The former is done in
6131   // the interval [Marking, Sweeping] and the latter in the interval
6132   // [Marking, Sweeping).  Thus the transitions into the Marking state
6133   // and out of the Sweeping state must be synchronously visible
6134   // globally to the mutators.
6135   // The transition into the Marking state happens with the world
6136   // stopped so the mutators will globally see it.  Sweeping is
6137   // done asynchronously by the background collector so the transition
6138   // from the Sweeping state to the Resizing state must be done
6139   // under the freelistLock (as is the check for whether to
6140   // allocate-live and whether to dirty the mod-union table).
6141   assert(_collectorState == Resizing, "Change of collector state to"
6142     " Resizing must be done under the freelistLocks (plural)");
6143 
6144   // Now that sweeping has been completed, we clear
6145   // the incremental_collection_failed flag,
6146   // thus inviting a younger gen collection to promote into
6147   // this generation. If such a promotion may still fail,
6148   // the flag will be set again when a young collection is
6149   // attempted.
6150   GenCollectedHeap* gch = GenCollectedHeap::heap();
6151   gch->clear_incremental_collection_failed();  // Worth retrying as fresh space may have been freed up
6152   gch->update_full_collections_completed(_collection_count_start);
6153 }
6154 
6155 // FIX ME!!! Looks like this belongs in CFLSpace, with
6156 // CMSGen merely delegating to it.
6157 void ConcurrentMarkSweepGeneration::setNearLargestChunk() {
6158   double nearLargestPercent = FLSLargestBlockCoalesceProximity;
6159   HeapWord*  minAddr        = _cmsSpace->bottom();
6160   HeapWord*  largestAddr    =
6161     (HeapWord*) _cmsSpace->dictionary()->find_largest_dict();
6162   if (largestAddr == NULL) {
6163     // The dictionary appears to be empty.  In this case
6164     // try to coalesce at the end of the heap.
6165     largestAddr = _cmsSpace->end();
6166   }
6167   size_t largestOffset     = pointer_delta(largestAddr, minAddr);
6168   size_t nearLargestOffset =
6169     (size_t)((double)largestOffset * nearLargestPercent) - MinChunkSize;
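       // E.g. with nearLargestPercent == 0.99 and the largest block
       // found 1000 words above minAddr, the hint lands MinChunkSize
       // short of word 990. Note the unchecked size_t subtraction:
       // for sufficiently small offsets the value would wrap around.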
6170   if (PrintFLSStatistics != 0) {
6171     gclog_or_tty->print_cr(
6172       "CMS: Large Block: " PTR_FORMAT ";"
6173       " Proximity: " PTR_FORMAT " -> " PTR_FORMAT,
6174       largestAddr,
6175       _cmsSpace->nearLargestChunk(), minAddr + nearLargestOffset);
6176   }
6177   _cmsSpace->set_nearLargestChunk(minAddr + nearLargestOffset);
6178 }
6179 
6180 bool ConcurrentMarkSweepGeneration::isNearLargestChunk(HeapWord* addr) {
6181   return addr >= _cmsSpace->nearLargestChunk();
6182 }
6183 
6184 FreeChunk* ConcurrentMarkSweepGeneration::find_chunk_at_end() {
6185   return _cmsSpace->find_chunk_at_end();
6186 }
6187 
6188 void ConcurrentMarkSweepGeneration::update_gc_stats(int current_level,
6189                                                     bool full) {
6190   // The next lower level has been collected.  Gather any statistics
6191   // that are of interest at this point.
6192   if (!full && (current_level + 1) == level()) {
6193     // Gather statistics on the young generation collection.
6194     collector()->stats().record_gc0_end(used());
6195   }
6196 }
6197 
6198 CMSAdaptiveSizePolicy* ConcurrentMarkSweepGeneration::size_policy() {
6199   GenCollectedHeap* gch = GenCollectedHeap::heap();
6200   assert(gch->kind() == CollectedHeap::GenCollectedHeap,
6201     "Wrong type of heap");
6202   CMSAdaptiveSizePolicy* sp = (CMSAdaptiveSizePolicy*)
6203     gch->gen_policy()->size_policy();
6204   assert(sp->is_gc_cms_adaptive_size_policy(),
6205     "Wrong type of size policy");
6206   return sp;
6207 }
6208 
6209 void ConcurrentMarkSweepGeneration::rotate_debug_collection_type() {
6210   if (PrintGCDetails && Verbose) {
6211     gclog_or_tty->print("Rotate from %d ", _debug_collection_type);
6212   }
6213   _debug_collection_type = (CollectionTypes) (_debug_collection_type + 1);
6214   _debug_collection_type =
6215     (CollectionTypes) (_debug_collection_type % Unknown_collection_type);
6216   if (PrintGCDetails && Verbose) {
6217     gclog_or_tty->print_cr("to %d ", _debug_collection_type);
6218   }
6219 }
6220 
6221 void CMSCollector::sweepWork(ConcurrentMarkSweepGeneration* gen,
6222   bool asynch) {
6223   // We iterate over the space(s) underlying this generation,
6224   // checking the mark bit map to see if the bits corresponding
6225   // to specific blocks are marked or not. Blocks that are
6226   // marked are live and are not swept up. All remaining blocks
6227   // are swept up, with coalescing on-the-fly as we sweep up
6228   // contiguous free and/or garbage blocks:
6229   // We need to ensure that the sweeper synchronizes with allocators
6230   // and stop-the-world collectors. In particular, the following
6231   // locks are used:
6232   // . CMS token: if this is held, a stop the world collection cannot occur
6233   // . freelistLock: if this is held no allocation can occur from this
6234   //                 generation by another thread
6235   // . bitMapLock: if this is held, no other thread can access or update
6236   //               the marking bit map
6237 
6238   // Note that we need to hold the freelistLock if we use
6239   // block iterate below; else the iterator might go awry if
6240   // a mutator (or promotion) causes block contents to change
6241   // (for instance if the allocator divvies up a block).
6242   // If we hold the free list lock, for all practical purposes
6243   // young generation GC's can't occur (they'll usually need to
6244   // promote), so we might as well prevent all young generation
6245   // GC's while we do a sweeping step. For the same reason, we might
6246   // as well take the bit map lock for the entire duration.
6247 
6248   // check that we hold the requisite locks
6249   assert(have_cms_token(), "Should hold cms token");
6250   assert(   (asynch && ConcurrentMarkSweepThread::cms_thread_has_cms_token())
6251          || (!asynch && ConcurrentMarkSweepThread::vm_thread_has_cms_token()),
6252         "Should possess CMS token to sweep");
6253   assert_lock_strong(gen->freelistLock());
6254   assert_lock_strong(bitMapLock());
6255 
6256   assert(!_inter_sweep_timer.is_active(), "Was switched off in an outer context");
6257   assert(_intra_sweep_timer.is_active(),  "Was switched on  in an outer context");
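       // Feed the measured inter-sweep (between sweeps) and intra-sweep
       // (within a sweep) durations into the free list census below; the
       // padded averages inform its per-size-class demand estimates.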
6258   gen->cmsSpace()->beginSweepFLCensus((float)(_inter_sweep_timer.seconds()),
6259                                       _inter_sweep_estimate.padded_average(),
6260                                       _intra_sweep_estimate.padded_average());
6261   gen->setNearLargestChunk();
6262 
6263   {
6264     SweepClosure sweepClosure(this, gen, &_markBitMap,
6265                             CMSYield && asynch);
6266     gen->cmsSpace()->blk_iterate_careful(&sweepClosure);
6267     // We need to free-up/coalesce garbage/blocks from a
6268     // co-terminal free run. This is done in the SweepClosure
6269     // destructor; so, do not remove this scope, else the
6270     // end-of-sweep-census below will be off by a little bit.
6271   }
6272   gen->cmsSpace()->sweep_completed();
6273   gen->cmsSpace()->endSweepFLCensus(sweep_count());
6274   if (should_unload_classes()) {                // unloaded classes this cycle,
6275     _concurrent_cycles_since_last_unload = 0;   // ... reset count
6276   } else {                                      // did not unload classes,
6277     _concurrent_cycles_since_last_unload++;     // ... increment count
6278   }
6279 }
6280 
6281 // Reset CMS data structures (for now just the marking bit map)
6282 // preparatory for the next cycle.
6283 void CMSCollector::reset(bool asynch) {
6284   GenCollectedHeap* gch = GenCollectedHeap::heap();
6285   CMSAdaptiveSizePolicy* sp = size_policy();
6286   AdaptiveSizePolicyOutput(sp, gch->total_collections());
6287   if (asynch) {
6288     CMSTokenSyncWithLocks ts(true, bitMapLock());
6289 
6290     // If the state is not "Resetting", the foreground thread
6291     // has already done the collection and the resetting.
6292     if (_collectorState != Resetting) {
6293       assert(_collectorState == Idling, "The state should only change"
6294         " because the foreground collector has finished the collection");
6295       return;
6296     }
6297 
6298     // Clear the mark bitmap (no grey objects to start with)
6299     // for the next cycle.
6300     TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
6301     CMSPhaseAccounting cmspa(this, "reset", !PrintGCDetails);
6302 
6303     HeapWord* curAddr = _markBitMap.startWord();
6304     while (curAddr < _markBitMap.endWord()) {
6305       size_t remaining  = pointer_delta(_markBitMap.endWord(), curAddr);
6306       MemRegion chunk(curAddr, MIN2(CMSBitMapYieldQuantum, remaining));
6307       _markBitMap.clear_large_range(chunk);
6308       if (ConcurrentMarkSweepThread::should_yield() &&
6309           !foregroundGCIsActive() &&
6310           CMSYield) {
6311         assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
6312                "CMS thread should hold CMS token");
6313         assert_lock_strong(bitMapLock());
6314         bitMapLock()->unlock();
6315         ConcurrentMarkSweepThread::desynchronize(true);
6316         ConcurrentMarkSweepThread::acknowledge_yield_request();
6317         stopTimer();
6318         if (PrintCMSStatistics != 0) {
6319           incrementYields();
6320         }
6321         icms_wait();
6322 
6323         // See the comment in coordinator_yield()
6324         for (unsigned i = 0; i < CMSYieldSleepCount &&
6325                          ConcurrentMarkSweepThread::should_yield() &&
6326                          !CMSCollector::foregroundGCIsActive(); ++i) {
6327           os::sleep(Thread::current(), 1, false);
6328           ConcurrentMarkSweepThread::acknowledge_yield_request();
6329         }
6330 
6331         ConcurrentMarkSweepThread::synchronize(true);
6332         bitMapLock()->lock_without_safepoint_check();
6333         startTimer();
6334       }
6335       curAddr = chunk.end();
6336     }
6337     // A successful mostly concurrent collection has been done.
6338     // Because only the full (i.e., concurrent mode failure) collections
6339     // are being measured for gc overhead limits, clean the "near" flag
6340     // and count.
6341     sp->reset_gc_overhead_limit_count();
6342     _collectorState = Idling;
6343   } else {
6344     // already have the lock
6345     assert(_collectorState == Resetting, "just checking");
6346     assert_lock_strong(bitMapLock());
6347     _markBitMap.clear_all();
6348     _collectorState = Idling;
6349   }
6350 
6351   // Stop incremental mode after a cycle completes, so that any future cycles
6352   // are triggered by allocation.
6353   stop_icms();
6354 
6355   NOT_PRODUCT(
6356     if (RotateCMSCollectionTypes) {
6357       _cmsGen->rotate_debug_collection_type();
6358     }
6359   )
6360 
6361   register_gc_end();
6362 }
6363 
6364 void CMSCollector::do_CMS_operation(CMS_op_type op, GCCause::Cause gc_cause) {
6365   gclog_or_tty->date_stamp(PrintGC && PrintGCDateStamps);
6366   TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
6367   GCTraceTime t(GCCauseString("GC", gc_cause), PrintGC, !PrintGCDetails, NULL);
6368   TraceCollectorStats tcs(counters());
6369 
6370   switch (op) {
6371     case CMS_op_checkpointRootsInitial: {
6372       SvcGCMarker sgcm(SvcGCMarker::OTHER);
6373       checkpointRootsInitial(true);       // asynch
6374       if (PrintGC) {
6375         _cmsGen->printOccupancy("initial-mark");
6376       }
6377       break;
6378     }
6379     case CMS_op_checkpointRootsFinal: {
6380       SvcGCMarker sgcm(SvcGCMarker::OTHER);
6381       checkpointRootsFinal(true,    // asynch
6382                            false,   // !clear_all_soft_refs
6383                            false);  // !init_mark_was_synchronous
6384       if (PrintGC) {
6385         _cmsGen->printOccupancy("remark");
6386       }
6387       break;
6388     }
6389     default:
6390       fatal("No such CMS_op");
6391   }
6392 }
6393 
6394 #ifndef PRODUCT
6395 size_t const CMSCollector::skip_header_HeapWords() {
6396   return FreeChunk::header_size();
6397 }
6398 
6399 // Try to collect here the conditions that should hold when the
6400 // CMS thread is exiting. The idea is that the foreground GC
6401 // thread should not be blocked if it wants to terminate
6402 // the CMS thread and yet continue to run the VM for a while
6403 // after that.
6404 void CMSCollector::verify_ok_to_terminate() const {
6405   assert(Thread::current()->is_ConcurrentGC_thread(),
6406          "should be called by CMS thread");
6407   assert(!_foregroundGCShouldWait, "should be false");
6408   // We could check here that all the various low-level locks
6409   // are not held by the CMS thread, but that is overkill; see
6410   // also CMSThread::verify_ok_to_terminate() where the CGC_lock
6411   // is checked.
6412 }
6413 #endif
6414 
6415 size_t CMSCollector::block_size_using_printezis_bits(HeapWord* addr) const {
6416   assert(_markBitMap.isMarked(addr) && _markBitMap.isMarked(addr + 1),
6417          "missing Printezis mark?");
6418   HeapWord* nextOneAddr = _markBitMap.getNextMarkedWordAddress(addr + 2);
6419   size_t size = pointer_delta(nextOneAddr + 1, addr);
6420   assert(size == CompactibleFreeListSpace::adjustObjectSize(size),
6421          "alignment problem");
6422   assert(size >= 3, "Necessary for Printezis marks to work");
6423   return size;
6424 }
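
     // Worked example (a sketch): for an uninitialized object of 5 heap
     // words at address A, the Printezis convention leaves bits set at
     // A, A+1 and A+size-1 = A+4. The scan above from A+2 finds the next
     // set bit at A+4, so size = pointer_delta((A+4)+1, A) = 5, which
     // satisfies the size >= 3 requirement asserted above.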
6425 
6426 // A variant of the above (block_size_using_printezis_bits()) except
6427 // that we return 0 if the P-bits are not yet set.
6428 size_t CMSCollector::block_size_if_printezis_bits(HeapWord* addr) const {
6429   if (_markBitMap.isMarked(addr + 1)) {
6430     assert(_markBitMap.isMarked(addr), "P-bit can be set only for marked objects");
6431     HeapWord* nextOneAddr = _markBitMap.getNextMarkedWordAddress(addr + 2);
6432     size_t size = pointer_delta(nextOneAddr + 1, addr);
6433     assert(size == CompactibleFreeListSpace::adjustObjectSize(size),
6434            "alignment problem");
6435     assert(size >= 3, "Necessary for Printezis marks to work");
6436     return size;
6437   }
6438   return 0;
6439 }
6440 
6441 HeapWord* CMSCollector::next_card_start_after_block(HeapWord* addr) const {
6442   size_t sz = 0;
6443   oop p = (oop)addr;
6444   if (p->klass_or_null() != NULL && p->is_parsable()) {
6445     sz = CompactibleFreeListSpace::adjustObjectSize(p->size());
6446   } else {
6447     sz = block_size_using_printezis_bits(addr);
6448   }
6449   assert(sz > 0, "size must be nonzero");
6450   HeapWord* next_block = addr + sz;
6451   HeapWord* next_card  = (HeapWord*)round_to((uintptr_t)next_block,
6452                                              CardTableModRefBS::card_size);
6453   assert(round_down((uintptr_t)addr,      CardTableModRefBS::card_size) <
6454          round_down((uintptr_t)next_card, CardTableModRefBS::card_size),
6455          "must be different cards");
6456   return next_card;
6457 }
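
     // Worked example (a sketch, assuming the usual 512-byte cards, i.e.
     // 64 heap words on a 64-bit VM): a block of 10 words starting at
     // word offset 70 ends at word 80; rounding up to the next card
     // boundary yields word 128, the first card start after the block.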
6458 
6459 
6460 // CMS Bit Map Wrapper /////////////////////////////////////////
6461 
6462 // Construct a CMS bit map infrastructure, but don't create the
6463 // bit vector itself. That is done by a separate call to
6464 // CMSBitMap::allocate() further below.
6465 CMSBitMap::CMSBitMap(int shifter, int mutex_rank, const char* mutex_name):
6466   _bm(),
6467   _shifter(shifter),
6468   _lock(mutex_rank >= 0 ? new Mutex(mutex_rank, mutex_name, true) : NULL)
6469 {
6470   _bmStartWord = 0;
6471   _bmWordSize  = 0;
6472 }
6473 
6474 bool CMSBitMap::allocate(MemRegion mr) {
6475   _bmStartWord = mr.start();
6476   _bmWordSize  = mr.word_size();
6477   ReservedSpace brs(ReservedSpace::allocation_align_size_up(
6478                      (_bmWordSize >> (_shifter + LogBitsPerByte)) + 1));
6479   if (!brs.is_reserved()) {
6480     warning("CMS bit map allocation failure");
6481     return false;
6482   }
6483   // For now we'll just commit all of the bit map up front.
6484   // Later on we'll try to be more parsimonious with swap.
6485   if (!_virtual_space.initialize(brs, brs.size())) {
6486     warning("CMS bit map backing store failure");
6487     return false;
6488   }
6489   assert(_virtual_space.committed_size() == brs.size(),
6490          "didn't reserve backing store for all of CMS bit map?");
6491   _bm.set_map((BitMap::bm_word_t*)_virtual_space.low());
6492   assert(_virtual_space.committed_size() << (_shifter + LogBitsPerByte) >=
6493          _bmWordSize, "inconsistency in bit map sizing");
6494   _bm.set_size(_bmWordSize >> _shifter);
6495 
6496   // bm.clear(); // can we rely on getting zero'd memory? verify below
6497   assert(isAllClear(),
6498          "Expected zero'd memory from ReservedSpace constructor");
6499   assert(_bm.size() == heapWordDiffToOffsetDiff(sizeInWords()),
6500          "consistency check");
6501   return true;
6502 }
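
     // Sizing sketch: one bit covers 2^_shifter heap words, so covering
     // _bmWordSize words takes (_bmWordSize >> _shifter) bits, i.e.
     // (_bmWordSize >> (_shifter + LogBitsPerByte)) bytes (the +1 above
     // allows for rounding). For example, with _shifter == 0 on a 64-bit
     // VM, a 1 GB generation of 2^27 words needs a 2^24-byte (16 MB) map.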
6503 
6504 void CMSBitMap::dirty_range_iterate_clear(MemRegion mr, MemRegionClosure* cl) {
6505   HeapWord *next_addr, *end_addr, *last_addr;
6506   assert_locked();
6507   assert(covers(mr), "out-of-range error");
6508   // XXX assert that start and end are appropriately aligned
6509   for (next_addr = mr.start(), end_addr = mr.end();
6510        next_addr < end_addr; next_addr = last_addr) {
6511     MemRegion dirty_region = getAndClearMarkedRegion(next_addr, end_addr);
6512     last_addr = dirty_region.end();
6513     if (!dirty_region.is_empty()) {
6514       cl->do_MemRegion(dirty_region);
6515     } else {
6516       assert(last_addr == end_addr, "program logic");
6517       return;
6518     }
6519   }
6520 }
6521 
6522 #ifndef PRODUCT
6523 void CMSBitMap::assert_locked() const {
6524   CMSLockVerifier::assert_locked(lock());
6525 }
6526 
6527 bool CMSBitMap::covers(MemRegion mr) const {
6528   // assert(_bm.map() == _virtual_space.low(), "map inconsistency");
6529   assert((size_t)_bm.size() == (_bmWordSize >> _shifter),
6530          "size inconsistency");
6531   return (mr.start() >= _bmStartWord) &&
6532          (mr.end()   <= endWord());
6533 }
6534 
6535 bool CMSBitMap::covers(HeapWord* start, size_t size) const {
6536   return (start >= _bmStartWord && (start + size) <= endWord());
6537 }
6538 
6539 void CMSBitMap::verifyNoOneBitsInRange(HeapWord* left, HeapWord* right) {
6540   // verify that there are no 1 bits in the interval [left, right)
6541   FalseBitMapClosure falseBitMapClosure;
6542   iterate(&falseBitMapClosure, left, right);
6543 }
6544 
6545 void CMSBitMap::region_invariant(MemRegion mr)
6546 {
6547   assert_locked();
6548   // mr = mr.intersection(MemRegion(_bmStartWord, _bmWordSize));
6549   assert(!mr.is_empty(), "unexpected empty region");
6550   assert(covers(mr), "mr should be covered by bit map");
6551   // convert address range into offset range
6552   size_t start_ofs = heapWordToOffset(mr.start());
6553   // Make sure that end() is appropriately aligned
6554   assert(mr.end() == (HeapWord*)round_to((intptr_t)mr.end(),
6555                         (1 << (_shifter+LogHeapWordSize))),
6556          "Misaligned mr.end()");
6557   size_t end_ofs   = heapWordToOffset(mr.end());
6558   assert(end_ofs > start_ofs, "Should mark at least one bit");
6559 }
6560 
6561 #endif
6562 
6563 bool CMSMarkStack::allocate(size_t size) {
6564   // allocate a stack of the requisite depth
6565   ReservedSpace rs(ReservedSpace::allocation_align_size_up(
6566                    size * sizeof(oop)));
6567   if (!rs.is_reserved()) {
6568     warning("CMSMarkStack allocation failure");
6569     return false;
6570   }
6571   if (!_virtual_space.initialize(rs, rs.size())) {
6572     warning("CMSMarkStack backing store failure");
6573     return false;
6574   }
6575   assert(_virtual_space.committed_size() == rs.size(),
6576          "didn't reserve backing store for all of CMS stack?");
6577   _base = (oop*)(_virtual_space.low());
6578   _index = 0;
6579   _capacity = size;
6580   NOT_PRODUCT(_max_depth = 0);
6581   return true;
6582 }
6583 
6584 // XXX FIX ME !!! In the MT case we come in here holding a
6585 // leaf lock. For printing we need to take a further lock
6586 // which has lower rank. We need to recalibrate the two
6587 // lock-ranks involved in order to be able to print the
6588 // messages below. (Or defer the printing to the caller.
6589 // For now we take the expedient path of just disabling the
6590 // messages for the problematic case.)
6591 void CMSMarkStack::expand() {
6592   assert(_capacity <= MarkStackSizeMax, "stack bigger than permitted");
6593   if (_capacity == MarkStackSizeMax) {
6594     if (_hit_limit++ == 0 && !CMSConcurrentMTEnabled && PrintGCDetails) {
6595       // We print a warning message only once per CMS cycle.
6596       gclog_or_tty->print_cr(" (benign) Hit CMSMarkStack max size limit");
6597     }
6598     return;
6599   }
6600   // Double capacity if possible
6601   size_t new_capacity = MIN2(_capacity*2, MarkStackSizeMax);
6602   // Do not give up existing stack until we have managed to
6603   // get the double capacity that we desired.
6604   ReservedSpace rs(ReservedSpace::allocation_align_size_up(
6605                    new_capacity * sizeof(oop)));
6606   if (rs.is_reserved()) {
6607     // Release the backing store associated with old stack
6608     _virtual_space.release();
6609     // Reinitialize virtual space for new stack
6610     if (!_virtual_space.initialize(rs, rs.size())) {
6611       fatal("Not enough swap for expanded marking stack");
6612     }
6613     _base = (oop*)(_virtual_space.low());
6614     _index = 0;
6615     _capacity = new_capacity;
6616   } else if (_failed_double++ == 0 && !CMSConcurrentMTEnabled && PrintGCDetails) {
6617     // Failed to double capacity, continue;
6618     // we print a detail message only once per CMS cycle.
6619     gclog_or_tty->print(" (benign) Failed to expand marking stack from "SIZE_FORMAT"K to "
6620             SIZE_FORMAT"K",
6621             _capacity / K, new_capacity / K);
6622   }
6623 }
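
     // Expansion sketch: starting from capacity C, successive calls
     // attempt C -> 2*C -> 4*C ... up to MarkStackSizeMax, after which
     // expand() becomes a no-op and overflow is handled instead by the
     // restart-address protocol (see handle_stack_overflow() below).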
6624 
6625 
6626 // Closures
6627 // XXX: there seems to be a lot of code duplication here;
6628 // should refactor and consolidate common code.
6629 
6630 // This closure is used to mark refs into the CMS generation in
6631 // the CMS bit map. Called at the first checkpoint. This closure
6632 // assumes that we do not need to re-mark dirty cards; if the CMS
6633 // generation on which this is used is not an oldest (modulo perm gen)
6634 // generation then this will lose younger_gen cards!
6635 
6636 MarkRefsIntoClosure::MarkRefsIntoClosure(
6637   MemRegion span, CMSBitMap* bitMap):
6638     _span(span),
6639     _bitMap(bitMap)
6640 {
6641     assert(_ref_processor == NULL, "deliberately left NULL");
6642     assert(_bitMap->covers(_span), "_bitMap/_span mismatch");
6643 }
6644 
6645 void MarkRefsIntoClosure::do_oop(oop obj) {
6646   // if obj points into _span, then mark the corresponding bit in _bitMap
6647   assert(obj->is_oop(), "expected an oop");
6648   HeapWord* addr = (HeapWord*)obj;
6649   if (_span.contains(addr)) {
6650     // this should be made more efficient
6651     _bitMap->mark(addr);
6652   }
6653 }
6654 
6655 void MarkRefsIntoClosure::do_oop(oop* p)       { MarkRefsIntoClosure::do_oop_work(p); }
6656 void MarkRefsIntoClosure::do_oop(narrowOop* p) { MarkRefsIntoClosure::do_oop_work(p); }
6657 
6658 // A variant of the above, used for CMS marking verification.
6659 MarkRefsIntoVerifyClosure::MarkRefsIntoVerifyClosure(
6660   MemRegion span, CMSBitMap* verification_bm, CMSBitMap* cms_bm):
6661     _span(span),
6662     _verification_bm(verification_bm),
6663     _cms_bm(cms_bm)
6664 {
6665     assert(_ref_processor == NULL, "deliberately left NULL");
6666     assert(_verification_bm->covers(_span), "_verification_bm/_span mismatch");
6667 }
6668 
6669 void MarkRefsIntoVerifyClosure::do_oop(oop obj) {
6670   // if obj points into _span, then mark the corresponding bit in _verification_bm
6671   assert(obj->is_oop(), "expected an oop");
6672   HeapWord* addr = (HeapWord*)obj;
6673   if (_span.contains(addr)) {
6674     _verification_bm->mark(addr);
6675     if (!_cms_bm->isMarked(addr)) {
6676       oop(addr)->print();
6677       gclog_or_tty->print_cr(" (" INTPTR_FORMAT " should have been marked)", addr);
6678       fatal("... aborting");
6679     }
6680   }
6681 }
6682 
6683 void MarkRefsIntoVerifyClosure::do_oop(oop* p)       { MarkRefsIntoVerifyClosure::do_oop_work(p); }
6684 void MarkRefsIntoVerifyClosure::do_oop(narrowOop* p) { MarkRefsIntoVerifyClosure::do_oop_work(p); }
6685 
6686 //////////////////////////////////////////////////
6687 // MarkRefsIntoAndScanClosure
6688 //////////////////////////////////////////////////
6689 
6690 MarkRefsIntoAndScanClosure::MarkRefsIntoAndScanClosure(MemRegion span,
6691                                                        ReferenceProcessor* rp,
6692                                                        CMSBitMap* bit_map,
6693                                                        CMSBitMap* mod_union_table,
6694                                                        CMSMarkStack*  mark_stack,
6695                                                        CMSMarkStack*  revisit_stack,
6696                                                        CMSCollector* collector,
6697                                                        bool should_yield,
6698                                                        bool concurrent_precleaning):
6699   _collector(collector),
6700   _span(span),
6701   _bit_map(bit_map),
6702   _mark_stack(mark_stack),
6703   _pushAndMarkClosure(collector, span, rp, bit_map, mod_union_table,
6704                       mark_stack, revisit_stack, concurrent_precleaning),
6705   _yield(should_yield),
6706   _concurrent_precleaning(concurrent_precleaning),
6707   _freelistLock(NULL)
6708 {
6709   _ref_processor = rp;
6710   assert(_ref_processor != NULL, "_ref_processor shouldn't be NULL");
6711 }
6712 
6713 // This closure is used to mark refs into the CMS generation at the
6714 // second (final) checkpoint, and to scan and transitively follow
6715 // the unmarked oops. It is also used during the concurrent precleaning
6716 // phase while scanning objects on dirty cards in the CMS generation.
6717 // The marks are made in the marking bit map and the marking stack is
6718 // used for keeping the (newly) grey objects during the scan.
6719 // The parallel version (Par_...) appears further below.
6720 void MarkRefsIntoAndScanClosure::do_oop(oop obj) {
6721   if (obj != NULL) {
6722     assert(obj->is_oop(), "expected an oop");
6723     HeapWord* addr = (HeapWord*)obj;
6724     assert(_mark_stack->isEmpty(), "pre-condition (eager drainage)");
6725     assert(_collector->overflow_list_is_empty(),
6726            "overflow list should be empty");
6727     if (_span.contains(addr) &&
6728         !_bit_map->isMarked(addr)) {
6729       // mark bit map (object is now grey)
6730       _bit_map->mark(addr);
6731       // push on marking stack (stack should be empty), and drain the
6732       // stack by applying this closure to the oops in the oops popped
6733       // from the stack (i.e. blacken the grey objects)
6734       bool res = _mark_stack->push(obj);
6735       assert(res, "Should have space to push on empty stack");
6736       do {
6737         oop new_oop = _mark_stack->pop();
6738         assert(new_oop != NULL && new_oop->is_oop(), "Expected an oop");
6739         assert(new_oop->is_parsable(), "Found unparsable oop");
6740         assert(_bit_map->isMarked((HeapWord*)new_oop),
6741                "only grey objects on this stack");
6742         // iterate over the oops in this oop, marking and pushing
6743         // the ones in CMS heap (i.e. in _span).
6744         new_oop->oop_iterate(&_pushAndMarkClosure);
6745         // check if it's time to yield
6746         do_yield_check();
6747       } while (!_mark_stack->isEmpty() ||
6748                (!_concurrent_precleaning && take_from_overflow_list()));
6749         // if marking stack is empty, and we are not doing this
6750         // during precleaning, then check the overflow list
6751     }
6752     assert(_mark_stack->isEmpty(), "post-condition (eager drainage)");
6753     assert(_collector->overflow_list_is_empty(),
6754            "overflow list was drained above");
6755     // We could restore evacuated mark words, if any, used for
6756     // overflow list links here because the overflow list is
6757     // provably empty here. That would reduce the maximum
6758     // size requirements for preserved_{oop,mark}_stack.
6759     // But we'll just postpone it until we are all done
6760     // so we can just stream through.
6761     if (!_concurrent_precleaning && CMSOverflowEarlyRestoration) {
6762       _collector->restore_preserved_marks_if_any();
6763       assert(_collector->no_preserved_marks(), "No preserved marks");
6764     }
6765     assert(!CMSOverflowEarlyRestoration || _collector->no_preserved_marks(),
6766            "All preserved marks should have been restored above");
6767   }
6768 }
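
     // In tri-color terms, the loop above maintains the usual invariant:
     // setting the mark bit greys an object; popping it and applying
     // _pushAndMarkClosure to its interior oops blackens it, greying any
     // white (unmarked, in-span) objects it references. The drain ends
     // only when no grey objects remain on the stack or overflow list.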
6769 
6770 void MarkRefsIntoAndScanClosure::do_oop(oop* p)       { MarkRefsIntoAndScanClosure::do_oop_work(p); }
6771 void MarkRefsIntoAndScanClosure::do_oop(narrowOop* p) { MarkRefsIntoAndScanClosure::do_oop_work(p); }
6772 
6773 void MarkRefsIntoAndScanClosure::do_yield_work() {
6774   assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
6775          "CMS thread should hold CMS token");
6776   assert_lock_strong(_freelistLock);
6777   assert_lock_strong(_bit_map->lock());
6778   // relinquish the freelistLock and the bitMapLock()
6779   DEBUG_ONLY(RememberKlassesChecker mux(false);)
6780   _bit_map->lock()->unlock();
6781   _freelistLock->unlock();
6782   ConcurrentMarkSweepThread::desynchronize(true);
6783   ConcurrentMarkSweepThread::acknowledge_yield_request();
6784   _collector->stopTimer();
6785   GCPauseTimer p(_collector->size_policy()->concurrent_timer_ptr());
6786   if (PrintCMSStatistics != 0) {
6787     _collector->incrementYields();
6788   }
6789   _collector->icms_wait();
6790 
6791   // See the comment in coordinator_yield()
6792   for (unsigned i = 0;
6793        i < CMSYieldSleepCount &&
6794        ConcurrentMarkSweepThread::should_yield() &&
6795        !CMSCollector::foregroundGCIsActive();
6796        ++i) {
6797     os::sleep(Thread::current(), 1, false);
6798     ConcurrentMarkSweepThread::acknowledge_yield_request();
6799   }
6800 
6801   ConcurrentMarkSweepThread::synchronize(true);
6802   _freelistLock->lock_without_safepoint_check();
6803   _bit_map->lock()->lock_without_safepoint_check();
6804   _collector->startTimer();
6805 }
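
     // The sequence above is the canonical CMS yield protocol, repeated
     // with the relevant subset of locks by the closures further below:
     // release the locks, surrender the CMS token, sleep in short quanta
     // while a yield is still being requested, then reacquire the token
     // and the locks in the reverse of the release order.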
6806 
6807 ///////////////////////////////////////////////////////////
6808 // Par_MarkRefsIntoAndScanClosure: a parallel version of
6809 //                                 MarkRefsIntoAndScanClosure
6810 ///////////////////////////////////////////////////////////
6811 Par_MarkRefsIntoAndScanClosure::Par_MarkRefsIntoAndScanClosure(
6812   CMSCollector* collector, MemRegion span, ReferenceProcessor* rp,
6813   CMSBitMap* bit_map, OopTaskQueue* work_queue, CMSMarkStack*  revisit_stack):
6814   _span(span),
6815   _bit_map(bit_map),
6816   _work_queue(work_queue),
6817   _low_water_mark(MIN2((uint)(work_queue->max_elems()/4),
6818                        (uint)(CMSWorkQueueDrainThreshold * ParallelGCThreads))),
6819   _par_pushAndMarkClosure(collector, span, rp, bit_map, work_queue,
6820                           revisit_stack)
6821 {
6822   _ref_processor = rp;
6823   assert(_ref_processor != NULL, "_ref_processor shouldn't be NULL");
6824 }
6825 
6826 // This closure is used to mark refs into the CMS generation at the
6827 // second (final) checkpoint, and to scan and transitively follow
6828 // the unmarked oops. The marks are made in the marking bit map and
6829 // the work_queue is used for keeping the (newly) grey objects during
6830 // the scan phase whence they are also available for stealing by parallel
6831 // threads. Since the marking bit map is shared, updates are
6832 // synchronized (via CAS).
6833 void Par_MarkRefsIntoAndScanClosure::do_oop(oop obj) {
6834   if (obj != NULL) {
6835     // Ignore mark word because this could be an already marked oop
6836     // that may be chained at the end of the overflow list.
6837     assert(obj->is_oop(true), "expected an oop");
6838     HeapWord* addr = (HeapWord*)obj;
6839     if (_span.contains(addr) &&
6840         !_bit_map->isMarked(addr)) {
6841       // mark bit map (object will become grey):
6842       // It is possible for several threads to be
6843       // trying to "claim" this object concurrently;
6844       // the unique thread that succeeds in marking the
6845       // object first will do the subsequent push on
6846       // to the work queue (or overflow list).
6847       if (_bit_map->par_mark(addr)) {
6848         // push on work_queue (which may not be empty), and trim the
6849         // queue to an appropriate length by applying this closure to
6850         // the oops in the oops popped from the stack (i.e. blacken the
6851         // grey objects)
6852         bool res = _work_queue->push(obj);
6853         assert(res, "Low water mark should be less than capacity?");
6854         trim_queue(_low_water_mark);
6855       } // Else, another thread claimed the object
6856     }
6857   }
6858 }
6859 
6860 void Par_MarkRefsIntoAndScanClosure::do_oop(oop* p)       { Par_MarkRefsIntoAndScanClosure::do_oop_work(p); }
6861 void Par_MarkRefsIntoAndScanClosure::do_oop(narrowOop* p) { Par_MarkRefsIntoAndScanClosure::do_oop_work(p); }
6862 
6863 // This closure is used to rescan the marked objects on the dirty cards
6864 // in the mod union table and the card table proper.
6865 size_t ScanMarkedObjectsAgainCarefullyClosure::do_object_careful_m(
6866   oop p, MemRegion mr) {
6867 
6868   size_t size = 0;
6869   HeapWord* addr = (HeapWord*)p;
6870   DEBUG_ONLY(_collector->verify_work_stacks_empty();)
6871   assert(_span.contains(addr), "we are scanning the CMS generation");
6872   // check if it's time to yield
6873   if (do_yield_check()) {
6874     // We yielded for some foreground stop-world work,
6875     // and we have been asked to abort this ongoing preclean cycle.
6876     return 0;
6877   }
6878   if (_bitMap->isMarked(addr)) {
6879     // it's marked; is it potentially uninitialized?
6880     if (p->klass_or_null() != NULL) {
6881       // If is_conc_safe is false, the object may be undergoing
6882       // change by the VM outside a safepoint.  Don't try to
6883       // scan it, but rather leave it for the remark phase.
6884       if (CMSPermGenPrecleaningEnabled &&
6885           (!p->is_conc_safe() || !p->is_parsable())) {
6886         // Signal precleaning to redirty the card since
6887         // the klass pointer is already installed.
6888         assert(size == 0, "Initial value");
6889       } else {
6890         assert(p->is_parsable(), "must be parsable.");
6891         // an initialized object; ignore mark word in verification below
6892         // since we are running concurrent with mutators
6893         assert(p->is_oop(true), "should be an oop");
6894         if (p->is_objArray()) {
6895           // objArrays are precisely marked; restrict scanning
6896           // to dirty cards only.
6897           size = CompactibleFreeListSpace::adjustObjectSize(
6898                    p->oop_iterate(_scanningClosure, mr));
6899         } else {
6900           // A non-array may have been imprecisely marked; we need
6901           // to scan object in its entirety.
6902           size = CompactibleFreeListSpace::adjustObjectSize(
6903                    p->oop_iterate(_scanningClosure));
6904         }
6905         #ifdef DEBUG
6906           size_t direct_size =
6907             CompactibleFreeListSpace::adjustObjectSize(p->size());
6908           assert(size == direct_size, "Inconsistency in size");
6909           assert(size >= 3, "Necessary for Printezis marks to work");
6910           if (!_bitMap->isMarked(addr+1)) {
6911             _bitMap->verifyNoOneBitsInRange(addr+2, addr+size);
6912           } else {
6913             _bitMap->verifyNoOneBitsInRange(addr+2, addr+size-1);
6914             assert(_bitMap->isMarked(addr+size-1),
6915                    "inconsistent Printezis mark");
6916           }
6917         #endif // DEBUG
6918       }
6919     } else {
6920       // an uninitialized object
6921       assert(_bitMap->isMarked(addr+1), "missing Printezis mark?");
6922       HeapWord* nextOneAddr = _bitMap->getNextMarkedWordAddress(addr + 2);
6923       size = pointer_delta(nextOneAddr + 1, addr);
6924       assert(size == CompactibleFreeListSpace::adjustObjectSize(size),
6925              "alignment problem");
6926       // Note that pre-cleaning needn't redirty the card. OopDesc::set_klass()
6927       // will dirty the card when the klass pointer is installed in the
6928       // object (signalling the completion of initialization).
6929     }
6930   } else {
6931     // Either a not yet marked object or an uninitialized object
6932     if (p->klass_or_null() == NULL || !p->is_parsable()) {
6933       // An uninitialized object, skip to the next card, since
6934       // we may not be able to read its P-bits yet.
6935       assert(size == 0, "Initial value");
6936     } else {
6937       // An object not (yet) reached by marking: we merely need to
6938       // compute its size so as to go look at the next block.
6939       assert(p->is_oop(true), "should be an oop");
6940       size = CompactibleFreeListSpace::adjustObjectSize(p->size());
6941     }
6942   }
6943   DEBUG_ONLY(_collector->verify_work_stacks_empty();)
6944   return size;
6945 }
6946 
6947 void ScanMarkedObjectsAgainCarefullyClosure::do_yield_work() {
6948   assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
6949          "CMS thread should hold CMS token");
6950   assert_lock_strong(_freelistLock);
6951   assert_lock_strong(_bitMap->lock());
6952   DEBUG_ONLY(RememberKlassesChecker mux(false);)
6953   // relinquish the freelistLock and the bitMapLock()
6954   _bitMap->lock()->unlock();
6955   _freelistLock->unlock();
6956   ConcurrentMarkSweepThread::desynchronize(true);
6957   ConcurrentMarkSweepThread::acknowledge_yield_request();
6958   _collector->stopTimer();
6959   GCPauseTimer p(_collector->size_policy()->concurrent_timer_ptr());
6960   if (PrintCMSStatistics != 0) {
6961     _collector->incrementYields();
6962   }
6963   _collector->icms_wait();
6964 
6965   // See the comment in coordinator_yield()
6966   for (unsigned i = 0; i < CMSYieldSleepCount &&
6967                    ConcurrentMarkSweepThread::should_yield() &&
6968                    !CMSCollector::foregroundGCIsActive(); ++i) {
6969     os::sleep(Thread::current(), 1, false);
6970     ConcurrentMarkSweepThread::acknowledge_yield_request();
6971   }
6972 
6973   ConcurrentMarkSweepThread::synchronize(true);
6974   _freelistLock->lock_without_safepoint_check();
6975   _bitMap->lock()->lock_without_safepoint_check();
6976   _collector->startTimer();
6977 }
6978 
6979 
6980 //////////////////////////////////////////////////////////////////
6981 // SurvivorSpacePrecleanClosure
6982 //////////////////////////////////////////////////////////////////
6983 // This (single-threaded) closure is used to preclean the oops in
6984 // the survivor spaces.
6985 size_t SurvivorSpacePrecleanClosure::do_object_careful(oop p) {
6986 
6987   HeapWord* addr = (HeapWord*)p;
6988   DEBUG_ONLY(_collector->verify_work_stacks_empty();)
6989   assert(!_span.contains(addr), "we are scanning the survivor spaces");
6990   assert(p->klass_or_null() != NULL, "object should be initialized");
6991   assert(p->is_parsable(), "must be parsable.");
6992   // an initialized object; ignore mark word in verification below
6993   // since we are running concurrent with mutators
6994   assert(p->is_oop(true), "should be an oop");
6995   // Note that we do not yield while we iterate over
6996   // the interior oops of p, pushing the relevant ones
6997   // on our marking stack.
6998   size_t size = p->oop_iterate(_scanning_closure);
6999   do_yield_check();
7000   // Observe that below, we do not abandon the preclean
7001   // phase as soon as we should; rather we empty the
7002   // marking stack before returning. This is to satisfy
7003   // some existing assertions. In general, it may be a
7004   // good idea to abort immediately and complete the marking
7005   // from the grey objects at a later time.
7006   while (!_mark_stack->isEmpty()) {
7007     oop new_oop = _mark_stack->pop();
7008     assert(new_oop != NULL && new_oop->is_oop(), "Expected an oop");
7009     assert(new_oop->is_parsable(), "Found unparsable oop");
7010     assert(_bit_map->isMarked((HeapWord*)new_oop),
7011            "only grey objects on this stack");
7012     // iterate over the oops in this oop, marking and pushing
7013     // the ones in CMS heap (i.e. in _span).
7014     new_oop->oop_iterate(_scanning_closure);
7015     // check if it's time to yield
7016     do_yield_check();
7017   }
7018   unsigned int after_count =
7019     GenCollectedHeap::heap()->total_collections();
7020   bool abort = (_before_count != after_count) ||
7021                _collector->should_abort_preclean();
7022   return abort ? 0 : size;
7023 }
7024 
7025 void SurvivorSpacePrecleanClosure::do_yield_work() {
7026   assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
7027          "CMS thread should hold CMS token");
7028   assert_lock_strong(_bit_map->lock());
7029   DEBUG_ONLY(RememberKlassesChecker smx(false);)
7030   // Relinquish the bit map lock
7031   _bit_map->lock()->unlock();
7032   ConcurrentMarkSweepThread::desynchronize(true);
7033   ConcurrentMarkSweepThread::acknowledge_yield_request();
7034   _collector->stopTimer();
7035   GCPauseTimer p(_collector->size_policy()->concurrent_timer_ptr());
7036   if (PrintCMSStatistics != 0) {
7037     _collector->incrementYields();
7038   }
7039   _collector->icms_wait();
7040 
7041   // See the comment in coordinator_yield()
7042   for (unsigned i = 0; i < CMSYieldSleepCount &&
7043                        ConcurrentMarkSweepThread::should_yield() &&
7044                        !CMSCollector::foregroundGCIsActive(); ++i) {
7045     os::sleep(Thread::current(), 1, false);
7046     ConcurrentMarkSweepThread::acknowledge_yield_request();
7047   }
7048 
7049   ConcurrentMarkSweepThread::synchronize(true);
7050   _bit_map->lock()->lock_without_safepoint_check();
7051   _collector->startTimer();
7052 }
7053 
7054 // This closure is used to rescan the marked objects on the dirty cards
7055 // in the mod union table and the card table proper. In the parallel
7056 // case, although the bitMap is shared, we do a single read so the
7057 // isMarked() query is "safe".
7058 bool ScanMarkedObjectsAgainClosure::do_object_bm(oop p, MemRegion mr) {
7059   // Ignore mark word because we are running concurrent with mutators
7060   assert(p->is_oop_or_null(true), "expected an oop or null");
7061   HeapWord* addr = (HeapWord*)p;
7062   assert(_span.contains(addr), "we are scanning the CMS generation");
7063   bool is_obj_array = false;
7064   #ifdef DEBUG
7065     if (!_parallel) {
7066       assert(_mark_stack->isEmpty(), "pre-condition (eager drainage)");
7067       assert(_collector->overflow_list_is_empty(),
7068              "overflow list should be empty");
7069 
7070     }
7071   #endif // DEBUG
7072   if (_bit_map->isMarked(addr)) {
7073     // Obj arrays are precisely marked, non-arrays are not;
7074     // so we scan objArrays precisely and non-arrays in their
7075     // entirety.
7076     if (p->is_objArray()) {
7077       is_obj_array = true;
7078       if (_parallel) {
7079         p->oop_iterate(_par_scan_closure, mr);
7080       } else {
7081         p->oop_iterate(_scan_closure, mr);
7082       }
7083     } else {
7084       if (_parallel) {
7085         p->oop_iterate(_par_scan_closure);
7086       } else {
7087         p->oop_iterate(_scan_closure);
7088       }
7089     }
7090   }
7091   #ifdef DEBUG
7092     if (!_parallel) {
7093       assert(_mark_stack->isEmpty(), "post-condition (eager drainage)");
7094       assert(_collector->overflow_list_is_empty(),
7095              "overflow list should be empty");
7096 
7097     }
7098   #endif // DEBUG
7099   return is_obj_array;
7100 }
7101 
7102 MarkFromRootsClosure::MarkFromRootsClosure(CMSCollector* collector,
7103                         MemRegion span,
7104                         CMSBitMap* bitMap, CMSMarkStack*  markStack,
7105                         CMSMarkStack*  revisitStack,
7106                         bool should_yield, bool verifying):
7107   _collector(collector),
7108   _span(span),
7109   _bitMap(bitMap),
7110   _mut(&collector->_modUnionTable),
7111   _markStack(markStack),
7112   _revisitStack(revisitStack),
7113   _yield(should_yield),
7114   _skipBits(0)
7115 {
7116   assert(_markStack->isEmpty(), "stack should be empty");
7117   _finger = _bitMap->startWord();
7118   _threshold = _finger;
7119   assert(_collector->_restart_addr == NULL, "Sanity check");
7120   assert(_span.contains(_finger), "Out of bounds _finger?");
7121   DEBUG_ONLY(_verifying = verifying;)
7122 }
7123 
7124 void MarkFromRootsClosure::reset(HeapWord* addr) {
7125   assert(_markStack->isEmpty(), "would cause duplicates on stack");
7126   assert(_span.contains(addr), "Out of bounds _finger?");
7127   _finger = addr;
7128   _threshold = (HeapWord*)round_to(
7129                  (intptr_t)_finger, CardTableModRefBS::card_size);
7130 }
7131 
7132 // Should revisit to see if this should be restructured for
7133 // greater efficiency.
7134 bool MarkFromRootsClosure::do_bit(size_t offset) {
7135   if (_skipBits > 0) {
7136     _skipBits--;
7137     return true;
7138   }
7139   // convert offset into a HeapWord*
7140   HeapWord* addr = _bitMap->startWord() + offset;
7141   assert(addr >= _bitMap->startWord() && addr < _bitMap->endWord(),
7142          "address out of range");
7143   assert(_bitMap->isMarked(addr), "tautology");
7144   if (_bitMap->isMarked(addr+1)) {
7145     // this is an allocated but not yet initialized object
7146     assert(_skipBits == 0, "tautology");
7147     _skipBits = 2;  // skip next two marked bits ("Printezis-marks")
7148     oop p = oop(addr);
7149     if (p->klass_or_null() == NULL || !p->is_parsable()) {
7150       DEBUG_ONLY(if (!_verifying) {)
7151         // We re-dirty the cards on which this object lies and increase
7152         // the _threshold so that we'll come back to scan this object
7153         // during the preclean or remark phase. (CMSCleanOnEnter)
7154         if (CMSCleanOnEnter) {
7155           size_t sz = _collector->block_size_using_printezis_bits(addr);
7156           HeapWord* end_card_addr   = (HeapWord*)round_to(
7157                                          (intptr_t)(addr+sz), CardTableModRefBS::card_size);
7158           MemRegion redirty_range = MemRegion(addr, end_card_addr);
7159           assert(!redirty_range.is_empty(), "Arithmetical tautology");
7160           // Bump _threshold to end_card_addr; note that
7161           // _threshold cannot possibly exceed end_card_addr, anyhow.
7162           // This prevents future clearing of the card as the scan proceeds
7163           // to the right.
7164           assert(_threshold <= end_card_addr,
7165                  "Because we are just scanning into this object");
7166           if (_threshold < end_card_addr) {
7167             _threshold = end_card_addr;
7168           }
7169           if (p->klass_or_null() != NULL) {
7170             // Redirty the range of cards...
7171             _mut->mark_range(redirty_range);
7172           } // ...else the setting of klass will dirty the card anyway.
7173         }
7174       DEBUG_ONLY(})
7175       return true;
7176     }
7177   }
7178   scanOopsInOop(addr);
7179   return true;
7180 }
7181 
7182 // We take a break if we've been at this for a while,
7183 // so as to avoid monopolizing the locks involved.
7184 void MarkFromRootsClosure::do_yield_work() {
7185   // First give up the locks, then yield, then re-lock
7186   // We should probably use a constructor/destructor idiom to
7187   // do this unlock/lock or modify the MutexUnlocker class to
7188   // serve our purpose. XXX
7189   assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
7190          "CMS thread should hold CMS token");
7191   assert_lock_strong(_bitMap->lock());
7192   DEBUG_ONLY(RememberKlassesChecker mux(false);)
7193   _bitMap->lock()->unlock();
7194   ConcurrentMarkSweepThread::desynchronize(true);
7195   ConcurrentMarkSweepThread::acknowledge_yield_request();
7196   _collector->stopTimer();
7197   GCPauseTimer p(_collector->size_policy()->concurrent_timer_ptr());
7198   if (PrintCMSStatistics != 0) {
7199     _collector->incrementYields();
7200   }
7201   _collector->icms_wait();
7202 
7203   // See the comment in coordinator_yield()
7204   for (unsigned i = 0; i < CMSYieldSleepCount &&
7205                        ConcurrentMarkSweepThread::should_yield() &&
7206                        !CMSCollector::foregroundGCIsActive(); ++i) {
7207     os::sleep(Thread::current(), 1, false);
7208     ConcurrentMarkSweepThread::acknowledge_yield_request();
7209   }
7210 
7211   ConcurrentMarkSweepThread::synchronize(true);
7212   _bitMap->lock()->lock_without_safepoint_check();
7213   _collector->startTimer();
7214 }
7215 
7216 void MarkFromRootsClosure::scanOopsInOop(HeapWord* ptr) {
7217   assert(_bitMap->isMarked(ptr), "expected bit to be set");
7218   assert(_markStack->isEmpty(),
7219          "should drain stack to limit stack usage");
7220   // convert ptr to an oop preparatory to scanning
7221   oop obj = oop(ptr);
7222   // Ignore mark word in verification below, since we
7223   // may be running concurrent with mutators.
7224   assert(obj->is_oop(true), "should be an oop");
7225   assert(_finger <= ptr, "_finger runneth ahead");
7226   // advance the finger to right end of this object
7227   _finger = ptr + obj->size();
7228   assert(_finger > ptr, "we just incremented it above");
7229   // On large heaps, it may take us some time to get through
7230   // the marking phase (especially if running iCMS). During
7231   // this time it's possible that a lot of mutations have
7232   // accumulated in the card table and the mod union table --
7233   // these mutation records are redundant until we have
7234   // actually traced into the corresponding card.
7235   // Here, we check whether advancing the finger would make
7236   // us cross into a new card, and if so clear corresponding
7237   // cards in the MUT (preclean them in the card-table in the
7238   // future).
7239 
7240   DEBUG_ONLY(if (!_verifying) {)
7241     // The clean-on-enter optimization is disabled by default,
7242     // until we fix 6178663.
7243     if (CMSCleanOnEnter && (_finger > _threshold)) {
7244       // [_threshold, _finger) represents the interval
7245       // of cards to be cleared in MUT (or precleaned in card table).
7246       // The set of cards to be cleared is all those that overlap
7247       // with the interval [_threshold, _finger); note that
7248       // _threshold is always kept card-aligned but _finger isn't
7249       // always card-aligned.
7250       HeapWord* old_threshold = _threshold;
7251       assert(old_threshold == (HeapWord*)round_to(
7252               (intptr_t)old_threshold, CardTableModRefBS::card_size),
7253              "_threshold should always be card-aligned");
7254       _threshold = (HeapWord*)round_to(
7255                      (intptr_t)_finger, CardTableModRefBS::card_size);
7256       MemRegion mr(old_threshold, _threshold);
7257       assert(!mr.is_empty(), "Control point invariant");
7258       assert(_span.contains(mr), "Should clear within span");
7259       // XXX When _finger crosses from old gen into perm gen
7260       // we may be doing unnecessary cleaning; do better in the
7261       // future by detecting that condition and clearing fewer
7262       // MUT/CT entries.
7263       _mut->clear_range(mr);
7264     }
7265   DEBUG_ONLY(})
7266   // Note: the finger doesn't advance while we drain
7267   // the stack below.
7268   PushOrMarkClosure pushOrMarkClosure(_collector,
7269                                       _span, _bitMap, _markStack,
7270                                       _revisitStack,
7271                                       _finger, this);
7272   bool res = _markStack->push(obj);
7273   assert(res, "Empty non-zero size stack should have space for single push");
7274   while (!_markStack->isEmpty()) {
7275     oop new_oop = _markStack->pop();
7276     // Skip verifying header mark word below because we are
7277     // running concurrent with mutators.
7278     assert(new_oop->is_oop(true), "Oops! expected to pop an oop");
7279     // now scan this oop's oops
7280     new_oop->oop_iterate(&pushOrMarkClosure);
7281     do_yield_check();
7282   }
7283   assert(_markStack->isEmpty(), "tautology, emphasizing post-condition");
7284 }
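
     // Finger/threshold sketch (assuming 64-word cards): if _threshold
     // stands at word 128 and scanning an object advances _finger to
     // word 300, _threshold is bumped to round_to(300, 64) = 320 and the
     // cards overlapping [128, 320) are cleared in the MUT: marking is
     // now tracing directly into those cards, so their mod union
     // records are redundant.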
7285 
7286 Par_MarkFromRootsClosure::Par_MarkFromRootsClosure(CMSConcMarkingTask* task,
7287                        CMSCollector* collector, MemRegion span,
7288                        CMSBitMap* bit_map,
7289                        OopTaskQueue* work_queue,
7290                        CMSMarkStack*  overflow_stack,
7291                        CMSMarkStack*  revisit_stack,
7292                        bool should_yield):
7293   _collector(collector),
7294   _whole_span(collector->_span),
7295   _span(span),
7296   _bit_map(bit_map),
7297   _mut(&collector->_modUnionTable),
7298   _work_queue(work_queue),
7299   _overflow_stack(overflow_stack),
7300   _revisit_stack(revisit_stack),
7301   _yield(should_yield),
7302   _skip_bits(0),
7303   _task(task)
7304 {
7305   assert(_work_queue->size() == 0, "work_queue should be empty");
7306   _finger = span.start();
7307   _threshold = _finger;     // XXX Defer clear-on-enter optimization for now
7308   assert(_span.contains(_finger), "Out of bounds _finger?");
7309 }
7310 
7311 // Should revisit to see if this should be restructured for
7312 // greater efficiency.
7313 bool Par_MarkFromRootsClosure::do_bit(size_t offset) {
7314   if (_skip_bits > 0) {
7315     _skip_bits--;
7316     return true;
7317   }
7318   // convert offset into a HeapWord*
7319   HeapWord* addr = _bit_map->startWord() + offset;
7320   assert(addr >= _bit_map->startWord() && addr < _bit_map->endWord(),
7321          "address out of range");
7322   assert(_bit_map->isMarked(addr), "tautology");
7323   if (_bit_map->isMarked(addr+1)) {
7324     // this is an allocated object that might not yet be initialized
7325     assert(_skip_bits == 0, "tautology");
7326     _skip_bits = 2;  // skip next two marked bits ("Printezis-marks")
7327     oop p = oop(addr);
7328     if (p->klass_or_null() == NULL || !p->is_parsable()) {
7329       // in the case of the Clean-on-Enter optimization, redirty the
7330       // card and avoid clearing it by increasing the threshold.
7331       return true;
7332     }
7333   }
7334   scan_oops_in_oop(addr);
7335   return true;
7336 }
7337 
7338 void Par_MarkFromRootsClosure::scan_oops_in_oop(HeapWord* ptr) {
7339   assert(_bit_map->isMarked(ptr), "expected bit to be set");
7340   // Should we assert that our work queue is empty or
7341   // below some drain limit?
7342   assert(_work_queue->size() == 0,
7343          "should drain stack to limit stack usage");
7344   // convert ptr to an oop preparatory to scanning
7345   oop obj = oop(ptr);
7346   // Ignore mark word in verification below, since we
7347   // may be running concurrent with mutators.
7348   assert(obj->is_oop(true), "should be an oop");
7349   assert(_finger <= ptr, "_finger runneth ahead");
7350   // advance the finger to right end of this object
7351   _finger = ptr + obj->size();
7352   assert(_finger > ptr, "we just incremented it above");
7353   // On large heaps, it may take us some time to get through
7354   // the marking phase (especially if running iCMS). During
7355   // this time it's possible that a lot of mutations have
7356   // accumulated in the card table and the mod union table --
7357   // these mutation records are redundant until we have
7358   // actually traced into the corresponding card.
7359   // Here, we check whether advancing the finger would make
7360   // us cross into a new card, and if so clear corresponding
7361   // cards in the MUT (preclean them in the card-table in the
7362   // future).
7363 
7364   // The clean-on-enter optimization is disabled by default,
7365   // until we fix 6178663.
7366   if (CMSCleanOnEnter && (_finger > _threshold)) {
7367     // [_threshold, _finger) represents the interval
7368     // of cards to be cleared in MUT (or precleaned in card table).
7369     // The set of cards to be cleared is all those that overlap
7370     // with the interval [_threshold, _finger); note that
7371     // _threshold is always kept card-aligned but _finger isn't
7372     // always card-aligned.
7373     HeapWord* old_threshold = _threshold;
7374     assert(old_threshold == (HeapWord*)round_to(
7375             (intptr_t)old_threshold, CardTableModRefBS::card_size),
7376            "_threshold should always be card-aligned");
7377     _threshold = (HeapWord*)round_to(
7378                    (intptr_t)_finger, CardTableModRefBS::card_size);
7379     MemRegion mr(old_threshold, _threshold);
7380     assert(!mr.is_empty(), "Control point invariant");
7381     assert(_span.contains(mr), "Should clear within span"); // _whole_span ??
7382     // XXX When _finger crosses from old gen into perm gen
7383     // we may be doing unnecessary cleaning; do better in the
7384     // future by detecting that condition and clearing fewer
7385     // MUT/CT entries.
7386     _mut->clear_range(mr);
7387   }
7388 
7389   // Note: the local finger doesn't advance while we drain
7390   // the stack below, but the global finger sure can and will.
7391   HeapWord** gfa = _task->global_finger_addr();
7392   Par_PushOrMarkClosure pushOrMarkClosure(_collector,
7393                                       _span, _bit_map,
7394                                       _work_queue,
7395                                       _overflow_stack,
7396                                       _revisit_stack,
7397                                       _finger,
7398                                       gfa, this);
7399   bool res = _work_queue->push(obj);   // overflow could occur here
7400   assert(res, "Will hold once we use workqueues");
7401   while (true) {
7402     oop new_oop;
7403     if (!_work_queue->pop_local(new_oop)) {
7404       // We emptied our work_queue; check if there's stuff that can
7405       // be gotten from the overflow stack.
7406       if (CMSConcMarkingTask::get_work_from_overflow_stack(
7407             _overflow_stack, _work_queue)) {
7408         do_yield_check();
7409         continue;
7410       } else {  // done
7411         break;
7412       }
7413     }
7414     // Skip verifying header mark word below because we are
7415     // running concurrent with mutators.
7416     assert(new_oop->is_oop(true), "Oops! expected to pop an oop");
7417     // now scan this oop's oops
7418     new_oop->oop_iterate(&pushOrMarkClosure);
7419     do_yield_check();
7420   }
7421   assert(_work_queue->size() == 0, "tautology, emphasizing post-condition");
7422 }
7423 
7424 // Yield in response to a request from VM Thread or
7425 // from mutators.
7426 void Par_MarkFromRootsClosure::do_yield_work() {
7427   assert(_task != NULL, "sanity");
7428   _task->yield();
7429 }
7430 
7431 // A variant of the above used for verifying CMS marking work.
7432 MarkFromRootsVerifyClosure::MarkFromRootsVerifyClosure(CMSCollector* collector,
7433                         MemRegion span,
7434                         CMSBitMap* verification_bm, CMSBitMap* cms_bm,
7435                         CMSMarkStack*  mark_stack):
7436   _collector(collector),
7437   _span(span),
7438   _verification_bm(verification_bm),
7439   _cms_bm(cms_bm),
7440   _mark_stack(mark_stack),
7441   _pam_verify_closure(collector, span, verification_bm, cms_bm,
7442                       mark_stack)
7443 {
7444   assert(_mark_stack->isEmpty(), "stack should be empty");
7445   _finger = _verification_bm->startWord();
7446   assert(_collector->_restart_addr == NULL, "Sanity check");
7447   assert(_span.contains(_finger), "Out of bounds _finger?");
7448 }
7449 
7450 void MarkFromRootsVerifyClosure::reset(HeapWord* addr) {
7451   assert(_mark_stack->isEmpty(), "would cause duplicates on stack");
7452   assert(_span.contains(addr), "Out of bounds _finger?");
7453   _finger = addr;
7454 }
7455 
7456 // Should revisit to see if this should be restructured for
7457 // greater efficiency.
7458 bool MarkFromRootsVerifyClosure::do_bit(size_t offset) {
7459   // convert offset into a HeapWord*
7460   HeapWord* addr = _verification_bm->startWord() + offset;
7461   assert(addr >= _verification_bm->startWord() && addr < _verification_bm->endWord(),
7462          "address out of range");
7463   assert(_verification_bm->isMarked(addr), "tautology");
7464   assert(_cms_bm->isMarked(addr), "tautology");
7465 
7466   assert(_mark_stack->isEmpty(),
7467          "should drain stack to limit stack usage");
7468   // convert addr to an oop preparatory to scanning
7469   oop obj = oop(addr);
7470   assert(obj->is_oop(), "should be an oop");
7471   assert(_finger <= addr, "_finger runneth ahead");
7472   // advance the finger to right end of this object
7473   _finger = addr + obj->size();
7474   assert(_finger > addr, "we just incremented it above");
7475   // Note: the finger doesn't advance while we drain
7476   // the stack below.
7477   bool res = _mark_stack->push(obj);
7478   assert(res, "Empty non-zero size stack should have space for single push");
7479   while (!_mark_stack->isEmpty()) {
7480     oop new_oop = _mark_stack->pop();
7481     assert(new_oop->is_oop(), "Oops! expected to pop an oop");
7482     // now scan this oop's oops
7483     new_oop->oop_iterate(&_pam_verify_closure);
7484   }
7485   assert(_mark_stack->isEmpty(), "tautology, emphasizing post-condition");
7486   return true;
7487 }
7488 
7489 PushAndMarkVerifyClosure::PushAndMarkVerifyClosure(
7490   CMSCollector* collector, MemRegion span,
7491   CMSBitMap* verification_bm, CMSBitMap* cms_bm,
7492   CMSMarkStack*  mark_stack):
7493   OopClosure(collector->ref_processor()),
7494   _collector(collector),
7495   _span(span),
7496   _verification_bm(verification_bm),
7497   _cms_bm(cms_bm),
7498   _mark_stack(mark_stack)
7499 { }
7500 
7501 void PushAndMarkVerifyClosure::do_oop(oop* p)       { PushAndMarkVerifyClosure::do_oop_work(p); }
7502 void PushAndMarkVerifyClosure::do_oop(narrowOop* p) { PushAndMarkVerifyClosure::do_oop_work(p); }
7503 
7504 // Upon stack overflow, we discard (part of) the stack,
7505 // remembering the least address amongst those discarded
7506 // in CMSCollector's _restart_address.
7507 void PushAndMarkVerifyClosure::handle_stack_overflow(HeapWord* lost) {
7508   // Remember the least grey address discarded
7509   HeapWord* ra = (HeapWord*)_mark_stack->least_value(lost);
7510   _collector->lower_restart_addr(ra);
7511   _mark_stack->reset();  // discard stack contents
7512   _mark_stack->expand(); // expand the stack if possible
7513 }
7514 
7515 void PushAndMarkVerifyClosure::do_oop(oop obj) {
7516   assert(obj->is_oop_or_null(), "expected an oop or NULL");
7517   HeapWord* addr = (HeapWord*)obj;
7518   if (_span.contains(addr) && !_verification_bm->isMarked(addr)) {
7519     // Oop lies in _span and isn't yet grey or black
7520     _verification_bm->mark(addr);            // now grey
7521     if (!_cms_bm->isMarked(addr)) {
7522       oop(addr)->print();
7523       gclog_or_tty->print_cr(" (" INTPTR_FORMAT " should have been marked)",
7524                              addr);
7525       fatal("... aborting");
7526     }
7527 
7528     if (!_mark_stack->push(obj)) { // stack overflow
7529       if (PrintCMSStatistics != 0) {
7530         gclog_or_tty->print_cr("CMS marking stack overflow (benign) at "
7531                                SIZE_FORMAT, _mark_stack->capacity());
7532       }
7533       assert(_mark_stack->isFull(), "Else push should have succeeded");
7534       handle_stack_overflow(addr);
7535     }
7536     // anything including and to the right of _finger
7537     // will be scanned as we iterate over the remainder of the
7538     // bit map
7539   }
7540 }
7541 
7542 PushOrMarkClosure::PushOrMarkClosure(CMSCollector* collector,
7543                      MemRegion span,
7544                      CMSBitMap* bitMap, CMSMarkStack*  markStack,
7545                      CMSMarkStack*  revisitStack,
7546                      HeapWord* finger, MarkFromRootsClosure* parent) :
7547   KlassRememberingOopClosure(collector, collector->ref_processor(), revisitStack),
7548   _span(span),
7549   _bitMap(bitMap),
7550   _markStack(markStack),
7551   _finger(finger),
7552   _parent(parent)
7553 { }
7554 
7555 Par_PushOrMarkClosure::Par_PushOrMarkClosure(CMSCollector* collector,
7556                      MemRegion span,
7557                      CMSBitMap* bit_map,
7558                      OopTaskQueue* work_queue,
7559                      CMSMarkStack*  overflow_stack,
7560                      CMSMarkStack*  revisit_stack,
7561                      HeapWord* finger,
7562                      HeapWord** global_finger_addr,
7563                      Par_MarkFromRootsClosure* parent) :
7564   Par_KlassRememberingOopClosure(collector,
7565                             collector->ref_processor(),
7566                             revisit_stack),
7567   _whole_span(collector->_span),
7568   _span(span),
7569   _bit_map(bit_map),
7570   _work_queue(work_queue),
7571   _overflow_stack(overflow_stack),
7572   _finger(finger),
7573   _global_finger_addr(global_finger_addr),
7574   _parent(parent)
7575 { }
7576 
7577 // Assumes thread-safe access by callers, who are
7578 // responsible for mutual exclusion.
7579 void CMSCollector::lower_restart_addr(HeapWord* low) {
7580   assert(_span.contains(low), "Out of bounds addr");
7581   if (_restart_addr == NULL) {
7582     _restart_addr = low;
7583   } else {
7584     _restart_addr = MIN2(_restart_addr, low);
7585   }
7586 }
7587 
7588 // Upon stack overflow, we discard (part of) the stack,
7589 // remembering the least address amongst those discarded
7590 // in CMSCollector's _restart_address.
7591 void PushOrMarkClosure::handle_stack_overflow(HeapWord* lost) {
7592   // Remember the least grey address discarded
7593   HeapWord* ra = (HeapWord*)_markStack->least_value(lost);
7594   _collector->lower_restart_addr(ra);
7595   _markStack->reset();  // discard stack contents
7596   _markStack->expand(); // expand the stack if possible
7597 }
7598 
7599 // Upon stack overflow, we discard (part of) the stack,
7600 // remembering the least address amongst those discarded
7601 // in CMSCollector's _restart_address.
7602 void Par_PushOrMarkClosure::handle_stack_overflow(HeapWord* lost) {
7603   // We need to do this under a mutex to prevent other
7604   // workers from interfering with the work done below.
7605   MutexLockerEx ml(_overflow_stack->par_lock(),
7606                    Mutex::_no_safepoint_check_flag);
7607   // Remember the least grey address discarded
7608   HeapWord* ra = (HeapWord*)_overflow_stack->least_value(lost);
7609   _collector->lower_restart_addr(ra);
7610   _overflow_stack->reset();  // discard stack contents
7611   _overflow_stack->expand(); // expand the stack if possible
7612 }
7613 
7614 void PushOrMarkClosure::do_oop(oop obj) {
7615   // Ignore mark word because we are running concurrent with mutators.
7616   assert(obj->is_oop_or_null(true), "expected an oop or NULL");
7617   HeapWord* addr = (HeapWord*)obj;
7618   if (_span.contains(addr) && !_bitMap->isMarked(addr)) {
7619     // Oop lies in _span and isn't yet grey or black
7620     _bitMap->mark(addr);            // now grey
7621     if (addr < _finger) {
7622       // the bit map iteration has already either passed, or
7623       // sampled, this bit in the bit map; we'll need to
7624       // use the marking stack to scan this oop's oops.
7625       bool simulate_overflow = false;
7626       NOT_PRODUCT(
7627         if (CMSMarkStackOverflowALot &&
7628             _collector->simulate_overflow()) {
7629           // simulate a stack overflow
7630           simulate_overflow = true;
7631         }
7632       )
7633       if (simulate_overflow || !_markStack->push(obj)) { // stack overflow
7634         if (PrintCMSStatistics != 0) {
7635           gclog_or_tty->print_cr("CMS marking stack overflow (benign) at "
7636                                  SIZE_FORMAT, _markStack->capacity());
7637         }
7638         assert(simulate_overflow || _markStack->isFull(), "Else push should have succeeded");
7639         handle_stack_overflow(addr);
7640       }
7641     }
7642     // anything including and to the right of _finger
7643     // will be scanned as we iterate over the remainder of the
7644     // bit map
7645     do_yield_check();
7646   }
7647 }
7648 
7649 void PushOrMarkClosure::do_oop(oop* p)       { PushOrMarkClosure::do_oop_work(p); }
7650 void PushOrMarkClosure::do_oop(narrowOop* p) { PushOrMarkClosure::do_oop_work(p); }
7651 
7652 void Par_PushOrMarkClosure::do_oop(oop obj) {
7653   // Ignore mark word because we are running concurrent with mutators.
7654   assert(obj->is_oop_or_null(true), "expected an oop or NULL");
7655   HeapWord* addr = (HeapWord*)obj;
7656   if (_whole_span.contains(addr) && !_bit_map->isMarked(addr)) {
    // Oop lies in _whole_span and isn't yet grey or black
    // We read the global_finger (volatile read) strictly after marking the oop
7659     bool res = _bit_map->par_mark(addr);    // now grey
7660     volatile HeapWord** gfa = (volatile HeapWord**)_global_finger_addr;
7661     // Should we push this marked oop on our stack?
7662     // -- if someone else marked it, nothing to do
7663     // -- if target oop is above global finger nothing to do
7664     // -- if target oop is in chunk and above local finger
7665     //      then nothing to do
7666     // -- else push on work queue
7667     if (   !res       // someone else marked it, they will deal with it
7668         || (addr >= *gfa)  // will be scanned in a later task
7669         || (_span.contains(addr) && addr >= _finger)) { // later in this chunk
7670       return;
7671     }
7672     // the bit map iteration has already either passed, or
7673     // sampled, this bit in the bit map; we'll need to
7674     // use the marking stack to scan this oop's oops.
7675     bool simulate_overflow = false;
7676     NOT_PRODUCT(
7677       if (CMSMarkStackOverflowALot &&
7678           _collector->simulate_overflow()) {
7679         // simulate a stack overflow
7680         simulate_overflow = true;
7681       }
7682     )
7683     if (simulate_overflow ||
7684         !(_work_queue->push(obj) || _overflow_stack->par_push(obj))) {
7685       // stack overflow
7686       if (PrintCMSStatistics != 0) {
7687         gclog_or_tty->print_cr("CMS marking stack overflow (benign) at "
7688                                SIZE_FORMAT, _overflow_stack->capacity());
7689       }
7690       // We cannot assert that the overflow stack is full because
7691       // it may have been emptied since.
7692       assert(simulate_overflow ||
7693              _work_queue->size() == _work_queue->max_elems(),
7694             "Else push should have succeeded");
7695       handle_stack_overflow(addr);
7696     }
7697     do_yield_check();
7698   }
7699 }
7700 
7701 void Par_PushOrMarkClosure::do_oop(oop* p)       { Par_PushOrMarkClosure::do_oop_work(p); }
7702 void Par_PushOrMarkClosure::do_oop(narrowOop* p) { Par_PushOrMarkClosure::do_oop_work(p); }
7703 
7704 KlassRememberingOopClosure::KlassRememberingOopClosure(CMSCollector* collector,
7705                                              ReferenceProcessor* rp,
7706                                              CMSMarkStack* revisit_stack) :
7707   OopClosure(rp),
7708   _collector(collector),
7709   _revisit_stack(revisit_stack),
7710   _should_remember_klasses(collector->should_unload_classes()) {}
7711 
7712 PushAndMarkClosure::PushAndMarkClosure(CMSCollector* collector,
7713                                        MemRegion span,
7714                                        ReferenceProcessor* rp,
7715                                        CMSBitMap* bit_map,
7716                                        CMSBitMap* mod_union_table,
7717                                        CMSMarkStack*  mark_stack,
7718                                        CMSMarkStack*  revisit_stack,
7719                                        bool           concurrent_precleaning):
7720   KlassRememberingOopClosure(collector, rp, revisit_stack),
7721   _span(span),
7722   _bit_map(bit_map),
7723   _mod_union_table(mod_union_table),
7724   _mark_stack(mark_stack),
7725   _concurrent_precleaning(concurrent_precleaning)
7726 {
7727   assert(_ref_processor != NULL, "_ref_processor shouldn't be NULL");
7728 }
7729 
7730 // Grey object rescan during pre-cleaning and second checkpoint phases --
7731 // the non-parallel version (the parallel version appears further below.)
7732 void PushAndMarkClosure::do_oop(oop obj) {
  // We ignore the mark word: during concurrent precleaning the
  // object monitor may be locked, and during the checkpoint
  // phases the object may already have been reached by a different
  // path and may be at the end of the global overflow list (so
  // the mark word may be NULL).
7738   assert(obj->is_oop_or_null(true /* ignore mark word */),
7739          "expected an oop or NULL");
7740   HeapWord* addr = (HeapWord*)obj;
7741   // Check if oop points into the CMS generation
7742   // and is not marked
7743   if (_span.contains(addr) && !_bit_map->isMarked(addr)) {
7744     // a white object ...
7745     _bit_map->mark(addr);         // ... now grey
7746     // push on the marking stack (grey set)
7747     bool simulate_overflow = false;
7748     NOT_PRODUCT(
7749       if (CMSMarkStackOverflowALot &&
7750           _collector->simulate_overflow()) {
7751         // simulate a stack overflow
7752         simulate_overflow = true;
7753       }
7754     )
7755     if (simulate_overflow || !_mark_stack->push(obj)) {
7756       if (_concurrent_precleaning) {
         // During precleaning we can just dirty the appropriate card(s)
         // in the mod union table, thus ensuring that the object remains
         // in the grey set, and continue. In the case of object arrays
         // we need to dirty all of the cards that the object spans,
         // since the rescan of object arrays will be limited to the
         // dirty cards.
         // Note that no one can be interfering with us in this action
         // of dirtying the mod union table, so no locking or atomics
         // are required.
7766          if (obj->is_objArray()) {
7767            size_t sz = obj->size();
7768            HeapWord* end_card_addr = (HeapWord*)round_to(
7769                                         (intptr_t)(addr+sz), CardTableModRefBS::card_size);
7770            MemRegion redirty_range = MemRegion(addr, end_card_addr);
7771            assert(!redirty_range.is_empty(), "Arithmetical tautology");
7772            _mod_union_table->mark_range(redirty_range);
7773          } else {
7774            _mod_union_table->mark(addr);
7775          }
7776          _collector->_ser_pmc_preclean_ovflw++;
7777       } else {
7778          // During the remark phase, we need to remember this oop
7779          // in the overflow list.
7780          _collector->push_on_overflow_list(obj);
7781          _collector->_ser_pmc_remark_ovflw++;
7782       }
7783     }
7784   }
7785 }
7786 
7787 Par_PushAndMarkClosure::Par_PushAndMarkClosure(CMSCollector* collector,
7788                                                MemRegion span,
7789                                                ReferenceProcessor* rp,
7790                                                CMSBitMap* bit_map,
7791                                                OopTaskQueue* work_queue,
7792                                                CMSMarkStack* revisit_stack):
7793   Par_KlassRememberingOopClosure(collector, rp, revisit_stack),
7794   _span(span),
7795   _bit_map(bit_map),
7796   _work_queue(work_queue)
7797 {
7798   assert(_ref_processor != NULL, "_ref_processor shouldn't be NULL");
7799 }
7800 
7801 void PushAndMarkClosure::do_oop(oop* p)       { PushAndMarkClosure::do_oop_work(p); }
7802 void PushAndMarkClosure::do_oop(narrowOop* p) { PushAndMarkClosure::do_oop_work(p); }
7803 
7804 // Grey object rescan during second checkpoint phase --
7805 // the parallel version.
7806 void Par_PushAndMarkClosure::do_oop(oop obj) {
7807   // In the assert below, we ignore the mark word because
7808   // this oop may point to an already visited object that is
7809   // on the overflow stack (in which case the mark word has
7810   // been hijacked for chaining into the overflow stack --
7811   // if this is the last object in the overflow stack then
7812   // its mark word will be NULL). Because this object may
7813   // have been subsequently popped off the global overflow
7814   // stack, and the mark word possibly restored to the prototypical
  // value, by the time we get to examine this failing assert in
  // the debugger, is_oop_or_null(false) may already have started
  // to hold.
7818   assert(obj->is_oop_or_null(true),
7819          "expected an oop or NULL");
7820   HeapWord* addr = (HeapWord*)obj;
7821   // Check if oop points into the CMS generation
7822   // and is not marked
7823   if (_span.contains(addr) && !_bit_map->isMarked(addr)) {
7824     // a white object ...
7825     // If we manage to "claim" the object, by being the
7826     // first thread to mark it, then we push it on our
7827     // marking stack
7828     if (_bit_map->par_mark(addr)) {     // ... now grey
7829       // push on work queue (grey set)
7830       bool simulate_overflow = false;
7831       NOT_PRODUCT(
7832         if (CMSMarkStackOverflowALot &&
7833             _collector->par_simulate_overflow()) {
7834           // simulate a stack overflow
7835           simulate_overflow = true;
7836         }
7837       )
7838       if (simulate_overflow || !_work_queue->push(obj)) {
7839         _collector->par_push_on_overflow_list(obj);
7840         _collector->_par_pmc_remark_ovflw++; //  imprecise OK: no need to CAS
7841       }
7842     } // Else, some other thread got there first
7843   }
7844 }
7845 
7846 void Par_PushAndMarkClosure::do_oop(oop* p)       { Par_PushAndMarkClosure::do_oop_work(p); }
7847 void Par_PushAndMarkClosure::do_oop(narrowOop* p) { Par_PushAndMarkClosure::do_oop_work(p); }
7848 
7849 void PushAndMarkClosure::remember_mdo(DataLayout* v) {
7850   // TBD
7851 }
7852 
7853 void Par_PushAndMarkClosure::remember_mdo(DataLayout* v) {
7854   // TBD
7855 }
7856 
7857 void CMSPrecleanRefsYieldClosure::do_yield_work() {
7858   DEBUG_ONLY(RememberKlassesChecker mux(false);)
7859   Mutex* bml = _collector->bitMapLock();
7860   assert_lock_strong(bml);
7861   assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
7862          "CMS thread should hold CMS token");
7863 
7864   bml->unlock();
7865   ConcurrentMarkSweepThread::desynchronize(true);
7866 
7867   ConcurrentMarkSweepThread::acknowledge_yield_request();
7868 
7869   _collector->stopTimer();
7870   GCPauseTimer p(_collector->size_policy()->concurrent_timer_ptr());
7871   if (PrintCMSStatistics != 0) {
7872     _collector->incrementYields();
7873   }
7874   _collector->icms_wait();
7875 
7876   // See the comment in coordinator_yield()
7877   for (unsigned i = 0; i < CMSYieldSleepCount &&
7878                        ConcurrentMarkSweepThread::should_yield() &&
7879                        !CMSCollector::foregroundGCIsActive(); ++i) {
7880     os::sleep(Thread::current(), 1, false);
7881     ConcurrentMarkSweepThread::acknowledge_yield_request();
7882   }
7883 
7884   ConcurrentMarkSweepThread::synchronize(true);
7885   bml->lock();
7886 
7887   _collector->startTimer();
7888 }
7889 
7890 bool CMSPrecleanRefsYieldClosure::should_return() {
7891   if (ConcurrentMarkSweepThread::should_yield()) {
7892     do_yield_work();
7893   }
7894   return _collector->foregroundGCIsActive();
7895 }
7896 
7897 void MarkFromDirtyCardsClosure::do_MemRegion(MemRegion mr) {
7898   assert(((size_t)mr.start())%CardTableModRefBS::card_size_in_words == 0,
7899          "mr should be aligned to start at a card boundary");
7900   // We'd like to assert:
7901   // assert(mr.word_size()%CardTableModRefBS::card_size_in_words == 0,
7902   //        "mr should be a range of cards");
7903   // However, that would be too strong in one case -- the last
7904   // partition ends at _unallocated_block which, in general, can be
7905   // an arbitrary boundary, not necessarily card aligned.
7906   if (PrintCMSStatistics != 0) {
7907     _num_dirty_cards +=
7908          mr.word_size()/CardTableModRefBS::card_size_in_words;
7909   }
7910   _space->object_iterate_mem(mr, &_scan_cl);
7911 }
7912 
7913 SweepClosure::SweepClosure(CMSCollector* collector,
7914                            ConcurrentMarkSweepGeneration* g,
7915                            CMSBitMap* bitMap, bool should_yield) :
7916   _collector(collector),
7917   _g(g),
7918   _sp(g->cmsSpace()),
7919   _limit(_sp->sweep_limit()),
7920   _freelistLock(_sp->freelistLock()),
7921   _bitMap(bitMap),
7922   _yield(should_yield),
7923   _inFreeRange(false),           // No free range at beginning of sweep
7924   _freeRangeInFreeLists(false),  // No free range at beginning of sweep
7925   _lastFreeRangeCoalesced(false),
7926   _freeFinger(g->used_region().start())
7927 {
7928   NOT_PRODUCT(
7929     _numObjectsFreed = 0;
7930     _numWordsFreed   = 0;
7931     _numObjectsLive = 0;
7932     _numWordsLive = 0;
7933     _numObjectsAlreadyFree = 0;
7934     _numWordsAlreadyFree = 0;
7935     _last_fc = NULL;
7936 
7937     _sp->initializeIndexedFreeListArrayReturnedBytes();
7938     _sp->dictionary()->initialize_dict_returned_bytes();
7939   )
7940   assert(_limit >= _sp->bottom() && _limit <= _sp->end(),
7941          "sweep _limit out of bounds");
7942   if (CMSTraceSweeper) {
7943     gclog_or_tty->print_cr("\n====================\nStarting new sweep with limit " PTR_FORMAT,
7944                         _limit);
7945   }
7946 }
7947 
7948 void SweepClosure::print_on(outputStream* st) const {
7949   tty->print_cr("_sp = [" PTR_FORMAT "," PTR_FORMAT ")",
7950                 _sp->bottom(), _sp->end());
7951   tty->print_cr("_limit = " PTR_FORMAT, _limit);
7952   tty->print_cr("_freeFinger = " PTR_FORMAT, _freeFinger);
7953   NOT_PRODUCT(tty->print_cr("_last_fc = " PTR_FORMAT, _last_fc);)
7954   tty->print_cr("_inFreeRange = %d, _freeRangeInFreeLists = %d, _lastFreeRangeCoalesced = %d",
7955                 _inFreeRange, _freeRangeInFreeLists, _lastFreeRangeCoalesced);
7956 }
7957 
7958 #ifndef PRODUCT
7959 // Assertion checking only:  no useful work in product mode --
7960 // however, if any of the flags below become product flags,
7961 // you may need to review this code to see if it needs to be
7962 // enabled in product mode.
7963 SweepClosure::~SweepClosure() {
7964   assert_lock_strong(_freelistLock);
7965   assert(_limit >= _sp->bottom() && _limit <= _sp->end(),
7966          "sweep _limit out of bounds");
7967   if (inFreeRange()) {
7968     warning("inFreeRange() should have been reset; dumping state of SweepClosure");
7969     print();
7970     ShouldNotReachHere();
7971   }
7972   if (Verbose && PrintGC) {
7973     gclog_or_tty->print("Collected "SIZE_FORMAT" objects, " SIZE_FORMAT " bytes",
7974                         _numObjectsFreed, _numWordsFreed*sizeof(HeapWord));
7975     gclog_or_tty->print_cr("\nLive "SIZE_FORMAT" objects,  "
7976                            SIZE_FORMAT" bytes  "
7977       "Already free "SIZE_FORMAT" objects, "SIZE_FORMAT" bytes",
7978       _numObjectsLive, _numWordsLive*sizeof(HeapWord),
7979       _numObjectsAlreadyFree, _numWordsAlreadyFree*sizeof(HeapWord));
7980     size_t totalBytes = (_numWordsFreed + _numWordsLive + _numWordsAlreadyFree)
7981                         * sizeof(HeapWord);
7982     gclog_or_tty->print_cr("Total sweep: "SIZE_FORMAT" bytes", totalBytes);
7983 
7984     if (PrintCMSStatistics && CMSVerifyReturnedBytes) {
7985       size_t indexListReturnedBytes = _sp->sumIndexedFreeListArrayReturnedBytes();
7986       size_t dict_returned_bytes = _sp->dictionary()->sum_dict_returned_bytes();
7987       size_t returned_bytes = indexListReturnedBytes + dict_returned_bytes;
7988       gclog_or_tty->print("Returned "SIZE_FORMAT" bytes", returned_bytes);
7989       gclog_or_tty->print("   Indexed List Returned "SIZE_FORMAT" bytes",
7990         indexListReturnedBytes);
7991       gclog_or_tty->print_cr("        Dictionary Returned "SIZE_FORMAT" bytes",
7992         dict_returned_bytes);
7993     }
7994   }
7995   if (CMSTraceSweeper) {
7996     gclog_or_tty->print_cr("end of sweep with _limit = " PTR_FORMAT "\n================",
7997                            _limit);
7998   }
7999 }
8000 #endif  // PRODUCT
8001 
8002 void SweepClosure::initialize_free_range(HeapWord* freeFinger,
8003     bool freeRangeInFreeLists) {
8004   if (CMSTraceSweeper) {
8005     gclog_or_tty->print("---- Start free range at 0x%x with free block (%d)\n",
8006                freeFinger, freeRangeInFreeLists);
8007   }
8008   assert(!inFreeRange(), "Trampling existing free range");
8009   set_inFreeRange(true);
8010   set_lastFreeRangeCoalesced(false);
8011 
8012   set_freeFinger(freeFinger);
8013   set_freeRangeInFreeLists(freeRangeInFreeLists);
8014   if (CMSTestInFreeList) {
8015     if (freeRangeInFreeLists) {
8016       FreeChunk* fc = (FreeChunk*) freeFinger;
8017       assert(fc->is_free(), "A chunk on the free list should be free.");
8018       assert(fc->size() > 0, "Free range should have a size");
8019       assert(_sp->verify_chunk_in_free_list(fc), "Chunk is not in free lists");
8020     }
8021   }
8022 }
8023 
8024 // Note that the sweeper runs concurrently with mutators. Thus,
8025 // it is possible for direct allocation in this generation to happen
8026 // in the middle of the sweep. Note that the sweeper also coalesces
// contiguous free blocks. Thus, unless the sweeper and the allocator
// synchronize appropriately, freshly allocated blocks may get swept up.
8029 // This is accomplished by the sweeper locking the free lists while
8030 // it is sweeping. Thus blocks that are determined to be free are
8031 // indeed free. There is however one additional complication:
8032 // blocks that have been allocated since the final checkpoint and
8033 // mark, will not have been marked and so would be treated as
8034 // unreachable and swept up. To prevent this, the allocator marks
8035 // the bit map when allocating during the sweep phase. This leads,
8036 // however, to a further complication -- objects may have been allocated
8037 // but not yet initialized -- in the sense that the header isn't yet
// installed. The sweeper cannot then determine the size of the block
8039 // in order to skip over it. To deal with this case, we use a technique
8040 // (due to Printezis) to encode such uninitialized block sizes in the
// bit map. Since the bit map uses one bit per HeapWord, but the
8042 // CMS generation has a minimum object size of 3 HeapWords, it follows
8043 // that "normal marks" won't be adjacent in the bit map (there will
8044 // always be at least two 0 bits between successive 1 bits). We make use
8045 // of these "unused" bits to represent uninitialized blocks -- the bit
8046 // corresponding to the start of the uninitialized object and the next
8047 // bit are both set. Finally, a 1 bit marks the end of the object that
8048 // started with the two consecutive 1 bits to indicate its potentially
8049 // uninitialized state.
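
// A minimal sketch of the Printezis encoding described above, over a toy
// single-word bitmap (one bit per word). This is illustrative only -- the
// toy_* names are hypothetical, not the CMSBitMap API -- and is kept under
// #if 0 so it does not affect the build.
#if 0
typedef unsigned long long ToyBitmap;  // 64 words' worth of mark bits

static inline void toy_mark(ToyBitmap* bm, size_t bit)     { *bm |= 1ULL << bit; }
static inline bool toy_is_marked(ToyBitmap bm, size_t bit) { return (bm >> bit) & 1; }

// An uninitialized block of 'size' words (size >= 3) starting at word
// 'start' is encoded by setting bits start, start+1 and start+size-1.
static void printezis_encode(ToyBitmap* bm, size_t start, size_t size) {
  toy_mark(bm, start);             // ordinary "block starts here" bit
  toy_mark(bm, start + 1);         // second consecutive bit: uninitialized
  toy_mark(bm, start + size - 1);  // end marker
}

// Mirrors SweepClosure::do_live_chunk(): when the bit after 'start' is
// also set, scan forward from start+2 for the next set bit; the block
// ends one word past it, so the size is recovered without reading the
// (possibly uninstalled) object header. Assumes the end marker is present.
static size_t printezis_decode_size(ToyBitmap bm, size_t start) {
  size_t next = start + 2;
  while (!toy_is_marked(bm, next)) next++;
  return next + 1 - start;
}
#endif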
8050 
8051 size_t SweepClosure::do_blk_careful(HeapWord* addr) {
8052   FreeChunk* fc = (FreeChunk*)addr;
8053   size_t res;
8054 
8055   // Check if we are done sweeping. Below we check "addr >= _limit" rather
8056   // than "addr == _limit" because although _limit was a block boundary when
8057   // we started the sweep, it may no longer be one because heap expansion
8058   // may have caused us to coalesce the block ending at the address _limit
8059   // with a newly expanded chunk (this happens when _limit was set to the
8060   // previous _end of the space), so we may have stepped past _limit:
8061   // see the following Zeno-like trail of CRs 6977970, 7008136, 7042740.
8062   if (addr >= _limit) { // we have swept up to or past the limit: finish up
8063     assert(_limit >= _sp->bottom() && _limit <= _sp->end(),
8064            "sweep _limit out of bounds");
8065     assert(addr < _sp->end(), "addr out of bounds");
8066     // Flush any free range we might be holding as a single
8067     // coalesced chunk to the appropriate free list.
8068     if (inFreeRange()) {
8069       assert(freeFinger() >= _sp->bottom() && freeFinger() < _limit,
8070              err_msg("freeFinger() " PTR_FORMAT" is out-of-bounds", freeFinger()));
8071       flush_cur_free_chunk(freeFinger(),
8072                            pointer_delta(addr, freeFinger()));
8073       if (CMSTraceSweeper) {
8074         gclog_or_tty->print("Sweep: last chunk: ");
8075         gclog_or_tty->print("put_free_blk 0x%x ("SIZE_FORMAT") "
8076                    "[coalesced:"SIZE_FORMAT"]\n",
8077                    freeFinger(), pointer_delta(addr, freeFinger()),
8078                    lastFreeRangeCoalesced());
8079       }
8080     }
8081 
8082     // help the iterator loop finish
8083     return pointer_delta(_sp->end(), addr);
8084   }
8085 
8086   assert(addr < _limit, "sweep invariant");
8087   // check if we should yield
8088   do_yield_check(addr);
8089   if (fc->is_free()) {
8090     // Chunk that is already free
8091     res = fc->size();
8092     do_already_free_chunk(fc);
8093     debug_only(_sp->verifyFreeLists());
8094     // If we flush the chunk at hand in lookahead_and_flush()
8095     // and it's coalesced with a preceding chunk, then the
8096     // process of "mangling" the payload of the coalesced block
8097     // will cause erasure of the size information from the
8098     // (erstwhile) header of all the coalesced blocks but the
8099     // first, so the first disjunct in the assert will not hold
8100     // in that specific case (in which case the second disjunct
8101     // will hold).
8102     assert(res == fc->size() || ((HeapWord*)fc) + res >= _limit,
8103            "Otherwise the size info doesn't change at this step");
8104     NOT_PRODUCT(
8105       _numObjectsAlreadyFree++;
8106       _numWordsAlreadyFree += res;
8107     )
8108     NOT_PRODUCT(_last_fc = fc;)
8109   } else if (!_bitMap->isMarked(addr)) {
8110     // Chunk is fresh garbage
8111     res = do_garbage_chunk(fc);
8112     debug_only(_sp->verifyFreeLists());
8113     NOT_PRODUCT(
8114       _numObjectsFreed++;
8115       _numWordsFreed += res;
8116     )
8117   } else {
8118     // Chunk that is alive.
8119     res = do_live_chunk(fc);
8120     debug_only(_sp->verifyFreeLists());
8121     NOT_PRODUCT(
8122         _numObjectsLive++;
8123         _numWordsLive += res;
8124     )
8125   }
8126   return res;
8127 }
8128 
// For smart allocation, record the following:
//  split deaths - a free chunk is removed from its free list because
//      it is being split into two or more chunks.
//  split birth - a free chunk is being added to its free list because
//      a larger free chunk has been split and resulted in this free chunk.
//  coal death - a free chunk is being removed from its free list because
//      it is being coalesced into a larger free chunk.
//  coal birth - a free chunk is being added to its free list because
//      it was created when two or more free chunks were coalesced into
//      this free chunk.
8139 //
8140 // These statistics are used to determine the desired number of free
8141 // chunks of a given size.  The desired number is chosen to be relative
8142 // to the end of a CMS sweep.  The desired number at the end of a sweep
8143 // is the
8144 //      count-at-end-of-previous-sweep (an amount that was enough)
8145 //              - count-at-beginning-of-current-sweep  (the excess)
8146 //              + split-births  (gains in this size during interval)
8147 //              - split-deaths  (demands on this size during interval)
8148 // where the interval is from the end of one sweep to the end of the
8149 // next.
8150 //
8151 // When sweeping the sweeper maintains an accumulated chunk which is
8152 // the chunk that is made up of chunks that have been coalesced.  That
8153 // will be termed the left-hand chunk.  A new chunk of garbage that
8154 // is being considered for coalescing will be referred to as the
8155 // right-hand chunk.
8156 //
8157 // When making a decision on whether to coalesce a right-hand chunk with
8158 // the current left-hand chunk, the current count vs. the desired count
8159 // of the left-hand chunk is considered.  Also if the right-hand chunk
8160 // is near the large chunk at the end of the heap (see
8161 // ConcurrentMarkSweepGeneration::isNearLargestChunk()), then the
8162 // left-hand chunk is coalesced.
8163 //
8164 // When making a decision about whether to split a chunk, the desired count
8165 // vs. the current count of the candidate to be split is also considered.
8166 // If the candidate is underpopulated (currently fewer chunks than desired)
8167 // a chunk of an overpopulated (currently more chunks than desired) size may
8168 // be chosen.  The "hint" associated with a free list, if non-null, points
8169 // to a free list which may be overpopulated.
8170 //
8171 
8172 void SweepClosure::do_already_free_chunk(FreeChunk* fc) {
8173   const size_t size = fc->size();
8174   // Chunks that cannot be coalesced are not in the
8175   // free lists.
8176   if (CMSTestInFreeList && !fc->cantCoalesce()) {
8177     assert(_sp->verify_chunk_in_free_list(fc),
8178       "free chunk should be in free lists");
8179   }
8180   // a chunk that is already free, should not have been
8181   // marked in the bit map
8182   HeapWord* const addr = (HeapWord*) fc;
8183   assert(!_bitMap->isMarked(addr), "free chunk should be unmarked");
8184   // Verify that the bit map has no bits marked between
8185   // addr and purported end of this block.
8186   _bitMap->verifyNoOneBitsInRange(addr + 1, addr + size);
8187 
8188   // Some chunks cannot be coalesced under any circumstances.
8189   // See the definition of cantCoalesce().
8190   if (!fc->cantCoalesce()) {
8191     // This chunk can potentially be coalesced.
8192     if (_sp->adaptive_freelists()) {
8193       // All the work is done in
8194       do_post_free_or_garbage_chunk(fc, size);
8195     } else {  // Not adaptive free lists
8196       // this is a free chunk that can potentially be coalesced by the sweeper;
8197       if (!inFreeRange()) {
8198         // if the next chunk is a free block that can't be coalesced
8199         // it doesn't make sense to remove this chunk from the free lists
8200         FreeChunk* nextChunk = (FreeChunk*)(addr + size);
8201         assert((HeapWord*)nextChunk <= _sp->end(), "Chunk size out of bounds?");
8202         if ((HeapWord*)nextChunk < _sp->end() &&     // There is another free chunk to the right ...
8203             nextChunk->is_free()               &&     // ... which is free...
8204             nextChunk->cantCoalesce()) {             // ... but can't be coalesced
8205           // nothing to do
8206         } else {
8207           // Potentially the start of a new free range:
8208           // Don't eagerly remove it from the free lists.
8209           // No need to remove it if it will just be put
8210           // back again.  (Also from a pragmatic point of view
8211           // if it is a free block in a region that is beyond
8212           // any allocated blocks, an assertion will fail)
8213           // Remember the start of a free run.
8214           initialize_free_range(addr, true);
8215           // end - can coalesce with next chunk
8216         }
8217       } else {
8218         // the midst of a free range, we are coalescing
8219         print_free_block_coalesced(fc);
8220         if (CMSTraceSweeper) {
8221           gclog_or_tty->print("  -- pick up free block 0x%x (%d)\n", fc, size);
8222         }
8223         // remove it from the free lists
8224         _sp->removeFreeChunkFromFreeLists(fc);
8225         set_lastFreeRangeCoalesced(true);
8226         // If the chunk is being coalesced and the current free range is
8227         // in the free lists, remove the current free range so that it
8228         // will be returned to the free lists in its entirety - all
8229         // the coalesced pieces included.
8230         if (freeRangeInFreeLists()) {
8231           FreeChunk* ffc = (FreeChunk*) freeFinger();
8232           assert(ffc->size() == pointer_delta(addr, freeFinger()),
8233             "Size of free range is inconsistent with chunk size.");
8234           if (CMSTestInFreeList) {
8235             assert(_sp->verify_chunk_in_free_list(ffc),
8236               "free range is not in free lists");
8237           }
8238           _sp->removeFreeChunkFromFreeLists(ffc);
8239           set_freeRangeInFreeLists(false);
8240         }
8241       }
8242     }
8243     // Note that if the chunk is not coalescable (the else arm
8244     // below), we unconditionally flush, without needing to do
8245     // a "lookahead," as we do below.
8246     if (inFreeRange()) lookahead_and_flush(fc, size);
8247   } else {
8248     // Code path common to both original and adaptive free lists.
8249 
    // can't coalesce with the previous block; this should be treated
    // as the end of a free run, if any
8252     if (inFreeRange()) {
8253       // we kicked some butt; time to pick up the garbage
8254       assert(freeFinger() < addr, "freeFinger points too high");
8255       flush_cur_free_chunk(freeFinger(), pointer_delta(addr, freeFinger()));
8256     }
8257     // else, nothing to do, just continue
8258   }
8259 }
8260 
8261 size_t SweepClosure::do_garbage_chunk(FreeChunk* fc) {
8262   // This is a chunk of garbage.  It is not in any free list.
8263   // Add it to a free list or let it possibly be coalesced into
8264   // a larger chunk.
8265   HeapWord* const addr = (HeapWord*) fc;
8266   const size_t size = CompactibleFreeListSpace::adjustObjectSize(oop(addr)->size());
8267 
8268   if (_sp->adaptive_freelists()) {
8269     // Verify that the bit map has no bits marked between
8270     // addr and purported end of just dead object.
8271     _bitMap->verifyNoOneBitsInRange(addr + 1, addr + size);
8272 
8273     do_post_free_or_garbage_chunk(fc, size);
8274   } else {
8275     if (!inFreeRange()) {
8276       // start of a new free range
8277       assert(size > 0, "A free range should have a size");
8278       initialize_free_range(addr, false);
8279     } else {
8280       // this will be swept up when we hit the end of the
8281       // free range
8282       if (CMSTraceSweeper) {
8283         gclog_or_tty->print("  -- pick up garbage 0x%x (%d) \n", fc, size);
8284       }
8285       // If the chunk is being coalesced and the current free range is
8286       // in the free lists, remove the current free range so that it
8287       // will be returned to the free lists in its entirety - all
8288       // the coalesced pieces included.
8289       if (freeRangeInFreeLists()) {
8290         FreeChunk* ffc = (FreeChunk*)freeFinger();
8291         assert(ffc->size() == pointer_delta(addr, freeFinger()),
8292           "Size of free range is inconsistent with chunk size.");
8293         if (CMSTestInFreeList) {
8294           assert(_sp->verify_chunk_in_free_list(ffc),
8295             "free range is not in free lists");
8296         }
8297         _sp->removeFreeChunkFromFreeLists(ffc);
8298         set_freeRangeInFreeLists(false);
8299       }
8300       set_lastFreeRangeCoalesced(true);
8301     }
8302     // this will be swept up when we hit the end of the free range
8303 
8304     // Verify that the bit map has no bits marked between
8305     // addr and purported end of just dead object.
8306     _bitMap->verifyNoOneBitsInRange(addr + 1, addr + size);
8307   }
8308   assert(_limit >= addr + size,
8309          "A freshly garbage chunk can't possibly straddle over _limit");
8310   if (inFreeRange()) lookahead_and_flush(fc, size);
8311   return size;
8312 }
8313 
8314 size_t SweepClosure::do_live_chunk(FreeChunk* fc) {
8315   HeapWord* addr = (HeapWord*) fc;
8316   // The sweeper has just found a live object. Return any accumulated
8317   // left hand chunk to the free lists.
8318   if (inFreeRange()) {
8319     assert(freeFinger() < addr, "freeFinger points too high");
8320     flush_cur_free_chunk(freeFinger(), pointer_delta(addr, freeFinger()));
8321   }
8322 
8323   // This object is live: we'd normally expect this to be
8324   // an oop, and like to assert the following:
8325   // assert(oop(addr)->is_oop(), "live block should be an oop");
8326   // However, as we commented above, this may be an object whose
8327   // header hasn't yet been initialized.
8328   size_t size;
8329   assert(_bitMap->isMarked(addr), "Tautology for this control point");
8330   if (_bitMap->isMarked(addr + 1)) {
8331     // Determine the size from the bit map, rather than trying to
8332     // compute it from the object header.
8333     HeapWord* nextOneAddr = _bitMap->getNextMarkedWordAddress(addr + 2);
8334     size = pointer_delta(nextOneAddr + 1, addr);
8335     assert(size == CompactibleFreeListSpace::adjustObjectSize(size),
8336            "alignment problem");
8337 
8338 #ifdef DEBUG
      if (oop(addr)->klass_or_null() != NULL &&
          (   !_collector->should_unload_classes()
           || (oop(addr)->is_parsable() &&
               oop(addr)->is_conc_safe()))) {
8343         // Ignore mark word because we are running concurrent with mutators
8344         assert(oop(addr)->is_oop(true), "live block should be an oop");
8345         // is_conc_safe is checked before performing this assertion
8346         // because an object that is not is_conc_safe may yet have
8347         // the return from size() correct.
8348         assert(size ==
8349                CompactibleFreeListSpace::adjustObjectSize(oop(addr)->size()),
8350                "P-mark and computed size do not agree");
8351       }
8352 #endif
8353 
8354   } else {
8355     // This should be an initialized object that's alive.
8356     assert(oop(addr)->klass_or_null() != NULL &&
8357            (!_collector->should_unload_classes()
8358             || oop(addr)->is_parsable()),
8359            "Should be an initialized object");
8360     // Note that there are objects used during class redefinition,
8361     // e.g. merge_cp in VM_RedefineClasses::merge_cp_and_rewrite(),
8362     // which are discarded with their is_conc_safe state still
    // false.  These objects may be floating garbage and so may be
    // seen here.  If they are floating garbage their size
    // should be attainable from their klass.  Do not assume that
    // is_conc_safe() is true for oop(addr).
8367     // Ignore mark word because we are running concurrent with mutators
8368     assert(oop(addr)->is_oop(true), "live block should be an oop");
8369     // Verify that the bit map has no bits marked between
8370     // addr and purported end of this block.
8371     size = CompactibleFreeListSpace::adjustObjectSize(oop(addr)->size());
8372     assert(size >= 3, "Necessary for Printezis marks to work");
8373     assert(!_bitMap->isMarked(addr+1), "Tautology for this control point");
8374     DEBUG_ONLY(_bitMap->verifyNoOneBitsInRange(addr+2, addr+size);)
8375   }
8376   return size;
8377 }
8378 
8379 void SweepClosure::do_post_free_or_garbage_chunk(FreeChunk* fc,
8380                                                  size_t chunkSize) {
8381   // do_post_free_or_garbage_chunk() should only be called in the case
8382   // of the adaptive free list allocator.
8383   const bool fcInFreeLists = fc->is_free();
8384   assert(_sp->adaptive_freelists(), "Should only be used in this case.");
8385   assert((HeapWord*)fc <= _limit, "sweep invariant");
8386   if (CMSTestInFreeList && fcInFreeLists) {
8387     assert(_sp->verify_chunk_in_free_list(fc), "free chunk is not in free lists");
8388   }
8389 
8390   if (CMSTraceSweeper) {
8391     gclog_or_tty->print_cr("  -- pick up another chunk at 0x%x (%d)", fc, chunkSize);
8392   }
8393 
8394   HeapWord* const fc_addr = (HeapWord*) fc;
8395 
8396   bool coalesce;
8397   const size_t left  = pointer_delta(fc_addr, freeFinger());
8398   const size_t right = chunkSize;
8399   switch (FLSCoalescePolicy) {
    // numeric value forms a coalescing aggressiveness metric
8401     case 0:  { // never coalesce
8402       coalesce = false;
8403       break;
8404     }
8405     case 1: { // coalesce if left & right chunks on overpopulated lists
8406       coalesce = _sp->coalOverPopulated(left) &&
8407                  _sp->coalOverPopulated(right);
8408       break;
8409     }
8410     case 2: { // coalesce if left chunk on overpopulated list (default)
8411       coalesce = _sp->coalOverPopulated(left);
8412       break;
8413     }
8414     case 3: { // coalesce if left OR right chunk on overpopulated list
8415       coalesce = _sp->coalOverPopulated(left) ||
8416                  _sp->coalOverPopulated(right);
8417       break;
8418     }
8419     case 4: { // always coalesce
8420       coalesce = true;
8421       break;
8422     }
8423     default:
8424      ShouldNotReachHere();
8425   }
8426 
8427   // Should the current free range be coalesced?
8428   // If the chunk is in a free range and either we decided to coalesce above
8429   // or the chunk is near the large block at the end of the heap
8430   // (isNearLargestChunk() returns true), then coalesce this chunk.
8431   const bool doCoalesce = inFreeRange()
8432                           && (coalesce || _g->isNearLargestChunk(fc_addr));
8433   if (doCoalesce) {
8434     // Coalesce the current free range on the left with the new
8435     // chunk on the right.  If either is on a free list,
8436     // it must be removed from the list and stashed in the closure.
8437     if (freeRangeInFreeLists()) {
8438       FreeChunk* const ffc = (FreeChunk*)freeFinger();
8439       assert(ffc->size() == pointer_delta(fc_addr, freeFinger()),
8440         "Size of free range is inconsistent with chunk size.");
8441       if (CMSTestInFreeList) {
8442         assert(_sp->verify_chunk_in_free_list(ffc),
8443           "Chunk is not in free lists");
8444       }
8445       _sp->coalDeath(ffc->size());
8446       _sp->removeFreeChunkFromFreeLists(ffc);
8447       set_freeRangeInFreeLists(false);
8448     }
8449     if (fcInFreeLists) {
8450       _sp->coalDeath(chunkSize);
8451       assert(fc->size() == chunkSize,
8452         "The chunk has the wrong size or is not in the free lists");
8453       _sp->removeFreeChunkFromFreeLists(fc);
8454     }
8455     set_lastFreeRangeCoalesced(true);
8456     print_free_block_coalesced(fc);
8457   } else {  // not in a free range and/or should not coalesce
8458     // Return the current free range and start a new one.
8459     if (inFreeRange()) {
8460       // In a free range but cannot coalesce with the right hand chunk.
8461       // Put the current free range into the free lists.
8462       flush_cur_free_chunk(freeFinger(),
8463                            pointer_delta(fc_addr, freeFinger()));
8464     }
8465     // Set up for new free range.  Pass along whether the right hand
8466     // chunk is in the free lists.
8467     initialize_free_range((HeapWord*)fc, fcInFreeLists);
8468   }
8469 }
8470 
8471 // Lookahead flush:
8472 // If we are tracking a free range, and this is the last chunk that
8473 // we'll look at because its end crosses past _limit, we'll preemptively
8474 // flush it along with any free range we may be holding on to. Note that
8475 // this can be the case only for an already free or freshly garbage
8476 // chunk. If this block is an object, it can never straddle
8477 // over _limit. The "straddling" occurs when _limit is set at
8478 // the previous end of the space when this cycle started, and
8479 // a subsequent heap expansion caused the previously co-terminal
8480 // free block to be coalesced with the newly expanded portion,
8481 // thus rendering _limit a non-block-boundary making it dangerous
8482 // for the sweeper to step over and examine.
8483 void SweepClosure::lookahead_and_flush(FreeChunk* fc, size_t chunk_size) {
8484   assert(inFreeRange(), "Should only be called if currently in a free range.");
8485   HeapWord* const eob = ((HeapWord*)fc) + chunk_size;
  assert(_sp->used_region().contains(eob - 1),
         err_msg("eob = " PTR_FORMAT " out of bounds wrt _sp = [" PTR_FORMAT "," PTR_FORMAT ")"
                 " when examining fc = " PTR_FORMAT "(" SIZE_FORMAT ")",
                 eob, _sp->bottom(), _sp->end(), fc, chunk_size));
8490   if (eob >= _limit) {
8491     assert(eob == _limit || fc->is_free(), "Only a free chunk should allow us to cross over the limit");
8492     if (CMSTraceSweeper) {
8493       gclog_or_tty->print_cr("_limit " PTR_FORMAT " reached or crossed by block "
8494                              "[" PTR_FORMAT "," PTR_FORMAT ") in space "
8495                              "[" PTR_FORMAT "," PTR_FORMAT ")",
8496                              _limit, fc, eob, _sp->bottom(), _sp->end());
8497     }
8498     // Return the storage we are tracking back into the free lists.
8499     if (CMSTraceSweeper) {
8500       gclog_or_tty->print_cr("Flushing ... ");
8501     }
8502     assert(freeFinger() < eob, "Error");
    flush_cur_free_chunk(freeFinger(), pointer_delta(eob, freeFinger()));
8504   }
8505 }
8506 
8507 void SweepClosure::flush_cur_free_chunk(HeapWord* chunk, size_t size) {
8508   assert(inFreeRange(), "Should only be called if currently in a free range.");
8509   assert(size > 0,
8510     "A zero sized chunk cannot be added to the free lists.");
8511   if (!freeRangeInFreeLists()) {
8512     if (CMSTestInFreeList) {
8513       FreeChunk* fc = (FreeChunk*) chunk;
8514       fc->set_size(size);
8515       assert(!_sp->verify_chunk_in_free_list(fc),
8516         "chunk should not be in free lists yet");
8517     }
8518     if (CMSTraceSweeper) {
8519       gclog_or_tty->print_cr(" -- add free block 0x%x (%d) to free lists",
8520                     chunk, size);
8521     }
8522     // A new free range is going to be starting.  The current
8523     // free range has not been added to the free lists yet or
8524     // was removed so add it back.
8525     // If the current free range was coalesced, then the death
8526     // of the free range was recorded.  Record a birth now.
8527     if (lastFreeRangeCoalesced()) {
8528       _sp->coalBirth(size);
8529     }
8530     _sp->addChunkAndRepairOffsetTable(chunk, size,
8531             lastFreeRangeCoalesced());
8532   } else if (CMSTraceSweeper) {
8533     gclog_or_tty->print_cr("Already in free list: nothing to flush");
8534   }
8535   set_inFreeRange(false);
8536   set_freeRangeInFreeLists(false);
8537 }
8538 
8539 // We take a break if we've been at this for a while,
8540 // so as to avoid monopolizing the locks involved.
8541 void SweepClosure::do_yield_work(HeapWord* addr) {
8542   // Return current free chunk being used for coalescing (if any)
8543   // to the appropriate freelist.  After yielding, the next
8544   // free block encountered will start a coalescing range of
8545   // free blocks.  If the next free block is adjacent to the
8546   // chunk just flushed, they will need to wait for the next
8547   // sweep to be coalesced.
8548   if (inFreeRange()) {
8549     flush_cur_free_chunk(freeFinger(), pointer_delta(addr, freeFinger()));
8550   }
8551 
8552   // First give up the locks, then yield, then re-lock.
8553   // We should probably use a constructor/destructor idiom to
8554   // do this unlock/lock or modify the MutexUnlocker class to
8555   // serve our purpose. XXX
8556   assert_lock_strong(_bitMap->lock());
8557   assert_lock_strong(_freelistLock);
8558   assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
8559          "CMS thread should hold CMS token");
8560   _bitMap->lock()->unlock();
8561   _freelistLock->unlock();
8562   ConcurrentMarkSweepThread::desynchronize(true);
8563   ConcurrentMarkSweepThread::acknowledge_yield_request();
8564   _collector->stopTimer();
8565   GCPauseTimer p(_collector->size_policy()->concurrent_timer_ptr());
8566   if (PrintCMSStatistics != 0) {
8567     _collector->incrementYields();
8568   }
8569   _collector->icms_wait();
8570 
8571   // See the comment in coordinator_yield()
8572   for (unsigned i = 0; i < CMSYieldSleepCount &&
8573                        ConcurrentMarkSweepThread::should_yield() &&
8574                        !CMSCollector::foregroundGCIsActive(); ++i) {
8575     os::sleep(Thread::current(), 1, false);
8576     ConcurrentMarkSweepThread::acknowledge_yield_request();
8577   }
8578 
8579   ConcurrentMarkSweepThread::synchronize(true);
8580   _freelistLock->lock();
8581   _bitMap->lock()->lock_without_safepoint_check();
8582   _collector->startTimer();
8583 }
8584 
8585 #ifndef PRODUCT
8586 // This is actually very useful in a product build if it can
8587 // be called from the debugger.  Compile it into the product
8588 // as needed.
8589 bool debug_verify_chunk_in_free_list(FreeChunk* fc) {
8590   return debug_cms_space->verify_chunk_in_free_list(fc);
8591 }
8592 #endif
8593 
8594 void SweepClosure::print_free_block_coalesced(FreeChunk* fc) const {
8595   if (CMSTraceSweeper) {
8596     gclog_or_tty->print_cr("Sweep:coal_free_blk " PTR_FORMAT " (" SIZE_FORMAT ")",
8597                            fc, fc->size());
8598   }
8599 }
8600 
8601 // CMSIsAliveClosure
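// An object is considered live if it lies outside the span being
// collected, or if it lies within the span and has been marked.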
8602 bool CMSIsAliveClosure::do_object_b(oop obj) {
8603   HeapWord* addr = (HeapWord*)obj;
8604   return addr != NULL &&
8605          (!_span.contains(addr) || _bit_map->isMarked(addr));
8606 }
8607 
8608 CMSKeepAliveClosure::CMSKeepAliveClosure( CMSCollector* collector,
8609                       MemRegion span,
8610                       CMSBitMap* bit_map, CMSMarkStack* mark_stack,
8611                       CMSMarkStack* revisit_stack, bool cpc):
8612   KlassRememberingOopClosure(collector, NULL, revisit_stack),
8613   _span(span),
8614   _bit_map(bit_map),
8615   _mark_stack(mark_stack),
8616   _concurrent_precleaning(cpc) {
8617   assert(!_span.is_empty(), "Empty span could spell trouble");
8618 }
8619 
8620 
8621 // CMSKeepAliveClosure: the serial version
8622 void CMSKeepAliveClosure::do_oop(oop obj) {
8623   HeapWord* addr = (HeapWord*)obj;
8624   if (_span.contains(addr) &&
8625       !_bit_map->isMarked(addr)) {
8626     _bit_map->mark(addr);
8627     bool simulate_overflow = false;
8628     NOT_PRODUCT(
8629       if (CMSMarkStackOverflowALot &&
8630           _collector->simulate_overflow()) {
8631         // simulate a stack overflow
8632         simulate_overflow = true;
8633       }
8634     )
8635     if (simulate_overflow || !_mark_stack->push(obj)) {
8636       if (_concurrent_precleaning) {
8637         // We dirty the overflown object and let the remark
8638         // phase deal with it.
8639         assert(_collector->overflow_list_is_empty(), "Error");
8640         // In the case of object arrays, we need to dirty all of
8641         // the cards that the object spans. No locking or atomics
8642         // are needed since no one else can be mutating the mod union
8643         // table.
8644         if (obj->is_objArray()) {
8645           size_t sz = obj->size();
8646           HeapWord* end_card_addr =
8647             (HeapWord*)round_to((intptr_t)(addr+sz), CardTableModRefBS::card_size);
8648           MemRegion redirty_range = MemRegion(addr, end_card_addr);
8649           assert(!redirty_range.is_empty(), "Arithmetical tautology");
8650           _collector->_modUnionTable.mark_range(redirty_range);
8651         } else {
8652           _collector->_modUnionTable.mark(addr);
8653         }
8654         _collector->_ser_kac_preclean_ovflw++;
8655       } else {
8656         _collector->push_on_overflow_list(obj);
8657         _collector->_ser_kac_ovflw++;
8658       }
8659     }
8660   }
8661 }
8662 
8663 void CMSKeepAliveClosure::do_oop(oop* p)       { CMSKeepAliveClosure::do_oop_work(p); }
8664 void CMSKeepAliveClosure::do_oop(narrowOop* p) { CMSKeepAliveClosure::do_oop_work(p); }
8665 
8666 // CMSParKeepAliveClosure: a parallel version of the above.
8667 // The work queues are private to each closure (thread),
8668 // but (may be) available for stealing by other threads.
8669 void CMSParKeepAliveClosure::do_oop(oop obj) {
8670   HeapWord* addr = (HeapWord*)obj;
8671   if (_span.contains(addr) &&
8672       !_bit_map->isMarked(addr)) {
8673     // In general, during recursive tracing, several threads
8674     // may be concurrently getting here; the first one to
8675     // "tag" it, claims it.
8676     if (_bit_map->par_mark(addr)) {
8677       bool res = _work_queue->push(obj);
8678       assert(res, "Low water mark should be much less than capacity");
8679       // Do a recursive trim in the hope that this will keep
8680       // stack usage lower, but leave some oops for potential stealers
8681       trim_queue(_low_water_mark);
8682     } // Else, another thread got there first
8683   }
8684 }
8685 
8686 void CMSParKeepAliveClosure::do_oop(oop* p)       { CMSParKeepAliveClosure::do_oop_work(p); }
8687 void CMSParKeepAliveClosure::do_oop(narrowOop* p) { CMSParKeepAliveClosure::do_oop_work(p); }
8688 
8689 void CMSParKeepAliveClosure::trim_queue(uint max) {
8690   while (_work_queue->size() > max) {
8691     oop new_oop;
8692     if (_work_queue->pop_local(new_oop)) {
8693       assert(new_oop != NULL && new_oop->is_oop(), "Expected an oop");
8694       assert(_bit_map->isMarked((HeapWord*)new_oop),
8695              "no white objects on this stack!");
8696       assert(_span.contains((HeapWord*)new_oop), "Out of bounds oop");
8697       // iterate over the oops in this oop, marking and pushing
8698       // the ones in CMS heap (i.e. in _span).
8699       new_oop->oop_iterate(&_mark_and_push);
8700     }
8701   }
8702 }
8703 
8704 CMSInnerParMarkAndPushClosure::CMSInnerParMarkAndPushClosure(
8705                                 CMSCollector* collector,
8706                                 MemRegion span, CMSBitMap* bit_map,
8707                                 CMSMarkStack* revisit_stack,
8708                                 OopTaskQueue* work_queue):
8709   Par_KlassRememberingOopClosure(collector, NULL, revisit_stack),
8710   _span(span),
8711   _bit_map(bit_map),
8712   _work_queue(work_queue) { }
8713 
8714 void CMSInnerParMarkAndPushClosure::do_oop(oop obj) {
8715   HeapWord* addr = (HeapWord*)obj;
8716   if (_span.contains(addr) &&
8717       !_bit_map->isMarked(addr)) {
8718     if (_bit_map->par_mark(addr)) {
8719       bool simulate_overflow = false;
8720       NOT_PRODUCT(
8721         if (CMSMarkStackOverflowALot &&
8722             _collector->par_simulate_overflow()) {
8723           // simulate a stack overflow
8724           simulate_overflow = true;
8725         }
8726       )
8727       if (simulate_overflow || !_work_queue->push(obj)) {
8728         _collector->par_push_on_overflow_list(obj);
8729         _collector->_par_kac_ovflw++;
8730       }
8731     } // Else another thread got there already
8732   }
8733 }
8734 
8735 void CMSInnerParMarkAndPushClosure::do_oop(oop* p)       { CMSInnerParMarkAndPushClosure::do_oop_work(p); }
8736 void CMSInnerParMarkAndPushClosure::do_oop(narrowOop* p) { CMSInnerParMarkAndPushClosure::do_oop_work(p); }
8737 
8738 //////////////////////////////////////////////////////////////////
8739 //  CMSExpansionCause                /////////////////////////////
8740 //////////////////////////////////////////////////////////////////
8741 const char* CMSExpansionCause::to_string(CMSExpansionCause::Cause cause) {
8742   switch (cause) {
8743     case _no_expansion:
8744       return "No expansion";
8745     case _satisfy_free_ratio:
8746       return "Free ratio";
8747     case _satisfy_promotion:
8748       return "Satisfy promotion";
8749     case _satisfy_allocation:
8750       return "allocation";
8751     case _allocate_par_lab:
8752       return "Par LAB";
8753     case _allocate_par_spooling_space:
8754       return "Par Spooling Space";
8755     case _adaptive_size_policy:
8756       return "Ergonomics";
8757     default:
8758       return "unknown";
8759   }
8760 }
8761 
8762 void CMSDrainMarkingStackClosure::do_void() {
8763   // the max number to take from overflow list at a time
8764   const size_t num = _mark_stack->capacity()/4;
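  // A quarter of the stack's capacity per refill, presumably to leave
  // headroom: scanning the objects popped below can push newly marked
  // objects back onto this same stack.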
8765   assert(!_concurrent_precleaning || _collector->overflow_list_is_empty(),
8766          "Overflow list should be NULL during concurrent phases");
8767   while (!_mark_stack->isEmpty() ||
8768          // if stack is empty, check the overflow list
8769          _collector->take_from_overflow_list(num, _mark_stack)) {
8770     oop obj = _mark_stack->pop();
8771     HeapWord* addr = (HeapWord*)obj;
8772     assert(_span.contains(addr), "Should be within span");
8773     assert(_bit_map->isMarked(addr), "Should be marked");
8774     assert(obj->is_oop(), "Should be an oop");
8775     obj->oop_iterate(_keep_alive);
8776   }
8777 }
8778 
8779 void CMSParDrainMarkingStackClosure::do_void() {
8780   // drain queue
8781   trim_queue(0);
8782 }
8783 
8784 // Trim our work_queue so its length is below max at return
8785 void CMSParDrainMarkingStackClosure::trim_queue(uint max) {
8786   while (_work_queue->size() > max) {
8787     oop new_oop;
8788     if (_work_queue->pop_local(new_oop)) {
8789       assert(new_oop->is_oop(), "Expected an oop");
8790       assert(_bit_map->isMarked((HeapWord*)new_oop),
8791              "no white objects on this stack!");
8792       assert(_span.contains((HeapWord*)new_oop), "Out of bounds oop");
8793       // iterate over the oops in this oop, marking and pushing
8794       // the ones in CMS heap (i.e. in _span).
8795       new_oop->oop_iterate(&_mark_and_push);
8796     }
8797   }
8798 }
8799 
8800 ////////////////////////////////////////////////////////////////////
8801 // Support for Marking Stack Overflow list handling and related code
8802 ////////////////////////////////////////////////////////////////////
8803 // Much of the following code is similar in shape and spirit to the
// code used in ParNewGC. We should try to share that code
8805 // as much as possible in the future.
8806 
8807 #ifndef PRODUCT
8808 // Debugging support for CMSStackOverflowALot
8809 
// It's OK to call this multi-threaded; the worst thing
// that can happen is that we'll get a bunch of closely
// spaced simulated overflows, but that's OK, in fact
// probably good as it would exercise the overflow code
// under contention.
8815 bool CMSCollector::simulate_overflow() {
8816   if (_overflow_counter-- <= 0) { // just being defensive
8817     _overflow_counter = CMSMarkStackOverflowInterval;
8818     return true;
8819   } else {
8820     return false;
8821   }
8822 }
8823 
8824 bool CMSCollector::par_simulate_overflow() {
8825   return simulate_overflow();
8826 }
8827 #endif
8828 
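// The overflow list is threaded through the mark words of the objects
// on it (see push_on_overflow_list() below). As each object is taken
// off the list here, its mark word is reset to the default prototype;
// any non-trivial mark that the list link displaced was saved by
// preserve_mark_if_necessary() and is reinstated at the end of the
// collection by restore_preserved_marks_if_any().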
8829 // Single-threaded
8830 bool CMSCollector::take_from_overflow_list(size_t num, CMSMarkStack* stack) {
8831   assert(stack->isEmpty(), "Expected precondition");
8832   assert(stack->capacity() > num, "Shouldn't bite more than can chew");
8833   size_t i = num;
8834   oop  cur = _overflow_list;
8835   const markOop proto = markOopDesc::prototype();
8836   NOT_PRODUCT(ssize_t n = 0;)
8837   for (oop next; i > 0 && cur != NULL; cur = next, i--) {
8838     next = oop(cur->mark());
8839     cur->set_mark(proto);   // until proven otherwise
8840     assert(cur->is_oop(), "Should be an oop");
8841     bool res = stack->push(cur);
8842     assert(res, "Bit off more than can chew?");
8843     NOT_PRODUCT(n++;)
8844   }
8845   _overflow_list = cur;
8846 #ifndef PRODUCT
8847   assert(_num_par_pushes >= n, "Too many pops?");
  _num_par_pushes -= n;
8849 #endif
8850   return !stack->isEmpty();
8851 }
8852 
8853 #define BUSY  (oop(0x1aff1aff))
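// BUSY is a sentinel: an unaligned bit pattern that can never be the
// address of a real object, so it can be stored in _overflow_list to
// indicate that some thread has (temporarily) claimed the entire list.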
8854 // (MT-safe) Get a prefix of at most "num" from the list.
8855 // The overflow list is chained through the mark word of
8856 // each object in the list. We fetch the entire list,
8857 // break off a prefix of the right size and return the
8858 // remainder. If other threads try to take objects from
8859 // the overflow list at that time, they will wait for
8860 // some time to see if data becomes available. If (and
8861 // only if) another thread places one or more object(s)
8862 // on the global list before we have returned the suffix
8863 // to the global list, we will walk down our local list
8864 // to find its end and append the global list to
8865 // our suffix before returning it. This suffix walk can
8866 // prove to be expensive (quadratic in the amount of traffic)
8867 // when there are many objects in the overflow list and
8868 // there is much producer-consumer contention on the list.
8869 // *NOTE*: The overflow list manipulation code here and
8870 // in ParNewGeneration:: are very similar in shape,
8871 // except that in the ParNew case we use the old (from/eden)
8872 // copy of the object to thread the list via its klass word.
8873 // Because of the common code, if you make any changes in
8874 // the code below, please check the ParNew version to see if
8875 // similar changes might be needed.
8876 // CR 6797058 has been filed to consolidate the common code.
8877 bool CMSCollector::par_take_from_overflow_list(size_t num,
8878                                                OopTaskQueue* work_q,
8879                                                int no_of_gc_threads) {
8880   assert(work_q->size() == 0, "First empty local work queue");
8881   assert(num < work_q->max_elems(), "Can't bite more than we can chew");
8882   if (_overflow_list == NULL) {
8883     return false;
8884   }
8885   // Grab the entire list; we'll put back a suffix
8886   oop prefix = (oop)Atomic::xchg_ptr(BUSY, &_overflow_list);
8887   Thread* tid = Thread::current();
  // Before "no_of_gc_threads" was introduced, CMSOverflowSpinCount was
  // set to ParallelGCThreads.
  size_t CMSOverflowSpinCount = (size_t) no_of_gc_threads;
8891   size_t sleep_time_millis = MAX2((size_t)1, num/100);
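  // The sleep below scales with the number of objects requested, the
  // (heuristic) assumption being that a larger request can afford to
  // wait longer for the list to become available.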
8892   // If the list is busy, we spin for a short while,
8893   // sleeping between attempts to get the list.
8894   for (size_t spin = 0; prefix == BUSY && spin < CMSOverflowSpinCount; spin++) {
8895     os::sleep(tid, sleep_time_millis, false);
8896     if (_overflow_list == NULL) {
8897       // Nothing left to take
8898       return false;
8899     } else if (_overflow_list != BUSY) {
8900       // Try and grab the prefix
8901       prefix = (oop)Atomic::xchg_ptr(BUSY, &_overflow_list);
8902     }
8903   }
8904   // If the list was found to be empty, or we spun long
8905   // enough, we give up and return empty-handed. If we leave
8906   // the list in the BUSY state below, it must be the case that
8907   // some other thread holds the overflow list and will set it
8908   // to a non-BUSY state in the future.
8909   if (prefix == NULL || prefix == BUSY) {
8910      // Nothing to take or waited long enough
8911      if (prefix == NULL) {
8912        // Write back the NULL in case we overwrote it with BUSY above
8913        // and it is still the same value.
8914        (void) Atomic::cmpxchg_ptr(NULL, &_overflow_list, BUSY);
8915      }
8916      return false;
8917   }
8918   assert(prefix != NULL && prefix != BUSY, "Error");
8919   size_t i = num;
8920   oop cur = prefix;
8921   // Walk down the first "num" objects, unless we reach the end.
8922   for (; i > 1 && cur->mark() != NULL; cur = oop(cur->mark()), i--);
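  // On exit, cur points at the num-th object (or at the last object if
  // the list is shorter than num), so cur->mark() is either NULL or the
  // head of the suffix to be returned to the global list.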
8923   if (cur->mark() == NULL) {
8924     // We have "num" or fewer elements in the list, so there
8925     // is nothing to return to the global list.
8926     // Write back the NULL in lieu of the BUSY we wrote
8927     // above, if it is still the same value.
8928     if (_overflow_list == BUSY) {
8929       (void) Atomic::cmpxchg_ptr(NULL, &_overflow_list, BUSY);
8930     }
8931   } else {
    // Chop off the suffix and return it to the global list.
8933     assert(cur->mark() != BUSY, "Error");
8934     oop suffix_head = cur->mark(); // suffix will be put back on global list
8935     cur->set_mark(NULL);           // break off suffix
    // It's possible that the list is still in the empty (BUSY) state
8937     // we left it in a short while ago; in that case we may be
8938     // able to place back the suffix without incurring the cost
8939     // of a walk down the list.
8940     oop observed_overflow_list = _overflow_list;
8941     oop cur_overflow_list = observed_overflow_list;
8942     bool attached = false;
8943     while (observed_overflow_list == BUSY || observed_overflow_list == NULL) {
8944       observed_overflow_list =
8945         (oop) Atomic::cmpxchg_ptr(suffix_head, &_overflow_list, cur_overflow_list);
8946       if (cur_overflow_list == observed_overflow_list) {
8947         attached = true;
8948         break;
8949       } else cur_overflow_list = observed_overflow_list;
8950     }
8951     if (!attached) {
8952       // Too bad, someone else sneaked in (at least) an element; we'll need
8953       // to do a splice. Find tail of suffix so we can prepend suffix to global
8954       // list.
8955       for (cur = suffix_head; cur->mark() != NULL; cur = (oop)(cur->mark()));
8956       oop suffix_tail = cur;
8957       assert(suffix_tail != NULL && suffix_tail->mark() == NULL,
8958              "Tautology");
8959       observed_overflow_list = _overflow_list;
8960       do {
8961         cur_overflow_list = observed_overflow_list;
8962         if (cur_overflow_list != BUSY) {
8963           // Do the splice ...
8964           suffix_tail->set_mark(markOop(cur_overflow_list));
8965         } else { // cur_overflow_list == BUSY
8966           suffix_tail->set_mark(NULL);
8967         }
8968         // ... and try to place spliced list back on overflow_list ...
8969         observed_overflow_list =
8970           (oop) Atomic::cmpxchg_ptr(suffix_head, &_overflow_list, cur_overflow_list);
8971       } while (cur_overflow_list != observed_overflow_list);
8972       // ... until we have succeeded in doing so.
8973     }
8974   }
8975 
8976   // Push the prefix elements on work_q
8977   assert(prefix != NULL, "control point invariant");
8978   const markOop proto = markOopDesc::prototype();
8979   oop next;
8980   NOT_PRODUCT(ssize_t n = 0;)
8981   for (cur = prefix; cur != NULL; cur = next) {
8982     next = oop(cur->mark());
8983     cur->set_mark(proto);   // until proven otherwise
8984     assert(cur->is_oop(), "Should be an oop");
8985     bool res = work_q->push(cur);
8986     assert(res, "Bit off more than we can chew?");
8987     NOT_PRODUCT(n++;)
8988   }
8989 #ifndef PRODUCT
8990   assert(_num_par_pushes >= n, "Too many pops?");
8991   Atomic::add_ptr(-(intptr_t)n, &_num_par_pushes);
8992 #endif
8993   return true;
8994 }
8995 
8996 // Single-threaded
8997 void CMSCollector::push_on_overflow_list(oop p) {
8998   NOT_PRODUCT(_num_par_pushes++;)
8999   assert(p->is_oop(), "Not an oop");
9000   preserve_mark_if_necessary(p);
9001   p->set_mark((markOop)_overflow_list);
9002   _overflow_list = p;
9003 }
9004 
9005 // Multi-threaded; use CAS to prepend to overflow list
9006 void CMSCollector::par_push_on_overflow_list(oop p) {
9007   NOT_PRODUCT(Atomic::inc_ptr(&_num_par_pushes);)
9008   assert(p->is_oop(), "Not an oop");
9009   par_preserve_mark_if_necessary(p);
9010   oop observed_overflow_list = _overflow_list;
9011   oop cur_overflow_list;
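  // Standard CAS retry loop. If the list is currently BUSY (claimed by
  // a thread in par_take_from_overflow_list()), we must not link to the
  // sentinel; instead p becomes a 1-element, NULL-terminated list, and
  // the claiming thread is left to detect that the list has changed.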
9012   do {
9013     cur_overflow_list = observed_overflow_list;
9014     if (cur_overflow_list != BUSY) {
9015       p->set_mark(markOop(cur_overflow_list));
9016     } else {
9017       p->set_mark(NULL);
9018     }
9019     observed_overflow_list =
9020       (oop) Atomic::cmpxchg_ptr(p, &_overflow_list, cur_overflow_list);
9021   } while (cur_overflow_list != observed_overflow_list);
9022 }
9023 #undef BUSY
9024 
9025 // Single threaded
9026 // General Note on GrowableArray: pushes may silently fail
9027 // because we are (temporarily) out of C-heap for expanding
9028 // the stack. The problem is quite ubiquitous and affects
9029 // a lot of code in the JVM. The prudent thing for GrowableArray
9030 // to do (for now) is to exit with an error. However, that may
9031 // be too draconian in some cases because the caller may be
9032 // able to recover without much harm. For such cases, we
9033 // should probably introduce a "soft_push" method which returns
9034 // an indication of success or failure with the assumption that
9035 // the caller may be able to recover from a failure; code in
9036 // the VM can then be changed, incrementally, to deal with such
// failures where possible, thus incrementally hardening the VM
// in such low-resource situations.
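// Marks must be preserved because pushing an object on the overflow
// list overwrites its mark word with the list link; marks that encode
// more than the default header (and so cannot simply be reset to the
// prototype when the object is popped) are saved here and restored by
// restore_preserved_marks_if_any().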
9039 void CMSCollector::preserve_mark_work(oop p, markOop m) {
9040   _preserved_oop_stack.push(p);
9041   _preserved_mark_stack.push(m);
9042   assert(m == p->mark(), "Mark word changed");
9043   assert(_preserved_oop_stack.size() == _preserved_mark_stack.size(),
9044          "bijection");
9045 }
9046 
9047 // Single threaded
9048 void CMSCollector::preserve_mark_if_necessary(oop p) {
9049   markOop m = p->mark();
9050   if (m->must_be_preserved(p)) {
9051     preserve_mark_work(p, m);
9052   }
9053 }
9054 
9055 void CMSCollector::par_preserve_mark_if_necessary(oop p) {
9056   markOop m = p->mark();
9057   if (m->must_be_preserved(p)) {
9058     MutexLockerEx x(ParGCRareEvent_lock, Mutex::_no_safepoint_check_flag);
9059     // Even though we read the mark word without holding
9060     // the lock, we are assured that it will not change
9061     // because we "own" this oop, so no other thread can
9062     // be trying to push it on the overflow list; see
9063     // the assertion in preserve_mark_work() that checks
9064     // that m == p->mark().
9065     preserve_mark_work(p, m);
9066   }
9067 }
9068 
9069 // We should be able to do this multi-threaded,
9070 // a chunk of stack being a task (this is
9071 // correct because each oop only ever appears
// once in the overflow list). However, it's
// not very easy to completely overlap this with
// other operations, so it will generally not be done
// until all work has been completed. Because we
9076 // expect the preserved oop stack (set) to be small,
9077 // it's probably fine to do this single-threaded.
9078 // We can explore cleverer concurrent/overlapped/parallel
9079 // processing of preserved marks if we feel the
9080 // need for this in the future. Stack overflow should
9081 // be so rare in practice and, when it happens, its
9082 // effect on performance so great that this will
9083 // likely just be in the noise anyway.
9084 void CMSCollector::restore_preserved_marks_if_any() {
9085   assert(SafepointSynchronize::is_at_safepoint(),
9086          "world should be stopped");
9087   assert(Thread::current()->is_ConcurrentGC_thread() ||
9088          Thread::current()->is_VM_thread(),
9089          "should be single-threaded");
9090   assert(_preserved_oop_stack.size() == _preserved_mark_stack.size(),
9091          "bijection");
9092 
9093   while (!_preserved_oop_stack.is_empty()) {
9094     oop p = _preserved_oop_stack.pop();
9095     assert(p->is_oop(), "Should be an oop");
9096     assert(_span.contains(p), "oop should be in _span");
9097     assert(p->mark() == markOopDesc::prototype(),
9098            "Set when taken from overflow list");
9099     markOop m = _preserved_mark_stack.pop();
9100     p->set_mark(m);
9101   }
9102   assert(_preserved_mark_stack.is_empty() && _preserved_oop_stack.is_empty(),
9103          "stacks were cleared above");
9104 }
9105 
9106 #ifndef PRODUCT
9107 bool CMSCollector::no_preserved_marks() const {
9108   return _preserved_mark_stack.is_empty() && _preserved_oop_stack.is_empty();
9109 }
9110 #endif
9111 
9112 CMSAdaptiveSizePolicy* ASConcurrentMarkSweepGeneration::cms_size_policy() const
9113 {
9114   GenCollectedHeap* gch = (GenCollectedHeap*) GenCollectedHeap::heap();
9115   CMSAdaptiveSizePolicy* size_policy =
9116     (CMSAdaptiveSizePolicy*) gch->gen_policy()->size_policy();
9117   assert(size_policy->is_gc_cms_adaptive_size_policy(),
9118     "Wrong type for size policy");
9119   return size_policy;
9120 }
9121 
9122 void ASConcurrentMarkSweepGeneration::resize(size_t cur_promo_size,
9123                                            size_t desired_promo_size) {
9124   if (cur_promo_size < desired_promo_size) {
9125     size_t expand_bytes = desired_promo_size - cur_promo_size;
9126     if (PrintAdaptiveSizePolicy && Verbose) {
9127       gclog_or_tty->print_cr(" ASConcurrentMarkSweepGeneration::resize "
9128         "Expanding tenured generation by " SIZE_FORMAT " (bytes)",
9129         expand_bytes);
9130     }
9131     expand(expand_bytes,
9132            MinHeapDeltaBytes,
9133            CMSExpansionCause::_adaptive_size_policy);
9134   } else if (desired_promo_size < cur_promo_size) {
9135     size_t shrink_bytes = cur_promo_size - desired_promo_size;
9136     if (PrintAdaptiveSizePolicy && Verbose) {
9137       gclog_or_tty->print_cr(" ASConcurrentMarkSweepGeneration::resize "
9138         "Shrinking tenured generation by " SIZE_FORMAT " (bytes)",
9139         shrink_bytes);
9140     }
9141     shrink(shrink_bytes);
9142   }
9143 }
9144 
9145 CMSGCAdaptivePolicyCounters* ASConcurrentMarkSweepGeneration::gc_adaptive_policy_counters() {
9146   GenCollectedHeap* gch = GenCollectedHeap::heap();
9147   CMSGCAdaptivePolicyCounters* counters =
9148     (CMSGCAdaptivePolicyCounters*) gch->collector_policy()->counters();
9149   assert(counters->kind() == GCPolicyCounters::CMSGCAdaptivePolicyCountersKind,
9150     "Wrong kind of counters");
9151   return counters;
9152 }
9153 
9154 
9155 void ASConcurrentMarkSweepGeneration::update_counters() {
9156   if (UsePerfData) {
9157     _space_counters->update_all();
9158     _gen_counters->update_all();
9159     CMSGCAdaptivePolicyCounters* counters = gc_adaptive_policy_counters();
9160     GenCollectedHeap* gch = GenCollectedHeap::heap();
9161     CMSGCStats* gc_stats_l = (CMSGCStats*) gc_stats();
9162     assert(gc_stats_l->kind() == GCStats::CMSGCStatsKind,
9163       "Wrong gc statistics type");
9164     counters->update_counters(gc_stats_l);
9165   }
9166 }
9167 
9168 void ASConcurrentMarkSweepGeneration::update_counters(size_t used) {
9169   if (UsePerfData) {
9170     _space_counters->update_used(used);
9171     _space_counters->update_capacity();
9172     _gen_counters->update_all();
9173 
9174     CMSGCAdaptivePolicyCounters* counters = gc_adaptive_policy_counters();
9175     GenCollectedHeap* gch = GenCollectedHeap::heap();
9176     CMSGCStats* gc_stats_l = (CMSGCStats*) gc_stats();
9177     assert(gc_stats_l->kind() == GCStats::CMSGCStatsKind,
9178       "Wrong gc statistics type");
9179     counters->update_counters(gc_stats_l);
9180   }
9181 }
9182 
// The desired expansion delta is computed so that:
// . the desired free percentage or greater is attained
9185 void ASConcurrentMarkSweepGeneration::compute_new_size() {
9186   assert_locked_or_safepoint(Heap_lock);
9187 
9188   GenCollectedHeap* gch = (GenCollectedHeap*) GenCollectedHeap::heap();
9189 
9190   // If incremental collection failed, we just want to expand
9191   // to the limit.
9192   if (incremental_collection_failed()) {
9193     clear_incremental_collection_failed();
9194     grow_to_reserved();
9195     return;
9196   }
9197 
9198   assert(UseAdaptiveSizePolicy, "Should be using adaptive sizing");
9199 
9200   assert(gch->kind() == CollectedHeap::GenCollectedHeap,
9201     "Wrong type of heap");
9202   int prev_level = level() - 1;
9203   assert(prev_level >= 0, "The cms generation is the lowest generation");
9204   Generation* prev_gen = gch->get_gen(prev_level);
9205   assert(prev_gen->kind() == Generation::ASParNew,
9206     "Wrong type of young generation");
9207   ParNewGeneration* younger_gen = (ParNewGeneration*) prev_gen;
9208   size_t cur_eden = younger_gen->eden()->capacity();
9209   CMSAdaptiveSizePolicy* size_policy = cms_size_policy();
9210   size_t cur_promo = free();
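  // Have the policy compute a desired tenured free-space (promotion)
  // size from the current free space, the maximum space available to
  // this generation, and the current eden capacity, then resize this
  // generation toward the policy's desired promo size.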
9211   size_policy->compute_tenured_generation_free_space(cur_promo,
9212                                                        max_available(),
9213                                                        cur_eden);
9214   resize(cur_promo, size_policy->promo_size());
9215 
9216   // Record the new size of the space in the cms generation
9217   // that is available for promotions.  This is temporary.
9218   // It should be the desired promo size.
9219   size_policy->avg_cms_promo()->sample(free());
9220   size_policy->avg_old_live()->sample(used());
9221 
9222   if (UsePerfData) {
9223     CMSGCAdaptivePolicyCounters* counters = gc_adaptive_policy_counters();
9224     counters->update_cms_capacity_counter(capacity());
9225   }
9226 }
9227 
9228 void ASConcurrentMarkSweepGeneration::shrink_by(size_t desired_bytes) {
9229   assert_locked_or_safepoint(Heap_lock);
9230   assert_lock_strong(freelistLock());
9231   HeapWord* old_end = _cmsSpace->end();
9232   HeapWord* unallocated_start = _cmsSpace->unallocated_block();
9233   assert(old_end >= unallocated_start, "Miscalculation of unallocated_start");
9234   FreeChunk* chunk_at_end = find_chunk_at_end();
9235   if (chunk_at_end == NULL) {
9236     // No room to shrink
9237     if (PrintGCDetails && Verbose) {
9238       gclog_or_tty->print_cr("No room to shrink: old_end  "
9239         PTR_FORMAT "  unallocated_start  " PTR_FORMAT
9240         " chunk_at_end  " PTR_FORMAT,
9241         old_end, unallocated_start, chunk_at_end);
9242     }
9243     return;
9244   } else {
9246     // Find the chunk at the end of the space and determine
9247     // how much it can be shrunk.
9248     size_t shrinkable_size_in_bytes = chunk_at_end->size();
9249     size_t aligned_shrinkable_size_in_bytes =
9250       align_size_down(shrinkable_size_in_bytes, os::vm_page_size());
9251     assert(unallocated_start <= chunk_at_end->end(),
9252       "Inconsistent chunk at end of space");
9253     size_t bytes = MIN2(desired_bytes, aligned_shrinkable_size_in_bytes);
9254     size_t word_size_before = heap_word_size(_virtual_space.committed_size());
9255 
9256     // Shrink the underlying space
9257     _virtual_space.shrink_by(bytes);
9258     if (PrintGCDetails && Verbose) {
9259       gclog_or_tty->print_cr("ConcurrentMarkSweepGeneration::shrink_by:"
9260         " desired_bytes " SIZE_FORMAT
9261         " shrinkable_size_in_bytes " SIZE_FORMAT
9262         " aligned_shrinkable_size_in_bytes " SIZE_FORMAT
9263         "  bytes  " SIZE_FORMAT,
9264         desired_bytes, shrinkable_size_in_bytes,
9265         aligned_shrinkable_size_in_bytes, bytes);
      gclog_or_tty->print_cr("          old_end  " PTR_FORMAT
        "  unallocated_start  " PTR_FORMAT,
        old_end, unallocated_start);
9269     }
9270 
9271     // If the space did shrink (shrinking is not guaranteed),
9272     // shrink the chunk at the end by the appropriate amount.
9273     if (((HeapWord*)_virtual_space.high()) < old_end) {
9274       size_t new_word_size =
9275         heap_word_size(_virtual_space.committed_size());
9276 
9277       // Have to remove the chunk from the dictionary because it is changing
9278       // size and might be someplace elsewhere in the dictionary.
9279 
9280       // Get the chunk at end, shrink it, and put it
9281       // back.
9282       _cmsSpace->removeChunkFromDictionary(chunk_at_end);
9283       size_t word_size_change = word_size_before - new_word_size;
9284       size_t chunk_at_end_old_size = chunk_at_end->size();
9285       assert(chunk_at_end_old_size >= word_size_change,
9286         "Shrink is too large");
9287       chunk_at_end->set_size(chunk_at_end_old_size -
9288                           word_size_change);
9289       _cmsSpace->freed((HeapWord*) chunk_at_end->end(),
9290         word_size_change);
9291 
9292       _cmsSpace->returnChunkToDictionary(chunk_at_end);
9293 
9294       MemRegion mr(_cmsSpace->bottom(), new_word_size);
9295       _bts->resize(new_word_size);  // resize the block offset shared array
9296       Universe::heap()->barrier_set()->resize_covered_region(mr);
9297       _cmsSpace->assert_locked();
9298       _cmsSpace->set_end((HeapWord*)_virtual_space.high());
9299 
9300       NOT_PRODUCT(_cmsSpace->dictionary()->verify());
9301 
9302       // update the space and generation capacity counters
9303       if (UsePerfData) {
9304         _space_counters->update_capacity();
9305         _gen_counters->update_all();
9306       }
9307 
9308       if (Verbose && PrintGCDetails) {
9309         size_t new_mem_size = _virtual_space.committed_size();
9310         size_t old_mem_size = new_mem_size + bytes;
        gclog_or_tty->print_cr("Shrinking %s from " SIZE_FORMAT "K by "
                      SIZE_FORMAT "K to " SIZE_FORMAT "K",
                      name(), old_mem_size/K, bytes/K, new_mem_size/K);
9313       }
9314     }
9315 
9316     assert(_cmsSpace->unallocated_block() <= _cmsSpace->end(),
9317       "Inconsistency at end of space");
9318     assert(chunk_at_end->end() == _cmsSpace->end(),
9319       "Shrinking is inconsistent");
9320     return;
9321   }
9322 }
9323 
9324 // Transfer some number of overflown objects to usual marking
9325 // stack. Return true if some objects were transferred.
9326 bool MarkRefsIntoAndScanClosure::take_from_overflow_list() {
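  // Take at most a quarter of the stack's remaining capacity, capped by
  // ParGCDesiredObjsFromOverflowList, presumably so the refill leaves
  // room for objects pushed onto the stack while these are scanned.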
9327   size_t num = MIN2((size_t)(_mark_stack->capacity() - _mark_stack->length())/4,
9328                     (size_t)ParGCDesiredObjsFromOverflowList);
9329 
9330   bool res = _collector->take_from_overflow_list(num, _mark_stack);
9331   assert(_collector->overflow_list_is_empty() || res,
9332          "If list is not empty, we should have taken something");
9333   assert(!res || !_mark_stack->isEmpty(),
9334          "If we took something, it should now be on our stack");
9335   return res;
9336 }
9337 
9338 size_t MarkDeadObjectsClosure::do_blk(HeapWord* addr) {
9339   size_t res = _sp->block_size_no_stall(addr, _collector);
9340   if (_sp->block_is_obj(addr)) {
9341     if (_live_bit_map->isMarked(addr)) {
9342       // It can't have been dead in a previous cycle
9343       guarantee(!_dead_bit_map->isMarked(addr), "No resurrection!");
9344     } else {
9345       _dead_bit_map->mark(addr);      // mark the dead object
9346     }
9347   }
9348   // Could be 0, if the block size could not be computed without stalling.
9349   return res;
9350 }
9351 
9352 TraceCMSMemoryManagerStats::TraceCMSMemoryManagerStats(CMSCollector::CollectorState phase, GCCause::Cause cause): TraceMemoryManagerStats() {
9353 
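  // The three phases split one logical CMS collection's memory manager
  // bookkeeping: initial marking records the GC begin time and pre-GC
  // usage, both marking phases accumulate GC time, and sweeping records
  // peak/post-GC usage and the GC end time, and counts the collection.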
9354   switch (phase) {
9355     case CMSCollector::InitialMarking:
9356       initialize(true  /* fullGC */ ,
9357                  cause /* cause of the GC */,
9358                  true  /* recordGCBeginTime */,
9359                  true  /* recordPreGCUsage */,
9360                  false /* recordPeakUsage */,
9361                  false /* recordPostGCusage */,
9362                  true  /* recordAccumulatedGCTime */,
9363                  false /* recordGCEndTime */,
9364                  false /* countCollection */  );
9365       break;
9366 
9367     case CMSCollector::FinalMarking:
9368       initialize(true  /* fullGC */ ,
9369                  cause /* cause of the GC */,
9370                  false /* recordGCBeginTime */,
9371                  false /* recordPreGCUsage */,
9372                  false /* recordPeakUsage */,
9373                  false /* recordPostGCusage */,
9374                  true  /* recordAccumulatedGCTime */,
9375                  false /* recordGCEndTime */,
9376                  false /* countCollection */  );
9377       break;
9378 
9379     case CMSCollector::Sweeping:
9380       initialize(true  /* fullGC */ ,
9381                  cause /* cause of the GC */,
9382                  false /* recordGCBeginTime */,
9383                  false /* recordPreGCUsage */,
9384                  true  /* recordPeakUsage */,
9385                  true  /* recordPostGCusage */,
9386                  false /* recordAccumulatedGCTime */,
9387                  true  /* recordGCEndTime */,
9388                  true  /* countCollection */  );
9389       break;
9390 
9391     default:
9392       ShouldNotReachHere();
9393   }
9394 }
9395