1 /* 2 * Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved. 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 4 * 5 * This code is free software; you can redistribute it and/or modify it 6 * under the terms of the GNU General Public License version 2 only, as 7 * published by the Free Software Foundation. 8 * 9 * This code is distributed in the hope that it will be useful, but WITHOUT 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License 12 * version 2 for more details (a copy is included in the LICENSE file that 13 * accompanied this code). 14 * 15 * You should have received a copy of the GNU General Public License version 16 * 2 along with this work; if not, write to the Free Software Foundation, 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 18 * 19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA 20 * or visit www.oracle.com if you need additional information or have any 21 * questions. 22 * 23 */ 24 25 #ifndef SHARE_VM_GC_IMPLEMENTATION_CONCURRENTMARKSWEEP_CONCURRENTMARKSWEEPGENERATION_HPP 26 #define SHARE_VM_GC_IMPLEMENTATION_CONCURRENTMARKSWEEP_CONCURRENTMARKSWEEPGENERATION_HPP 27 28 #include "gc_implementation/shared/gcHeapSummary.hpp" 29 #include "gc_implementation/shared/gSpaceCounters.hpp" 30 #include "gc_implementation/shared/gcStats.hpp" 31 #include "gc_implementation/shared/gcWhen.hpp" 32 #include "gc_implementation/shared/generationCounters.hpp" 33 #include "memory/freeBlockDictionary.hpp" 34 #include "memory/generation.hpp" 35 #include "runtime/mutexLocker.hpp" 36 #include "runtime/virtualspace.hpp" 37 #include "services/memoryService.hpp" 38 #include "utilities/bitMap.inline.hpp" 39 #include "utilities/stack.inline.hpp" 40 #include "utilities/taskqueue.hpp" 41 #include "utilities/yieldingWorkgroup.hpp" 42 43 // ConcurrentMarkSweepGeneration is in support of a concurrent 44 // mark-sweep old generation in the Detlefs-Printezis--Boehm-Demers-Schenker 45 // style. We assume, for now, that this generation is always the 46 // seniormost generation and for simplicity 47 // in the first implementation, that this generation is a single compactible 48 // space. Neither of these restrictions appears essential, and will be 49 // relaxed in the future when more time is available to implement the 50 // greater generality (and there's a need for it). 51 // 52 // Concurrent mode failures are currently handled by 53 // means of a sliding mark-compact. 54 55 class CMSAdaptiveSizePolicy; 56 class CMSConcMarkingTask; 57 class CMSGCAdaptivePolicyCounters; 58 class CMSTracer; 59 class ConcurrentGCTimer; 60 class ConcurrentMarkSweepGeneration; 61 class ConcurrentMarkSweepPolicy; 62 class ConcurrentMarkSweepThread; 63 class CompactibleFreeListSpace; 64 class FreeChunk; 65 class PromotionInfo; 66 class ScanMarkedObjectsAgainCarefullyClosure; 67 class TenuredGeneration; 68 class SerialOldTracer; 69 70 // A generic CMS bit map. It's the basis for both the CMS marking bit map 71 // as well as for the mod union table (in each case only a subset of the 72 // methods are used). This is essentially a wrapper around the BitMap class, 73 // with one bit per (1<<_shifter) HeapWords. (i.e. for the marking bit map, 74 // we have _shifter == 0. and for the mod union table we have 75 // shifter == CardTableModRefBS::card_shift - LogHeapWordSize.) 76 // XXX 64-bit issues in BitMap? 
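// Illustration (assumed typical values, not normative): with 512-byte cards
// (CardTableModRefBS::card_shift == 9) and 8-byte HeapWords (LogHeapWordSize == 3),
// the mod union table would use _shifter == 9 - 3 == 6, i.e. one bit per
// 64 HeapWords (one bit per card), while the marking bit map (_shifter == 0)
// keeps one bit per HeapWord.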
77 class CMSBitMap VALUE_OBJ_CLASS_SPEC { 78 friend class VMStructs; 79 80 HeapWord* _bmStartWord; // base address of range covered by map 81 size_t _bmWordSize; // map size (in #HeapWords covered) 82 const int _shifter; // shifts to convert HeapWord to bit position 83 VirtualSpace _virtual_space; // underlying the bit map 84 BitMap _bm; // the bit map itself 85 public: 86 Mutex* const _lock; // mutex protecting _bm; 87 88 public: 89 // constructor 90 CMSBitMap(int shifter, int mutex_rank, const char* mutex_name); 91 92 // allocates the actual storage for the map 93 bool allocate(MemRegion mr); 94 // field getter 95 Mutex* lock() const { return _lock; } 96 // locking verifier convenience function 97 void assert_locked() const PRODUCT_RETURN; 98 99 // inquiries 100 HeapWord* startWord() const { return _bmStartWord; } 101 size_t sizeInWords() const { return _bmWordSize; } 102 size_t sizeInBits() const { return _bm.size(); } 103 // the following is one past the last word in space 104 HeapWord* endWord() const { return _bmStartWord + _bmWordSize; } 105 106 // reading marks 107 bool isMarked(HeapWord* addr) const; 108 bool par_isMarked(HeapWord* addr) const; // do not lock checks 109 bool isUnmarked(HeapWord* addr) const; 110 bool isAllClear() const; 111 112 // writing marks 113 void mark(HeapWord* addr); 114 // For marking by parallel GC threads; 115 // returns true if we did, false if another thread did 116 bool par_mark(HeapWord* addr); 117 118 void mark_range(MemRegion mr); 119 void par_mark_range(MemRegion mr); 120 void mark_large_range(MemRegion mr); 121 void par_mark_large_range(MemRegion mr); 122 void par_clear(HeapWord* addr); // For unmarking by parallel GC threads. 123 void clear_range(MemRegion mr); 124 void par_clear_range(MemRegion mr); 125 void clear_large_range(MemRegion mr); 126 void par_clear_large_range(MemRegion mr); 127 void clear_all(); 128 void clear_all_incrementally(); // Not yet implemented!! 129 130 NOT_PRODUCT( 131 // checks the memory region for validity 132 void region_invariant(MemRegion mr); 133 ) 134 135 // iteration 136 void iterate(BitMapClosure* cl) { 137 _bm.iterate(cl); 138 } 139 void iterate(BitMapClosure* cl, HeapWord* left, HeapWord* right); 140 void dirty_range_iterate_clear(MemRegionClosure* cl); 141 void dirty_range_iterate_clear(MemRegion mr, MemRegionClosure* cl); 142 143 // auxiliary support for iteration 144 HeapWord* getNextMarkedWordAddress(HeapWord* addr) const; 145 HeapWord* getNextMarkedWordAddress(HeapWord* start_addr, 146 HeapWord* end_addr) const; 147 HeapWord* getNextUnmarkedWordAddress(HeapWord* addr) const; 148 HeapWord* getNextUnmarkedWordAddress(HeapWord* start_addr, 149 HeapWord* end_addr) const; 150 MemRegion getAndClearMarkedRegion(HeapWord* addr); 151 MemRegion getAndClearMarkedRegion(HeapWord* start_addr, 152 HeapWord* end_addr); 153 154 // conversion utilities 155 HeapWord* offsetToHeapWord(size_t offset) const; 156 size_t heapWordToOffset(HeapWord* addr) const; 157 size_t heapWordDiffToOffsetDiff(size_t diff) const; 158 159 void print_on_error(outputStream* st, const char* prefix) const; 160 161 // debugging 162 // is this address range covered by the bit-map? 163 NOT_PRODUCT( 164 bool covers(MemRegion mr) const; 165 bool covers(HeapWord* start, size_t size = 0) const; 166 ) 167 void verifyNoOneBitsInRange(HeapWord* left, HeapWord* right) PRODUCT_RETURN; 168 }; 169 170 // Represents a marking stack used by the CMS collector. 171 // Ideally this should be GrowableArray<> just like MSC's marking stack(s). 
class CMSMarkStack: public CHeapObj<mtGC> {
  //
  friend class CMSCollector;   // to get at expansion stats further below
  //

  VirtualSpace _virtual_space;  // space for the stack
  oop*   _base;                 // bottom of stack
  size_t _index;                // one more than last occupied index
  size_t _capacity;             // max #elements
  Mutex  _par_lock;             // an advisory lock used in case of parallel access
  NOT_PRODUCT(size_t _max_depth;)  // max depth plumbed during run

 protected:
  size_t _hit_limit;      // we hit max stack size limit
  size_t _failed_double;  // we failed expansion before hitting limit

 public:
  CMSMarkStack():
    _par_lock(Mutex::event, "CMSMarkStack._par_lock", true),
    _hit_limit(0),
    _failed_double(0) {}

  bool allocate(size_t size);

  size_t capacity() const { return _capacity; }

  oop pop() {
    if (!isEmpty()) {
      return _base[--_index];
    }
    return NULL;
  }

  bool push(oop ptr) {
    if (isFull()) {
      return false;
    } else {
      _base[_index++] = ptr;
      NOT_PRODUCT(_max_depth = MAX2(_max_depth, _index));
      return true;
    }
  }

  bool isEmpty() const { return _index == 0; }
  bool isFull()  const {
    assert(_index <= _capacity, "buffer overflow");
    return _index == _capacity;
  }

  size_t length() { return _index; }

  // "Parallel versions" of some of the above
  oop par_pop() {
    // lock and pop
    MutexLockerEx x(&_par_lock, Mutex::_no_safepoint_check_flag);
    return pop();
  }

  bool par_push(oop ptr) {
    // lock and push
    MutexLockerEx x(&_par_lock, Mutex::_no_safepoint_check_flag);
    return push(ptr);
  }

  // Forcibly reset the stack, losing all of its contents.
  void reset() {
    _index = 0;
  }

  // Expand the stack, typically in response to an overflow condition
  void expand();

  // Compute the least valued stack element.
  oop least_value(HeapWord* low) {
    oop least = (oop)low;
    for (size_t i = 0; i < _index; i++) {
      least = MIN2(least, _base[i]);
    }
    return least;
  }

  // Exposed here to allow stack expansion in || case
  Mutex* par_lock() { return &_par_lock; }
};

class CardTableRS;
class CMSParGCThreadState;

class ModUnionClosure: public MemRegionClosure {
 protected:
  CMSBitMap* _t;
 public:
  ModUnionClosure(CMSBitMap* t): _t(t) { }
  void do_MemRegion(MemRegion mr);
};

class ModUnionClosurePar: public ModUnionClosure {
 public:
  ModUnionClosurePar(CMSBitMap* t): ModUnionClosure(t) { }
  void do_MemRegion(MemRegion mr);
};

// Survivor Chunk Array in support of parallelization of
// Survivor Space rescan.
276 class ChunkArray: public CHeapObj<mtGC> { 277 size_t _index; 278 size_t _capacity; 279 size_t _overflows; 280 HeapWord** _array; // storage for array 281 282 public: 283 ChunkArray() : _index(0), _capacity(0), _overflows(0), _array(NULL) {} 284 ChunkArray(HeapWord** a, size_t c): 285 _index(0), _capacity(c), _overflows(0), _array(a) {} 286 287 HeapWord** array() { return _array; } 288 void set_array(HeapWord** a) { _array = a; } 289 290 size_t capacity() { return _capacity; } 291 void set_capacity(size_t c) { _capacity = c; } 292 293 size_t end() { 294 assert(_index <= capacity(), 295 err_msg("_index (" SIZE_FORMAT ") > _capacity (" SIZE_FORMAT "): out of bounds", 296 _index, _capacity)); 297 return _index; 298 } // exclusive 299 300 HeapWord* nth(size_t n) { 301 assert(n < end(), "Out of bounds access"); 302 return _array[n]; 303 } 304 305 void reset() { 306 _index = 0; 307 if (_overflows > 0 && PrintCMSStatistics > 1) { 308 warning("CMS: ChunkArray[" SIZE_FORMAT "] overflowed " SIZE_FORMAT " times", 309 _capacity, _overflows); 310 } 311 _overflows = 0; 312 } 313 314 void record_sample(HeapWord* p, size_t sz) { 315 // For now we do not do anything with the size 316 if (_index < _capacity) { 317 _array[_index++] = p; 318 } else { 319 ++_overflows; 320 assert(_index == _capacity, 321 err_msg("_index (" SIZE_FORMAT ") > _capacity (" SIZE_FORMAT 322 "): out of bounds at overflow#" SIZE_FORMAT, 323 _index, _capacity, _overflows)); 324 } 325 } 326 }; 327 328 // 329 // Timing, allocation and promotion statistics for gc scheduling and incremental 330 // mode pacing. Most statistics are exponential averages. 331 // 332 class CMSStats VALUE_OBJ_CLASS_SPEC { 333 private: 334 ConcurrentMarkSweepGeneration* const _cms_gen; // The cms (old) gen. 335 336 // The following are exponential averages with factor alpha: 337 // avg = (100 - alpha) * avg + alpha * cur_sample 338 // 339 // The durations measure: end_time[n] - start_time[n] 340 // The periods measure: start_time[n] - start_time[n-1] 341 // 342 // The cms period and duration include only concurrent collections; time spent 343 // in foreground cms collections due to System.gc() or because of a failure to 344 // keep up are not included. 345 // 346 // There are 3 alphas to "bootstrap" the statistics. The _saved_alpha is the 347 // real value, but is used only after the first period. A value of 100 is 348 // used for the first sample so it gets the entire weight. 349 unsigned int _saved_alpha; // 0-100 350 unsigned int _gc0_alpha; 351 unsigned int _cms_alpha; 352 353 double _gc0_duration; 354 double _gc0_period; 355 size_t _gc0_promoted; // bytes promoted per gc0 356 double _cms_duration; 357 double _cms_duration_pre_sweep; // time from initiation to start of sweep 358 double _cms_duration_per_mb; 359 double _cms_period; 360 size_t _cms_allocated; // bytes of direct allocation per gc0 period 361 362 // Timers. 363 elapsedTimer _cms_timer; 364 TimeStamp _gc0_begin_time; 365 TimeStamp _cms_begin_time; 366 TimeStamp _cms_end_time; 367 368 // Snapshots of the amount used in the CMS generation. 369 size_t _cms_used_at_gc0_begin; 370 size_t _cms_used_at_gc0_end; 371 size_t _cms_used_at_cms_begin; 372 373 // Used to prevent the duty cycle from being reduced in the middle of a cms 374 // cycle. 375 bool _allow_duty_cycle_reduction; 376 377 enum { 378 _GC0_VALID = 0x1, 379 _CMS_VALID = 0x2, 380 _ALL_VALID = _GC0_VALID | _CMS_VALID 381 }; 382 383 unsigned int _valid_bits; 384 385 unsigned int _icms_duty_cycle; // icms duty cycle (0-100). 
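  // Illustration of the exponential average update described above (numbers are
  // assumed, not taken from a real run): treating alpha as a percentage, with
  // alpha == 25, a previous average of 0.100s and a new sample of 0.200s,
  //   avg = (100 - 25) * 0.100 / 100 + 25 * 0.200 / 100 = 0.125s.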
386 387 protected: 388 389 // Return a duty cycle that avoids wild oscillations, by limiting the amount 390 // of change between old_duty_cycle and new_duty_cycle (the latter is treated 391 // as a recommended value). 392 static unsigned int icms_damped_duty_cycle(unsigned int old_duty_cycle, 393 unsigned int new_duty_cycle); 394 unsigned int icms_update_duty_cycle_impl(); 395 396 // In support of adjusting of cms trigger ratios based on history 397 // of concurrent mode failure. 398 double cms_free_adjustment_factor(size_t free) const; 399 void adjust_cms_free_adjustment_factor(bool fail, size_t free); 400 401 public: 402 CMSStats(ConcurrentMarkSweepGeneration* cms_gen, 403 unsigned int alpha = CMSExpAvgFactor); 404 405 // Whether or not the statistics contain valid data; higher level statistics 406 // cannot be called until this returns true (they require at least one young 407 // gen and one cms cycle to have completed). 408 bool valid() const; 409 410 // Record statistics. 411 void record_gc0_begin(); 412 void record_gc0_end(size_t cms_gen_bytes_used); 413 void record_cms_begin(); 414 void record_cms_end(); 415 416 // Allow management of the cms timer, which must be stopped/started around 417 // yield points. 418 elapsedTimer& cms_timer() { return _cms_timer; } 419 void start_cms_timer() { _cms_timer.start(); } 420 void stop_cms_timer() { _cms_timer.stop(); } 421 422 // Basic statistics; units are seconds or bytes. 423 double gc0_period() const { return _gc0_period; } 424 double gc0_duration() const { return _gc0_duration; } 425 size_t gc0_promoted() const { return _gc0_promoted; } 426 double cms_period() const { return _cms_period; } 427 double cms_duration() const { return _cms_duration; } 428 double cms_duration_per_mb() const { return _cms_duration_per_mb; } 429 size_t cms_allocated() const { return _cms_allocated; } 430 431 size_t cms_used_at_gc0_end() const { return _cms_used_at_gc0_end;} 432 433 // Seconds since the last background cms cycle began or ended. 434 double cms_time_since_begin() const; 435 double cms_time_since_end() const; 436 437 // Higher level statistics--caller must check that valid() returns true before 438 // calling. 439 440 // Returns bytes promoted per second of wall clock time. 441 double promotion_rate() const; 442 443 // Returns bytes directly allocated per second of wall clock time. 444 double cms_allocation_rate() const; 445 446 // Rate at which space in the cms generation is being consumed (sum of the 447 // above two). 448 double cms_consumption_rate() const; 449 450 // Returns an estimate of the number of seconds until the cms generation will 451 // fill up, assuming no collection work is done. 452 double time_until_cms_gen_full() const; 453 454 // Returns an estimate of the number of seconds remaining until 455 // the cms generation collection should start. 456 double time_until_cms_start() const; 457 458 // End of higher level statistics. 459 460 // Returns the cms incremental mode duty cycle, as a percentage (0-100). 461 unsigned int icms_duty_cycle() const { return _icms_duty_cycle; } 462 463 // Update the duty cycle and return the new value. 464 unsigned int icms_update_duty_cycle(); 465 466 // Debugging. 
467 void print_on(outputStream* st) const PRODUCT_RETURN; 468 void print() const { print_on(gclog_or_tty); } 469 }; 470 471 // A closure related to weak references processing which 472 // we embed in the CMSCollector, since we need to pass 473 // it to the reference processor for secondary filtering 474 // of references based on reachability of referent; 475 // see role of _is_alive_non_header closure in the 476 // ReferenceProcessor class. 477 // For objects in the CMS generation, this closure checks 478 // if the object is "live" (reachable). Used in weak 479 // reference processing. 480 class CMSIsAliveClosure: public BoolObjectClosure { 481 const MemRegion _span; 482 const CMSBitMap* _bit_map; 483 484 friend class CMSCollector; 485 public: 486 CMSIsAliveClosure(MemRegion span, 487 CMSBitMap* bit_map): 488 _span(span), 489 _bit_map(bit_map) { 490 assert(!span.is_empty(), "Empty span could spell trouble"); 491 } 492 493 bool do_object_b(oop obj); 494 }; 495 496 497 // Implements AbstractRefProcTaskExecutor for CMS. 498 class CMSRefProcTaskExecutor: public AbstractRefProcTaskExecutor { 499 public: 500 501 CMSRefProcTaskExecutor(CMSCollector& collector) 502 : _collector(collector) 503 { } 504 505 // Executes a task using worker threads. 506 virtual void execute(ProcessTask& task); 507 virtual void execute(EnqueueTask& task); 508 private: 509 CMSCollector& _collector; 510 }; 511 512 513 class CMSCollector: public CHeapObj<mtGC> { 514 friend class VMStructs; 515 friend class ConcurrentMarkSweepThread; 516 friend class ConcurrentMarkSweepGeneration; 517 friend class CompactibleFreeListSpace; 518 friend class CMSParMarkTask; 519 friend class CMSParInitialMarkTask; 520 friend class CMSParRemarkTask; 521 friend class CMSConcMarkingTask; 522 friend class CMSRefProcTaskProxy; 523 friend class CMSRefProcTaskExecutor; 524 friend class ScanMarkedObjectsAgainCarefullyClosure; // for sampling eden 525 friend class SurvivorSpacePrecleanClosure; // --- ditto ------- 526 friend class PushOrMarkClosure; // to access _restart_addr 527 friend class Par_PushOrMarkClosure; // to access _restart_addr 528 friend class MarkFromRootsClosure; // -- ditto -- 529 // ... and for clearing cards 530 friend class Par_MarkFromRootsClosure; // to access _restart_addr 531 // ... and for clearing cards 532 friend class Par_ConcMarkingClosure; // to access _restart_addr etc. 533 friend class MarkFromRootsVerifyClosure; // to access _restart_addr 534 friend class PushAndMarkVerifyClosure; // -- ditto -- 535 friend class MarkRefsIntoAndScanClosure; // to access _overflow_list 536 friend class PushAndMarkClosure; // -- ditto -- 537 friend class Par_PushAndMarkClosure; // -- ditto -- 538 friend class CMSKeepAliveClosure; // -- ditto -- 539 friend class CMSDrainMarkingStackClosure; // -- ditto -- 540 friend class CMSInnerParMarkAndPushClosure; // -- ditto -- 541 NOT_PRODUCT(friend class ScanMarkedObjectsAgainClosure;) // assertion on _overflow_list 542 friend class ReleaseForegroundGC; // to access _foregroundGCShouldWait 543 friend class VM_CMS_Operation; 544 friend class VM_CMS_Initial_Mark; 545 friend class VM_CMS_Final_Remark; 546 friend class TraceCMSMemoryManagerStats; 547 548 private: 549 jlong _time_of_last_gc; 550 void update_time_of_last_gc(jlong now) { 551 _time_of_last_gc = now; 552 } 553 554 OopTaskQueueSet* _task_queues; 555 556 // Overflow list of grey objects, threaded through mark-word 557 // Manipulated with CAS in the parallel/multi-threaded case. 
  oop _overflow_list;
  // The following array-pair keeps track of mark words
  // displaced for accommodating overflow list above.
  // This code will likely be revisited under RFE#4922830.
  Stack<oop, mtGC>     _preserved_oop_stack;
  Stack<markOop, mtGC> _preserved_mark_stack;

  int* _hash_seed;

  // In support of multi-threaded concurrent phases
  YieldingFlexibleWorkGang* _conc_workers;

  // Performance Counters
  CollectorCounters* _gc_counters;

  // Initialization Errors
  bool _completed_initialization;

  // In support of ExplicitGCInvokesConcurrent
  static bool _full_gc_requested;
  static GCCause::Cause _full_gc_cause;
  unsigned int _collection_count_start;

  // Should we unload classes this concurrent cycle?
  bool _should_unload_classes;
  unsigned int _concurrent_cycles_since_last_unload;
  unsigned int concurrent_cycles_since_last_unload() const {
    return _concurrent_cycles_since_last_unload;
  }
  // Did we (allow) unload classes in the previous concurrent cycle?
  bool unloaded_classes_last_cycle() const {
    return concurrent_cycles_since_last_unload() == 0;
  }
  // Root scanning options for perm gen
  int _roots_scanning_options;
  int roots_scanning_options() const      { return _roots_scanning_options; }
  void add_root_scanning_option(int o)    { _roots_scanning_options |= o; }
  void remove_root_scanning_option(int o) { _roots_scanning_options &= ~o; }

  // Verification support
  CMSBitMap _verification_mark_bm;
  void verify_after_remark_work_1();
  void verify_after_remark_work_2();

  // true if any verification flag is on.
  bool _verifying;
  bool verifying() const { return _verifying; }
  void set_verifying(bool v) { _verifying = v; }

  // Collector policy
  ConcurrentMarkSweepPolicy* _collector_policy;
  ConcurrentMarkSweepPolicy* collector_policy() { return _collector_policy; }

  void set_did_compact(bool v);

  // XXX Move these to CMSStats ??? FIX ME !!!
  elapsedTimer _inter_sweep_timer;   // time between sweeps
  elapsedTimer _intra_sweep_timer;   // time _in_ sweeps
  // padded decaying average estimates of the above
  AdaptivePaddedAverage _inter_sweep_estimate;
  AdaptivePaddedAverage _intra_sweep_estimate;

  CMSTracer* _gc_tracer_cm;
  ConcurrentGCTimer* _gc_timer_cm;

  bool _cms_start_registered;

  GCHeapSummary _last_heap_summary;
  MetaspaceSummary _last_metaspace_summary;

  void register_foreground_gc_start(GCCause::Cause cause);
  void register_gc_start(GCCause::Cause cause);
  void register_gc_end();
  void save_heap_summary();
  void report_heap_summary(GCWhen::Type when);

 protected:
  ConcurrentMarkSweepGeneration* _cmsGen;  // old gen (CMS)
  MemRegion                      _span;    // span covering above two
  CardTableRS*                   _ct;      // card table

  // CMS marking support structures
  CMSBitMap     _markBitMap;
  CMSBitMap     _modUnionTable;
  CMSMarkStack  _markStack;

  HeapWord*     _restart_addr;  // in support of marking stack overflow
  void          lower_restart_addr(HeapWord* low);

  // Counters in support of marking stack / work queue overflow handling:
  // a non-zero value indicates certain types of overflow events during
  // the current CMS cycle and could lead to stack resizing efforts at
  // an opportune future time.
  size_t        _ser_pmc_preclean_ovflw;
  size_t        _ser_pmc_remark_ovflw;
  size_t        _par_pmc_remark_ovflw;
  size_t        _ser_kac_preclean_ovflw;
  size_t        _ser_kac_ovflw;
  size_t        _par_kac_ovflw;
  NOT_PRODUCT(ssize_t _num_par_pushes;)

  // ("Weak") Reference processing support
  ReferenceProcessor* _ref_processor;
  CMSIsAliveClosure   _is_alive_closure;
  // keep this textually after _markBitMap and _span; c'tor dependency

  ConcurrentMarkSweepThread* _cmsThread;  // the thread doing the work
  ModUnionClosure    _modUnionClosure;
  ModUnionClosurePar _modUnionClosurePar;

  // CMS abstract state machine
  // initial_state: Idling
  // next_state(Idling)            = {Marking}
  // next_state(Marking)           = {Precleaning, Sweeping}
  // next_state(Precleaning)       = {AbortablePreclean, FinalMarking}
  // next_state(AbortablePreclean) = {FinalMarking}
  // next_state(FinalMarking)      = {Sweeping}
  // next_state(Sweeping)          = {Resizing}
  // next_state(Resizing)          = {Resetting}
  // next_state(Resetting)         = {Idling}
  // The numeric values below are chosen so that:
  // . _collectorState <= Idling == post-sweep && pre-mark
  // . _collectorState in (Idling, Sweeping) == {initial,final}marking ||
  //                                            precleaning || abortablePreclean
 public:
  enum CollectorState {
    Resizing          = 0,
    Resetting         = 1,
    Idling            = 2,
    InitialMarking    = 3,
    Marking           = 4,
    Precleaning       = 5,
    AbortablePreclean = 6,
    FinalMarking      = 7,
    Sweeping          = 8
  };
 protected:
  static CollectorState _collectorState;

  // State related to prologue/epilogue invocation for my generations
  bool _between_prologue_and_epilogue;

  // Signalling/State related to coordination between fore- and background GC
  // Note: When the baton has been passed from background GC to foreground GC,
  // _foregroundGCIsActive is true and _foregroundGCShouldWait is false.
  static bool _foregroundGCIsActive;    // true iff foreground collector is active or
                                        // wants to go active
  static bool _foregroundGCShouldWait;  // true iff background GC is active and has not
                                        // yet passed the baton to the foreground GC

  // Support for CMSScheduleRemark (abortable preclean)
  bool _abort_preclean;
  bool _start_sampling;

  int    _numYields;
  size_t _numDirtyCards;
  size_t _sweep_count;
  // number of full gc's since the last concurrent gc.
  uint   _full_gcs_since_conc_gc;

  // occupancy used for bootstrapping stats
  double _bootstrap_occupancy;

  // timer
  elapsedTimer _timer;

  // Timing, allocation and promotion statistics, used for scheduling.
  CMSStats _stats;

  // Allocation limits installed in the young gen, used only in
  // CMSIncrementalMode.  When an allocation in the young gen would cross one of
  // these limits, the cms generation is notified and the cms thread is started
  // or stopped, respectively.
  HeapWord* _icms_start_limit;
  HeapWord* _icms_stop_limit;

  enum CMS_op_type {
    CMS_op_checkpointRootsInitial,
    CMS_op_checkpointRootsFinal
  };

  void do_CMS_operation(CMS_op_type op, GCCause::Cause gc_cause);
  bool stop_world_and_do(CMS_op_type op);

  OopTaskQueueSet* task_queues()    { return _task_queues; }
  int*             hash_seed(int i) { return &_hash_seed[i]; }
  YieldingFlexibleWorkGang* conc_workers() { return _conc_workers; }

  // Support for parallelizing Eden rescan in CMS remark phase
  void sample_eden(); // ... sample Eden space top
 private:
  // Support for parallelizing young gen rescan in CMS remark phase
  Generation* _young_gen;          // the younger gen
  HeapWord**  _top_addr;           // ... Top of Eden
  HeapWord**  _end_addr;           // ... End of Eden
  HeapWord**  _eden_chunk_array;   // ... Eden partitioning array
  size_t      _eden_chunk_index;   // ... top (exclusive) of array
  size_t      _eden_chunk_capacity; // ... max entries in array

  // Support for parallelizing survivor space rescan
  HeapWord**  _survivor_chunk_array;
  size_t      _survivor_chunk_index;
  size_t      _survivor_chunk_capacity;
  size_t*     _cursor;
  ChunkArray* _survivor_plab_array;

  // Support for marking stack overflow handling
  bool take_from_overflow_list(size_t num, CMSMarkStack* to_stack);
  bool par_take_from_overflow_list(size_t num,
                                   OopTaskQueue* to_work_q,
                                   int no_of_gc_threads);
  void push_on_overflow_list(oop p);
  void par_push_on_overflow_list(oop p);
  // the following is, obviously, not, in general, "MT-stable"
  bool overflow_list_is_empty() const;

  void preserve_mark_if_necessary(oop p);
  void par_preserve_mark_if_necessary(oop p);
  void preserve_mark_work(oop p, markOop m);
  void restore_preserved_marks_if_any();
  NOT_PRODUCT(bool no_preserved_marks() const;)
  // in support of testing overflow code
  NOT_PRODUCT(int _overflow_counter;)
  NOT_PRODUCT(bool simulate_overflow();)      // sequential
  NOT_PRODUCT(bool par_simulate_overflow();)  // MT version

  // CMS work methods
  void checkpointRootsInitialWork(bool asynch);  // initial checkpoint work

  // a return value of false indicates failure due to stack overflow
  bool markFromRootsWork(bool asynch);  // concurrent marking work

 public:   // FIX ME!!! only for testing
  bool do_marking_st(bool asynch);      // single-threaded marking
  bool do_marking_mt(bool asynch);      // multi-threaded marking

 private:

  // concurrent precleaning work
  size_t preclean_mod_union_table(ConcurrentMarkSweepGeneration* gen,
                                  ScanMarkedObjectsAgainCarefullyClosure* cl);
  size_t preclean_card_table(ConcurrentMarkSweepGeneration* gen,
                             ScanMarkedObjectsAgainCarefullyClosure* cl);
  // Does precleaning work, returning a quantity indicative of
  // the amount of "useful work" done.
  size_t preclean_work(bool clean_refs, bool clean_survivors);
  void preclean_klasses(MarkRefsIntoAndScanClosure* cl, Mutex* freelistLock);
  void abortable_preclean(); // Preclean while looking for possible abort
  void initialize_sequential_subtasks_for_young_gen_rescan(int i);
  // Helper function for above; merge-sorts the per-thread plab samples
  void merge_survivor_plab_arrays(ContiguousSpace* surv, int no_of_gc_threads);
  // Resets (i.e. clears) the per-thread plab sample vectors
  void reset_survivor_plab_arrays();

  // final (second) checkpoint work
  void checkpointRootsFinalWork(bool asynch, bool clear_all_soft_refs,
                                bool init_mark_was_synchronous);
  // work routine for parallel version of remark
  void do_remark_parallel();
  // work routine for non-parallel version of remark
  void do_remark_non_parallel();
  // reference processing work routine (during second checkpoint)
  void refProcessingWork(bool asynch, bool clear_all_soft_refs);

  // concurrent sweeping work
  void sweepWork(ConcurrentMarkSweepGeneration* gen, bool asynch);

  // (concurrent) resetting of support data structures
  void reset(bool asynch);

  // Clear _expansion_cause fields of constituent generations
  void clear_expansion_cause();

  // An auxiliary method used to record the ends of
  // used regions of each generation to limit the extent of sweep
  void save_sweep_limits();

  // A work method used by foreground collection to determine
  // what type of collection (compacting or not, continuing or fresh)
  // it should do.
  void decide_foreground_collection_type(bool clear_all_soft_refs,
                                         bool* should_compact, bool* should_start_over);

  // A work method used by the foreground collector to do
  // a mark-sweep-compact.
  void do_compaction_work(bool clear_all_soft_refs);

  // A work method used by the foreground collector to do
  // a mark-sweep, after taking over from a possibly on-going
  // concurrent mark-sweep collection.
  void do_mark_sweep_work(bool clear_all_soft_refs,
                          CollectorState first_state, bool should_start_over);

  // Work methods for reporting concurrent mode interruption or failure
  bool is_external_interruption();
  void report_concurrent_mode_interruption();

  // If the background GC is active, acquire control from the background
  // GC and do the collection.
  void acquire_control_and_collect(bool full, bool clear_all_soft_refs);

  // For synchronizing passing of control from background to foreground
  // GC.  waitForForegroundGC() is called by the background
  // collector.  If it had to wait for a foreground collection,
  // it returns true and the background collection should assume
  // that the collection was finished by the foreground
  // collector.
  bool waitForForegroundGC();

  // Incremental mode triggering: recompute the icms duty cycle and set the
  // allocation limits in the young gen.
  void icms_update_allocation_limits();

  size_t block_size_using_printezis_bits(HeapWord* addr) const;
  size_t block_size_if_printezis_bits(HeapWord* addr) const;
  HeapWord* next_card_start_after_block(HeapWord* addr) const;

  void setup_cms_unloading_and_verification_state();
 public:
  CMSCollector(ConcurrentMarkSweepGeneration* cmsGen,
               CardTableRS*                   ct,
               ConcurrentMarkSweepPolicy*     cp);
  ConcurrentMarkSweepThread* cmsThread() { return _cmsThread; }

  ReferenceProcessor* ref_processor() { return _ref_processor; }
  void ref_processor_init();

  Mutex* bitMapLock() const { return _markBitMap.lock(); }
  static CollectorState abstract_state() { return _collectorState; }

  bool should_abort_preclean() const; // Whether preclean should be aborted.
890 size_t get_eden_used() const; 891 size_t get_eden_capacity() const; 892 893 ConcurrentMarkSweepGeneration* cmsGen() { return _cmsGen; } 894 895 // locking checks 896 NOT_PRODUCT(static bool have_cms_token();) 897 898 // XXXPERM bool should_collect(bool full, size_t size, bool tlab); 899 bool shouldConcurrentCollect(); 900 901 void collect(bool full, 902 bool clear_all_soft_refs, 903 size_t size, 904 bool tlab); 905 void collect_in_background(bool clear_all_soft_refs, GCCause::Cause cause); 906 void collect_in_foreground(bool clear_all_soft_refs, GCCause::Cause cause); 907 908 // In support of ExplicitGCInvokesConcurrent 909 static void request_full_gc(unsigned int full_gc_count, GCCause::Cause cause); 910 // Should we unload classes in a particular concurrent cycle? 911 bool should_unload_classes() const { 912 return _should_unload_classes; 913 } 914 void update_should_unload_classes(); 915 916 void direct_allocated(HeapWord* start, size_t size); 917 918 // Object is dead if not marked and current phase is sweeping. 919 bool is_dead_obj(oop obj) const; 920 921 // After a promotion (of "start"), do any necessary marking. 922 // If "par", then it's being done by a parallel GC thread. 923 // The last two args indicate if we need precise marking 924 // and if so the size of the object so it can be dirtied 925 // in its entirety. 926 void promoted(bool par, HeapWord* start, 927 bool is_obj_array, size_t obj_size); 928 929 HeapWord* allocation_limit_reached(Space* space, HeapWord* top, 930 size_t word_size); 931 932 void getFreelistLocks() const; 933 void releaseFreelistLocks() const; 934 bool haveFreelistLocks() const; 935 936 // Adjust size of underlying generation 937 void compute_new_size(); 938 939 // GC prologue and epilogue 940 void gc_prologue(bool full); 941 void gc_epilogue(bool full); 942 943 jlong time_of_last_gc(jlong now) { 944 if (_collectorState <= Idling) { 945 // gc not in progress 946 return _time_of_last_gc; 947 } else { 948 // collection in progress 949 return now; 950 } 951 } 952 953 // Support for parallel remark of survivor space 954 void* get_data_recorder(int thr_num); 955 956 CMSBitMap* markBitMap() { return &_markBitMap; } 957 void directAllocated(HeapWord* start, size_t size); 958 959 // main CMS steps and related support 960 void checkpointRootsInitial(bool asynch); 961 bool markFromRoots(bool asynch); // a return value of false indicates failure 962 // due to stack overflow 963 void preclean(); 964 void checkpointRootsFinal(bool asynch, bool clear_all_soft_refs, 965 bool init_mark_was_synchronous); 966 void sweep(bool asynch); 967 968 // Check that the currently executing thread is the expected 969 // one (foreground collector or background collector). 
970 static void check_correct_thread_executing() PRODUCT_RETURN; 971 // XXXPERM void print_statistics() PRODUCT_RETURN; 972 973 bool is_cms_reachable(HeapWord* addr); 974 975 // Performance Counter Support 976 CollectorCounters* counters() { return _gc_counters; } 977 978 // timer stuff 979 void startTimer() { assert(!_timer.is_active(), "Error"); _timer.start(); } 980 void stopTimer() { assert( _timer.is_active(), "Error"); _timer.stop(); } 981 void resetTimer() { assert(!_timer.is_active(), "Error"); _timer.reset(); } 982 double timerValue() { assert(!_timer.is_active(), "Error"); return _timer.seconds(); } 983 984 int yields() { return _numYields; } 985 void resetYields() { _numYields = 0; } 986 void incrementYields() { _numYields++; } 987 void resetNumDirtyCards() { _numDirtyCards = 0; } 988 void incrementNumDirtyCards(size_t num) { _numDirtyCards += num; } 989 size_t numDirtyCards() { return _numDirtyCards; } 990 991 static bool foregroundGCShouldWait() { return _foregroundGCShouldWait; } 992 static void set_foregroundGCShouldWait(bool v) { _foregroundGCShouldWait = v; } 993 static bool foregroundGCIsActive() { return _foregroundGCIsActive; } 994 static void set_foregroundGCIsActive(bool v) { _foregroundGCIsActive = v; } 995 size_t sweep_count() const { return _sweep_count; } 996 void increment_sweep_count() { _sweep_count++; } 997 998 // Timers/stats for gc scheduling and incremental mode pacing. 999 CMSStats& stats() { return _stats; } 1000 1001 // Convenience methods that check whether CMSIncrementalMode is enabled and 1002 // forward to the corresponding methods in ConcurrentMarkSweepThread. 1003 static void start_icms(); 1004 static void stop_icms(); // Called at the end of the cms cycle. 1005 static void disable_icms(); // Called before a foreground collection. 1006 static void enable_icms(); // Called after a foreground collection. 1007 void icms_wait(); // Called at yield points. 1008 1009 // Adaptive size policy 1010 CMSAdaptiveSizePolicy* size_policy(); 1011 CMSGCAdaptivePolicyCounters* gc_adaptive_policy_counters(); 1012 1013 static void print_on_error(outputStream* st); 1014 1015 // debugging 1016 void verify(); 1017 bool verify_after_remark(bool silent = VerifySilently); 1018 void verify_ok_to_terminate() const PRODUCT_RETURN; 1019 void verify_work_stacks_empty() const PRODUCT_RETURN; 1020 void verify_overflow_empty() const PRODUCT_RETURN; 1021 1022 // convenience methods in support of debugging 1023 static const size_t skip_header_HeapWords() PRODUCT_RETURN0; 1024 HeapWord* block_start(const void* p) const PRODUCT_RETURN0; 1025 1026 // accessors 1027 CMSMarkStack* verification_mark_stack() { return &_markStack; } 1028 CMSBitMap* verification_mark_bm() { return &_verification_mark_bm; } 1029 1030 // Initialization errors 1031 bool completed_initialization() { return _completed_initialization; } 1032 }; 1033 1034 class CMSExpansionCause : public AllStatic { 1035 public: 1036 enum Cause { 1037 _no_expansion, 1038 _satisfy_free_ratio, 1039 _satisfy_promotion, 1040 _satisfy_allocation, 1041 _allocate_par_lab, 1042 _allocate_par_spooling_space, 1043 _adaptive_size_policy 1044 }; 1045 // Return a string describing the cause of the expansion. 
  static const char* to_string(CMSExpansionCause::Cause cause);
};

class ConcurrentMarkSweepGeneration: public CardGeneration {
  friend class VMStructs;
  friend class ConcurrentMarkSweepThread;
  friend class ConcurrentMarkSweep;
  friend class CMSCollector;
 protected:
  static CMSCollector*      _collector;  // the collector that collects us
  CompactibleFreeListSpace* _cmsSpace;   // underlying space (only one for now)

  // Performance Counters
  GenerationCounters* _gen_counters;
  GSpaceCounters*     _space_counters;

  // Words directly allocated, used by CMSStats.
  size_t _direct_allocated_words;

  // Non-product stat counters
  NOT_PRODUCT(
    size_t _numObjectsPromoted;
    size_t _numWordsPromoted;
    size_t _numObjectsAllocated;
    size_t _numWordsAllocated;
  )

  // Used for sizing decisions
  bool _incremental_collection_failed;
  bool incremental_collection_failed() {
    return _incremental_collection_failed;
  }
  void set_incremental_collection_failed() {
    _incremental_collection_failed = true;
  }
  void clear_incremental_collection_failed() {
    _incremental_collection_failed = false;
  }

  // accessors
  void set_expansion_cause(CMSExpansionCause::Cause v) { _expansion_cause = v; }
  CMSExpansionCause::Cause expansion_cause() const { return _expansion_cause; }

 private:
  // For parallel young-gen GC support.
  CMSParGCThreadState** _par_gc_thread_states;

  // Reason generation was expanded
  CMSExpansionCause::Cause _expansion_cause;

  // In support of MinChunkSize being larger than min object size
  const double _dilatation_factor;

  enum CollectionTypes {
    Concurrent_collection_type     = 0,
    MS_foreground_collection_type  = 1,
    MSC_foreground_collection_type = 2,
    Unknown_collection_type        = 3
  };

  CollectionTypes _debug_collection_type;

  // True if a compacting collection was done.
  bool _did_compact;
  bool did_compact() { return _did_compact; }

  // Fraction of current occupancy at which to start a CMS collection which
  // will collect this generation (at least).
  double _initiating_occupancy;

 protected:
  // Shrink generation by specified size (returns false if unable to shrink)
  void shrink_free_list_by(size_t bytes);

  // Update statistics for GC
  virtual void update_gc_stats(int level, bool full);

  // Maximum available space in the generation (including uncommitted)
  // space.
  size_t max_available() const;

  // getter and initializer for _initiating_occupancy field.
  double initiating_occupancy() const { return _initiating_occupancy; }
  void   init_initiating_occupancy(intx io, uintx tr);

 public:
  ConcurrentMarkSweepGeneration(ReservedSpace rs, size_t initial_byte_size,
                                int level, CardTableRS* ct,
                                bool use_adaptive_freelists,
                                FreeBlockDictionary<FreeChunk>::DictionaryChoice);

  // Accessors
  CMSCollector* collector() const { return _collector; }
  static void set_collector(CMSCollector* collector) {
    assert(_collector == NULL, "already set");
    _collector = collector;
  }
  CompactibleFreeListSpace* cmsSpace() const { return _cmsSpace; }

  Mutex* freelistLock() const;

  virtual Generation::Name kind() { return Generation::ConcurrentMarkSweep; }

  // Adaptive size policy
  CMSAdaptiveSizePolicy* size_policy();

  void set_did_compact(bool v) { _did_compact = v; }

  bool refs_discovery_is_atomic() const { return false; }
  bool refs_discovery_is_mt()     const {
    // Note: CMS does MT-discovery during the parallel-remark
    // phases. Use ReferenceProcessorMTMutator to make refs
    // discovery MT-safe during such phases or other parallel
    // discovery phases in the future. This may all go away
    // if/when we decide that refs discovery is sufficiently
    // rare that the cost of the CAS's involved is in the
    // noise. That's a measurement that should be done, and
    // the code simplified if that turns out to be the case.
    return ConcGCThreads > 1;
  }

  // Override
  virtual void ref_processor_init();

  // Grow generation by specified size (returns false if unable to grow)
  bool grow_by(size_t bytes);
  // Grow generation to reserved size.
  bool grow_to_reserved();

  void clear_expansion_cause() { _expansion_cause = CMSExpansionCause::_no_expansion; }

  // Space enquiries
  size_t capacity() const;
  size_t used() const;
  size_t free() const;
  double occupancy() const { return ((double)used())/((double)capacity()); }
  size_t contiguous_available() const;
  size_t unsafe_max_alloc_nogc() const;

  // over-rides
  MemRegion used_region() const;
  MemRegion used_region_at_save_marks() const;

  // Does a "full" (forced) collection invoked on this generation collect
  // all younger generations as well? Note that the second conjunct is a
  // hack to allow the collection of the younger gen first if the flag is
  // set. This is better than using the policy's should_collect_gen0_first()
  // since that causes us to do an extra unnecessary pair of restart-&-stop-world.
  virtual bool full_collects_younger_generations() const {
    return UseCMSCompactAtFullCollection && !CollectGen0First;
  }

  void space_iterate(SpaceClosure* blk, bool usedOnly = false);

  // Support for compaction
  CompactibleSpace* first_compaction_space() const;
  // Adjust quantities in the generation affected by
  // the compaction.
  void reset_after_compaction();

  // Allocation support
  HeapWord* allocate(size_t size, bool tlab);
  HeapWord* have_lock_and_allocate(size_t size, bool tlab);
  oop       promote(oop obj, size_t obj_size);
  HeapWord* par_allocate(size_t size, bool tlab) {
    return allocate(size, tlab);
  }

  // Incremental mode triggering.
1215 HeapWord* allocation_limit_reached(Space* space, HeapWord* top, 1216 size_t word_size); 1217 1218 // Used by CMSStats to track direct allocation. The value is sampled and 1219 // reset after each young gen collection. 1220 size_t direct_allocated_words() const { return _direct_allocated_words; } 1221 void reset_direct_allocated_words() { _direct_allocated_words = 0; } 1222 1223 // Overrides for parallel promotion. 1224 virtual oop par_promote(int thread_num, 1225 oop obj, markOop m, size_t word_sz); 1226 // This one should not be called for CMS. 1227 virtual void par_promote_alloc_undo(int thread_num, 1228 HeapWord* obj, size_t word_sz); 1229 virtual void par_promote_alloc_done(int thread_num); 1230 virtual void par_oop_since_save_marks_iterate_done(int thread_num); 1231 1232 virtual bool promotion_attempt_is_safe(size_t promotion_in_bytes) const; 1233 1234 // Inform this (non-young) generation that a promotion failure was 1235 // encountered during a collection of a younger generation that 1236 // promotes into this generation. 1237 virtual void promotion_failure_occurred(); 1238 1239 bool should_collect(bool full, size_t size, bool tlab); 1240 virtual bool should_concurrent_collect() const; 1241 virtual bool is_too_full() const; 1242 void collect(bool full, 1243 bool clear_all_soft_refs, 1244 size_t size, 1245 bool tlab); 1246 1247 HeapWord* expand_and_allocate(size_t word_size, 1248 bool tlab, 1249 bool parallel = false); 1250 1251 // GC prologue and epilogue 1252 void gc_prologue(bool full); 1253 void gc_prologue_work(bool full, bool registerClosure, 1254 ModUnionClosure* modUnionClosure); 1255 void gc_epilogue(bool full); 1256 void gc_epilogue_work(bool full); 1257 1258 // Time since last GC of this generation 1259 jlong time_of_last_gc(jlong now) { 1260 return collector()->time_of_last_gc(now); 1261 } 1262 void update_time_of_last_gc(jlong now) { 1263 collector()-> update_time_of_last_gc(now); 1264 } 1265 1266 // Allocation failure 1267 void expand(size_t bytes, size_t expand_bytes, 1268 CMSExpansionCause::Cause cause); 1269 virtual bool expand(size_t bytes, size_t expand_bytes); 1270 void shrink(size_t bytes); 1271 void shrink_by(size_t bytes); 1272 HeapWord* expand_and_par_lab_allocate(CMSParGCThreadState* ps, size_t word_sz); 1273 bool expand_and_ensure_spooling_space(PromotionInfo* promo); 1274 1275 // Iteration support and related enquiries 1276 void save_marks(); 1277 bool no_allocs_since_save_marks(); 1278 void object_iterate_since_last_GC(ObjectClosure* cl); 1279 void younger_refs_iterate(OopsInGenClosure* cl); 1280 1281 // Iteration support specific to CMS generations 1282 void save_sweep_limit(); 1283 1284 // More iteration support 1285 virtual void oop_iterate(MemRegion mr, ExtendedOopClosure* cl); 1286 virtual void oop_iterate(ExtendedOopClosure* cl); 1287 virtual void safe_object_iterate(ObjectClosure* cl); 1288 virtual void object_iterate(ObjectClosure* cl); 1289 1290 // Need to declare the full complement of closures, whether we'll 1291 // override them or not, or get message from the compiler: 1292 // oop_since_save_marks_iterate_nv hides virtual function... 1293 #define CMS_SINCE_SAVE_MARKS_DECL(OopClosureType, nv_suffix) \ 1294 void oop_since_save_marks_iterate##nv_suffix(OopClosureType* cl); 1295 ALL_SINCE_SAVE_MARKS_CLOSURES(CMS_SINCE_SAVE_MARKS_DECL) 1296 1297 // Smart allocation XXX -- move to CFLSpace? 1298 void setNearLargestChunk(); 1299 bool isNearLargestChunk(HeapWord* addr); 1300 1301 // Get the chunk at the end of the space. 
  // Delegates to the space.
  FreeChunk* find_chunk_at_end();

  void post_compact();

  // Debugging
  void prepare_for_verify();
  void verify();
  void print_statistics() PRODUCT_RETURN;

  // Performance Counters support
  virtual void update_counters();
  virtual void update_counters(size_t used);
  void initialize_performance_counters();
  CollectorCounters* counters() { return collector()->counters(); }

  // Support for parallel remark of survivor space
  void* get_data_recorder(int thr_num) {
    // Delegate to collector
    return collector()->get_data_recorder(thr_num);
  }

  // Printing
  const char* name() const;
  virtual const char* short_name() const { return "CMS"; }
  void print() const;
  void printOccupancy(const char* s);
  bool must_be_youngest() const { return false; }
  bool must_be_oldest()   const { return true; }

  // Resize the generation after a compacting GC.  The
  // generation can be treated as a contiguous space
  // after the compaction.
  virtual void compute_new_size();
  // Resize the generation after a non-compacting
  // collection.
  void compute_new_size_free_list();

  CollectionTypes debug_collection_type() { return _debug_collection_type; }
  void rotate_debug_collection_type();
};

class ASConcurrentMarkSweepGeneration : public ConcurrentMarkSweepGeneration {

  // Return the size policy from the heap's collector
  // policy cast to CMSAdaptiveSizePolicy*.
  CMSAdaptiveSizePolicy* cms_size_policy() const;

  // Resize the generation based on the adaptive size
  // policy.
  void resize(size_t cur_promo, size_t desired_promo);

  // Return the GC counters from the collector policy
  CMSGCAdaptivePolicyCounters* gc_adaptive_policy_counters();

  virtual void shrink_by(size_t bytes);

 public:
  ASConcurrentMarkSweepGeneration(ReservedSpace rs, size_t initial_byte_size,
                                  int level, CardTableRS* ct,
                                  bool use_adaptive_freelists,
                                  FreeBlockDictionary<FreeChunk>::DictionaryChoice
                                    dictionaryChoice) :
    ConcurrentMarkSweepGeneration(rs, initial_byte_size, level, ct,
                                  use_adaptive_freelists, dictionaryChoice) {}

  virtual const char* short_name() const { return "ASCMS"; }
  virtual Generation::Name kind() { return Generation::ASConcurrentMarkSweep; }

  virtual void update_counters();
  virtual void update_counters(size_t used);
};

//
// Closures of various sorts used by CMS to accomplish its work
//

// This closure is used to check that a certain set of oops is empty.
class FalseClosure: public OopClosure {
 public:
  void do_oop(oop* p)       { guarantee(false, "Should be an empty set"); }
  void do_oop(narrowOop* p) { guarantee(false, "Should be an empty set"); }
};

// This closure is used to do concurrent marking from the roots
// following the first checkpoint.
class MarkFromRootsClosure: public BitMapClosure {
  CMSCollector*  _collector;
  MemRegion      _span;
  CMSBitMap*     _bitMap;
  CMSBitMap*     _mut;
  CMSMarkStack*  _markStack;
  bool           _yield;
  int            _skipBits;
  HeapWord*      _finger;
  HeapWord*      _threshold;
  DEBUG_ONLY(bool _verifying;)

 public:
  MarkFromRootsClosure(CMSCollector* collector, MemRegion span,
                       CMSBitMap* bitMap,
                       CMSMarkStack* markStack,
                       bool should_yield, bool verifying = false);
  bool do_bit(size_t offset);
  void reset(HeapWord* addr);
  inline void do_yield_check();

 private:
  void scanOopsInOop(HeapWord* ptr);
  void do_yield_work();
};

// This closure is used to do concurrent multi-threaded
// marking from the roots following the first checkpoint.
// XXX This should really be a subclass of the serial version
// above, but I have not had the time to refactor things cleanly.
// That will be done for Dolphin.
class Par_MarkFromRootsClosure: public BitMapClosure {
  CMSCollector*  _collector;
  MemRegion      _whole_span;
  MemRegion      _span;
  CMSBitMap*     _bit_map;
  CMSBitMap*     _mut;
  OopTaskQueue*  _work_queue;
  CMSMarkStack*  _overflow_stack;
  bool           _yield;
  int            _skip_bits;
  HeapWord*      _finger;
  HeapWord*      _threshold;
  CMSConcMarkingTask* _task;
 public:
  Par_MarkFromRootsClosure(CMSConcMarkingTask* task, CMSCollector* collector,
                           MemRegion span,
                           CMSBitMap* bit_map,
                           OopTaskQueue* work_queue,
                           CMSMarkStack* overflow_stack,
                           bool should_yield);
  bool do_bit(size_t offset);
  inline void do_yield_check();

 private:
  void scan_oops_in_oop(HeapWord* ptr);
  void do_yield_work();
  bool get_work_from_overflow_stack();
};

// The following closures are used to do certain kinds of verification of
// CMS marking.
class PushAndMarkVerifyClosure: public CMSOopClosure {
  CMSCollector*  _collector;
  MemRegion      _span;
  CMSBitMap*     _verification_bm;
  CMSBitMap*     _cms_bm;
  CMSMarkStack*  _mark_stack;
 protected:
  void do_oop(oop p);
  template <class T> inline void do_oop_work(T *p) {
    oop obj = oopDesc::load_decode_heap_oop(p);
    do_oop(obj);
  }
 public:
  PushAndMarkVerifyClosure(CMSCollector* cms_collector,
                           MemRegion span,
                           CMSBitMap* verification_bm,
                           CMSBitMap* cms_bm,
                           CMSMarkStack* mark_stack);
  void do_oop(oop* p);
  void do_oop(narrowOop* p);

  // Deal with a stack overflow condition
  void handle_stack_overflow(HeapWord* lost);
};

class MarkFromRootsVerifyClosure: public BitMapClosure {
  CMSCollector*  _collector;
  MemRegion      _span;
  CMSBitMap*     _verification_bm;
  CMSBitMap*     _cms_bm;
  CMSMarkStack*  _mark_stack;
  HeapWord*      _finger;
  PushAndMarkVerifyClosure _pam_verify_closure;
 public:
  MarkFromRootsVerifyClosure(CMSCollector* collector, MemRegion span,
                             CMSBitMap* verification_bm,
                             CMSBitMap* cms_bm,
                             CMSMarkStack* mark_stack);
  bool do_bit(size_t offset);
  void reset(HeapWord* addr);
};


// This closure is used to check that a certain set of bits is
// "empty" (i.e. the bit vector doesn't have any 1-bits).
1495 class FalseBitMapClosure: public BitMapClosure { 1496 public: 1497 bool do_bit(size_t offset) { 1498 guarantee(false, "Should not have a 1 bit"); 1499 return true; 1500 } 1501 }; 1502 1503 // This closure is used during the second checkpointing phase 1504 // to rescan the marked objects on the dirty cards in the mod 1505 // union table and the card table proper. It's invoked via 1506 // MarkFromDirtyCardsClosure below. It uses either 1507 // [Par_]MarkRefsIntoAndScanClosure (Par_ in the parallel case) 1508 // declared in genOopClosures.hpp to accomplish some of its work. 1509 // In the parallel case the bitMap is shared, so access to 1510 // it needs to be suitably synchronized for updates by embedded 1511 // closures that update it; however, this closure itself only 1512 // reads the bit_map and because it is idempotent, is immune to 1513 // reading stale values. 1514 class ScanMarkedObjectsAgainClosure: public UpwardsObjectClosure { 1515 #ifdef ASSERT 1516 CMSCollector* _collector; 1517 MemRegion _span; 1518 union { 1519 CMSMarkStack* _mark_stack; 1520 OopTaskQueue* _work_queue; 1521 }; 1522 #endif // ASSERT 1523 bool _parallel; 1524 CMSBitMap* _bit_map; 1525 union { 1526 MarkRefsIntoAndScanClosure* _scan_closure; 1527 Par_MarkRefsIntoAndScanClosure* _par_scan_closure; 1528 }; 1529 1530 public: 1531 ScanMarkedObjectsAgainClosure(CMSCollector* collector, 1532 MemRegion span, 1533 ReferenceProcessor* rp, 1534 CMSBitMap* bit_map, 1535 CMSMarkStack* mark_stack, 1536 MarkRefsIntoAndScanClosure* cl): 1537 #ifdef ASSERT 1538 _collector(collector), 1539 _span(span), 1540 _mark_stack(mark_stack), 1541 #endif // ASSERT 1542 _parallel(false), 1543 _bit_map(bit_map), 1544 _scan_closure(cl) { } 1545 1546 ScanMarkedObjectsAgainClosure(CMSCollector* collector, 1547 MemRegion span, 1548 ReferenceProcessor* rp, 1549 CMSBitMap* bit_map, 1550 OopTaskQueue* work_queue, 1551 Par_MarkRefsIntoAndScanClosure* cl): 1552 #ifdef ASSERT 1553 _collector(collector), 1554 _span(span), 1555 _work_queue(work_queue), 1556 #endif // ASSERT 1557 _parallel(true), 1558 _bit_map(bit_map), 1559 _par_scan_closure(cl) { } 1560 1561 bool do_object_b(oop obj) { 1562 guarantee(false, "Call do_object_b(oop, MemRegion) form instead"); 1563 return false; 1564 } 1565 bool do_object_bm(oop p, MemRegion mr); 1566 }; 1567 1568 // This closure is used during the second checkpointing phase 1569 // to rescan the marked objects on the dirty cards in the mod 1570 // union table and the card table proper. It invokes 1571 // ScanMarkedObjectsAgainClosure above to accomplish much of its work. 1572 // In the parallel case, the bit map is shared and requires 1573 // synchronized access. 
// This closure is used during the second checkpointing phase
// to rescan the marked objects on the dirty cards in the mod
// union table and the card table proper. It invokes
// ScanMarkedObjectsAgainClosure above to accomplish much of its work.
// In the parallel case, the bit map is shared and requires
// synchronized access.
class MarkFromDirtyCardsClosure: public MemRegionClosure {
  CompactibleFreeListSpace*     _space;
  ScanMarkedObjectsAgainClosure _scan_cl;
  size_t                        _num_dirty_cards;

 public:
  MarkFromDirtyCardsClosure(CMSCollector* collector,
                            MemRegion span,
                            CompactibleFreeListSpace* space,
                            CMSBitMap* bit_map,
                            CMSMarkStack* mark_stack,
                            MarkRefsIntoAndScanClosure* cl):
    _space(space),
    _num_dirty_cards(0),
    _scan_cl(collector, span, collector->ref_processor(), bit_map,
             mark_stack, cl) { }

  MarkFromDirtyCardsClosure(CMSCollector* collector,
                            MemRegion span,
                            CompactibleFreeListSpace* space,
                            CMSBitMap* bit_map,
                            OopTaskQueue* work_queue,
                            Par_MarkRefsIntoAndScanClosure* cl):
    _space(space),
    _num_dirty_cards(0),
    _scan_cl(collector, span, collector->ref_processor(), bit_map,
             work_queue, cl) { }

  void do_MemRegion(MemRegion mr);
  void set_space(CompactibleFreeListSpace* space) { _space = space; }
  size_t num_dirty_cards() { return _num_dirty_cards; }
};

// This closure is used in the non-product build to check
// that there are no MemRegions with a certain property.
class FalseMemRegionClosure: public MemRegionClosure {
  void do_MemRegion(MemRegion mr) {
    guarantee(!mr.is_empty(), "Shouldn't be empty");
    guarantee(false, "Should never be here");
  }
};

// This closure is used during the precleaning phase
// to "carefully" rescan marked objects on dirty cards.
// It uses MarkRefsIntoAndScanClosure declared in genOopClosures.hpp
// to accomplish some of its work.
class ScanMarkedObjectsAgainCarefullyClosure: public ObjectClosureCareful {
  CMSCollector*               _collector;
  MemRegion                   _span;
  bool                        _yield;
  Mutex*                      _freelistLock;
  CMSBitMap*                  _bitMap;
  CMSMarkStack*               _markStack;
  MarkRefsIntoAndScanClosure* _scanningClosure;

 public:
  ScanMarkedObjectsAgainCarefullyClosure(CMSCollector* collector,
                                         MemRegion span,
                                         CMSBitMap* bitMap,
                                         CMSMarkStack* markStack,
                                         MarkRefsIntoAndScanClosure* cl,
                                         bool should_yield):
    _collector(collector),
    _span(span),
    _yield(should_yield),
    _bitMap(bitMap),
    _markStack(markStack),
    _scanningClosure(cl) {
  }

  void do_object(oop p) {
    guarantee(false, "call do_object_careful instead");
  }

  size_t do_object_careful(oop p) {
    guarantee(false, "Unexpected caller");
    return 0;
  }

  size_t do_object_careful_m(oop p, MemRegion mr);

  void setFreelistLock(Mutex* m) {
    _freelistLock = m;
    _scanningClosure->set_freelistLock(m);
  }

 private:
  inline bool do_yield_check();

  void do_yield_work();
};

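// Illustrative sketch (standalone, not HotSpot code): the cooperative
// yielding pattern behind the do_yield_check()/do_yield_work() members of
// the "careful" precleaning closures in this header. Work is chunked, and
// between chunks the closure checks whether it should temporarily give up
// its lock(s) so another thread (for example the foreground collector) can
// make progress. The names yield_requested, toy_freelist_lock and
// toy_scan_with_yields below are hypothetical.
//
//   #include <atomic>
//   #include <mutex>
//   #include <thread>
//
//   std::atomic<bool> yield_requested(false);   // set by the requesting thread
//   std::mutex        toy_freelist_lock;
//
//   void toy_scan_with_yields(int* objs, int n) {
//     std::unique_lock<std::mutex> l(toy_freelist_lock);
//     for (int i = 0; i < n; i++) {
//       (void)objs[i];                  // placeholder for the "careful" scan
//       if (yield_requested.load()) {   // analogous to do_yield_check()
//         l.unlock();                   // analogous to do_yield_work():
//         std::this_thread::yield();    // drop the lock, let the requester run,
//         l.lock();                     // then reacquire and resume scanning
//       }
//     }
//   }
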
class SurvivorSpacePrecleanClosure: public ObjectClosureCareful {
  CMSCollector*       _collector;
  MemRegion           _span;
  bool                _yield;
  CMSBitMap*          _bit_map;
  CMSMarkStack*       _mark_stack;
  PushAndMarkClosure* _scanning_closure;
  unsigned int        _before_count;

 public:
  SurvivorSpacePrecleanClosure(CMSCollector* collector,
                               MemRegion span,
                               CMSBitMap* bit_map,
                               CMSMarkStack* mark_stack,
                               PushAndMarkClosure* cl,
                               unsigned int before_count,
                               bool should_yield):
    _collector(collector),
    _span(span),
    _yield(should_yield),
    _bit_map(bit_map),
    _mark_stack(mark_stack),
    _scanning_closure(cl),
    _before_count(before_count)
  { }

  void do_object(oop p) {
    guarantee(false, "call do_object_careful instead");
  }

  size_t do_object_careful(oop p);

  size_t do_object_careful_m(oop p, MemRegion mr) {
    guarantee(false, "Unexpected caller");
    return 0;
  }

 private:
  inline void do_yield_check();
  void do_yield_work();
};

// This closure is used to accomplish the sweeping work
// after the second checkpoint but before the concurrent reset
// phase.
//
// Terminology
//   left hand chunk (LHC) - block of one or more chunks currently being
//     coalesced. The LHC is available for coalescing with a new chunk.
//   right hand chunk (RHC) - block that is currently being swept that is
//     free or garbage that can be coalesced with the LHC.
//   _inFreeRange is true if there is currently a LHC.
//   _lastFreeRangeCoalesced is true if the LHC consists of more than one chunk.
//   _freeRangeInFreeLists is true if the LHC is in the free lists.
//   _freeFinger is the address of the current LHC.
// (A simplified, standalone sketch of this coalescing loop follows the
// class declaration below.)
class SweepClosure: public BlkClosureCareful {
  CMSCollector*                  _collector;  // collector doing the work
  ConcurrentMarkSweepGeneration* _g;          // Generation being swept
  CompactibleFreeListSpace*      _sp;         // Space being swept
  HeapWord*                      _limit;      // the address at or above which the sweep
                                              // should stop because we do not expect
                                              // newly-garbage blocks eligible for
                                              // sweeping past that address.
  Mutex*                         _freelistLock;  // Free list lock (in space)
  CMSBitMap*                     _bitMap;        // Marking bit map (in generation)
  bool                           _inFreeRange;   // Indicates if we are in the
                                                 // midst of a free run
  bool                           _freeRangeInFreeLists;
                                     // Often, we have just found
                                     // a free chunk and started
                                     // a new free range; we do not
                                     // eagerly remove this chunk from
                                     // the free lists unless there is
                                     // a possibility of coalescing.
                                     // When true, this flag indicates
                                     // that the _freeFinger below
                                     // points to a potentially free chunk
                                     // that may still be in the free lists
  bool                           _lastFreeRangeCoalesced;
                                     // free range contains chunks
                                     // coalesced
  bool                           _yield;
                                     // Whether sweeping should be
                                     // done with yields. For instance
                                     // when done by the foreground
                                     // collector we shouldn't yield.
  HeapWord*                      _freeFinger;    // When _inFreeRange is set, the
                                                 // pointer to the "left hand chunk"
  size_t                         _freeRangeSize;
                                     // When _inFreeRange is set, this
                                     // indicates the accumulated size
                                     // of the "left hand chunk"
  NOT_PRODUCT(
    size_t                       _numObjectsFreed;
    size_t                       _numWordsFreed;
    size_t                       _numObjectsLive;
    size_t                       _numWordsLive;
    size_t                       _numObjectsAlreadyFree;
    size_t                       _numWordsAlreadyFree;
    FreeChunk*                   _last_fc;
  )
 private:
  // Code that is common to a free chunk or garbage when
  // encountered during sweeping.
  void do_post_free_or_garbage_chunk(FreeChunk *fc, size_t chunkSize);
  // Process a free chunk during sweeping.
  void do_already_free_chunk(FreeChunk *fc);
  // Work method called when processing an already free or a
  // freshly garbage chunk to do a lookahead and possibly a
  // preemptive flush if crossing over _limit.
  void lookahead_and_flush(FreeChunk* fc, size_t chunkSize);
  // Process a garbage chunk during sweeping.
  size_t do_garbage_chunk(FreeChunk *fc);
  // Process a live chunk during sweeping.
  size_t do_live_chunk(FreeChunk* fc);

  // Accessors.
  HeapWord* freeFinger() const          { return _freeFinger; }
  void set_freeFinger(HeapWord* v)      { _freeFinger = v; }
  bool inFreeRange()    const           { return _inFreeRange; }
  void set_inFreeRange(bool v)          { _inFreeRange = v; }
  bool lastFreeRangeCoalesced() const   { return _lastFreeRangeCoalesced; }
  void set_lastFreeRangeCoalesced(bool v) { _lastFreeRangeCoalesced = v; }
  bool freeRangeInFreeLists() const     { return _freeRangeInFreeLists; }
  void set_freeRangeInFreeLists(bool v) { _freeRangeInFreeLists = v; }

  // Initialize a free range.
  void initialize_free_range(HeapWord* freeFinger, bool freeRangeInFreeLists);
  // Return this chunk to the free lists.
  void flush_cur_free_chunk(HeapWord* chunk, size_t size);

  // Check if we should yield and do so when necessary.
  inline void do_yield_check(HeapWord* addr);

  // Yield
  void do_yield_work(HeapWord* addr);

  // Debugging/Printing
  void print_free_block_coalesced(FreeChunk* fc) const;

 public:
  SweepClosure(CMSCollector* collector, ConcurrentMarkSweepGeneration* g,
               CMSBitMap* bitMap, bool should_yield);
  ~SweepClosure() PRODUCT_RETURN;

  size_t do_blk_careful(HeapWord* addr);
  void print() const { print_on(tty); }
  void print_on(outputStream* st) const;
};

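// Illustrative sketch (standalone, not HotSpot code) of the coalescing loop
// described in the terminology comment above SweepClosure: free or garbage
// blocks encountered during the sweep are accumulated into the current
// left-hand chunk (LHC); a live block ends the range, which is then handed
// back to the free lists. ToyBlock, toy_flush_range and toy_sweep are
// hypothetical names.
//
//   #include <cstddef>
//   #include <vector>
//
//   struct ToyBlock { bool live; std::size_t size; };
//
//   // Hypothetical stand-in for returning a coalesced range to the free lists.
//   inline void toy_flush_range(std::size_t start, std::size_t size) {
//     (void)start; (void)size;
//   }
//
//   inline void toy_sweep(const std::vector<ToyBlock>& blocks) {
//     bool        in_free_range = false;   // models _inFreeRange
//     std::size_t free_finger   = 0;       // models _freeFinger: start of the LHC
//     std::size_t range_size    = 0;       // models _freeRangeSize
//     std::size_t addr          = 0;       // sweep cursor
//     for (const ToyBlock& b : blocks) {
//       if (!b.live) {                     // free or garbage: grow the LHC
//         if (!in_free_range) {
//           in_free_range = true;
//           free_finger   = addr;
//           range_size    = 0;
//         }
//         range_size += b.size;
//       } else if (in_free_range) {        // a live block ends the free range
//         toy_flush_range(free_finger, range_size);
//         in_free_range = false;
//       }
//       addr += b.size;
//     }
//     if (in_free_range) toy_flush_range(free_finger, range_size);
//   }
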
// Closures related to weak reference processing

// During CMS' weak reference processing, this is a
// work-routine/closure used to complete transitive
// marking of objects as live after a certain point
// in which an initial set has been completely accumulated.
// This closure is currently used both during the final
// remark stop-world phase, as well as during the concurrent
// precleaning of the discovered reference lists.
// (A standalone sketch of this transitive drain loop appears at the
// end of this file.)
class CMSDrainMarkingStackClosure: public VoidClosure {
  CMSCollector*        _collector;
  MemRegion            _span;
  CMSMarkStack*        _mark_stack;
  CMSBitMap*           _bit_map;
  CMSKeepAliveClosure* _keep_alive;
  bool                 _concurrent_precleaning;
 public:
  CMSDrainMarkingStackClosure(CMSCollector* collector, MemRegion span,
                              CMSBitMap* bit_map, CMSMarkStack* mark_stack,
                              CMSKeepAliveClosure* keep_alive,
                              bool cpc):
    _collector(collector),
    _span(span),
    _bit_map(bit_map),
    _mark_stack(mark_stack),
    _keep_alive(keep_alive),
    _concurrent_precleaning(cpc) {
    assert(_concurrent_precleaning == _keep_alive->concurrent_precleaning(),
           "Mismatch");
  }

  void do_void();
};

// A parallel version of CMSDrainMarkingStackClosure above.
class CMSParDrainMarkingStackClosure: public VoidClosure {
  CMSCollector*                 _collector;
  MemRegion                     _span;
  OopTaskQueue*                 _work_queue;
  CMSBitMap*                    _bit_map;
  CMSInnerParMarkAndPushClosure _mark_and_push;

 public:
  CMSParDrainMarkingStackClosure(CMSCollector* collector,
                                 MemRegion span, CMSBitMap* bit_map,
                                 OopTaskQueue* work_queue):
    _collector(collector),
    _span(span),
    _bit_map(bit_map),
    _work_queue(work_queue),
    _mark_and_push(collector, span, bit_map, work_queue) { }

 public:
  void trim_queue(uint max);
  void do_void();
};

// Allow yielding or short-circuiting of reference list
// precleaning work.
class CMSPrecleanRefsYieldClosure: public YieldClosure {
  CMSCollector* _collector;
  void do_yield_work();
 public:
  CMSPrecleanRefsYieldClosure(CMSCollector* collector):
    _collector(collector) {}
  virtual bool should_return();
};


// Convenience class that locks free list locks for a given CMS collector.
class FreelistLocker: public StackObj {
 private:
  CMSCollector* _collector;
 public:
  FreelistLocker(CMSCollector* collector):
    _collector(collector) {
    _collector->getFreelistLocks();
  }

  ~FreelistLocker() {
    _collector->releaseFreelistLocks();
  }
};

// Mark all dead objects in a given space.
class MarkDeadObjectsClosure: public BlkClosure {
  const CMSCollector*             _collector;
  const CompactibleFreeListSpace* _sp;
  CMSBitMap*                      _live_bit_map;
  CMSBitMap*                      _dead_bit_map;
 public:
  MarkDeadObjectsClosure(const CMSCollector* collector,
                         const CompactibleFreeListSpace* sp,
                         CMSBitMap* live_bit_map,
                         CMSBitMap* dead_bit_map) :
    _collector(collector),
    _sp(sp),
    _live_bit_map(live_bit_map),
    _dead_bit_map(dead_bit_map) {}
  size_t do_blk(HeapWord* addr);
};

class TraceCMSMemoryManagerStats : public TraceMemoryManagerStats {

 public:
  TraceCMSMemoryManagerStats(CMSCollector::CollectorState phase, GCCause::Cause cause);
};


#endif // SHARE_VM_GC_IMPLEMENTATION_CONCURRENTMARKSWEEP_CONCURRENTMARKSWEEPGENERATION_HPP
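
// Trailing illustrative note (standalone sketch, not HotSpot code): the
// transitive "drain" performed by the marking-stack drain closures declared
// above. Starting from an initial set of marked objects on a stack, each
// popped object has its references examined; any unmarked referent is marked
// and pushed, and the loop runs until the stack is empty. ToyObj and
// toy_drain are hypothetical names.
//
//   #include <stack>
//   #include <vector>
//
//   struct ToyObj {
//     bool                 marked;
//     std::vector<ToyObj*> refs;
//   };
//
//   inline void toy_drain(std::stack<ToyObj*>& mark_stack) {
//     while (!mark_stack.empty()) {
//       ToyObj* obj = mark_stack.top();
//       mark_stack.pop();
//       for (ToyObj* ref : obj->refs) {
//         if (ref != nullptr && !ref->marked) {
//           ref->marked = true;      // mark now, scan when popped later
//           mark_stack.push(ref);
//         }
//       }
//     }
//   }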