Left column of the side-by-side diff (the tail of CMSCollector initialization, CMSCollector::plab_sample_minimum_size(), and the ConcurrentMarkSweepGeneration counter updaters):

  assert(cmsThread() != NULL, "CMS Thread should have been created");
  assert(cmsThread()->collector() == this,
         "CMS Thread should refer to this gen");
  assert(CGC_lock != NULL, "Where's the CGC_lock?");

  // Support for parallelizing young gen rescan
  GenCollectedHeap* gch = GenCollectedHeap::heap();
  assert(gch->young_gen()->kind() == Generation::ParNew, "CMS can only be used with ParNew");
  _young_gen = (ParNewGeneration*)gch->young_gen();
  if (gch->supports_inline_contig_alloc()) {
    _top_addr = gch->top_addr();
    _end_addr = gch->end_addr();
    assert(_young_gen != NULL, "no _young_gen");
    _eden_chunk_index = 0;
    _eden_chunk_capacity = (_young_gen->max_capacity()+CMSSamplingGrain)/CMSSamplingGrain;
    _eden_chunk_array = NEW_C_HEAP_ARRAY(HeapWord*, _eden_chunk_capacity, mtGC);
  }

  // Support for parallelizing survivor space rescan
  if ((CMSParallelRemarkEnabled && CMSParallelSurvivorRemarkEnabled) || CMSParallelInitialMarkEnabled) {
    const size_t max_plab_samples =
      ((DefNewGeneration*)_young_gen)->max_survivor_size() / plab_sample_minimum_size();

    _survivor_plab_array  = NEW_C_HEAP_ARRAY(ChunkArray, ParallelGCThreads, mtGC);
    _survivor_chunk_array = NEW_C_HEAP_ARRAY(HeapWord*, 2*max_plab_samples, mtGC);
    _cursor               = NEW_C_HEAP_ARRAY(size_t, ParallelGCThreads, mtGC);
    _survivor_chunk_capacity = 2*max_plab_samples;
    for (uint i = 0; i < ParallelGCThreads; i++) {
      HeapWord** vec = NEW_C_HEAP_ARRAY(HeapWord*, max_plab_samples, mtGC);
      ChunkArray* cur = ::new (&_survivor_plab_array[i]) ChunkArray(vec, max_plab_samples);
      assert(cur->end() == 0, "Should be 0");
      assert(cur->array() == vec, "Should be vec");
      assert(cur->capacity() == max_plab_samples, "Error");
    }
  }

  NOT_PRODUCT(_overflow_counter = CMSMarkStackOverflowInterval;)
  _gc_counters = new CollectorCounters("CMS", 1);
  _completed_initialization = true;
  _inter_sweep_timer.start(); // start of time
}

size_t CMSCollector::plab_sample_minimum_size() {
  // The default value of MinTLABSize is 2k, but there is
  // no way to get the default value if the flag has been overridden.
  return MAX2(ThreadLocalAllocBuffer::min_size() * HeapWordSize, 2 * K);
}

const char* ConcurrentMarkSweepGeneration::name() const {
  return "concurrent mark-sweep generation";
}
void ConcurrentMarkSweepGeneration::update_counters() {
  if (UsePerfData) {
    _space_counters->update_all();
    _gen_counters->update_all();
  }
}

// this is an optimized version of update_counters(). it takes the
// used value as a parameter rather than computing it.
//
void ConcurrentMarkSweepGeneration::update_counters(size_t used) {
  if (UsePerfData) {
    _space_counters->update_used(used);
    _space_counters->update_capacity();
    _gen_counters->update_all();

Right column of the side-by-side diff. It matches the left column line for line except for two differences: max_plab_samples is computed with a hard-coded 2*K granule (with an explanatory comment) instead of a call to plab_sample_minimum_size(), and the CMSCollector::plab_sample_minimum_size() definition is not present. The differing hunk reads:

  // Support for parallelizing survivor space rescan
  if ((CMSParallelRemarkEnabled && CMSParallelSurvivorRemarkEnabled) || CMSParallelInitialMarkEnabled) {
    // The 2*K (default MinTLABSize) is large enough to allow smooth striping of work
    // and avoids being linked to unusual MinTLABSize set on the command line
    const size_t max_plab_samples = ((DefNewGeneration*)_young_gen)->max_survivor_size() / (2 * K);
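To make the difference between the two granule choices concrete, here is a minimal standalone sketch, not HotSpot code: the heap-word size, the stand-in for ThreadLocalAllocBuffer::min_size(), and the survivor-space size are illustrative assumptions chosen only to show how the two formulas diverge when MinTLABSize is raised above its 2K default.

  // sketch.cpp -- illustrative only; every name below is a local stand-in, not a HotSpot symbol.
  #include <algorithm>
  #include <cstddef>
  #include <cstdio>

  int main() {
    const std::size_t K = 1024;
    const std::size_t HeapWordSize      = 8;         // assume 64-bit heap words
    const std::size_t min_tlab_words    = 4 * K;     // stand-in for ThreadLocalAllocBuffer::min_size(),
                                                     // i.e. a MinTLABSize of 32K bytes in this example
    const std::size_t max_survivor_size = 4 * K * K; // assume a 4 MB survivor space

    // Right-column style: a fixed 2K sampling granule, matching the default MinTLABSize.
    const std::size_t samples_fixed = max_survivor_size / (2 * K);

    // Left-column style (plab_sample_minimum_size()): the configured minimum TLAB size in
    // bytes, clamped so the granule never drops below 2K.
    const std::size_t plab_sample_min = std::max(min_tlab_words * HeapWordSize, 2 * K);
    const std::size_t samples_aware   = max_survivor_size / plab_sample_min;

    std::printf("fixed 2K granule: %zu samples; MinTLABSize-aware granule: %zu samples\n",
                samples_fixed, samples_aware);
    return 0;
  }

With the assumed 32K MinTLABSize, the MinTLABSize-aware form sizes the sampling arrays for 128 samples instead of 2048; with the default 2K MinTLABSize the two forms agree. This mirrors the trade-off the in-code comments describe: the hard-coded 2*K keeps the sampling granule decoupled from unusual MinTLABSize settings, while plab_sample_minimum_size() lets a larger MinTLABSize shrink the sampling arrays but clamps at 2K so a tiny setting cannot inflate them.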