
src/hotspot/share/gc/shenandoah/shenandoahHeap.cpp

rev 59422 : 8245754: Shenandoah: ditch ShenandoahAlwaysPreTouch
Reviewed-by: XXX

--- old/src/hotspot/share/gc/shenandoah/shenandoahHeap.cpp
 138 
 139       os::pretouch_memory(_bitmap_base + start, _bitmap_base + end, _page_size);
 140 
 141       r = _regions.next();
 142     }
 143   }
 144 };
 145 
 146 jint ShenandoahHeap::initialize() {
 147   //
 148   // Figure out heap sizing
 149   //
 150 
 151   size_t init_byte_size = InitialHeapSize;
 152   size_t min_byte_size  = MinHeapSize;
 153   size_t max_byte_size  = MaxHeapSize;
 154   size_t heap_alignment = HeapAlignment;
 155 
 156   size_t reg_size_bytes = ShenandoahHeapRegion::region_size_bytes();
 157 
 158   if (ShenandoahAlwaysPreTouch) {
 159     // When pre-touch is enabled, the entire heap is committed right away.
 160     init_byte_size = max_byte_size;
 161   }
 162 
 163   Universe::check_alignment(max_byte_size,  reg_size_bytes, "Shenandoah heap");
 164   Universe::check_alignment(init_byte_size, reg_size_bytes, "Shenandoah heap");
 165 
 166   _num_regions = ShenandoahHeapRegion::region_count();
 167 
 168   // Now that we know the number of regions, initialize the heuristics.
 169   initialize_heuristics();
 170 
 171   size_t num_committed_regions = init_byte_size / reg_size_bytes;
 172   num_committed_regions = MIN2(num_committed_regions, _num_regions);
 173   assert(num_committed_regions <= _num_regions, "sanity");
 174   _initial_size = num_committed_regions * reg_size_bytes;
 175 
 176   size_t num_min_regions = min_byte_size / reg_size_bytes;
 177   num_min_regions = MIN2(num_min_regions, _num_regions);
 178   assert(num_min_regions <= _num_regions, "sanity");


 327 
 328     for (size_t i = 0; i < _num_regions; i++) {
 329       HeapWord* start = (HeapWord*)sh_rs.base() + ShenandoahHeapRegion::region_size_words() * i;
 330       bool is_committed = i < num_committed_regions;
 331       void* loc = region_storage.base() + i * region_align;
 332 
 333       ShenandoahHeapRegion* r = new (loc) ShenandoahHeapRegion(start, i, is_committed);
 334       assert(is_aligned(r, SHENANDOAH_CACHE_LINE_SIZE), "Sanity");
 335 
 336       _marking_context->initialize_top_at_mark_start(r);
 337       _regions[i] = r;
 338       assert(!collection_set()->is_in(i), "New region should not be in collection set");
 339     }
 340 
 341     // Initialize to complete
 342     _marking_context->mark_complete();
 343 
 344     _free_set->rebuild();
 345   }
 346 
 347   if (ShenandoahAlwaysPreTouch) {
 348     assert(!AlwaysPreTouch, "Should have been overridden");
 349 
 350     // For NUMA, it is important to pre-touch the storage under bitmaps with worker threads,
 351     // before initialize() below zeroes it with the initializing thread. For any given region,
 352     // we touch the region and the corresponding bitmaps from the same thread.
 353     ShenandoahPushWorkerScope scope(workers(), _max_workers, false);
 354 
 355     size_t pretouch_heap_page_size = heap_page_size;
 356     size_t pretouch_bitmap_page_size = bitmap_page_size;
 357 
 358 #ifdef LINUX
 359     // UseTransparentHugePages would madvise that backing memory can be coalesced into huge
 360     // pages. But the kernel needs to know that every small page is used in order to coalesce
 361     // them into a huge one. Therefore, we need to pretouch with smaller pages.
 362     if (UseTransparentHugePages) {
 363       pretouch_heap_page_size = (size_t)os::vm_page_size();
 364       pretouch_bitmap_page_size = (size_t)os::vm_page_size();
 365     }
 366 #endif
 367 
 368     // OS memory managers may want to coalesce back-to-back pages. Make their jobs
 369     // simpler by pre-touching contiguous spaces (heap and bitmap) separately.

+++ new/src/hotspot/share/gc/shenandoah/shenandoahHeap.cpp

 138 
 139       os::pretouch_memory(_bitmap_base + start, _bitmap_base + end, _page_size);
 140 
 141       r = _regions.next();
 142     }
 143   }
 144 };
 145 
 146 jint ShenandoahHeap::initialize() {
 147   //
 148   // Figure out heap sizing
 149   //
 150 
 151   size_t init_byte_size = InitialHeapSize;
 152   size_t min_byte_size  = MinHeapSize;
 153   size_t max_byte_size  = MaxHeapSize;
 154   size_t heap_alignment = HeapAlignment;
 155 
 156   size_t reg_size_bytes = ShenandoahHeapRegion::region_size_bytes();
 157 
 158   if (AlwaysPreTouch) {
 159     // When pre-touch is enabled, the entire heap is committed right away.
 160     init_byte_size = max_byte_size;
 161   }
 162 
 163   Universe::check_alignment(max_byte_size,  reg_size_bytes, "Shenandoah heap");
 164   Universe::check_alignment(init_byte_size, reg_size_bytes, "Shenandoah heap");
 165 
 166   _num_regions = ShenandoahHeapRegion::region_count();
 167 
 168   // Now that we know the number of regions, initialize the heuristics.
 169   initialize_heuristics();
 170 
 171   size_t num_committed_regions = init_byte_size / reg_size_bytes;
 172   num_committed_regions = MIN2(num_committed_regions, _num_regions);
 173   assert(num_committed_regions <= _num_regions, "sanity");
 174   _initial_size = num_committed_regions * reg_size_bytes;
 175 
 176   size_t num_min_regions = min_byte_size / reg_size_bytes;
 177   num_min_regions = MIN2(num_min_regions, _num_regions);
 178   assert(num_min_regions <= _num_regions, "sanity");
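
The sizing block above is plain integer arithmetic: the region count is fixed by the maximum heap size, and the committed-region count follows from the (possibly overridden) initial size. A toy recomputation with made-up figures (1 GB max heap, 128 MB initial heap, 2 MB regions; all hypothetical, since the real values come from JVM ergonomics) shows why AlwaysPreTouch forces every region to start committed:

#include <algorithm>
#include <cstddef>
#include <cstdio>

int main() {
  // Hypothetical figures; the real values come from JVM ergonomics.
  const size_t reg_size_bytes = 2 * 1024 * 1024;     // region size
  const size_t max_byte_size  = 1024 * 1024 * 1024;  // MaxHeapSize
  size_t init_byte_size       = 128 * 1024 * 1024;   // InitialHeapSize
  const bool always_pre_touch = true;                // AlwaysPreTouch

  if (always_pre_touch) {
    init_byte_size = max_byte_size;  // pre-touch commits the whole heap
  }

  const size_t num_regions = max_byte_size / reg_size_bytes;
  const size_t num_committed =
      std::min(init_byte_size / reg_size_bytes, num_regions);

  std::printf("%zu regions, %zu committed at startup\n",
              num_regions, num_committed);
  return 0;
}

With pre-touch off, these inputs give 64 committed regions out of 512; with it on, all 512 start committed, and the MIN2 clamp plus the sanity assert above become trivially true.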


 327 
 328     for (size_t i = 0; i < _num_regions; i++) {
 329       HeapWord* start = (HeapWord*)sh_rs.base() + ShenandoahHeapRegion::region_size_words() * i;
 330       bool is_committed = i < num_committed_regions;
 331       void* loc = region_storage.base() + i * region_align;
 332 
 333       ShenandoahHeapRegion* r = new (loc) ShenandoahHeapRegion(start, i, is_committed);
 334       assert(is_aligned(r, SHENANDOAH_CACHE_LINE_SIZE), "Sanity");
 335 
 336       _marking_context->initialize_top_at_mark_start(r);
 337       _regions[i] = r;
 338       assert(!collection_set()->is_in(i), "New region should not be in collection set");
 339     }
 340 
 341     // Initialize to complete
 342     _marking_context->mark_complete();
 343 
 344     _free_set->rebuild();
 345   }
 346 
 347   if (AlwaysPreTouch) {


 348     // For NUMA, it is important to pre-touch the storage under bitmaps with worker threads,
 349     // before initialize() below zeroes it with the initializing thread. For any given region,
 350     // we touch the region and the corresponding bitmaps from the same thread.
 351     ShenandoahPushWorkerScope scope(workers(), _max_workers, false);
 352 
 353     size_t pretouch_heap_page_size = heap_page_size;
 354     size_t pretouch_bitmap_page_size = bitmap_page_size;
 355 
 356 #ifdef LINUX
 357     // UseTransparentHugePages would madvise that backing memory can be coalesced into huge
 358     // pages. But the kernel needs to know that every small page is used in order to coalesce
 359     // them into a huge one. Therefore, we need to pretouch with smaller pages.
 360     if (UseTransparentHugePages) {
 361       pretouch_heap_page_size = (size_t)os::vm_page_size();
 362       pretouch_bitmap_page_size = (size_t)os::vm_page_size();
 363     }
 364 #endif
 365 
 366     // OS memory managers may want to coalesce back-to-back pages. Make their jobs
 367     // simpler by pre-touching contiguous spaces (heap and bitmap) separately.
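
The #ifdef LINUX block above encodes a real THP subtlety: madvise(MADV_HUGEPAGE) only marks a range as eligible, and the kernel collapses small pages into a huge page once it sees them populated. A minimal Linux-only sketch of that interaction, deliberately independent of HotSpot's os:: layer:

#include <sys/mman.h>
#include <unistd.h>
#include <cstddef>

int main() {
  const size_t len = 4 * 1024 * 1024;  // room for two 2 MB huge pages
  void* p = mmap(nullptr, len, PROT_READ | PROT_WRITE,
                 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
  if (p == MAP_FAILED) {
    return 1;
  }
  char* mem = static_cast<char*>(p);

  // Mark the range as THP-eligible; nothing is backed by huge pages yet.
  madvise(mem, len, MADV_HUGEPAGE);

  // Touch one byte per *small* page: the kernel can only collapse a 2 MB
  // region into a huge page once every small page in it has been used.
  const size_t small_page = static_cast<size_t>(sysconf(_SC_PAGESIZE));
  for (size_t off = 0; off < len; off += small_page) {
    mem[off] = 0;
  }

  munmap(mem, len);
  return 0;
}

Pre-touching with a 2 MB stride instead would populate only one small page per huge-page-sized region; if the initial faults are not served with huge pages directly, khugepaged is then left with nothing to collapse.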


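The NUMA comment earlier (pre-touch the bitmap storage with worker threads before the initializing thread zeroes it) rests on two facts that can be sketched without any HotSpot types: one write per page is enough to commit a page, and under Linux's default first-touch policy the page is placed on the NUMA node of the writing thread. The pretouch_parallel helper below is hypothetical; it merely stands in for ShenandoahPushWorkerScope fanning work out to os::pretouch_memory:

#include <cstddef>
#include <memory>
#include <thread>
#include <vector>

// One write per page is enough: the OS commits (and NUMA-places) the page
// on first touch, so the stride is the page size, not the byte count.
static void pretouch(char* start, char* end, size_t page_size) {
  for (char* p = start; p < end; p += page_size) {
    *p = 0;
  }
}

// Split the range among workers so each thread first-touches its own slice,
// mirroring "touch the region and its bitmaps from the same thread".
static void pretouch_parallel(char* base, size_t len, size_t page_size,
                              unsigned workers) {
  std::vector<std::thread> threads;
  const size_t slice = len / workers;  // assume page-aligned slices for brevity
  for (unsigned w = 0; w < workers; w++) {
    char* s = base + w * slice;
    char* e = (w == workers - 1) ? base + len : s + slice;
    threads.emplace_back(pretouch, s, e, page_size);
  }
  for (std::thread& t : threads) {
    t.join();
  }
}

int main() {
  const size_t page = 4096;                     // stand-in for os::vm_page_size()
  const size_t len  = 64 * 1024 * 1024;         // 64 MB stand-in for the heap
  std::unique_ptr<char[]> heap(new char[len]);  // pages committed lazily on touch
  pretouch_parallel(heap.get(), len, page, 4);
  return 0;
}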