< prev index next >
src/hotspot/share/gc/g1/g1CollectedHeap.cpp
Print this page
rev 58105 : [mq]: 8236073-softmaxheapsize
*** 1059,1069 ****
// Prepare heap for normal collections.
assert(num_free_regions() == 0, "we should not have added any free regions");
rebuild_region_sets(false /* free_list_only */);
abort_refinement();
! resize_heap_if_necessary();
// Rebuild the strong code root lists for each region
rebuild_strong_code_roots();
// Purge code root memory
--- 1059,1070 ----
// Prepare heap for normal collections.
assert(num_free_regions() == 0, "we should not have added any free regions");
rebuild_region_sets(false /* free_list_only */);
abort_refinement();
!
! resize_heap_after_full_gc();
// Rebuild the strong code root lists for each region
rebuild_strong_code_roots();
// Purge code root memory
*** 1163,1207 ****
// out by the GC locker). So, right now, we'll ignore the return value.
bool dummy = do_full_collection(true, /* explicit_gc */
clear_all_soft_refs);
}
! void G1CollectedHeap::resize_heap_if_necessary() {
assert_at_safepoint_on_vm_thread();
// Capacity, free and used after the GC counted as full regions to
// include the waste in the following calculations.
const size_t capacity_after_gc = capacity();
const size_t used_after_gc = capacity_after_gc - unused_committed_regions_in_bytes();
! // This is enforced in arguments.cpp.
! assert(MinHeapFreeRatio <= MaxHeapFreeRatio,
! "otherwise the code below doesn't make sense");
!
! // We don't have floating point command-line arguments
! const double minimum_free_percentage = (double) MinHeapFreeRatio / 100.0;
! const double maximum_used_percentage = 1.0 - minimum_free_percentage;
! const double maximum_free_percentage = (double) MaxHeapFreeRatio / 100.0;
! const double minimum_used_percentage = 1.0 - maximum_free_percentage;
!
! // We have to be careful here as these two calculations can overflow
! // 32-bit size_t's.
! double used_after_gc_d = (double) used_after_gc;
! double minimum_desired_capacity_d = used_after_gc_d / maximum_used_percentage;
! double maximum_desired_capacity_d = used_after_gc_d / minimum_used_percentage;
!
! // Let's make sure that they are both under the max heap size, which
! // by default will make them fit into a size_t.
! double desired_capacity_upper_bound = (double) MaxHeapSize;
! minimum_desired_capacity_d = MIN2(minimum_desired_capacity_d,
! desired_capacity_upper_bound);
! maximum_desired_capacity_d = MIN2(maximum_desired_capacity_d,
! desired_capacity_upper_bound);
!
! // We can now safely turn them into size_t's.
! size_t minimum_desired_capacity = (size_t) minimum_desired_capacity_d;
! size_t maximum_desired_capacity = (size_t) maximum_desired_capacity_d;
// This assert only makes sense here, before we adjust them
// with respect to the min and max heap size.
assert(minimum_desired_capacity <= maximum_desired_capacity,
"minimum_desired_capacity = " SIZE_FORMAT ", "
--- 1164,1184 ----
// out by the GC locker). So, right now, we'll ignore the return value.
bool dummy = do_full_collection(true, /* explicit_gc */
clear_all_soft_refs);
}
! void G1CollectedHeap::resize_heap_after_full_gc() {
assert_at_safepoint_on_vm_thread();
+ assert(collector_state()->in_full_gc(), "Must be");
// Capacity, free and used after the GC counted as full regions to
// include the waste in the following calculations.
const size_t capacity_after_gc = capacity();
const size_t used_after_gc = capacity_after_gc - unused_committed_regions_in_bytes();
! size_t minimum_desired_capacity = _heap_sizing_policy->target_heap_capacity(used_after_gc, MinHeapFreeRatio);
! size_t maximum_desired_capacity = _heap_sizing_policy->target_heap_capacity(used_after_gc, MaxHeapFreeRatio);
// This assert only makes sense here, before we adjust them
// with respect to the min and max heap size.
assert(minimum_desired_capacity <= maximum_desired_capacity,
"minimum_desired_capacity = " SIZE_FORMAT ", "
*** 2422,2431 ****
--- 2399,2412 ----
// Upper bound on the heap size in bytes: the maximum number of regions
// the region manager can ever commit, times the region size.
size_t G1CollectedHeap::max_reserved_capacity() const {
  const size_t max_regions = _hrm->max_length();
  return max_regions * HeapRegion::GrainBytes;
}
+ size_t G1CollectedHeap::soft_max_capacity() const {
+ return clamp(align_up(SoftMaxHeapSize, HeapAlignment), MinHeapSize, max_capacity());
+ }
+
jlong G1CollectedHeap::millis_since_last_gc() {
// See the notes in GenCollectedHeap::millis_since_last_gc()
// for more information about the implementation.
jlong ret_val = (os::javaTimeNanos() / NANOSECS_PER_MILLISEC) -
_policy->collection_pause_end_millis();
*** 2946,2965 ****
_verifier->verify_after_gc(type);
_verifier->check_bitmaps("GC End");
verify_numa_regions("GC End");
}
! void G1CollectedHeap::expand_heap_after_young_collection(){
! size_t expand_bytes = _heap_sizing_policy->expansion_amount();
if (expand_bytes > 0) {
! // No need for an ergo logging here,
! // expansion_amount() does this when it returns a value > 0.
! double expand_ms;
! if (!expand(expand_bytes, _workers, &expand_ms)) {
// We failed to expand the heap. Cannot do anything about it.
}
! phase_times()->record_expand_heap_time(expand_ms);
}
}
const char* G1CollectedHeap::young_gc_name() const {
if (collector_state()->in_initial_mark_gc()) {
--- 2927,2964 ----
_verifier->verify_after_gc(type);
_verifier->check_bitmaps("GC End");
verify_numa_regions("GC End");
}
! void G1CollectedHeap::resize_heap_after_young_collection() {
! Ticks start = Ticks::now();
! if (!expand_heap_after_young_collection()) {
! // If we don't attempt to expand heap, try if we need to shrink the heap
! shrink_heap_after_young_collection();
! }
! phase_times()->record_resize_heap_time((Ticks::now() - start).seconds() * 1000.0);
! }
!
! bool G1CollectedHeap::expand_heap_after_young_collection(){
! size_t expand_bytes = _heap_sizing_policy->expansion_amount_after_young_collection();
if (expand_bytes > 0) {
! if (expand(expand_bytes, _workers, NULL)) {
// We failed to expand the heap. Cannot do anything about it.
}
! return true;
! }
! return false;
! }
!
! void G1CollectedHeap::shrink_heap_after_young_collection() {
! if (collector_state()->in_young_only_phase() || policy()->next_gc_should_be_mixed()) {
! // Do the shrink during gc only at the end of mixed gc phase
! return;
! }
! size_t shrink_bytes = _heap_sizing_policy->shrink_amount_at_last_mixed_gc(policy()->desired_bytes_after_concurrent_mark());
! if (shrink_bytes > 0) {
! shrink(shrink_bytes);
}
}
const char* G1CollectedHeap::young_gc_name() const {
if (collector_state()->in_initial_mark_gc()) {
*** 3124,3134 ****
allocate_dummy_regions();
_allocator->init_mutator_alloc_regions();
! expand_heap_after_young_collection();
double sample_end_time_sec = os::elapsedTime();
double pause_time_ms = (sample_end_time_sec - sample_start_time_sec) * MILLIUNITS;
policy()->record_collection_pause_end(pause_time_ms);
}
--- 3123,3133 ----
allocate_dummy_regions();
_allocator->init_mutator_alloc_regions();
! resize_heap_after_young_collection();
double sample_end_time_sec = os::elapsedTime();
double pause_time_ms = (sample_end_time_sec - sample_start_time_sec) * MILLIUNITS;
policy()->record_collection_pause_end(pause_time_ms);
}
< prev index next >