--- old/src/share/vm/gc/g1/g1CollectedHeap.cpp	2017-06-14 16:39:38.423497273 +0200
+++ new/src/share/vm/gc/g1/g1CollectedHeap.cpp	2017-06-14 16:39:38.151486536 +0200
@@ -51,7 +51,7 @@
 #include "gc/g1/g1RemSet.inline.hpp"
 #include "gc/g1/g1RootClosures.hpp"
 #include "gc/g1/g1RootProcessor.hpp"
-#include "gc/g1/g1SerialCollector.hpp"
+#include "gc/g1/g1SerialFullCollector.hpp"
 #include "gc/g1/g1StringDedup.hpp"
 #include "gc/g1/g1YCTypes.hpp"
 #include "gc/g1/heapRegion.inline.hpp"
@@ -1136,25 +1136,48 @@
   collector_state()->set_gcs_are_young(true);
 }
 
-void G1CollectedHeap::reset_card_cache_and_queue() {
+void G1CollectedHeap::verify_before_full_collection(bool explicit_gc) {
+  assert(!GCCause::is_user_requested_gc(gc_cause()) || explicit_gc, "invariant");
+  assert(used() == recalculate_used(), "Should be equal");
+  _verifier->verify_region_sets_optional();
+  _verifier->verify_before_gc();
+  _verifier->check_bitmaps("Full GC Start");
+}
+
+void G1CollectedHeap::prepare_heap_for_mutators() {
+  // Delete metaspaces for unloaded class loaders and clean up loader_data graph
+  ClassLoaderDataGraph::purge();
+  MetaspaceAux::verify_metrics();
+
+  // Prepare heap for normal collections.
+  assert(num_free_regions() == 0, "we should not have added any free regions");
+  rebuild_region_sets(false /* free_list_only */);
+  abort_refinement();
+  resize_if_necessary_after_full_collection();
+
+  // Rebuild the strong code root lists for each region
+  rebuild_strong_code_roots();
+
+  // Start a new incremental collection set for the next pause
+  start_new_collection_set();
+
+  _allocator->init_mutator_alloc_region();
+
+  // Post collection state updates.
+  MetaspaceGC::compute_new_size();
+}
+
+void G1CollectedHeap::abort_refinement() {
   if (_hot_card_cache->use_cache()) {
     _hot_card_cache->reset_card_counts();
     _hot_card_cache->reset_hot_cache();
   }
 
-  // Discard all stale remembered set updates.
+  // Discard all remembered set updates.
   JavaThread::dirty_card_queue_set().abandon_logs();
   assert(dirty_card_queue_set().completed_buffers_num() == 0, "DCQS should be empty");
 }
 
-void G1CollectedHeap::verify_before_full_collection(bool explicit_gc) {
-  assert(!GCCause::is_user_requested_gc(gc_cause()) || explicit_gc, "invariant");
-  assert(used() == recalculate_used(), "Should be equal");
-  _verifier->verify_region_sets_optional();
-  _verifier->verify_before_gc();
-  _verifier->check_bitmaps("Full GC Start");
-}
-
 void G1CollectedHeap::verify_after_full_collection() {
   check_gc_time_stamps();
   _hrm.verify_optional();
@@ -1189,9 +1212,18 @@
   ref_processor_cm()->verify_no_references_recorded();
 }
 
+void G1CollectedHeap::print_heap_after_full_collection(G1HeapTransition* heap_transition) {
+  print_hrm_post_compaction();
+  heap_transition->print();
+  print_heap_after_gc();
+  print_heap_regions();
+#ifdef TRACESPINNING
+  ParallelTaskTerminator::print_termination_counts();
+#endif
+}
+
 void G1CollectedHeap::do_full_collection_inner(G1FullGCScope* scope) {
   GCTraceTime(Info, gc) tm("Pause Full", NULL, gc_cause(), true);
-  G1HeapTransition heap_transition(this);
   g1_policy()->record_full_collection_start();
 
   print_heap_before_gc();
@@ -1203,35 +1235,15 @@
   gc_prologue(true);
   prepare_heap_for_full_collection();
 
-  G1SerialCollector serial(scope, ref_processor_stw());
+  G1SerialFullCollector serial(scope, ref_processor_stw());
   serial.prepare_collection();
   serial.collect();
   serial.complete_collection();
 
-  assert(num_free_regions() == 0, "we should not have added any free regions");
-  MemoryService::track_memory_usage();
+  prepare_heap_for_mutators();
 
-  // Delete metaspaces for unloaded class loaders and clean up loader_data graph
-  ClassLoaderDataGraph::purge();
-  MetaspaceAux::verify_metrics();
-
-  // Prepare heap for normal collections.
-  rebuild_region_sets(false /* free_list_only */);
-  reset_card_cache_and_queue();
-  resize_if_necessary_after_full_collection();
-
-  // Rebuild the strong code root lists for each region
-  rebuild_strong_code_roots();
-
-  // Start a new incremental collection set for the next pause
-  start_new_collection_set();
-
-  _allocator->init_mutator_alloc_region();
-
-  // Post collection state updates.
-  MetaspaceGC::compute_new_size();
-  gc_epilogue(true);
   g1_policy()->record_full_collection_end();
+  gc_epilogue(true);
 
   // Post collection verification.
   verify_after_full_collection();
@@ -1240,13 +1252,7 @@
   // We should do this after we potentially resize the heap so
   // that all the COMMIT / UNCOMMIT events are generated before
   // the compaction events.
-  print_hrm_post_compaction();
-  heap_transition.print();
-  print_heap_after_gc();
-  print_heap_regions();
-#ifdef TRACESPINNING
-  ParallelTaskTerminator::print_termination_counts();
-#endif
+  print_heap_after_full_collection(scope->heap_transition());
 }
 
 bool G1CollectedHeap::do_full_collection(bool explicit_gc,
@@ -2607,6 +2613,7 @@
 
   allocation_context_stats().update(full);
 
+  MemoryService::track_memory_usage();
   // We have just completed a GC. Update the soft reference
   // policy with the new heap occupancy
   Universe::update_heap_info_at_gc();
@@ -3157,8 +3164,6 @@
   evacuation_info.set_collectionset_used_before(collection_set()->bytes_used_before());
   evacuation_info.set_bytes_copied(g1_policy()->bytes_copied_during_gc());
 
-  MemoryService::track_memory_usage();
-
   // In prepare_for_verify() below we'll need to scan the deferred
   // update buffers to bring the RSets up-to-date if
   // G1HRRSFlushLogBuffersOnVerify has been set. While scanning