
src/share/vm/memory/genCollectedHeap.cpp


*** 88,100 ****
  }
  
  jint GenCollectedHeap::initialize() {
    CollectedHeap::pre_initialize();
  
-   _n_gens = gen_policy()->number_of_generations();
-   assert(_n_gens == 2, "There is no support for more than two generations");
- 
    // While there are no constraints in the GC code that HeapWordSize
    // be any particular value, there are multiple other areas in the
    // system which believe this to be true (e.g. oop->object_size in some
    // cases incorrectly returns the size in wordSize units rather than
    // HeapWordSize).
--- 88,97 ----
*** 198,209 ****
    return _young_gen->used() + _old_gen->used();
  }
  
  // Save the "used_region" for generations level and lower.
  void GenCollectedHeap::save_used_regions(int level) {
!   assert(level >= 0, "Illegal level parameter");
!   assert(level < _n_gens, "Illegal level parameter");
    if (level == 1) {
      _old_gen->save_used_region();
    }
    _young_gen->save_used_region();
  }
--- 195,205 ----
    return _young_gen->used() + _old_gen->used();
  }
  
  // Save the "used_region" for generations level and lower.
  void GenCollectedHeap::save_used_regions(int level) {
!   assert(level == 0 || level == 1, "Illegal level parameter");
    if (level == 1) {
      _old_gen->save_used_region();
    }
    _young_gen->save_used_region();
  }
*** 415,425 ****
           my_thread->is_ConcurrentGC_thread(),
           "incorrect thread type capability");
    assert(Heap_lock->is_locked(),
           "the requesting thread should have the Heap_lock");
    guarantee(!is_gc_active(), "collection is not reentrant");
-   assert(max_level < n_gens(), "sanity check");
  
    if (GC_locker::check_active_before_gc()) {
      return; // GC is disabled (e.g. JNI GetXXXCritical operation)
    }
  
--- 411,420 ----
*** 433,443 ****
    print_heap_before_gc();
  
    {
      FlagSetting fl(_is_gc_active, true);
  
!     bool complete = full && (max_level == (n_gens()-1));
      const char* gc_cause_prefix = complete ? "Full GC" : "GC";
      TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
      // The PrintGCDetails logging starts before we have incremented the GC id. We will do that later
      // so we can assume here that the next GC id is what we want.
      GCTraceTime t(GCCauseString(gc_cause_prefix, gc_cause()), PrintGCDetails, false, NULL, GCId::peek());
--- 428,438 ----
    print_heap_before_gc();
  
    {
      FlagSetting fl(_is_gc_active, true);
  
!     bool complete = full && (max_level == 1 /* old */);
      const char* gc_cause_prefix = complete ? "Full GC" : "GC";
      TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
      // The PrintGCDetails logging starts before we have incremented the GC id. We will do that later
      // so we can assume here that the next GC id is what we want.
      GCTraceTime t(GCCauseString(gc_cause_prefix, gc_cause()), PrintGCDetails, false, NULL, GCId::peek());
*** 505,515 ****
      }
  
      // Update "complete" boolean wrt what actually transpired --
      // for instance, a promotion failure could have led to
      // a whole heap collection.
!     complete = complete || (max_level_collected == n_gens() - 1);
  
      if (complete) { // We did a "major" collection
        // FIXME: See comment at pre_full_gc_dump call
        post_full_gc_dump(NULL);   // do any post full gc dumps
      }
--- 500,510 ----
      }
  
      // Update "complete" boolean wrt what actually transpired --
      // for instance, a promotion failure could have led to
      // a whole heap collection.
!     complete = complete || (max_level_collected == 1 /* old */);
  
      if (complete) { // We did a "major" collection
        // FIXME: See comment at pre_full_gc_dump call
        post_full_gc_dump(NULL);   // do any post full gc dumps
      }
*** 522,532 ****
        MetaspaceAux::print_metaspace_change(metadata_prev_used);
      }
    }
  
    // Adjust generation sizes.
!   if (max_level_collected == 1) {
      _old_gen->compute_new_size();
    }
    _young_gen->compute_new_size();
  
    if (complete) {
--- 517,527 ----
        MetaspaceAux::print_metaspace_change(metadata_prev_used);
      }
    }
  
    // Adjust generation sizes.
!   if (max_level_collected == 1 /* old */) {
      _old_gen->compute_new_size();
    }
    _young_gen->compute_new_size();
  
    if (complete) {
*** 769,791 ****
  #else // INCLUDE_ALL_GCS
      ShouldNotReachHere();
  #endif // INCLUDE_ALL_GCS
    } else if (cause == GCCause::_wb_young_gc) {
      // minor collection for WhiteBox API
!     collect(cause, 0);
    } else {
  #ifdef ASSERT
    if (cause == GCCause::_scavenge_alot) {
      // minor collection only
!     collect(cause, 0);
    } else {
      // Stop-the-world full collection
!     collect(cause, n_gens() - 1);
    }
  #else
      // Stop-the-world full collection
!     collect(cause, n_gens() - 1);
  #endif
    }
  }
  
  void GenCollectedHeap::collect(GCCause::Cause cause, int max_level) {
--- 764,786 ----
  #else // INCLUDE_ALL_GCS
      ShouldNotReachHere();
  #endif // INCLUDE_ALL_GCS
    } else if (cause == GCCause::_wb_young_gc) {
      // minor collection for WhiteBox API
!     collect(cause, 0 /* young */);
    } else {
  #ifdef ASSERT
    if (cause == GCCause::_scavenge_alot) {
      // minor collection only
!     collect(cause, 0 /* young */);
    } else {
      // Stop-the-world full collection
!     collect(cause, 1 /* old */);
    }
  #else
      // Stop-the-world full collection
!     collect(cause, 1 /* old */);
  #endif
    }
  }
  
  void GenCollectedHeap::collect(GCCause::Cause cause, int max_level) {
*** 796,806 ****
  }
  
  void GenCollectedHeap::collect_locked(GCCause::Cause cause) {
    // The caller has the Heap_lock
    assert(Heap_lock->owned_by_self(), "this thread should own the Heap_lock");
!   collect_locked(cause, n_gens() - 1);
  }
  
  // this is the private collection interface
  // The Heap_lock is expected to be held on entry.
  
--- 791,801 ----
  }
  
  void GenCollectedHeap::collect_locked(GCCause::Cause cause) {
    // The caller has the Heap_lock
    assert(Heap_lock->owned_by_self(), "this thread should own the Heap_lock");
!   collect_locked(cause, 1 /* old */);
  }
  
  // this is the private collection interface
  // The Heap_lock is expected to be held on entry.
  
*** 852,862 ****
      }
    }
  #endif // INCLUDE_ALL_GCS
  
  void GenCollectedHeap::do_full_collection(bool clear_all_soft_refs) {
!   do_full_collection(clear_all_soft_refs, _n_gens - 1);
  }
  
  void GenCollectedHeap::do_full_collection(bool clear_all_soft_refs,
                                            int max_level) {
    int local_max_level;
--- 847,857 ----
      }
    }
  #endif // INCLUDE_ALL_GCS
  
  void GenCollectedHeap::do_full_collection(bool clear_all_soft_refs) {
!   do_full_collection(clear_all_soft_refs, 1 /* old */);
  }
  
  void GenCollectedHeap::do_full_collection(bool clear_all_soft_refs,
                                            int max_level) {
    int local_max_level;
*** 884,894 ****
      // This time allow the old gen to be collected as well
      do_collection(true                 /* full */,
                    clear_all_soft_refs  /* clear_all_soft_refs */,
                    0                    /* size */,
                    false                /* is_tlab */,
!                   n_gens() - 1         /* max_level */);
    }
  }
  
  bool GenCollectedHeap::is_in_young(oop p) {
    bool result = ((HeapWord*)p) < _old_gen->reserved().start();
--- 879,889 ----
      // This time allow the old gen to be collected as well
      do_collection(true                 /* full */,
                    clear_all_soft_refs  /* clear_all_soft_refs */,
                    0                    /* size */,
                    false                /* is_tlab */,
!                   1 /* old */          /* max_level */);
    }
  }
  
  bool GenCollectedHeap::is_in_young(oop p) {
    bool result = ((HeapWord*)p) < _old_gen->reserved().start();
*** 1110,1122 ****
    assert(_gch != NULL, "Uninitialized access to GenCollectedHeap::heap()");
    assert(_gch->kind() == CollectedHeap::GenCollectedHeap, "not a generational heap");
    return _gch;
  }
  
- void GenCollectedHeap::prepare_for_compaction() {
-   guarantee(_n_gens = 2, "Wrong number of generations");
    // Start by compacting into same gen.
    CompactPoint cp(_old_gen);
    _old_gen->prepare_for_compaction(&cp);
    _young_gen->prepare_for_compaction(&cp);
  }
--- 1105,1115 ----