
src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp

rev 7994 : [mq]: filter

*** 3470,3503 ****
  size_t _total_humongous;
  size_t _candidate_humongous;

  DirtyCardQueue _dcq;

  bool humongous_region_is_candidate(G1CollectedHeap* heap, HeapRegion* region) const {
    assert(region->is_starts_humongous(), "Must start a humongous object");
!   if (heap->mark_in_progress() &&
!       (region->bottom() < region->next_top_at_mark_start())) {
!     // While concurrent marking is in progress, disallow eager
!     // reclaim of humongous objects that existed at the start of the
!     // marking cycle. For objects containing references, this
!     // avoids SATB violations; such objects must be scanned. This
!     // also avoids problems when eagerly reclaiming an object that
!     // has been marked and placed in the mark stack, but has not yet
!     // been scanned.
!     return false;
!   } else if (!oop(region->bottom())->is_typeArray()) {
!     // For now, only permit reclaiming of humongous is_typeArray()
!     // objects. For objects containing references, there is more
!     // work to be done to deal with remembered sets from the object.
!     return false;
!   } else {
!     HeapRegionRemSet* const rset = region->rem_set();
!     if (G1EagerReclaimHumongousObjectsWithStaleRefs) {
!       return rset->occupancy_less_or_equal_than(G1RSetSparseRegionEntries);
      } else {
!       return rset->is_empty();
!     }
    }
  }

 public:
  RegisterHumongousWithInCSetFastTestClosure()
--- 3470,3523 ----
  size_t _total_humongous;
  size_t _candidate_humongous;

  DirtyCardQueue _dcq;

+ // We don't nominate objects with many remembered set entries, on
+ // the assumption that such objects are likely still live.
+ bool is_remset_small(HeapRegion* region) const {
+   HeapRegionRemSet* const rset = region->rem_set();
+   return G1EagerReclaimHumongousObjectsWithStaleRefs
+     ? rset->occupancy_less_or_equal_than(G1RSetSparseRegionEntries)
+     : rset->is_empty();
+ }
+
+ bool is_typeArray_region(HeapRegion* region) const {
+   return oop(region->bottom())->is_typeArray();
+ }
+
  bool humongous_region_is_candidate(G1CollectedHeap* heap, HeapRegion* region) const {
    assert(region->is_starts_humongous(), "Must start a humongous object");
!
!   if (!heap->mark_in_progress()
!       || (region->bottom() >= region->next_top_at_mark_start())) {
!     // In order to maintain SATB invariants, during concurrent mark
!     // we should only nominate an object containing references if it
!     // was allocated after the start of marking, as such an object
!     // doesn't need to have its references scanned.
!     //
!     // Also, we must not reclaim an object that is in the concurrent
!     // mark stack. Objects allocated since the start of marking are
!     // never added to the mark stack.
!
!     // However, we presently only nominate is_typeArray() objects.
!     // A humongous object containing references induces remembered
!     // set entries on other regions. In order to reclaim such an
!     // object, those remembered sets would need to be cleaned up.
!     return is_typeArray_region(region) && is_remset_small(region);
!   } else {
!     // We may allow nomination of is_typeArray() objects that were
!     // allocated before the start of concurrent marking. For this
!     // we rely on mark stack insertion to exclude is_typeArray()
!     // objects, preventing reclaiming an object that is in the mark
!     // stack. Frequent allocation and drop of large binary blobs is
!     // an important use case for eager reclaim, and this special
!     // handling may reduce needed headroom.
!     return G1EagerReclaimHumongousPreSnapshotTypeArrays
!         && is_typeArray_region(region)
!         && is_remset_small(region);
    }
  }

 public:
  RegisterHumongousWithInCSetFastTestClosure()
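
For context, here is a minimal standalone sketch (not part of the patch) of how the new nomination predicate fits together. SimpleRegion and SimpleHeap are hypothetical stand-ins for G1's HeapRegion and G1CollectedHeap, reduced to just the fields this decision reads; the flag values are arbitrary placeholders, not the real defaults.

// Hypothetical, simplified model of the candidate test above; only the
// state consulted by the decision is represented.
#include <cstddef>

static const bool   G1EagerReclaimHumongousObjectsWithStaleRefs  = true;
static const bool   G1EagerReclaimHumongousPreSnapshotTypeArrays = true;
static const size_t G1RSetSparseRegionEntries = 4;

struct SimpleRegion {
  size_t remset_occupancy;             // remembered set entry count
  bool   is_type_array;                // oop(bottom())->is_typeArray()
  bool   allocated_before_mark_start;  // bottom() < next_top_at_mark_start()
};

struct SimpleHeap {
  bool mark_in_progress;               // concurrent marking active?
};

// Mirrors is_remset_small(): with the stale-refs flag we tolerate a
// sparse remembered set; otherwise it must be completely empty.
static bool is_remset_small(const SimpleRegion& r) {
  return G1EagerReclaimHumongousObjectsWithStaleRefs
       ? r.remset_occupancy <= G1RSetSparseRegionEntries
       : r.remset_occupancy == 0;
}

// Mirrors the new humongous_region_is_candidate() control flow.
static bool humongous_region_is_candidate(const SimpleHeap& h,
                                          const SimpleRegion& r) {
  if (!h.mark_in_progress || !r.allocated_before_mark_start) {
    // SATB-safe case: either no marking is running, or the object was
    // allocated after marking started, so it needs no reference
    // scanning and is never on the mark stack.
    return r.is_type_array && is_remset_small(r);
  } else {
    // Pre-snapshot object: only typeArrays qualify, and only when the
    // extra flag permits it (mark stack insertion filters typeArrays).
    return G1EagerReclaimHumongousPreSnapshotTypeArrays
        && r.is_type_array
        && is_remset_small(r);
  }
}

Under these assumptions, a pre-snapshot typeArray with a small enough remembered set is reclaimable only when G1EagerReclaimHumongousPreSnapshotTypeArrays is set, while objects nominated in the first branch need no such gate.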