1761 G1CollectedHeap::G1CollectedHeap(G1CollectorPolicy* policy_) :
1762 SharedHeap(policy_),
1763 _g1_policy(policy_),
1764 _dirty_card_queue_set(false),
1765 _into_cset_dirty_card_queue_set(false),
1766 _is_alive_closure_cm(this),
1767 _is_alive_closure_stw(this),
1768 _ref_processor_cm(NULL),
1769 _ref_processor_stw(NULL),
1770 _process_strong_tasks(new SubTasksDone(G1H_PS_NumElements)),
1771 _bot_shared(NULL),
1772 _evac_failure_scan_stack(NULL),
1773 _mark_in_progress(false),
1774 _cg1r(NULL),
1775 _g1mm(NULL),
1776 _refine_cte_cl(NULL),
1777 _full_collection(false),
1778 _secondary_free_list("Secondary Free List", new SecondaryFreeRegionListMtSafeChecker()),
1779 _old_set("Old Set", false /* humongous */, new OldRegionSetMtSafeChecker()),
1780 _humongous_set("Master Humongous Set", true /* humongous */, new HumongousRegionSetMtSafeChecker()),
1781 _humongous_reclaim_candidates(),
1782 _has_humongous_reclaim_candidates(false),
1783 _free_regions_coming(false),
1784 _young_list(new YoungList(this)),
1785 _gc_time_stamp(0),
1786 _survivor_plab_stats(YoungPLABSize, PLABWeight),
1787 _old_plab_stats(OldPLABSize, PLABWeight),
1788 _expand_heap_after_alloc_failure(true),
1789 _surviving_young_words(NULL),
1790 _old_marking_cycles_started(0),
1791 _old_marking_cycles_completed(0),
1792 _concurrent_cycle_started(false),
1793 _heap_summary_sent(false),
1794 _in_cset_fast_test(),
1795 _dirty_cards_region_list(NULL),
1796 _worker_cset_start_region(NULL),
1797 _worker_cset_start_region_time_stamp(NULL),
1798 _gc_timer_stw(new (ResourceObj::C_HEAP, mtGC) STWGCTimer()),
1799 _gc_timer_cm(new (ResourceObj::C_HEAP, mtGC) ConcurrentGCTimer()),
1800 _gc_tracer_stw(new (ResourceObj::C_HEAP, mtGC) G1NewTracer()),
1801 _gc_tracer_cm(new (ResourceObj::C_HEAP, mtGC) G1OldTracer()) {
1955 g1_barrier_set()->initialize(cardtable_storage);
1956 // Perform the deferred initialization work for concurrent refinement.
1957 _cg1r->init(card_counts_storage);
1958
1959 // 6843694 - ensure that the maximum region index can fit
1960 // in the remembered set structures.
1961 const uint max_region_idx = (1U << (sizeof(RegionIdx_t)*BitsPerByte-1)) - 1;
1962 guarantee((max_regions() - 1) <= max_region_idx, "too many regions");
1963
1964 size_t max_cards_per_region = ((size_t)1 << (sizeof(CardIdx_t)*BitsPerByte-1)) - 1;
1965 guarantee(HeapRegion::CardsPerRegion > 0, "make sure it's initialized");
1966 guarantee(HeapRegion::CardsPerRegion < max_cards_per_region,
1967 "too many cards per region");
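// Worked example of the two bounds above (the concrete widths of RegionIdx_t
// and CardIdx_t are assumptions here, not taken from this file): with a
// 32-bit signed RegionIdx_t, max_region_idx is (1U << 31) - 1 = 2147483647;
// with a 16-bit signed CardIdx_t, max_cards_per_region would be
// (1 << 15) - 1 = 32767. In each case one bit is reserved for the sign.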
1968
1969 FreeRegionList::set_unrealistically_long_length(max_regions() + 1);
1970
1971 _bot_shared = new G1BlockOffsetSharedArray(reserved_region(), bot_storage);
1972
1973 _g1h = this;
1974
1975 _in_cset_fast_test.initialize(
1976 _hrm.reserved().start(), _hrm.reserved().end(), HeapRegion::GrainBytes);
1977 _humongous_reclaim_candidates.initialize(
1978 _hrm.reserved().start(), _hrm.reserved().end(), HeapRegion::GrainBytes);
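// Note: both tables are biased mapped arrays covering the reserved heap at
// HeapRegion::GrainBytes granularity, i.e. one entry per region, so a heap
// address maps to its region's entry with a subtraction and a shift.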
1979
1980 // Create the ConcurrentMark data structure and thread.
1981 // (Must do this late, so that "max_regions" is defined.)
1982 _cm = new ConcurrentMark(this, prev_bitmap_storage, next_bitmap_storage);
1983 if (_cm == NULL || !_cm->completed_initialization()) {
1984 vm_shutdown_during_initialization("Could not create/initialize ConcurrentMark");
1985 return JNI_ENOMEM;
1986 }
1987 _cmThread = _cm->cmThread();
1988
1989 // Initialize the from_card cache structure of HeapRegionRemSet.
1990 HeapRegionRemSet::init_heap(max_regions());
1991
1992 // Now expand into the initial heap size.
1993 if (!expand(init_byte_size)) {
1994 vm_shutdown_during_initialization("Failed to allocate initial heap.");
1995 return JNI_ENOMEM;
1996 }
1997
1998 // Perform any initialization actions delegated to the policy.
2046 // Create the monitoring and management support now that the
2047 // values in the heap have been properly initialized.
2048 _g1mm = new G1MonitoringSupport(this);
2049
2050 G1StringDedup::initialize();
2051
2052 return JNI_OK;
2053 }
2054
2055 void G1CollectedHeap::stop() {
2056 // Stop all concurrent threads. We do this to make sure these threads
2057 // do not continue to execute and access resources (e.g. gclog_or_tty)
2058 // that are destroyed during shutdown.
2059 _cg1r->stop();
2060 _cmThread->stop();
2061 if (G1StringDedup::is_enabled()) {
2062 G1StringDedup::stop();
2063 }
2064 }
2065
2066 size_t G1CollectedHeap::conservative_max_heap_alignment() {
2067 return HeapRegion::max_region_size();
2068 }
2069
2070 void G1CollectedHeap::ref_processing_init() {
2071 // Reference processing in G1 currently works as follows:
2072 //
2073 // * There are two reference processor instances. One is
2074 // used to record and process discovered references
2075 // during concurrent marking; the other is used to
2076 // record and process references during STW pauses
2077 // (both full and incremental).
2078 // * Both ref processors need to 'span' the entire heap, as
2079 // the regions in the collection set may be scattered anywhere within it.
2080 //
2081 // * For the concurrent marking ref processor:
2082 // * Reference discovery is enabled at initial marking.
2083 // * Reference discovery is disabled and the discovered
2084 // references are processed during the remark pause.
2085 // * Reference discovery is MT (see below).
3448 JavaThread *curr = Threads::first();
3449 while (curr != NULL) {
3450 DirtyCardQueue& dcq = curr->dirty_card_queue();
3451 extra_cards += dcq.size();
3452 curr = curr->next();
3453 }
3454 DirtyCardQueueSet& dcqs = JavaThread::dirty_card_queue_set();
3455 size_t buffer_size = dcqs.buffer_size();
3456 size_t buffer_num = dcqs.completed_buffers_num();
3457
3458 // PtrQueueSet::buffer_size() and PtrQueue::size() return sizes
3459 // in bytes - not the number of 'entries'. We need to convert
3460 // into a number of cards.
3461 return (buffer_size * buffer_num + extra_cards) / oopSize;
3462 }
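// Worked example with assumed values: if each completed buffer is 1024 bytes,
// 8 buffers have been completed, the per-thread queues hold another 128 bytes
// of entries, and oopSize is 8, the estimate above is
//   (1024 * 8 + 128) / 8 = 1040 cards.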
3463
3464 size_t G1CollectedHeap::cards_scanned() {
3465 return g1_rem_set()->cardsScanned();
3466 }
3467
3468 class RegisterHumongousWithInCSetFastTestClosure : public HeapRegionClosure {
3469 private:
3470 size_t _total_humongous;
3471 size_t _candidate_humongous;
3472
3473 DirtyCardQueue _dcq;
3474
3475 // We don't nominate objects with many remembered set entries, on
3476 // the assumption that such objects are likely still live.
3477 bool is_remset_small(HeapRegion* region) const {
3478 HeapRegionRemSet* const rset = region->rem_set();
3479 return G1EagerReclaimHumongousObjectsWithStaleRefs
3480 ? rset->occupancy_less_or_equal_than(G1RSetSparseRegionEntries)
3481 : rset->is_empty();
3482 }
3483
3484 bool is_typeArray_region(HeapRegion* region) const {
3485 return oop(region->bottom())->is_typeArray();
3486 }
3487
3488 bool humongous_region_is_candidate(G1CollectedHeap* heap, HeapRegion* region) const {
3489 assert(region->is_starts_humongous(), "Must start a humongous object");
3490
3491 if (!heap->mark_in_progress()
3492 || (region->bottom() >= region->next_top_at_mark_start())) {
3493 // In order to maintain SATB invariants, during concurrent mark
3494 // we should only nominate an object containing references if it
3495 // was allocated after the start of marking, as such an object
3496 // doesn't need to have its references scanned.
3497 //
3498 // Also, we must not reclaim an object that is in the concurrent
3499 // mark stack. Objects allocated since the start of marking are
3500 // never added to the mark stack.
3501
3502 // However, we presently only nominate is_typeArray() objects.
3503 // A humongous object containing references induces remembered
3504 // set entries on other regions. In order to reclaim such an
3505 // object, those remembered sets would need to be cleaned up.
3506 return is_typeArray_region(region) && is_remset_small(region);
3507
3508 } else {
3509 // We may allow nomination of is_typeArray() objects that were
3510 // allocated before the start of concurrent marking. For this
3511 // we rely on mark stack insertion to exclude is_typeArray()
3512 // objects, preventing reclaiming an object that is in the mark
3513 // stack. Frequent allocation and drop of large binary blobs is
3514 // an important use case for eager reclaim, and this special
3515 // handling may reduce needed headroom.
3516 return G1EagerReclaimHumongousPreSnapshotTypeArrays
3517 && is_typeArray_region(region)
3518 && is_remset_small(region);
3519 }
3520 }
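// Summary of the nomination rules above: a humongous region is a reclaim
// candidate only if its object is a typeArray (it contains no references)
// and its remembered set is small enough (empty, or merely sparse when
// G1EagerReclaimHumongousObjectsWithStaleRefs is set). While marking is in
// progress, objects allocated before the marking snapshot are additionally
// gated by G1EagerReclaimHumongousPreSnapshotTypeArrays.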
3521
3522 public:
3523 RegisterHumongousWithInCSetFastTestClosure()
3524 : _total_humongous(0),
3525 _candidate_humongous(0),
3526 _dcq(&JavaThread::dirty_card_queue_set()) {
3527 }
3528
3529 virtual bool doHeapRegion(HeapRegion* r) {
3530 if (!r->is_starts_humongous()) {
3531 return false;
3532 }
3533 G1CollectedHeap* g1h = G1CollectedHeap::heap();
3534
3535 if (!humongous_region_is_candidate(g1h, r)) {
3536 g1h->remove_humongous_reclaim_candidate(r->hrm_index());
3537 } else {
3538 // The candidate check above already filters out humongous objects with
3539 // large remembered sets. If a humongous object has only a few remembered
3540 // set entries, we simply flush these entries into the DCQS. That will
3541 // result in their automatic re-evaluation during the following evacuation
3542 // phase.
3543 if (!r->rem_set()->is_empty()) {
3544 guarantee(r->rem_set()->occupancy_less_or_equal_than(G1RSetSparseRegionEntries),
3545 "Found a not-small remembered set here. This is inconsistent with previous assumptions.");
3546 G1SATBCardTableLoggingModRefBS* bs = g1h->g1_barrier_set();
3547 HeapRegionRemSetIterator hrrs(r->rem_set());
3548 size_t card_index;
3549 while (hrrs.has_next(card_index)) {
3550 jbyte* card_ptr = (jbyte*)bs->byte_for_index(card_index);
3551 // The remembered set might contain references to already freed
3552 // regions. Filter out such entries to avoid failing card table
3553 // verification.
3554 if (!g1h->heap_region_containing(bs->addr_for(card_ptr))->is_free()) {
3555 if (*card_ptr != CardTableModRefBS::dirty_card_val()) {
3556 *card_ptr = CardTableModRefBS::dirty_card_val();
3557 _dcq.enqueue(card_ptr);
3558 }
3559 }
3560 }
3561 r->rem_set()->clear_locked();
3562 }
3563 assert(r->rem_set()->is_empty(), "At this point any humongous candidate remembered set must be empty.");
3564 uint rindex = r->hrm_index();
3565 g1h->add_humongous_reclaim_candidate(rindex);
3566 g1h->register_humongous_region_with_cset(rindex);
3567 _candidate_humongous++;
3568 }
3569 _total_humongous++;
3570
3571 return false;
3572 }
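// Note: returning false from doHeapRegion() asks heap_region_iterate() to
// continue with the next region; returning true would terminate the
// iteration early.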
3573
3574 size_t total_humongous() const { return _total_humongous; }
3575 size_t candidate_humongous() const { return _candidate_humongous; }
3576
3577 void flush_rem_set_entries() { _dcq.flush(); }
3578 };
3579
3580 void G1CollectedHeap::register_humongous_regions_with_cset() {
3581 if (!G1EagerReclaimHumongousObjects) {
3582 g1_policy()->phase_times()->record_fast_reclaim_humongous_stats(0.0, 0, 0);
3583 return;
3584 }
3585 double time = os::elapsed_counter();
3586
3587 // Collect reclaim candidate information and register candidates with cset.
3588 RegisterHumongousWithInCSetFastTestClosure cl;
3589 heap_region_iterate(&cl);
3590
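// Note: os::elapsed_counter() returns ticks and os::elapsed_frequency()
// returns ticks per second, so the expression below converts the elapsed
// tick count to milliseconds. For example, with a 1 GHz counter, a delta
// of 2,500,000 ticks is 2.5 ms.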
3591 time = ((double)(os::elapsed_counter() - time) / os::elapsed_frequency()) * 1000.0;
3592 g1_policy()->phase_times()->record_fast_reclaim_humongous_stats(time,
3593 cl.total_humongous(),
3594 cl.candidate_humongous());
3595 _has_humongous_reclaim_candidates = cl.candidate_humongous() > 0;
3596
3597 // Finally, flush all remembered set entries into the global DCQS so that they are re-checked.
3598 cl.flush_rem_set_entries();
3599 }
3600
3601 void
3602 G1CollectedHeap::setup_surviving_young_words() {
3603 assert(_surviving_young_words == NULL, "pre-condition");
3604 uint array_length = g1_policy()->young_cset_region_length();
3605 _surviving_young_words = NEW_C_HEAP_ARRAY(size_t, (size_t) array_length, mtGC);
3606 if (_surviving_young_words == NULL) {
3607 vm_exit_out_of_memory(sizeof(size_t) * array_length, OOM_MALLOC_ERROR,
3608 "Not enough space for young surv words summary.");
3609 }
3610 memset(_surviving_young_words, 0, (size_t) array_length * sizeof(size_t));
3611 #ifdef ASSERT
3612 for (uint i = 0; i < array_length; ++i) {
3613 assert( _surviving_young_words[i] == 0, "memset above" );
3614 }
3615 #endif // ASSERT
3616 }
6193 // remembered set)
6194 // - as soon as there is a remembered set entry to the humongous starts region
6195 // (i.e. it has "escaped" to an old object) this remembered set entry will stay
6196 // until the end of a concurrent mark.
6197 //
6198 // It is not required to check whether the object has been found dead by marking
6199 // or not; in fact, doing so would prevent reclamation within a concurrent cycle, as
6200 // all objects allocated during that time are considered live.
6201 // SATB marking is even more conservative than the remembered set.
6202 // So if at this point in the collection there is no remembered set entry,
6203 // nobody has a reference to the humongous object.
6204 // At the start of collection we flush all refinement logs, and remembered sets
6205 // are completely up-to-date with respect to references to the humongous object.
6206 //
6207 // Other implementation considerations:
6208 // - never consider object arrays at this time because cleaning up their
6209 // remembered sets would require considerable effort. This is
6210 // required because stale remembered sets might reference locations that
6211 // are currently allocated into.
6212 uint region_idx = r->hrm_index();
6213 if (!g1h->is_humongous_reclaim_candidate(region_idx) ||
6214 !r->rem_set()->is_empty()) {
6215
6216 if (G1TraceEagerReclaimHumongousObjects) {
6217 gclog_or_tty->print_cr("Live humongous region %u size "SIZE_FORMAT" start "PTR_FORMAT" length "UINT32_FORMAT" with remset "SIZE_FORMAT" code roots "SIZE_FORMAT" is marked %d reclaim candidate %d type array %d",
6218 region_idx,
6219 obj->size()*HeapWordSize,
6220 r->bottom(),
6221 r->region_num(),
6222 r->rem_set()->occupied(),
6223 r->rem_set()->strong_code_roots_list_length(),
6224 next_bitmap->isMarked(r->bottom()),
6225 g1h->is_humongous_reclaim_candidate(region_idx),
6226 obj->is_typeArray()
6227 );
6228 }
6229
6230 return false;
6231 }
6232
6233 guarantee(obj->is_typeArray(),
6234 err_msg("Only eagerly reclaiming type arrays is supported, but the object "
6235 PTR_FORMAT " is not.",
6236 r->bottom()));
6237
6238 if (G1TraceEagerReclaimHumongousObjects) {
6239 gclog_or_tty->print_cr("Dead humongous region %u size "SIZE_FORMAT" start "PTR_FORMAT" length "UINT32_FORMAT" with remset "SIZE_FORMAT" code roots "SIZE_FORMAT" is marked %d reclaim candidate %d type array %d",
6240 region_idx,
6241 obj->size()*HeapWordSize,
6242 r->bottom(),
6243 r->region_num(),
6244 r->rem_set()->occupied(),
6245 r->rem_set()->strong_code_roots_list_length(),
6246 next_bitmap->isMarked(r->bottom()),
6247 g1h->is_humongous_reclaim_candidate(region_idx),
6248 obj->is_typeArray()
6249 );
6250 }
6251 // Need to clear the mark bit of the humongous object if already set.
6252 if (next_bitmap->isMarked(r->bottom())) {
6253 next_bitmap->clear(r->bottom());
6254 }
6255 _freed_bytes += r->used();
6256 r->set_containing_set(NULL);
6257 _humongous_regions_removed.increment(1u, r->capacity());
6258 g1h->free_humongous_region(r, _free_region_list, false);
6259
6260 return false;
6261 }
6262
6263 HeapRegionSetCount& humongous_free_count() {
6264 return _humongous_regions_removed;
6265 }
6266
6267 size_t bytes_freed() const {
6268 return _freed_bytes;
6269 }