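// Ask the concurrent mark thread to begin a new marking cycle. Holding
// CGC_lock (without a safepoint check) guards the in_progress/started
// handshake; if a cycle is already running the request is dropped.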
void G1CollectedHeap::do_concurrent_mark() {
  MutexLocker x(CGC_lock, Mutex::_no_safepoint_check_flag);
  if (!_cm_thread->in_progress()) {
    _cm_thread->set_started();
    CGC_lock->notify();
  }
}

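// Number of cards pending refinement: the cards still sitting in each
// thread's local dirty card queue plus the cards already handed off to
// the global G1DirtyCardQueueSet.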
size_t G1CollectedHeap::pending_card_num() {
  struct CountCardsClosure : public ThreadClosure {
    size_t _cards;
    CountCardsClosure() : _cards(0) {}
    virtual void do_thread(Thread* t) {
      _cards += G1ThreadLocalData::dirty_card_queue(t).size();
    }
  } count_from_threads;
  Threads::threads_do(&count_from_threads);

  G1DirtyCardQueueSet& dcqs = G1BarrierSet::dirty_card_queue_set();
  dcqs.verify_num_cards();

  return dcqs.num_cards() + count_from_threads._cards;
}

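// Used when selecting humongous regions for eager reclaim: a region
// qualifies only if its remembered set is small enough that any
// remaining references into it are likely stale.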
bool G1CollectedHeap::is_potential_eager_reclaim_candidate(HeapRegion* r) const {
  // We don't nominate objects with many remembered set entries, on
  // the assumption that such objects are likely still live.
  HeapRegionRemSet* rem_set = r->rem_set();

  return G1EagerReclaimHumongousObjectsWithStaleRefs ?
         rem_set->occupancy_less_or_equal_than(G1RSetSparseRegionEntries) :
         G1EagerReclaimHumongousObjects && rem_set->is_empty();
}

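// Debug-only sanity check: every region's remembered set tracking state
// must agree with the needs_remset_update flag recorded in its region
// attribute table entry.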
#ifndef PRODUCT
void G1CollectedHeap::verify_region_attr_remset_update() {
  class VerifyRegionAttrRemSet : public HeapRegionClosure {
  public:
    virtual bool do_heap_region(HeapRegion* r) {
      G1CollectedHeap* g1h = G1CollectedHeap::heap();
      bool const needs_remset_update = g1h->region_attr(r->bottom()).needs_remset_update();
      assert(r->rem_set()->is_tracked() == needs_remset_update,
             "Region %u remset tracking status (%s) different to region attribute (%s)",
             r->hrm_index(), BOOL_TO_STR(r->rem_set()->is_tracked()), BOOL_TO_STR(needs_remset_update));
      return false;
    }
  } cl;
  heap_region_iterate(&cl);
}
#endif