src/share/vm/gc_implementation/g1/concurrentMark.cpp

rev 7007: imported patch "claimer" (webrev diff — old hunks followed by new hunks)

1646     // find the first violating region by returning true.
1647     return false;
1648   }
1649 };
1650 
1651 class G1ParVerifyFinalCountTask: public AbstractGangTask {
1652 protected:
1653   G1CollectedHeap* _g1h;
1654   ConcurrentMark* _cm;
1655   BitMap* _actual_region_bm;
1656   BitMap* _actual_card_bm;
1657 
1658   uint    _n_workers;
1659 
1660   BitMap* _expected_region_bm;
1661   BitMap* _expected_card_bm;
1662 
1663   int  _failures;
1664   bool _verbose;
1665 


1666 public:
1667   G1ParVerifyFinalCountTask(G1CollectedHeap* g1h,
1668                             BitMap* region_bm, BitMap* card_bm,
1669                             BitMap* expected_region_bm, BitMap* expected_card_bm)
1670     : AbstractGangTask("G1 verify final counting"),
1671       _g1h(g1h), _cm(_g1h->concurrent_mark()),
1672       _actual_region_bm(region_bm), _actual_card_bm(card_bm),
1673       _expected_region_bm(expected_region_bm), _expected_card_bm(expected_card_bm),
1674       _failures(0), _verbose(false),
1675       _n_workers(0) {
1676     assert(VerifyDuringGC, "don't call this otherwise");
1677 
1678     // Use the value already set as the number of active threads
1679     // in the call to run_task().
1680     if (G1CollectedHeap::use_parallel_gc_threads()) {
1681       assert( _g1h->workers()->active_workers() > 0,
1682         "Should have been previously set");
1683       _n_workers = _g1h->workers()->active_workers();

1684     } else {
1685       _n_workers = 1;
1686     }
1687 
1688     assert(_expected_card_bm->size() == _actual_card_bm->size(), "sanity");
1689     assert(_expected_region_bm->size() == _actual_region_bm->size(), "sanity");
1690 
1691     _verbose = _cm->verbose_medium();
1692   }
1693 
1694   void work(uint worker_id) {
1695     assert(worker_id < _n_workers, "invariant");
1696 
1697     VerifyLiveObjectDataHRClosure verify_cl(_g1h,
1698                                             _actual_region_bm, _actual_card_bm,
1699                                             _expected_region_bm,
1700                                             _expected_card_bm,
1701                                             _verbose);
1702 
1703     if (G1CollectedHeap::use_parallel_gc_threads()) {
1704       _g1h->heap_region_par_iterate_chunked(&verify_cl,
1705                                             worker_id,
1706                                             _n_workers,
1707                                             HeapRegion::VerifyCountClaimValue);
1708     } else {
1709       _g1h->heap_region_iterate(&verify_cl);
1710     }
1711 
1712     Atomic::add(verify_cl.failures(), &_failures);
1713   }
1714 
1715   int failures() const { return _failures; }
1716 };
1717 
1718 // Closure that finalizes the liveness counting data.
1719 // Used during the cleanup pause.
1720 // Sets the bits corresponding to the interval [NTAMS, top]
1721 // (which contains the implicitly live objects) in the
1722 // card liveness bitmap. Also sets the bit for each region,
1723 // containing live data, in the region liveness bitmap.
1724 
1725 class FinalCountDataUpdateClosure: public CMCountDataClosureBase {
1726  public:
1727   FinalCountDataUpdateClosure(G1CollectedHeap* g1h,


1776       _cm->set_card_bitmap_range(_card_bm, start_idx, end_idx, true /* is_par */);
1777     }
1778 
1779     // Set the bit for the region if it contains live data
1780     if (hr->next_marked_bytes() > 0) {
1781       set_bit_for_region(hr);
1782     }
1783 
1784     return false;
1785   }
1786 };
1787 
1788 class G1ParFinalCountTask: public AbstractGangTask {
1789 protected:
1790   G1CollectedHeap* _g1h;
1791   ConcurrentMark* _cm;
1792   BitMap* _actual_region_bm;
1793   BitMap* _actual_card_bm;
1794 
1795   uint    _n_workers;

1796 
1797 public:
1798   G1ParFinalCountTask(G1CollectedHeap* g1h, BitMap* region_bm, BitMap* card_bm)
1799     : AbstractGangTask("G1 final counting"),
1800       _g1h(g1h), _cm(_g1h->concurrent_mark()),
1801       _actual_region_bm(region_bm), _actual_card_bm(card_bm),
1802       _n_workers(0) {
1803     // Use the value already set as the number of active threads
1804     // in the call to run_task().
1805     if (G1CollectedHeap::use_parallel_gc_threads()) {
1806       assert( _g1h->workers()->active_workers() > 0,
1807         "Should have been previously set");
1808       _n_workers = _g1h->workers()->active_workers();

1809     } else {
1810       _n_workers = 1;
1811     }
1812   }
1813 
1814   void work(uint worker_id) {
1815     assert(worker_id < _n_workers, "invariant");
1816 
1817     FinalCountDataUpdateClosure final_update_cl(_g1h,
1818                                                 _actual_region_bm,
1819                                                 _actual_card_bm);
1820 
1821     if (G1CollectedHeap::use_parallel_gc_threads()) {
1822       _g1h->heap_region_par_iterate_chunked(&final_update_cl,
1823                                             worker_id,
1824                                             _n_workers,
1825                                             HeapRegion::FinalCountClaimValue);
1826     } else {
1827       _g1h->heap_region_iterate(&final_update_cl);
1828     }
1829   }
1830 };
1831 
1832 class G1ParNoteEndTask;
1833 
1834 class G1NoteEndOfConcMarkClosure : public HeapRegionClosure {
1835   G1CollectedHeap* _g1;
1836   size_t _max_live_bytes;
1837   uint _regions_claimed;
1838   size_t _freed_bytes;
1839   FreeRegionList* _local_cleanup_list;
1840   HeapRegionSetCount _old_regions_removed;
1841   HeapRegionSetCount _humongous_regions_removed;
1842   HRRSCleanupTask* _hrrs_cleanup_task;
1843   double _claimed_region_time;
1844   double _max_region_time;
1845 


1892     if (region_time > _max_region_time) {
1893       _max_region_time = region_time;
1894     }
1895     return false;
1896   }
1897 
1898   size_t max_live_bytes() { return _max_live_bytes; }
1899   uint regions_claimed() { return _regions_claimed; }
1900   double claimed_region_time_sec() { return _claimed_region_time; }
1901   double max_region_time_sec() { return _max_region_time; }
1902 };
1903 
1904 class G1ParNoteEndTask: public AbstractGangTask {
1905   friend class G1NoteEndOfConcMarkClosure;
1906 
1907 protected:
1908   G1CollectedHeap* _g1h;
1909   size_t _max_live_bytes;
1910   size_t _freed_bytes;
1911   FreeRegionList* _cleanup_list;

1912 
1913 public:
1914   G1ParNoteEndTask(G1CollectedHeap* g1h,
1915                    FreeRegionList* cleanup_list) :
1916     AbstractGangTask("G1 note end"), _g1h(g1h),
1917     _max_live_bytes(0), _freed_bytes(0), _cleanup_list(cleanup_list) { }


1918 
1919   void work(uint worker_id) {
1920     double start = os::elapsedTime();
1921     FreeRegionList local_cleanup_list("Local Cleanup List");
1922     HRRSCleanupTask hrrs_cleanup_task;
1923     G1NoteEndOfConcMarkClosure g1_note_end(_g1h, &local_cleanup_list,
1924                                            &hrrs_cleanup_task);
1925     if (G1CollectedHeap::use_parallel_gc_threads()) {
1926       _g1h->heap_region_par_iterate_chunked(&g1_note_end, worker_id,
1927                                             _g1h->workers()->active_workers(),
1928                                             HeapRegion::NoteEndClaimValue);
1929     } else {
1930       _g1h->heap_region_iterate(&g1_note_end);
1931     }
1932     assert(g1_note_end.complete(), "Shouldn't have yielded!");
1933 
1934     // Now update the lists
1935     _g1h->remove_from_old_sets(g1_note_end.old_regions_removed(), g1_note_end.humongous_regions_removed());
1936     {
1937       MutexLockerEx x(ParGCRareEvent_lock, Mutex::_no_safepoint_check_flag);
1938       _g1h->decrement_summary_bytes(g1_note_end.freed_bytes());
1939       _max_live_bytes += g1_note_end.max_live_bytes();
1940       _freed_bytes += g1_note_end.freed_bytes();
1941 
1942       // If we iterate over the global cleanup list at the end of
1943       // cleanup to do this printing we will not guarantee to only
1944       // generate output for the newly-reclaimed regions (the list
1945       // might not be empty at the beginning of cleanup; we might
1946       // still be working on its previous contents). So we do the
1947       // printing here, before we append the new regions to the global
1948       // cleanup list.


1954           HeapRegion* hr = iter.get_next();
1955           hr_printer->cleanup(hr);
1956         }
1957       }
1958 
1959       _cleanup_list->add_ordered(&local_cleanup_list);
1960       assert(local_cleanup_list.is_empty(), "post-condition");
1961 
1962       HeapRegionRemSet::finish_cleanup_task(&hrrs_cleanup_task);
1963     }
1964   }
1965   size_t max_live_bytes() { return _max_live_bytes; }
1966   size_t freed_bytes() { return _freed_bytes; }
1967 };
1968 
1969 class G1ParScrubRemSetTask: public AbstractGangTask {
1970 protected:
1971   G1RemSet* _g1rs;
1972   BitMap* _region_bm;
1973   BitMap* _card_bm;


1974 public:
1975   G1ParScrubRemSetTask(G1CollectedHeap* g1h,
1976                        BitMap* region_bm, BitMap* card_bm) :
1977     AbstractGangTask("G1 ScrubRS"), _g1rs(g1h->g1_rem_set()),
1978     _region_bm(region_bm), _card_bm(card_bm) { }


1979 
1980   void work(uint worker_id) {
1981     if (G1CollectedHeap::use_parallel_gc_threads()) {
1982       _g1rs->scrub_par(_region_bm, _card_bm, worker_id,
1983                        HeapRegion::ScrubRemSetClaimValue);
1984     } else {
1985       _g1rs->scrub(_region_bm, _card_bm);
1986     }
1987   }
1988 
1989 };
1990 
1991 void ConcurrentMark::cleanup() {
1992   // world is stopped at this checkpoint
1993   assert(SafepointSynchronize::is_at_safepoint(),
1994          "world should be stopped");
1995   G1CollectedHeap* g1h = G1CollectedHeap::heap();
1996 
1997   // If a full collection has happened, we shouldn't do this.
1998   if (has_aborted()) {
1999     g1h->set_marking_complete(); // So bitmap clearing isn't confused
2000     return;
2001   }
2002 
2003   g1h->verify_region_sets_optional();


2006     HandleMark hm;  // handle scope
2007     Universe::heap()->prepare_for_verify();
2008     Universe::verify(VerifyOption_G1UsePrevMarking,
2009                      " VerifyDuringGC:(before)");
2010   }
2011   g1h->check_bitmaps("Cleanup Start");
2012 
2013   G1CollectorPolicy* g1p = G1CollectedHeap::heap()->g1_policy();
2014   g1p->record_concurrent_mark_cleanup_start();
2015 
2016   double start = os::elapsedTime();
2017 
2018   HeapRegionRemSet::reset_for_cleanup_tasks();
2019 
2020   uint n_workers;
2021 
2022   // Do counting once more with the world stopped for good measure.
2023   G1ParFinalCountTask g1_par_count_task(g1h, &_region_bm, &_card_bm);
2024 
2025   if (G1CollectedHeap::use_parallel_gc_threads()) {
2026    assert(g1h->check_heap_region_claim_values(HeapRegion::InitialClaimValue),
2027            "sanity check");
2028 
2029     g1h->set_par_threads();
2030     n_workers = g1h->n_par_threads();
2031     assert(g1h->n_par_threads() == n_workers,
2032            "Should not have been reset");
2033     g1h->workers()->run_task(&g1_par_count_task);
2034     // Done with the parallel phase so reset to 0.
2035     g1h->set_par_threads(0);
2036 
2037     assert(g1h->check_heap_region_claim_values(HeapRegion::FinalCountClaimValue),
2038            "sanity check");
2039   } else {
2040     n_workers = 1;
2041     g1_par_count_task.work(0);
2042   }
2043 
2044   if (VerifyDuringGC) {
2045     // Verify that the counting data accumulated during marking matches
2046     // that calculated by walking the marking bitmap.
2047 
2048     // Bitmaps to hold expected values
2049     BitMap expected_region_bm(_region_bm.size(), true);
2050     BitMap expected_card_bm(_card_bm.size(), true);
2051 
2052     G1ParVerifyFinalCountTask g1_par_verify_task(g1h,
2053                                                  &_region_bm,
2054                                                  &_card_bm,
2055                                                  &expected_region_bm,
2056                                                  &expected_card_bm);
2057 
2058     if (G1CollectedHeap::use_parallel_gc_threads()) {
2059       g1h->set_par_threads((int)n_workers);
2060       g1h->workers()->run_task(&g1_par_verify_task);
2061       // Done with the parallel phase so reset to 0.
2062       g1h->set_par_threads(0);
2063 
2064       assert(g1h->check_heap_region_claim_values(HeapRegion::VerifyCountClaimValue),
2065              "sanity check");
2066     } else {
2067       g1_par_verify_task.work(0);
2068     }
2069 
2070     guarantee(g1_par_verify_task.failures() == 0, "Unexpected accounting failures");
2071   }
2072 
2073   size_t start_used_bytes = g1h->used();
2074   g1h->set_marking_complete();
2075 
2076   double count_end = os::elapsedTime();
2077   double this_final_counting_time = (count_end - start);
2078   _total_counting_time += this_final_counting_time;
2079 
2080   if (G1PrintRegionLivenessInfo) {
2081     G1PrintRegionLivenessInfoClosure cl(gclog_or_tty, "Post-Marking");
2082     _g1h->heap_region_iterate(&cl);
2083   }
2084 
2085   // Install newly created mark bitMap as "prev".
2086   swapMarkBitMaps();
2087 
2088   g1h->reset_gc_time_stamp();
2089 
2090   // Note end of marking in all heap regions.
2091   G1ParNoteEndTask g1_par_note_end_task(g1h, &_cleanup_list);
2092   if (G1CollectedHeap::use_parallel_gc_threads()) {
2093     g1h->set_par_threads((int)n_workers);
2094     g1h->workers()->run_task(&g1_par_note_end_task);
2095     g1h->set_par_threads(0);
2096 
2097     assert(g1h->check_heap_region_claim_values(HeapRegion::NoteEndClaimValue),
2098            "sanity check");
2099   } else {
2100     g1_par_note_end_task.work(0);
2101   }
2102   g1h->check_gc_time_stamps();
2103 
2104   if (!cleanup_list_is_empty()) {
2105     // The cleanup list is not empty, so we'll have to process it
2106     // concurrently. Notify anyone else that might be wanting free
2107     // regions that there will be more free regions coming soon.
2108     g1h->set_free_regions_coming();
2109   }
2110 
2111   // call below, since it affects the metric by which we sort the heap
2112   // regions.
2113   if (G1ScrubRemSets) {
2114     double rs_scrub_start = os::elapsedTime();
2115     G1ParScrubRemSetTask g1_par_scrub_rs_task(g1h, &_region_bm, &_card_bm);
2116     if (G1CollectedHeap::use_parallel_gc_threads()) {
2117       g1h->set_par_threads((int)n_workers);
2118       g1h->workers()->run_task(&g1_par_scrub_rs_task);
2119       g1h->set_par_threads(0);
2120 
2121       assert(g1h->check_heap_region_claim_values(
2122                                             HeapRegion::ScrubRemSetClaimValue),
2123              "sanity check");
2124     } else {
2125       g1_par_scrub_rs_task.work(0);
2126     }
2127 
2128     double rs_scrub_end = os::elapsedTime();
2129     double this_rs_scrub_time = (rs_scrub_end - rs_scrub_start);
2130     _total_rs_scrub_time += this_rs_scrub_time;
2131   }
2132 
2133   // this will also free any regions totally full of garbage objects,
2134   // and sort the regions.
2135   g1h->g1_policy()->record_concurrent_mark_cleanup_end((int)n_workers);
2136 
2137   // Statistics.
2138   double end = os::elapsedTime();
2139   _cleanup_times.add((end - start) * 1000.0);
2140 
2141   if (G1Log::fine()) {
2142     g1h->print_size_transition(gclog_or_tty,
2143                                start_used_bytes,


3270         BitMap::idx_t next_idx = MIN2(scan_idx + 1, limit_idx);
3271         scan_idx = task_card_bm->get_next_one_offset(next_idx, limit_idx);
3272       }
3273     }
3274 
3275     // Update the marked bytes for this region.
3276     hr->add_to_marked_bytes(marked_bytes);
3277 
3278     // Next heap region
3279     return false;
3280   }
3281 };
3282 
3283 class G1AggregateCountDataTask: public AbstractGangTask {
3284 protected:
3285   G1CollectedHeap* _g1h;
3286   ConcurrentMark* _cm;
3287   BitMap* _cm_card_bm;
3288   uint _max_worker_id;
3289   int _active_workers;

3290 
3291 public:
3292   G1AggregateCountDataTask(G1CollectedHeap* g1h,
3293                            ConcurrentMark* cm,
3294                            BitMap* cm_card_bm,
3295                            uint max_worker_id,
3296                            int n_workers) :
3297     AbstractGangTask("Count Aggregation"),
3298     _g1h(g1h), _cm(cm), _cm_card_bm(cm_card_bm),
3299     _max_worker_id(max_worker_id),
3300     _active_workers(n_workers) { }




3301 
3302   void work(uint worker_id) {
3303     AggregateCountDataHRClosure cl(_g1h, _cm_card_bm, _max_worker_id);
3304 
3305     if (G1CollectedHeap::use_parallel_gc_threads()) {
3306       _g1h->heap_region_par_iterate_chunked(&cl, worker_id,
3307                                             _active_workers,
3308                                             HeapRegion::AggregateCountClaimValue);
3309     } else {
3310       _g1h->heap_region_iterate(&cl);
3311     }
3312   }
3313 };
3314 
3315 
3316 void ConcurrentMark::aggregate_count_data() {
3317   int n_workers = (G1CollectedHeap::use_parallel_gc_threads() ?
3318                         _g1h->workers()->active_workers() :
3319                         1);
3320 
3321   G1AggregateCountDataTask g1_par_agg_task(_g1h, this, &_card_bm,
3322                                            _max_worker_id, n_workers);
3323 
3324   if (G1CollectedHeap::use_parallel_gc_threads()) {
3325     assert(_g1h->check_heap_region_claim_values(HeapRegion::InitialClaimValue),
3326            "sanity check");
3327     _g1h->set_par_threads(n_workers);
3328     _g1h->workers()->run_task(&g1_par_agg_task);
3329     _g1h->set_par_threads(0);
3330 
3331     assert(_g1h->check_heap_region_claim_values(HeapRegion::AggregateCountClaimValue),
3332            "sanity check");
3333     _g1h->reset_heap_region_claim_values();
3334   } else {
3335     g1_par_agg_task.work(0);
3336   }
3337 }
3338 
3339 // Clear the per-worker arrays used to store the per-region counting data
3340 void ConcurrentMark::clear_all_count_data() {
3341   // Clear the global card bitmap - it will be filled during
3342   // liveness count aggregation (during remark) and the
3343   // final counting task.
3344   _card_bm.clear();
3345 
3346   // Clear the global region bitmap - it will be filled as part
3347   // of the final counting task.
3348   _region_bm.clear();
3349 
3350   uint max_regions = _g1h->max_regions();
3351   assert(_max_worker_id > 0, "uninitialized");
3352 
3353   for (uint i = 0; i < _max_worker_id; i += 1) {



1646     // find the first violating region by returning true.
1647     return false;
1648   }
1649 };
1650 
1651 class G1ParVerifyFinalCountTask: public AbstractGangTask {
1652 protected:
1653   G1CollectedHeap* _g1h;
1654   ConcurrentMark* _cm;
1655   BitMap* _actual_region_bm;
1656   BitMap* _actual_card_bm;
1657 
1658   uint    _n_workers;
1659 
1660   BitMap* _expected_region_bm;
1661   BitMap* _expected_card_bm;
1662 
1663   int  _failures;
1664   bool _verbose;
1665 
1666   HeapRegionClaimer _hrclaimer;
1667 
1668 public:
1669   G1ParVerifyFinalCountTask(G1CollectedHeap* g1h,
1670                             BitMap* region_bm, BitMap* card_bm,
1671                             BitMap* expected_region_bm, BitMap* expected_card_bm)
1672     : AbstractGangTask("G1 verify final counting"),
1673       _g1h(g1h), _cm(_g1h->concurrent_mark()),
1674       _actual_region_bm(region_bm), _actual_card_bm(card_bm),
1675       _expected_region_bm(expected_region_bm), _expected_card_bm(expected_card_bm),
1676       _failures(0), _verbose(false),
1677       _n_workers(0) {
1678     assert(VerifyDuringGC, "don't call this otherwise");
1679 
1680     // Use the value already set as the number of active threads
1681     // in the call to run_task().
1682     if (G1CollectedHeap::use_parallel_gc_threads()) {
1683       assert( _g1h->workers()->active_workers() > 0,
1684         "Should have been previously set");
1685       _n_workers = _g1h->workers()->active_workers();
1686       _hrclaimer.initialize(_n_workers);
1687     } else {
1688       _n_workers = 1;
1689     }
1690 
1691     assert(_expected_card_bm->size() == _actual_card_bm->size(), "sanity");
1692     assert(_expected_region_bm->size() == _actual_region_bm->size(), "sanity");
1693 
1694     _verbose = _cm->verbose_medium();
1695   }
1696 
1697   void work(uint worker_id) {
1698     assert(worker_id < _n_workers, "invariant");
1699 
1700     VerifyLiveObjectDataHRClosure verify_cl(_g1h,
1701                                             _actual_region_bm, _actual_card_bm,
1702                                             _expected_region_bm,
1703                                             _expected_card_bm,
1704                                             _verbose);
1705 
1706     if (G1CollectedHeap::use_parallel_gc_threads()) {
1707       _g1h->heap_region_par_iterate(&verify_cl, worker_id, &_hrclaimer);



1708     } else {
1709       _g1h->heap_region_iterate(&verify_cl);
1710     }
1711 
1712     Atomic::add(verify_cl.failures(), &_failures);
1713   }
1714 
1715   int failures() const { return _failures; }
1716 };
1717 
1718 // Closure that finalizes the liveness counting data.
1719 // Used during the cleanup pause.
1720 // Sets the bits corresponding to the interval [NTAMS, top]
1721 // (which contains the implicitly live objects) in the
1722 // card liveness bitmap. Also sets the bit for each region,
1723 // containing live data, in the region liveness bitmap.
1724 
1725 class FinalCountDataUpdateClosure: public CMCountDataClosureBase {
1726  public:
1727   FinalCountDataUpdateClosure(G1CollectedHeap* g1h,


1776       _cm->set_card_bitmap_range(_card_bm, start_idx, end_idx, true /* is_par */);
1777     }
1778 
1779     // Set the bit for the region if it contains live data
1780     if (hr->next_marked_bytes() > 0) {
1781       set_bit_for_region(hr);
1782     }
1783 
1784     return false;
1785   }
1786 };
1787 
1788 class G1ParFinalCountTask: public AbstractGangTask {
1789 protected:
1790   G1CollectedHeap* _g1h;
1791   ConcurrentMark* _cm;
1792   BitMap* _actual_region_bm;
1793   BitMap* _actual_card_bm;
1794 
1795   uint    _n_workers;
1796   HeapRegionClaimer _hrclaimer;
1797 
1798 public:
1799   G1ParFinalCountTask(G1CollectedHeap* g1h, BitMap* region_bm, BitMap* card_bm)
1800     : AbstractGangTask("G1 final counting"),
1801       _g1h(g1h), _cm(_g1h->concurrent_mark()),
1802       _actual_region_bm(region_bm), _actual_card_bm(card_bm),
1803       _n_workers(0) {
1804     // Use the value already set as the number of active threads
1805     // in the call to run_task().
1806     if (G1CollectedHeap::use_parallel_gc_threads()) {
1807       assert( _g1h->workers()->active_workers() > 0,
1808         "Should have been previously set");
1809       _n_workers = _g1h->workers()->active_workers();
1810       _hrclaimer.initialize(_n_workers);
1811     } else {
1812       _n_workers = 1;
1813     }
1814   }
1815 
1816   void work(uint worker_id) {
1817     assert(worker_id < _n_workers, "invariant");
1818 
1819     FinalCountDataUpdateClosure final_update_cl(_g1h,
1820                                                 _actual_region_bm,
1821                                                 _actual_card_bm);
1822 
1823     if (G1CollectedHeap::use_parallel_gc_threads()) {
1824       _g1h->heap_region_par_iterate(&final_update_cl, worker_id, &_hrclaimer);



1825     } else {
1826       _g1h->heap_region_iterate(&final_update_cl);
1827     }
1828   }
1829 };
1830 
1831 class G1ParNoteEndTask;
1832 
1833 class G1NoteEndOfConcMarkClosure : public HeapRegionClosure {
1834   G1CollectedHeap* _g1;
1835   size_t _max_live_bytes;
1836   uint _regions_claimed;
1837   size_t _freed_bytes;
1838   FreeRegionList* _local_cleanup_list;
1839   HeapRegionSetCount _old_regions_removed;
1840   HeapRegionSetCount _humongous_regions_removed;
1841   HRRSCleanupTask* _hrrs_cleanup_task;
1842   double _claimed_region_time;
1843   double _max_region_time;
1844 


1891     if (region_time > _max_region_time) {
1892       _max_region_time = region_time;
1893     }
1894     return false;
1895   }
1896 
1897   size_t max_live_bytes() { return _max_live_bytes; }
1898   uint regions_claimed() { return _regions_claimed; }
1899   double claimed_region_time_sec() { return _claimed_region_time; }
1900   double max_region_time_sec() { return _max_region_time; }
1901 };
1902 
1903 class G1ParNoteEndTask: public AbstractGangTask {
1904   friend class G1NoteEndOfConcMarkClosure;
1905 
1906 protected:
1907   G1CollectedHeap* _g1h;
1908   size_t _max_live_bytes;
1909   size_t _freed_bytes;
1910   FreeRegionList* _cleanup_list;
1911   HeapRegionClaimer _hrclaimer;
1912 
1913 public:
1914   G1ParNoteEndTask(G1CollectedHeap* g1h, FreeRegionList* cleanup_list, uint n_workers) :
1915       AbstractGangTask("G1 note end"), _g1h(g1h), _max_live_bytes(0), _freed_bytes(0), _cleanup_list(cleanup_list) {
1916     if (G1CollectedHeap::use_parallel_gc_threads()) {
1917       _hrclaimer.initialize(n_workers);
1918     }
1919   }
1920 
1921   void work(uint worker_id) {
1922     double start = os::elapsedTime();
1923     FreeRegionList local_cleanup_list("Local Cleanup List");
1924     HRRSCleanupTask hrrs_cleanup_task;
1925     G1NoteEndOfConcMarkClosure g1_note_end(_g1h, &local_cleanup_list,
1926                                            &hrrs_cleanup_task);
1927     if (G1CollectedHeap::use_parallel_gc_threads()) {
1928       _g1h->heap_region_par_iterate(&g1_note_end, worker_id, &_hrclaimer);


1929     } else {
1930       _g1h->heap_region_iterate(&g1_note_end);
1931     }
1932     assert(g1_note_end.complete(), "Shouldn't have yielded!");
1933 
1934     // Now update the lists
1935     _g1h->remove_from_old_sets(g1_note_end.old_regions_removed(), g1_note_end.humongous_regions_removed());
1936     {
1937       MutexLockerEx x(ParGCRareEvent_lock, Mutex::_no_safepoint_check_flag);
1938       _g1h->decrement_summary_bytes(g1_note_end.freed_bytes());
1939       _max_live_bytes += g1_note_end.max_live_bytes();
1940       _freed_bytes += g1_note_end.freed_bytes();
1941 
1942       // If we iterate over the global cleanup list at the end of
1943       // cleanup to do this printing we will not guarantee to only
1944       // generate output for the newly-reclaimed regions (the list
1945       // might not be empty at the beginning of cleanup; we might
1946       // still be working on its previous contents). So we do the
1947       // printing here, before we append the new regions to the global
1948       // cleanup list.


1954           HeapRegion* hr = iter.get_next();
1955           hr_printer->cleanup(hr);
1956         }
1957       }
1958 
1959       _cleanup_list->add_ordered(&local_cleanup_list);
1960       assert(local_cleanup_list.is_empty(), "post-condition");
1961 
1962       HeapRegionRemSet::finish_cleanup_task(&hrrs_cleanup_task);
1963     }
1964   }
1965   size_t max_live_bytes() { return _max_live_bytes; }
1966   size_t freed_bytes() { return _freed_bytes; }
1967 };
1968 
1969 class G1ParScrubRemSetTask: public AbstractGangTask {
1970 protected:
1971   G1RemSet* _g1rs;
1972   BitMap* _region_bm;
1973   BitMap* _card_bm;
1974   HeapRegionClaimer _hrclaimer;
1975 
1976 public:
1977   G1ParScrubRemSetTask(G1CollectedHeap* g1h, BitMap* region_bm, BitMap* card_bm, uint n_workers) :
1978       AbstractGangTask("G1 ScrubRS"), _g1rs(g1h->g1_rem_set()), _region_bm(region_bm), _card_bm(card_bm) {
1979     if (G1CollectedHeap::use_parallel_gc_threads()) {
1980       _hrclaimer.initialize(n_workers);
1981     }
1982   }
1983 
1984   void work(uint worker_id) {
1985     if (G1CollectedHeap::use_parallel_gc_threads()) {
1986       _g1rs->scrub_par(_region_bm, _card_bm, worker_id, &_hrclaimer);

1987     } else {
1988       _g1rs->scrub(_region_bm, _card_bm);
1989     }
1990   }
1991 
1992 };
1993 
1994 void ConcurrentMark::cleanup() {
1995   // world is stopped at this checkpoint
1996   assert(SafepointSynchronize::is_at_safepoint(),
1997          "world should be stopped");
1998   G1CollectedHeap* g1h = G1CollectedHeap::heap();
1999 
2000   // If a full collection has happened, we shouldn't do this.
2001   if (has_aborted()) {
2002     g1h->set_marking_complete(); // So bitmap clearing isn't confused
2003     return;
2004   }
2005 
2006   g1h->verify_region_sets_optional();


2009     HandleMark hm;  // handle scope
2010     Universe::heap()->prepare_for_verify();
2011     Universe::verify(VerifyOption_G1UsePrevMarking,
2012                      " VerifyDuringGC:(before)");
2013   }
2014   g1h->check_bitmaps("Cleanup Start");
2015 
2016   G1CollectorPolicy* g1p = G1CollectedHeap::heap()->g1_policy();
2017   g1p->record_concurrent_mark_cleanup_start();
2018 
2019   double start = os::elapsedTime();
2020 
2021   HeapRegionRemSet::reset_for_cleanup_tasks();
2022 
2023   uint n_workers;
2024 
2025   // Do counting once more with the world stopped for good measure.
2026   G1ParFinalCountTask g1_par_count_task(g1h, &_region_bm, &_card_bm);
2027 
2028   if (G1CollectedHeap::use_parallel_gc_threads()) {



2029     g1h->set_par_threads();
2030     n_workers = g1h->n_par_threads();
2031     assert(g1h->n_par_threads() == n_workers,
2032            "Should not have been reset");
2033     g1h->workers()->run_task(&g1_par_count_task);
2034     // Done with the parallel phase so reset to 0.
2035     g1h->set_par_threads(0);



2036   } else {
2037     n_workers = 1;
2038     g1_par_count_task.work(0);
2039   }
2040 
2041   if (VerifyDuringGC) {
2042     // Verify that the counting data accumulated during marking matches
2043     // that calculated by walking the marking bitmap.
2044 
2045     // Bitmaps to hold expected values
2046     BitMap expected_region_bm(_region_bm.size(), true);
2047     BitMap expected_card_bm(_card_bm.size(), true);
2048 
2049     G1ParVerifyFinalCountTask g1_par_verify_task(g1h,
2050                                                  &_region_bm,
2051                                                  &_card_bm,
2052                                                  &expected_region_bm,
2053                                                  &expected_card_bm);
2054 
2055     if (G1CollectedHeap::use_parallel_gc_threads()) {
2056       g1h->set_par_threads((int)n_workers);
2057       g1h->workers()->run_task(&g1_par_verify_task);
2058       // Done with the parallel phase so reset to 0.
2059       g1h->set_par_threads(0);



2060     } else {
2061       g1_par_verify_task.work(0);
2062     }
2063 
2064     guarantee(g1_par_verify_task.failures() == 0, "Unexpected accounting failures");
2065   }
2066 
2067   size_t start_used_bytes = g1h->used();
2068   g1h->set_marking_complete();
2069 
2070   double count_end = os::elapsedTime();
2071   double this_final_counting_time = (count_end - start);
2072   _total_counting_time += this_final_counting_time;
2073 
2074   if (G1PrintRegionLivenessInfo) {
2075     G1PrintRegionLivenessInfoClosure cl(gclog_or_tty, "Post-Marking");
2076     _g1h->heap_region_iterate(&cl);
2077   }
2078 
2079   // Install newly created mark bitMap as "prev".
2080   swapMarkBitMaps();
2081 
2082   g1h->reset_gc_time_stamp();
2083 
2084   // Note end of marking in all heap regions.
2085   G1ParNoteEndTask g1_par_note_end_task(g1h, &_cleanup_list, n_workers);
2086   if (G1CollectedHeap::use_parallel_gc_threads()) {
2087     g1h->set_par_threads((int)n_workers);
2088     g1h->workers()->run_task(&g1_par_note_end_task);
2089     g1h->set_par_threads(0);



2090   } else {
2091     g1_par_note_end_task.work(0);
2092   }
2093   g1h->check_gc_time_stamps();
2094 
2095   if (!cleanup_list_is_empty()) {
2096     // The cleanup list is not empty, so we'll have to process it
2097     // concurrently. Notify anyone else that might be wanting free
2098     // regions that there will be more free regions coming soon.
2099     g1h->set_free_regions_coming();
2100   }
2101 
2102   // call below, since it affects the metric by which we sort the heap
2103   // regions.
2104   if (G1ScrubRemSets) {
2105     double rs_scrub_start = os::elapsedTime();
2106     G1ParScrubRemSetTask g1_par_scrub_rs_task(g1h, &_region_bm, &_card_bm, n_workers);
2107     if (G1CollectedHeap::use_parallel_gc_threads()) {
2108       g1h->set_par_threads((int)n_workers);
2109       g1h->workers()->run_task(&g1_par_scrub_rs_task);
2110       g1h->set_par_threads(0);




2111     } else {
2112       g1_par_scrub_rs_task.work(0);
2113     }
2114 
2115     double rs_scrub_end = os::elapsedTime();
2116     double this_rs_scrub_time = (rs_scrub_end - rs_scrub_start);
2117     _total_rs_scrub_time += this_rs_scrub_time;
2118   }
2119 
2120   // this will also free any regions totally full of garbage objects,
2121   // and sort the regions.
2122   g1h->g1_policy()->record_concurrent_mark_cleanup_end((int)n_workers);
2123 
2124   // Statistics.
2125   double end = os::elapsedTime();
2126   _cleanup_times.add((end - start) * 1000.0);
2127 
2128   if (G1Log::fine()) {
2129     g1h->print_size_transition(gclog_or_tty,
2130                                start_used_bytes,


3257         BitMap::idx_t next_idx = MIN2(scan_idx + 1, limit_idx);
3258         scan_idx = task_card_bm->get_next_one_offset(next_idx, limit_idx);
3259       }
3260     }
3261 
3262     // Update the marked bytes for this region.
3263     hr->add_to_marked_bytes(marked_bytes);
3264 
3265     // Next heap region
3266     return false;
3267   }
3268 };
3269 
3270 class G1AggregateCountDataTask: public AbstractGangTask {
3271 protected:
3272   G1CollectedHeap* _g1h;
3273   ConcurrentMark* _cm;
3274   BitMap* _cm_card_bm;
3275   uint _max_worker_id;
3276   int _active_workers;
3277   HeapRegionClaimer _hrclaimer;
3278 
3279 public:
3280   G1AggregateCountDataTask(G1CollectedHeap* g1h,
3281                            ConcurrentMark* cm,
3282                            BitMap* cm_card_bm,
3283                            uint max_worker_id,
3284                            int n_workers) :
3285       AbstractGangTask("Count Aggregation"),
3286       _g1h(g1h), _cm(cm), _cm_card_bm(cm_card_bm),
3287       _max_worker_id(max_worker_id),
3288       _active_workers(n_workers) {
3289     if (G1CollectedHeap::use_parallel_gc_threads()) {
3290       _hrclaimer.initialize(_active_workers);
3291     }
3292   }
3293 
3294   void work(uint worker_id) {
3295     AggregateCountDataHRClosure cl(_g1h, _cm_card_bm, _max_worker_id);
3296 
3297     if (G1CollectedHeap::use_parallel_gc_threads()) {
3298       _g1h->heap_region_par_iterate(&cl, worker_id, &_hrclaimer);


3299     } else {
3300       _g1h->heap_region_iterate(&cl);
3301     }
3302   }
3303 };
3304 
3305 
3306 void ConcurrentMark::aggregate_count_data() {
3307   int n_workers = (G1CollectedHeap::use_parallel_gc_threads() ?
3308                         _g1h->workers()->active_workers() :
3309                         1);
3310 
3311   G1AggregateCountDataTask g1_par_agg_task(_g1h, this, &_card_bm,
3312                                            _max_worker_id, n_workers);
3313 
3314   if (G1CollectedHeap::use_parallel_gc_threads()) {


3315     _g1h->set_par_threads(n_workers);
3316     _g1h->workers()->run_task(&g1_par_agg_task);
3317     _g1h->set_par_threads(0);




3318   } else {
3319     g1_par_agg_task.work(0);
3320   }
3321 }
3322 
3323 // Clear the per-worker arrays used to store the per-region counting data
3324 void ConcurrentMark::clear_all_count_data() {
3325   // Clear the global card bitmap - it will be filled during
3326   // liveness count aggregation (during remark) and the
3327   // final counting task.
3328   _card_bm.clear();
3329 
3330   // Clear the global region bitmap - it will be filled as part
3331   // of the final counting task.
3332   _region_bm.clear();
3333 
3334   uint max_regions = _g1h->max_regions();
3335   assert(_max_worker_id > 0, "uninitialized");
3336 
3337   for (uint i = 0; i < _max_worker_id; i += 1) {