--- old/src/share/vm/gc_implementation/g1/concurrentMark.cpp 2014-07-07 12:35:46.332150992 +0200
+++ new/src/share/vm/gc_implementation/g1/concurrentMark.cpp 2014-07-07 12:35:46.255148750 +0200
@@ -135,8 +135,48 @@
   return true;
 }
 
+// Closure used for clearing the given mark bitmap.
+class ClearBitmapHRClosure : public HeapRegionClosure {
+ private:
+  ConcurrentMark* _cm;
+  CMBitMap* _bitmap;
+  bool _may_yield;
+ public:
+  ClearBitmapHRClosure(ConcurrentMark* cm, CMBitMap* bitmap, bool yield) : HeapRegionClosure(), _cm(cm), _bitmap(bitmap), _may_yield(yield) {
+    assert(!yield || cm != NULL, "CM must be non-NULL if this closure is expected to yield.");
+  }
+
+  virtual bool doHeapRegion(HeapRegion* r) {
+    size_t const chunk_size_in_words = M / HeapWordSize;
+
+    HeapWord* cur = r->bottom();
+    HeapWord* const end = r->end();
+
+    while (cur < end) {
+      MemRegion mr(cur, MIN2(cur + chunk_size_in_words, end));
+      _bitmap->clearRange(mr);
+
+      cur += chunk_size_in_words;
+
+      // Abort iteration if after yielding the marking has been aborted.
+      if (_may_yield && _cm->do_yield_check() && _cm->has_aborted()) {
+        return true;
+      }
+      // Repeat the asserts from before the start of the closure. We will do them
+      // as asserts here to minimize their overhead on the product. However, we
+      // will have them as guarantees at the beginning / end of the bitmap
+      // clearing to get some checking in the product.
+      assert(!_may_yield || _cm->cmThread()->during_cycle(), "invariant");
+      assert(!_may_yield || !G1CollectedHeap::heap()->mark_in_progress(), "invariant");
+    }
+
+    return false;
+  }
+};
+
 void CMBitMap::clearAll() {
-  _bm.clear();
+  ClearBitmapHRClosure cl(NULL, this, false /* may_yield */);
+  G1CollectedHeap::heap()->heap_region_iterate(&cl);
   return;
 }
 
@@ -844,7 +884,6 @@
 
 void ConcurrentMark::clearNextBitmap() {
   G1CollectedHeap* g1h = G1CollectedHeap::heap();
-  G1CollectorPolicy* g1p = g1h->g1_policy();
 
   // Make sure that the concurrent mark thread looks to still be in
   // the current cycle.
@@ -856,33 +895,14 @@
   // is the case.
   guarantee(!g1h->mark_in_progress(), "invariant");
 
-  // clear the mark bitmap (no grey objects to start with).
-  // We need to do this in chunks and offer to yield in between
-  // each chunk.
-  HeapWord* start = _nextMarkBitMap->startWord();
-  HeapWord* end = _nextMarkBitMap->endWord();
-  HeapWord* cur = start;
-  size_t chunkSize = M;
-  while (cur < end) {
-    HeapWord* next = cur + chunkSize;
-    if (next > end) {
-      next = end;
-    }
-    MemRegion mr(cur,next);
-    _nextMarkBitMap->clearRange(mr);
-    cur = next;
-    do_yield_check();
-
-    // Repeat the asserts from above. We'll do them as asserts here to
-    // minimize their overhead on the product. However, we'll have
-    // them as guarantees at the beginning / end of the bitmap
-    // clearing to get some checking in the product.
-    assert(cmThread()->during_cycle(), "invariant");
-    assert(!g1h->mark_in_progress(), "invariant");
-  }
+  ClearBitmapHRClosure cl(this, _nextMarkBitMap, true /* may_yield */);
+  g1h->heap_region_iterate(&cl);
 
-  // Clear the liveness counting data
-  clear_all_count_data();
+  // Clear the liveness counting data. If the marking has been aborted, the abort()
+  // call already did that.
+  if (cl.complete()) {
+    clear_all_count_data();
+  }
 
   // Repeat the asserts from above.
   guarantee(cmThread()->during_cycle(), "invariant");
--- old/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp 2014-07-07 12:35:46.900167529 +0200
+++ new/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp 2014-07-07 12:35:46.819165170 +0200
@@ -2640,84 +2640,82 @@
   _hrs.iterate(cl);
 }
 
-void
-G1CollectedHeap::heap_region_par_iterate_chunked(HeapRegionClosure* cl,
-                                                 uint worker_id,
-                                                 uint no_of_par_workers,
-                                                 jint claim_value) {
-  const uint regions = n_regions();
-  const uint max_workers = (G1CollectedHeap::use_parallel_gc_threads() ?
-                            no_of_par_workers :
-                            1);
+uint G1CollectedHeap::start_region_for_worker(uint worker_i, uint num_workers, uint num_regions) const {
   assert(UseDynamicNumberOfGCThreads ||
-         no_of_par_workers == workers()->total_workers(),
+         num_workers == workers()->total_workers(),
          "Non dynamic should use fixed number of workers");
-  // try to spread out the starting points of the workers
-  const HeapRegion* start_hr =
-    start_region_for_worker(worker_id, no_of_par_workers);
-  const uint start_index = start_hr->hrs_index();
-
-  // each worker will actually look at all regions
-  for (uint count = 0; count < regions; ++count) {
-    const uint index = (start_index + count) % regions;
-    assert(0 <= index && index < regions, "sanity");
+  return num_regions * worker_i / num_workers;
+}
+
+void G1CollectedHeap::heap_region_par_iterate_chunked(HeapRegionClosure* blk,
+                                                      uint worker_id,
+                                                      uint num_workers,
+                                                      jint claim_value) const {
+  uint _allocated_heapregions_length = n_regions();
+
+  const uint start_index = start_region_for_worker(worker_id, num_workers, _allocated_heapregions_length);
+
+  // Every worker will actually look at all regions, skipping over regions that
+  // are currently not committed.
+  // This also (potentially) iterates over regions newly allocated during GC. This
+  // is no problem except for some extra work.
+  for (uint count = 0; count < _allocated_heapregions_length; count++) {
+    const uint index = (start_index + count) % _allocated_heapregions_length;
+    assert(0 <= index && index < _allocated_heapregions_length, "sanity");
+
     HeapRegion* r = region_at(index);
-    // we'll ignore "continues humongous" regions (we'll process them
+    // We'll ignore "continues humongous" regions (we'll process them
     // when we come across their corresponding "start humongous"
-    // region) and regions already claimed
+    // region) and regions already claimed.
     if (r->claim_value() == claim_value || r->continuesHumongous()) {
       continue;
    }
     // OK, try to claim it
-    if (r->claimHeapRegion(claim_value)) {
-      // success!
-      assert(!r->continuesHumongous(), "sanity");
-      if (r->startsHumongous()) {
-        // If the region is "starts humongous" we'll iterate over its
-        // "continues humongous" first; in fact we'll do them
-        // first. The order is important. In on case, calling the
-        // closure on the "starts humongous" region might de-allocate
-        // and clear all its "continues humongous" regions and, as a
-        // result, we might end up processing them twice. So, we'll do
-        // them first (notice: most closures will ignore them anyway) and
-        // then we'll do the "starts humongous" region.
-        for (uint ch_index = index + 1; ch_index < regions; ++ch_index) {
-          HeapRegion* chr = region_at(ch_index);
-
-          // if the region has already been claimed or it's not
-          // "continues humongous" we're done
-          if (chr->claim_value() == claim_value ||
-              !chr->continuesHumongous()) {
-            break;
-          }
-
-          // No one should have claimed it directly. We can given
-          // that we claimed its "starts humongous" region.
-          assert(chr->claim_value() != claim_value, "sanity");
-          assert(chr->humongous_start_region() == r, "sanity");
-
-          if (chr->claimHeapRegion(claim_value)) {
-            // we should always be able to claim it; no one else should
-            // be trying to claim this region
-
-            bool res2 = cl->doHeapRegion(chr);
-            assert(!res2, "Should not abort");
-
-            // Right now, this holds (i.e., no closure that actually
-            // does something with "continues humongous" regions
-            // clears them). We might have to weaken it in the future,
-            // but let's leave these two asserts here for extra safety.
-            assert(chr->continuesHumongous(), "should still be the case");
-            assert(chr->humongous_start_region() == r, "sanity");
-          } else {
-            guarantee(false, "we should not reach here");
-          }
+    if (!r->claimHeapRegion(claim_value)) {
+      continue;
+    }
+    // Success!
+    if (r->startsHumongous()) {
+      // If the region is "starts humongous" we'll iterate over its
+      // "continues humongous" first; in fact we'll do them
+      // first. The order is important. In one case, calling the
+      // closure on the "starts humongous" region might de-allocate
+      // and clear all its "continues humongous" regions and, as a
+      // result, we might end up processing them twice. So, we'll do
+      // them first (note: most closures will ignore them anyway) and
+      // then we'll do the "starts humongous" region.
+      for (uint ch_index = index + 1; ch_index < index + r->region_num(); ch_index++) {
+        HeapRegion* chr = region_at(ch_index);
+
+        assert(chr->continuesHumongous(), "Must be humongous region");
+        assert(chr->humongous_start_region() == r,
+               err_msg("Must work on humongous continuation of the original start region "
+                       PTR_FORMAT ", but is " PTR_FORMAT, p2i(r), p2i(chr)));
+        assert(chr->claim_value() != claim_value,
+               "Must not have been claimed yet because claiming of humongous continuation first claims the start region");
+
+        bool claim_result = chr->claimHeapRegion(claim_value);
+        // We should always be able to claim it; no one else should
+        // be trying to claim this region.
+        guarantee(claim_result, "We should always be able to claim the continuesHumongous part of the humongous object");
+
+        bool res2 = blk->doHeapRegion(chr);
+        if (res2) {
+          return;
        }
+
+        // Right now, this holds (i.e., no closure that actually
+        // does something with "continues humongous" regions
+        // clears them). We might have to weaken it in the future,
+        // but let's leave these two asserts here for extra safety.
+        assert(chr->continuesHumongous(), "should still be the case");
+        assert(chr->humongous_start_region() == r, "sanity");
      }
+    }
 
-      assert(!r->continuesHumongous(), "sanity");
-      bool res = cl->doHeapRegion(r);
-      assert(!res, "Should not abort");
+    bool res = blk->doHeapRegion(r);
+    if (res) {
+      return;
    }
  }
 }
@@ -2897,17 +2895,6 @@
   return result;
 }
 
-HeapRegion* G1CollectedHeap::start_region_for_worker(uint worker_i,
-                                                     uint no_of_par_workers) {
-  uint worker_num =
-      G1CollectedHeap::use_parallel_gc_threads() ?
-        no_of_par_workers : 1U;
-  assert(UseDynamicNumberOfGCThreads ||
-         no_of_par_workers == workers()->total_workers(),
-         "Non dynamic should use fixed number of workers");
-  const uint start_index = n_regions() * worker_i / worker_num;
-  return region_at(start_index);
-}
-
 void G1CollectedHeap::collection_set_iterate(HeapRegionClosure* cl) {
   HeapRegion* r = g1_policy()->collection_set();
   while (r != NULL) {
--- old/src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp 2014-07-07 12:35:47.522185638 +0200
+++ new/src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp 2014-07-07 12:35:47.445183396 +0200
@@ -1336,22 +1336,22 @@
   inline HeapRegion* region_at(uint index) const;
 
   // Divide the heap region sequence into "chunks" of some size (the number
-  // of regions divided by the number of parallel threads times some
-  // overpartition factor, currently 4). Assumes that this will be called
+  // of regions divided by the number of parallel threads).
+  // Assumes that this will be called
   // in parallel by ParallelGCThreads worker threads with distinct worker
   // ids in the range [0..max(ParallelGCThreads-1, 1)], that all parallel
   // calls will use the same "claim_value", and that that claim value is
   // different from the claim_value of any heap region before the start of
-  // the iteration. Applies "blk->doHeapRegion" to each of the regions, by
+  // the iteration.
+  // Applies "blk->doHeapRegion" to each of the regions, by
   // attempting to claim the first region in each chunk, and, if
   // successful, applying the closure to each region in the chunk (and
   // setting the claim value of the second and subsequent regions of the
-  // chunk.) For now requires that "doHeapRegion" always returns "false",
-  // i.e., that a closure never attempt to abort a traversal.
+  // chunk.)
   void heap_region_par_iterate_chunked(HeapRegionClosure* blk,
                                        uint worker,
                                        uint no_of_par_workers,
-                                       jint claim_value);
+                                       jint claim_value) const;
 
   // It resets all the region claim values to the default.
   void reset_heap_region_claim_values();
@@ -1376,10 +1376,10 @@
   // starting region for iterating over the current collection set.
   HeapRegion* start_cset_region_for_worker(uint worker_i);
 
-  // This is a convenience method that is used by the
-  // HeapRegionIterator classes to calculate the starting region for
-  // each worker so that they do not all start from the same region.
-  HeapRegion* start_region_for_worker(uint worker_i, uint no_of_par_workers);
+  // This is a convenience method that is used to calculate the starting
+  // region index for each worker so that they do not all start from the same
+  // region.
+  uint start_region_for_worker(uint worker_i, uint num_workers, uint num_regions) const;
 
   // Iterate over the regions (if any) in the current collection set.
   void collection_set_iterate(HeapRegionClosure* blk);
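
Not part of the patch above: a minimal standalone sketch of the worker start-index arithmetic that the new start_region_for_worker() introduces (num_regions * worker_i / num_workers), together with the wrap-around walk used by heap_region_par_iterate_chunked(), where every worker visits all regions starting from its own offset. Apart from that formula, all names here are illustrative only; the real code additionally skips already-claimed and "continues humongous" regions via claim values.

// sketch.cpp -- hypothetical demo, not HotSpot code
#include <cstdio>

// Mirrors the patched formula: spread worker starting points evenly
// across the region array.
static unsigned start_region_for_worker(unsigned worker_i,
                                        unsigned num_workers,
                                        unsigned num_regions) {
  return num_regions * worker_i / num_workers;
}

int main() {
  const unsigned num_regions = 10;
  const unsigned num_workers = 4;

  for (unsigned worker = 0; worker < num_workers; worker++) {
    unsigned start = start_region_for_worker(worker, num_workers, num_regions);
    printf("worker %u starts at region %u, visit order:", worker, start);
    // Each worker looks at every region, wrapping around the array;
    // in the real code, claim values prevent two workers from
    // processing the same region twice.
    for (unsigned count = 0; count < num_regions; count++) {
      printf(" %u", (start + count) % num_regions);
    }
    printf("\n");
  }
  return 0;
}

With 10 regions and 4 workers this prints start indices 0, 2, 5 and 7, so the workers begin their wrap-around scans in different parts of the heap instead of all contending for the same first regions.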