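// Adapts a SpaceClosure to the HeapRegionClosure protocol: every
// HeapRegion is a Space, so the wrapped closure can be applied to each
// region in turn. Returning false keeps the iteration going.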
class SpaceClosureRegionClosure: public HeapRegionClosure {
  SpaceClosure* _cl;
public:
  SpaceClosureRegionClosure(SpaceClosure* cl) : _cl(cl) {}
  bool doHeapRegion(HeapRegion* r) {
    _cl->do_space(r);
    return false;
  }
};

void G1CollectedHeap::space_iterate(SpaceClosure* cl) {
  SpaceClosureRegionClosure blk(cl);
  heap_region_iterate(&blk);
}

void G1CollectedHeap::heap_region_iterate(HeapRegionClosure* cl) const {
  _hrs.iterate(cl);
}

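// A minimal usage sketch (hypothetical, not part of this file): a caller
// counts regions by subclassing HeapRegionClosure and handing it to
// heap_region_iterate(). "CountRegionsClosure" and "g1h" are illustrative
// names only.
//
//   class CountRegionsClosure : public HeapRegionClosure {
//     uint _count;
//   public:
//     CountRegionsClosure() : _count(0) { }
//     bool doHeapRegion(HeapRegion* r) {
//       _count += 1;      // visit every region
//       return false;     // false == continue the iteration
//     }
//     uint count() const { return _count; }
//   };
//
//   CountRegionsClosure cl;
//   g1h->heap_region_iterate(&cl);
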
uint G1CollectedHeap::start_region_for_worker(uint worker_i,
                                              uint num_workers,
                                              uint num_regions) const {
  assert(UseDynamicNumberOfGCThreads ||
         num_workers == workers()->total_workers(),
         "Non dynamic should use fixed number of workers");
  return num_regions * worker_i / num_workers;
}
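// For example, with num_regions == 10 and num_workers == 4 the integer
// division gives starting indices
//   10 * 0 / 4 = 0,  10 * 1 / 4 = 2,  10 * 2 / 4 = 5,  10 * 3 / 4 = 7,
// spreading the workers' starting points roughly evenly across the heap.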

void G1CollectedHeap::heap_region_par_iterate_chunked(HeapRegionClosure* blk,
                                                      uint worker_id,
                                                      uint num_workers,
                                                      jint claim_value) const {
  const uint num_regions = n_regions();

  const uint start_index = start_region_for_worker(worker_id, num_workers,
                                                   num_regions);

  // Every worker will actually look at all regions, skipping over regions
  // that are currently not committed. This also (potentially) iterates over
  // regions newly allocated during GC, which is no problem beyond some
  // extra work.
  for (uint count = 0; count < num_regions; count++) {
    const uint index = (start_index + count) % num_regions;
    assert(index < num_regions, "sanity");

    HeapRegion* r = region_at(index);
    // We'll ignore "continues humongous" regions (we'll process them
    // when we come across their corresponding "starts humongous"
    // region) and regions already claimed.
    if (r->claim_value() == claim_value || r->continuesHumongous()) {
      continue;
    }
    // OK, try to claim it
    if (!r->claimHeapRegion(claim_value)) {
      continue;
    }
    // Success!
    if (r->startsHumongous()) {
      // If the region is "starts humongous", we'll apply the closure to
      // its "continues humongous" regions before the region itself. The
      // order is important: calling the closure on the "starts humongous"
      // region might de-allocate and clear all its "continues humongous"
      // regions and, as a result, we might end up processing them twice.
      // So, we'll do them first (note: most closures will ignore them
      // anyway) and then we'll do the "starts humongous" region.
      for (uint ch_index = index + 1; ch_index < index + r->region_num(); ch_index++) {
        HeapRegion* chr = region_at(ch_index);

        assert(chr->continuesHumongous(), "Must be humongous region");
        assert(chr->humongous_start_region() == r,
               err_msg("Must work on humongous continuation of the original start region "
                       PTR_FORMAT ", but is " PTR_FORMAT, p2i(r), p2i(chr)));
        assert(chr->claim_value() != claim_value,
               "Must not have been claimed yet because claiming of the humongous "
               "continuation first claims the start region");

        bool claim_result = chr->claimHeapRegion(claim_value);
        // We should always be able to claim it; no one else should
        // be trying to claim this region.
        guarantee(claim_result,
                  "We should always be able to claim the continuesHumongous part "
                  "of the humongous object");

        bool res2 = blk->doHeapRegion(chr);
        if (res2) {
          return;
        }

        // Right now, this holds (i.e., no closure that actually
        // does something with "continues humongous" regions
        // clears them). We might have to weaken it in the future,
        // but let's leave these two asserts here for extra safety.
        assert(chr->continuesHumongous(), "should still be the case");
        assert(chr->humongous_start_region() == r, "sanity");
      }
    }

    bool res = blk->doHeapRegion(r);
    if (res) {
      return;
    }
  }
}
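
// A sketch of how a parallel phase might drive the chunked iteration
// (hypothetical: "MyRegionClosure" and "ParIterateTask" are illustrative
// names, though AbstractGangTask and its work(uint worker_id) hook are the
// regular HotSpot work-gang API):
//
//   class ParIterateTask : public AbstractGangTask {
//     G1CollectedHeap* _g1h;
//     uint             _num_workers;
//   public:
//     ParIterateTask(G1CollectedHeap* g1h, uint num_workers)
//       : AbstractGangTask("Par region iteration"),
//         _g1h(g1h), _num_workers(num_workers) { }
//     void work(uint worker_id) {
//       MyRegionClosure cl;
//       _g1h->heap_region_par_iterate_chunked(&cl, worker_id, _num_workers,
//                                             HeapRegion::ParVerifyClaimValue);
//     }
//   };
//
// After the parallel phase finishes, the caller would reset the claim
// values (see reset_heap_region_claim_values() below) so that a later
// phase can claim the regions again.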

class ResetClaimValuesClosure: public HeapRegionClosure {
public:
  bool doHeapRegion(HeapRegion* r) {
    r->set_claim_value(HeapRegion::InitialClaimValue);
    return false;
  }
};
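
// The claim values act as a hand-shake between parallel phases: each phase
// claims regions with its own value, and once the phase completes the values
// are pushed back to InitialClaimValue so that the next phase's
// claimHeapRegion() calls can succeed again.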
void G1CollectedHeap::reset_heap_region_claim_values() {
  ResetClaimValuesClosure blk;
  heap_region_iterate(&blk);
}

void G1CollectedHeap::reset_cset_heap_region_claim_values() {
  ResetClaimValuesClosure blk;
  collection_set_iterate(&blk);
}

      start_ind = (cs_size * (worker_i - 1)) / active_workers;
      result = _worker_cset_start_region[worker_i - 1];
    }

    for (uint i = start_ind; i < end_ind; i++) {
      result = result->next_in_collection_set();
    }
  }

  // Note: the calculated starting heap region may be NULL
  // (when the collection set is empty).
  assert(result == NULL || result->in_collection_set(), "sanity");
  assert(_worker_cset_start_region_time_stamp[worker_i] != gc_time_stamp,
         "should be updated only once per pause");
  _worker_cset_start_region[worker_i] = result;
  OrderAccess::storestore();
  _worker_cset_start_region_time_stamp[worker_i] = gc_time_stamp;
  return result;
}
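
// Publication note: the storestore barrier orders the write of
// _worker_cset_start_region[worker_i] before the write of its time stamp,
// so a thread that observes the fresh time stamp is guaranteed to also
// observe the freshly computed start region rather than a stale one.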

void G1CollectedHeap::collection_set_iterate(HeapRegionClosure* cl) {
  HeapRegion* r = g1_policy()->collection_set();
  while (r != NULL) {
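    // Read the next link before applying the closure; the closure may
    // well mutate the region, plausibly including its collection-set
    // links, so we must not follow next_in_collection_set() afterwards.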
    HeapRegion* next = r->next_in_collection_set();
    if (cl->doHeapRegion(r)) {
      cl->incomplete();
      return;
    }
    r = next;
  }
}
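
// A closure can abort this walk early by returning true from doHeapRegion();
// incomplete() then records that the iteration did not cover the whole
// collection set. A hypothetical sketch:
//
//   class FindFullRegionClosure : public HeapRegionClosure {
//   public:
//     HeapRegion* _found;
//     FindFullRegionClosure() : _found(NULL) { }
//     bool doHeapRegion(HeapRegion* r) {
//       if (r->used() == r->capacity()) {
//         _found = r;
//         return true;    // abort the iteration
//       }
//       return false;
//     }
//   };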

void G1CollectedHeap::collection_set_iterate_from(HeapRegion* r,
                                                  HeapRegionClosure* cl) {
  if (r == NULL) {
    // The CSet is empty so there's nothing to do.
    return;
  }

  assert(r->in_collection_set(),