  RegionData* const beg = sd.region(beg_region);
  RegionData* const end = sd.addr_to_region_ptr(sd.region_align_up(end_addr));

  // Regions up to new_top() are enqueued if they become available.
  HeapWord* const new_top = _space_info[src_space_id].new_top();
  RegionData* const enqueue_end =
    sd.addr_to_region_ptr(sd.region_align_up(new_top));

  for (RegionData* cur = beg; cur < end; ++cur) {
    assert(cur->data_size() > 0, "region must have live data");
    cur->decrement_destination_count();
    if (cur < enqueue_end && cur->available() && cur->claim()) {
      if (cur->mark_normal()) {
        cm->push_region(sd.region(cur));
      } else if (cur->mark_copied()) {
        // Try to copy the content of the shadow region back to its corresponding
        // heap region if the shadow region is filled. Otherwise, the GC thread
        // that fills the shadow region will copy the data back (see
        // MoveAndUpdateShadowClosure::complete_region).
        copy_back(sd.region_to_addr(cur->shadow_region()), sd.region_to_addr(cur));
        ParCompactionManager::push_shadow_region_mt_safe(cur->shadow_region());
        cur->set_completed();
      }
    }
  }
}

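// Find the next source region that contains live data to be moved, skipping
// empty regions; updates src_space_id and src_space_top if the search has to
// advance to the next compaction space.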
size_t PSParallelCompact::next_src_region(MoveAndUpdateClosure& closure,
                                          SpaceId& src_space_id,
                                          HeapWord*& src_space_top,
                                          HeapWord* end_addr)
{
  typedef ParallelCompactData::RegionData RegionData;

  ParallelCompactData& sd = PSParallelCompact::summary_data();
  const size_t region_size = ParallelCompactData::RegionSize;

  size_t src_region_idx = 0;

  // Skip empty regions (if any) up to the top of the space.
  HeapWord* const src_aligned_up = sd.region_align_up(end_addr);

  // ... (lines elided: the remainder of next_src_region and most of
  // PSParallelCompact::fill_region; the fragment below is fill_region's tail) ...

    src_region_idx = next_src_region(closure, src_space_id, src_space_top,
                                     end_addr);
  } while (true);
}

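// Fill the destination region at region_idx directly, updating references in
// the objects as they are moved (see MoveAndUpdateClosure).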
void PSParallelCompact::fill_and_update_region(ParCompactionManager* cm, size_t region_idx)
{
  MoveAndUpdateClosure cl(mark_bitmap(), cm, region_idx);
  fill_region(cm, cl, region_idx);
}

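// As fill_and_update_region(), but try to fill a shadow region instead, so a
// worker can make progress before the destination heap region itself becomes
// available; the shadow contents are copied back later (see
// MoveAndUpdateShadowClosure::complete_region and
// PSParallelCompact::decrement_destination_counts).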
void PSParallelCompact::fill_and_update_shadow_region(ParCompactionManager* cm, size_t region_idx)
{
  // Get a shadow region first
  ParallelCompactData& sd = summary_data();
  RegionData* const region_ptr = sd.region(region_idx);
  size_t shadow_region = ParCompactionManager::pop_shadow_region_mt_safe(region_ptr);
  // The InvalidShadow return value indicates the corresponding heap region is available,
  // so use MoveAndUpdateClosure to fill the normal region. Otherwise, use
  // MoveAndUpdateShadowClosure to fill the acquired shadow region.
  if (shadow_region == ParCompactionManager::InvalidShadow) {
    MoveAndUpdateClosure cl(mark_bitmap(), cm, region_idx);
    region_ptr->shadow_to_normal();
    return fill_region(cm, cl, region_idx);
  } else {
    MoveAndUpdateShadowClosure cl(mark_bitmap(), cm, region_idx, shadow_region);
    return fill_region(cm, cl, region_idx);
  }
}

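// Copy one region's worth of words from a filled shadow region back to the
// heap region it stands in for.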
void PSParallelCompact::copy_back(HeapWord *shadow_addr, HeapWord *region_addr)
{
  Copy::aligned_conjoint_words(shadow_addr, region_addr, _summary_data.RegionSize);
}

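// Try to claim a region that is not yet available as a shadow region for the
// calling worker; returns true and stores the claimed index in region_idx on
// success.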
bool PSParallelCompact::steal_unavailable_region(ParCompactionManager* cm, size_t &region_idx)
{
  size_t next = cm->next_shadow_region();
  ParallelCompactData& sd = summary_data();
  size_t old_new_top = sd.addr_to_region_idx(_space_info[old_space_id].new_top());
  uint active_gc_threads = ParallelScavengeHeap::heap()->workers().active_workers();

  // ... (lines elided; the fragment below is the tail of
  // MoveAndUpdateClosure::do_addr) ...

  update_state(words);
  assert(copy_destination() == (HeapWord*)moved_oop + moved_oop->size(), "sanity");
  return is_full() ? ParMarkBitMap::full : ParMarkBitMap::incomplete;
}

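// Called once a shadow region has been completely filled: publish the shadow
// region's index in the RegionData of the heap region it stands in for, and
// copy the contents back right away if that heap region is already available.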
void MoveAndUpdateShadowClosure::complete_region(ParCompactionManager *cm, HeapWord *dest_addr,
                                                 PSParallelCompact::RegionData *region_ptr) {
  assert(region_ptr->shadow_state() == ParallelCompactData::RegionData::ShadowRegion, "Region should be shadow");
  // Record the shadow region index
  region_ptr->set_shadow_region(_shadow);
  // Mark the shadow region as filled to indicate the data is ready to be
  // copied back
  region_ptr->mark_filled();
  // Try to copy the content of the shadow region back to its corresponding
  // heap region if available; the GC thread that decreases the destination
  // count to zero will do the copying otherwise (see
  // PSParallelCompact::decrement_destination_counts).
  if (((region_ptr->available() && region_ptr->claim()) || region_ptr->claimed()) && region_ptr->mark_copied()) {
    region_ptr->set_completed();
    PSParallelCompact::copy_back(PSParallelCompact::summary_data().region_to_addr(_shadow), dest_addr);
    ParCompactionManager::push_shadow_region_mt_safe(_shadow);
  }
}

UpdateOnlyClosure::UpdateOnlyClosure(ParMarkBitMap* mbm,
                                     ParCompactionManager* cm,
                                     PSParallelCompact::SpaceId space_id) :
  ParMarkBitMapClosure(mbm, cm),
  _space_id(space_id),
  _start_array(PSParallelCompact::start_array(space_id))
{
}

// Updates the references in the object to their new values.
ParMarkBitMapClosure::IterationStatus
UpdateOnlyClosure::do_addr(HeapWord* addr, size_t words) {
  do_addr(addr);
  return ParMarkBitMap::incomplete;
}

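// FillClosure fills runs of dead space with filler objects so the containing
// space remains parsable.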
FillClosure::FillClosure(ParCompactionManager* cm, PSParallelCompact::SpaceId space_id) :