src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp

rev 5917 : [mq]: cleanup-parcopyclosure

*** 4532,4542 ****
  }
  
  G1ParGCAllocBuffer::G1ParGCAllocBuffer(size_t gclab_word_size) :
    ParGCAllocBuffer(gclab_word_size), _retired(false) { }
  
! G1ParScanThreadState::G1ParScanThreadState(G1CollectedHeap* g1h, uint queue_num)
    : _g1h(g1h),
      _refs(g1h->task_queue(queue_num)),
      _dcq(&g1h->dirty_card_queue_set()),
      _ct_bs(g1h->g1_barrier_set()),
      _g1_rem(g1h->g1_rem_set()),
--- 4532,4631 ----
  }
  
  G1ParGCAllocBuffer::G1ParGCAllocBuffer(size_t gclab_word_size) :
    ParGCAllocBuffer(gclab_word_size), _retired(false) { }
  
! inline oop G1ParScanThreadState::copy_to_survivor_space(oop old) {
!   size_t word_sz = old->size();
!   HeapRegion* from_region = _g1h->heap_region_containing_raw(old);
!   // +1 to make the -1 indexes valid...
!   int young_index = from_region->young_index_in_cset()+1;
!   assert( (from_region->is_young() && young_index > 0) ||
!           (!from_region->is_young() && young_index == 0), "invariant" );
!   G1CollectorPolicy* g1p = _g1h->g1_policy();
!   markOop m = old->mark();
!   int age = m->has_displaced_mark_helper() ? m->displaced_mark_helper()->age()
!                                            : m->age();
!   GCAllocPurpose alloc_purpose = g1p->evacuation_destination(from_region, age,
!                                                              word_sz);
!   HeapWord* obj_ptr = allocate(alloc_purpose, word_sz);
! #ifndef PRODUCT
!   // Should this evacuation fail?
!   if (_g1h->evacuation_should_fail()) {
!     if (obj_ptr != NULL) {
!       undo_allocation(alloc_purpose, obj_ptr, word_sz);
!       obj_ptr = NULL;
!     }
!   }
! #endif // !PRODUCT
! 
!   if (obj_ptr == NULL) {
!     // This will either forward-to-self, or detect that someone else has
!     // installed a forwarding pointer.
!     return _g1h->handle_evacuation_failure_par(this, old);
!   }
! 
!   oop obj = oop(obj_ptr);
! 
!   // We're going to allocate linearly, so might as well prefetch ahead.
!   Prefetch::write(obj_ptr, PrefetchCopyIntervalInBytes);
! 
!   oop forward_ptr = old->forward_to_atomic(obj);
!   if (forward_ptr == NULL) {
!     Copy::aligned_disjoint_words((HeapWord*) old, obj_ptr, word_sz);
!     if (g1p->track_object_age(alloc_purpose)) {
!       // We could simply do obj->incr_age(). However, this causes a
!       // performance issue. obj->incr_age() will first check whether
!       // the object has a displaced mark by checking its mark word;
!       // getting the mark word from the new location of the object
!       // stalls. So, given that we already have the mark word and we
!       // are about to install it anyway, it's better to increase the
!       // age on the mark word, when the object does not have a
!       // displaced mark word. We're not expecting many objects to have
!       // a displaced marked word, so that case is not optimized
!       // further (it could be...) and we simply call obj->incr_age().
! 
!       if (m->has_displaced_mark_helper()) {
!         // in this case, we have to install the mark word first,
!         // otherwise obj looks to be forwarded (the old mark word,
!         // which contains the forward pointer, was copied)
!         obj->set_mark(m);
!         obj->incr_age();
!       } else {
!         m = m->incr_age();
!         obj->set_mark(m);
!       }
!       age_table()->add(obj, word_sz);
!     } else {
!       obj->set_mark(m);
!     }
! 
!     size_t* surv_young_words = surviving_young_words();
!     surv_young_words[young_index] += word_sz;
! 
!     if (!obj_needs_chunking(obj, word_sz)) {
!       // No point in using the slower heap_region_containing() method,
!       // given that we know obj is in the heap.
!       HeapRegion* obj_region = _g1h->heap_region_containing_raw(obj);
!       _scanner.set_region(obj_region);
!       obj->oop_iterate_backwards(&_scanner);
!     } else {
!       // We keep track of the next start index in the length field of
!       // the to-space object. The actual length can be found in the
!       // length field of the from-space object.
!       arrayOop(obj)->set_length(0);
!       oop* old_p = set_partial_array_mask(old);
!       push_on_queue(old_p);
!     }
!   } else {
!     undo_allocation(alloc_purpose, obj_ptr, word_sz);
!     obj = forward_ptr;
!   }
!   return obj;
! }
! 
! G1ParScanThreadState::G1ParScanThreadState(G1CollectedHeap* g1h, uint queue_num,
!                                            ReferenceProcessor* rp)
    : _g1h(g1h),
      _refs(g1h->task_queue(queue_num)),
      _dcq(&g1h->dirty_card_queue_set()),
      _ct_bs(g1h->g1_barrier_set()),
      _g1_rem(g1h->g1_rem_set()),
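A note on the set_partial_array_mask()/push_on_queue() pair in the chunking branch above: G1 distinguishes "continue scanning this array" tasks from ordinary references by tagging the queue entry pointer itself. A minimal sketch of that pointer-tagging idea, assuming a low bit left free by object alignment (the mask value and helper names are illustrative, not the exact HotSpot definitions):

    #include <stdint.h>

    const uintptr_t PARTIAL_ARRAY_MASK = 0x2;  // assumed free bit; objects are at least 8-byte aligned

    inline void* set_partial_array_tag(void* from_obj) {
      // Tag the from-space pointer so a queue consumer can tell a
      // partial-array task apart from an ordinary oop* entry.
      return (void*)((uintptr_t)from_obj | PARTIAL_ARRAY_MASK);
    }

    inline bool has_partial_array_tag(void* p) {
      return ((uintptr_t)p & PARTIAL_ARRAY_MASK) != 0;
    }

    inline void* clear_partial_array_tag(void* p) {
      return (void*)((uintptr_t)p & ~PARTIAL_ARRAY_MASK);
    }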
*** 4544,4554 ****
    _term_attempts(0),
    _surviving_alloc_buffer(g1h->desired_plab_sz(GCAllocForSurvived)),
    _tenured_alloc_buffer(g1h->desired_plab_sz(GCAllocForTenured)),
    _age_table(false),
    _strong_roots_time(0), _term_time(0),
!   _alloc_buffer_waste(0), _undo_waste(0) {
    // we allocate G1YoungSurvRateNumRegions plus one entries, since
    // we "sacrifice" entry 0 to keep track of surviving bytes for
    // non-young regions (where the age is -1)
    // We also add a few elements at the beginning and at the end in
    // an attempt to eliminate cache contention
--- 4633,4644 ----
    _term_attempts(0),
    _surviving_alloc_buffer(g1h->desired_plab_sz(GCAllocForSurvived)),
    _tenured_alloc_buffer(g1h->desired_plab_sz(GCAllocForTenured)),
    _age_table(false),
    _strong_roots_time(0), _term_time(0),
!   _alloc_buffer_waste(0), _undo_waste(0),
!   _scanner(g1h, this, rp) {
    // we allocate G1YoungSurvRateNumRegions plus one entries, since
    // we "sacrifice" entry 0 to keep track of surviving bytes for
    // non-young regions (where the age is -1)
    // We also add a few elements at the beginning and at the end in
    // an attempt to eliminate cache contention
*** 4632,4644 ****
    }
  }
  #endif // ASSERT
  
  void G1ParScanThreadState::trim_queue() {
-   assert(_evac_cl != NULL, "not set");
    assert(_evac_failure_cl != NULL, "not set");
-   assert(_partial_scan_cl != NULL, "not set");
    StarTask ref;
    do {
      // Drain the overflow stack first, so other threads can steal.
      while (refs()->pop_overflow(ref)) {
--- 4722,4732 ----
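The drain order in trim_queue() is deliberate: entries on the per-thread overflow stack cannot be stolen by other workers, so they are drained first, while tasks in the local queue remain visible to idle threads. A sketch of the pattern, assuming a generic work-stealing queue with the pop_overflow()/pop_local()/is_empty() interface used above (illustrative, not HotSpot code):

    // Task is the queue entry type; Fn processes one task and may push more work.
    template <typename Task, typename Queue, typename Fn>
    void drain_queue(Queue& q, Fn process) {
      Task t;
      do {
        while (q.pop_overflow(t)) {  // private overflow stack: no other worker can take these
          process(t);
        }
        while (q.pop_local(t)) {     // stealable deque: drain after the overflow stack
          process(t);
        }
      } while (!q.is_empty());       // re-check: process() may have pushed new entries
    }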
*** 4651,4681 ****
    } while (!refs()->is_empty());
  }
  
  G1ParClosureSuper::G1ParClosureSuper(G1CollectedHeap* g1,
                                       G1ParScanThreadState* par_scan_state) :
!   _g1(g1), _g1_rem(_g1->g1_rem_set()), _cm(_g1->concurrent_mark()),
!   _par_scan_state(par_scan_state),
!   _worker_id(par_scan_state->queue_num()),
!   _during_initial_mark(_g1->g1_policy()->during_initial_mark_pause()),
!   _mark_in_progress(_g1->mark_in_progress()) { }
  
! template <G1Barrier barrier, bool do_mark_object>
! void G1ParCopyClosure<barrier, do_mark_object>::mark_object(oop obj) {
  #ifdef ASSERT
    HeapRegion* hr = _g1->heap_region_containing(obj);
    assert(hr != NULL, "sanity");
    assert(!hr->in_collection_set(), "should not mark objects in the CSet");
  #endif // ASSERT
  
    // We know that the object is not moving so it's safe to read its size.
    _cm->grayRoot(obj, (size_t) obj->size(), _worker_id);
  }
  
! template <G1Barrier barrier, bool do_mark_object>
! void G1ParCopyClosure<barrier, do_mark_object>
! ::mark_forwarded_object(oop from_obj, oop to_obj) {
  #ifdef ASSERT
    assert(from_obj->is_forwarded(), "from obj should be forwarded");
    assert(from_obj->forwardee() == to_obj, "to obj should be the forwardee");
    assert(from_obj != to_obj, "should not be self-forwarded");
--- 4739,4763 ----
    } while (!refs()->is_empty());
  }
  
  G1ParClosureSuper::G1ParClosureSuper(G1CollectedHeap* g1,
                                       G1ParScanThreadState* par_scan_state) :
!   _g1(g1), _par_scan_state(par_scan_state),
!   _worker_id(par_scan_state->queue_num()) { }
  
! void G1ParCopyHelper::mark_object(oop obj) {
  #ifdef ASSERT
    HeapRegion* hr = _g1->heap_region_containing(obj);
    assert(hr != NULL, "sanity");
    assert(!hr->in_collection_set(), "should not mark objects in the CSet");
  #endif // ASSERT
  
    // We know that the object is not moving so it's safe to read its size.
    _cm->grayRoot(obj, (size_t) obj->size(), _worker_id);
  }
  
! void G1ParCopyHelper::mark_forwarded_object(oop from_obj, oop to_obj) {
  #ifdef ASSERT
    assert(from_obj->is_forwarded(), "from obj should be forwarded");
    assert(from_obj->forwardee() == to_obj, "to obj should be the forwardee");
    assert(from_obj != to_obj, "should not be self-forwarded");
*** 4693,4814 ****
    // well-formed. So we have to read its size from its from-space
    // image which we know should not be changing.
    _cm->grayRoot(to_obj, (size_t) from_obj->size(), _worker_id);
  }
  
- template <G1Barrier barrier, bool do_mark_object>
- oop G1ParCopyClosure<barrier, do_mark_object>
-   ::copy_to_survivor_space(oop old) {
-   size_t word_sz = old->size();
-   HeapRegion* from_region = _g1->heap_region_containing_raw(old);
-   // +1 to make the -1 indexes valid...
-   int young_index = from_region->young_index_in_cset()+1;
-   assert( (from_region->is_young() && young_index > 0) ||
-           (!from_region->is_young() && young_index == 0), "invariant" );
-   G1CollectorPolicy* g1p = _g1->g1_policy();
-   markOop m = old->mark();
-   int age = m->has_displaced_mark_helper() ? m->displaced_mark_helper()->age()
-                                            : m->age();
-   GCAllocPurpose alloc_purpose = g1p->evacuation_destination(from_region, age,
-                                                              word_sz);
-   HeapWord* obj_ptr = _par_scan_state->allocate(alloc_purpose, word_sz);
- #ifndef PRODUCT
-   // Should this evacuation fail?
-   if (_g1->evacuation_should_fail()) {
-     if (obj_ptr != NULL) {
-       _par_scan_state->undo_allocation(alloc_purpose, obj_ptr, word_sz);
-       obj_ptr = NULL;
-     }
-   }
- #endif // !PRODUCT
- 
-   if (obj_ptr == NULL) {
-     // This will either forward-to-self, or detect that someone else has
-     // installed a forwarding pointer.
-     return _g1->handle_evacuation_failure_par(_par_scan_state, old);
-   }
- 
-   oop obj = oop(obj_ptr);
- 
-   // We're going to allocate linearly, so might as well prefetch ahead.
-   Prefetch::write(obj_ptr, PrefetchCopyIntervalInBytes);
- 
-   oop forward_ptr = old->forward_to_atomic(obj);
-   if (forward_ptr == NULL) {
-     Copy::aligned_disjoint_words((HeapWord*) old, obj_ptr, word_sz);
-     if (g1p->track_object_age(alloc_purpose)) {
-       // We could simply do obj->incr_age(). However, this causes a
-       // performance issue. obj->incr_age() will first check whether
-       // the object has a displaced mark by checking its mark word;
-       // getting the mark word from the new location of the object
-       // stalls. So, given that we already have the mark word and we
-       // are about to install it anyway, it's better to increase the
-       // age on the mark word, when the object does not have a
-       // displaced mark word. We're not expecting many objects to have
-       // a displaced marked word, so that case is not optimized
-       // further (it could be...) and we simply call obj->incr_age().
- 
-       if (m->has_displaced_mark_helper()) {
-         // in this case, we have to install the mark word first,
-         // otherwise obj looks to be forwarded (the old mark word,
-         // which contains the forward pointer, was copied)
-         obj->set_mark(m);
-         obj->incr_age();
-       } else {
-         m = m->incr_age();
-         obj->set_mark(m);
-       }
-       _par_scan_state->age_table()->add(obj, word_sz);
-     } else {
-       obj->set_mark(m);
-     }
- 
-     size_t* surv_young_words = _par_scan_state->surviving_young_words();
-     surv_young_words[young_index] += word_sz;
- 
-     if (obj->is_objArray() && arrayOop(obj)->length() >= ParGCArrayScanChunk) {
-       // We keep track of the next start index in the length field of
-       // the to-space object. The actual length can be found in the
-       // length field of the from-space object.
-       arrayOop(obj)->set_length(0);
-       oop* old_p = set_partial_array_mask(old);
-       _par_scan_state->push_on_queue(old_p);
-     } else {
-       // No point in using the slower heap_region_containing() method,
-       // given that we know obj is in the heap.
-       _scanner.set_region(_g1->heap_region_containing_raw(obj));
-       obj->oop_iterate_backwards(&_scanner);
-     }
-   } else {
-     _par_scan_state->undo_allocation(alloc_purpose, obj_ptr, word_sz);
-     obj = forward_ptr;
-   }
-   return obj;
- }
- 
  template <class T>
  void G1ParCopyHelper::do_klass_barrier(T* p, oop new_obj) {
    if (_g1->heap_region_containing_raw(new_obj)->is_young()) {
      _scanned_klass->record_modified_oops();
    }
  }
  
  template <G1Barrier barrier, bool do_mark_object>
  template <class T>
! void G1ParCopyClosure<barrier, do_mark_object>
! ::do_oop_work(T* p) {
!   oop obj = oopDesc::load_decode_heap_oop(p);
    assert(_worker_id == _par_scan_state->queue_num(), "sanity");
  
-   // here the null check is implicit in the cset_fast_test() test
    if (_g1->in_cset_fast_test(obj)) {
      oop forwardee;
      if (obj->is_forwarded()) {
        forwardee = obj->forwardee();
      } else {
!       forwardee = copy_to_survivor_space(obj);
      }
      assert(forwardee != NULL, "forwardee should not be NULL");
      oopDesc::encode_store_heap_oop(p, forwardee);
  
      if (do_mark_object && forwardee != obj) {
        // If the object is self-forwarded we don't need to explicitly
--- 4775,4807 ----
    // well-formed. So we have to read its size from its from-space
    // image which we know should not be changing.
    _cm->grayRoot(to_obj, (size_t) from_obj->size(), _worker_id);
  }
  
  template <class T>
  void G1ParCopyHelper::do_klass_barrier(T* p, oop new_obj) {
    if (_g1->heap_region_containing_raw(new_obj)->is_young()) {
      _scanned_klass->record_modified_oops();
    }
  }
  
  template <G1Barrier barrier, bool do_mark_object>
  template <class T>
! void G1ParCopyClosure<barrier, do_mark_object>::do_oop_work(T* p) {
!   T heap_oop = oopDesc::load_heap_oop(p);
! 
!   if (!oopDesc::is_null(heap_oop)) {
!     oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
    assert(_worker_id == _par_scan_state->queue_num(), "sanity");
  
    if (_g1->in_cset_fast_test(obj)) {
      oop forwardee;
      if (obj->is_forwarded()) {
        forwardee = obj->forwardee();
      } else {
!       forwardee = _par_scan_state->copy_to_survivor_space(obj);
      }
      assert(forwardee != NULL, "forwardee should not be NULL");
      oopDesc::encode_store_heap_oop(p, forwardee);
  
      if (do_mark_object && forwardee != obj) {
        // If the object is self-forwarded we don't need to explicitly
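The rewritten do_oop_work() above replaces the single load_decode_heap_oop() call with an explicit load, null check, and decode_heap_oop_not_null(). With compressed oops, a general decode must branch so that a null narrowOop maps to a null oop; testing for null once up front lets the hot path use the branch-free decode. A simplified sketch of that distinction, assuming the usual base-plus-shift encoding (illustrative names, not the HotSpot implementation):

    #include <stdint.h>

    typedef uint32_t narrowOop;
    static char* heap_base = 0;      // assumed compressed-oop base, set at VM init
    static const int oop_shift = 3;  // assumed compressed-oop shift

    inline void* decode_not_null(narrowOop n) {
      // Branch-free: the caller has already excluded n == 0.
      return heap_base + ((uintptr_t)n << oop_shift);
    }

    inline void* decode(narrowOop n) {
      // The general decode needs the null branch so that 0 maps to a null pointer.
      return (n == 0) ? nullptr : decode_not_null(n);
    }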
*** 4821,4892 ****
      }
    } else {
      // The object is not in collection set. If we're a root scanning
      // closure during an initial mark pause (i.e. do_mark_object will
      // be true) then attempt to mark the object.
!     if (do_mark_object && _g1->is_in_g1_reserved(obj)) {
        mark_object(obj);
      }
    }
  
!   if (barrier == G1BarrierEvac && obj != NULL) {
      _par_scan_state->update_rs(_from, p, _worker_id);
    }
  }
  
  template void G1ParCopyClosure<G1BarrierEvac, false>::do_oop_work(oop* p);
  template void G1ParCopyClosure<G1BarrierEvac, false>::do_oop_work(narrowOop* p);
  
- template <class T> void G1ParScanPartialArrayClosure::do_oop_nv(T* p) {
-   assert(has_partial_array_mask(p), "invariant");
-   oop from_obj = clear_partial_array_mask(p);
- 
-   assert(Universe::heap()->is_in_reserved(from_obj), "must be in heap.");
-   assert(from_obj->is_objArray(), "must be obj array");
-   objArrayOop from_obj_array = objArrayOop(from_obj);
-   // The from-space object contains the real length.
-   int length = from_obj_array->length();
- 
-   assert(from_obj->is_forwarded(), "must be forwarded");
-   oop to_obj = from_obj->forwardee();
-   assert(from_obj != to_obj, "should not be chunking self-forwarded objects");
-   objArrayOop to_obj_array = objArrayOop(to_obj);
-   // We keep track of the next start index in the length field of the
-   // to-space object.
-   int next_index = to_obj_array->length();
-   assert(0 <= next_index && next_index < length,
-          err_msg("invariant, next index: %d, length: %d", next_index, length));
- 
-   int start = next_index;
-   int end = length;
-   int remainder = end - start;
-   // We'll try not to push a range that's smaller than ParGCArrayScanChunk.
-   if (remainder > 2 * ParGCArrayScanChunk) {
-     end = start + ParGCArrayScanChunk;
-     to_obj_array->set_length(end);
-     // Push the remainder before we process the range in case another
-     // worker has run out of things to do and can steal it.
-     oop* from_obj_p = set_partial_array_mask(from_obj);
-     _par_scan_state->push_on_queue(from_obj_p);
-   } else {
-     assert(length == end, "sanity");
-     // We'll process the final range for this object. Restore the length
-     // so that the heap remains parsable in case of evacuation failure.
-     to_obj_array->set_length(end);
-   }
-   _scanner.set_region(_g1->heap_region_containing_raw(to_obj));
-   // Process indexes [start,end). It will also process the header
-   // along with the first chunk (i.e., the chunk with start == 0).
-   // Note that at this point the length field of to_obj_array is not
-   // correct given that we are using it to keep track of the next
-   // start index. oop_iterate_range() (thankfully!) ignores the length
-   // field and only relies on the start / end parameters. It does
-   // however return the size of the object which will be incorrect. So
-   // we have to ignore it even if we wanted to use it.
-   to_obj_array->oop_iterate_range(&_scanner, start, end);
- }
- 
  class G1ParEvacuateFollowersClosure : public VoidClosure {
  protected:
    G1CollectedHeap* _g1h;
    G1ParScanThreadState* _par_scan_state;
    RefToScanQueueSet* _queues;
--- 4814,4839 ----
      }
    } else {
      // The object is not in collection set. If we're a root scanning
      // closure during an initial mark pause (i.e. do_mark_object will
      // be true) then attempt to mark the object.
!     if (do_mark_object) {
!       assert(_g1->is_in_g1_reserved(obj), "Must reference an object within the heap");
        mark_object(obj);
      }
    }
  
!   if (barrier == G1BarrierEvac) {
!     assert(obj != NULL, "Must be");
      _par_scan_state->update_rs(_from, p, _worker_id);
    }
+   }
  }
  
  template void G1ParCopyClosure<G1BarrierEvac, false>::do_oop_work(oop* p);
  template void G1ParCopyClosure<G1BarrierEvac, false>::do_oop_work(narrowOop* p);
  
  class G1ParEvacuateFollowersClosure : public VoidClosure {
  protected:
    G1CollectedHeap* _g1h;
    G1ParScanThreadState* _par_scan_state;
    RefToScanQueueSet* _queues;
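The G1ParScanPartialArrayClosure::do_oop_nv() removed above is the consumer half of the chunking scheme: scan at most ParGCArrayScanChunk elements, push the remainder back on the queue before scanning so an idle worker can steal it, and reuse the to-space length field as the next start index. A minimal self-contained sketch of that discipline (plain C++, not HotSpot code; CHUNK and ArrayTask are stand-ins):

    #include <deque>
    #include <cstdio>

    const int CHUNK = 4;  // stand-in for ParGCArrayScanChunk

    struct ArrayTask { const int* a; int len; int next; };

    void scan_chunk(std::deque<ArrayTask>& queue, ArrayTask t) {
      int start = t.next;
      int end   = t.len;
      // Avoid pushing a remainder smaller than one chunk.
      if (end - start > 2 * CHUNK) {
        end = start + CHUNK;
        // Push the remainder before scanning, so an idle worker can steal it.
        // (The real code instead stores `end` in the to-space length field and
        // restores the true length with the final chunk.)
        queue.push_back(ArrayTask{t.a, t.len, end});
      }
      for (int i = start; i < end; i++) {
        std::printf("visit a[%d] = %d\n", i, t.a[i]);  // stand-in for scanning one element
      }
    }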
*** 5023,5040 ****
      ResourceMark rm;
      HandleMark   hm;
  
      ReferenceProcessor*             rp = _g1h->ref_processor_stw();
  
!     G1ParScanThreadState            pss(_g1h, worker_id);
!     G1ParScanHeapEvacClosure        scan_evac_cl(_g1h, &pss, rp);
      G1ParScanHeapEvacFailureClosure evac_failure_cl(_g1h, &pss, rp);
-     G1ParScanPartialArrayClosure    partial_scan_cl(_g1h, &pss, rp);
- 
-     pss.set_evac_closure(&scan_evac_cl);
      pss.set_evac_failure_closure(&evac_failure_cl);
-     pss.set_partial_scan_closure(&partial_scan_cl);
  
      G1ParScanExtRootClosure        only_scan_root_cl(_g1h, &pss, rp);
      G1ParScanMetadataClosure       only_scan_metadata_cl(_g1h, &pss, rp);
  
      G1ParScanAndMarkExtRootClosure scan_mark_root_cl(_g1h, &pss, rp);
--- 4970,4983 ----
      ResourceMark rm;
      HandleMark   hm;
  
      ReferenceProcessor*             rp = _g1h->ref_processor_stw();
  
!     G1ParScanThreadState            pss(_g1h, worker_id, rp);
      G1ParScanHeapEvacFailureClosure evac_failure_cl(_g1h, &pss, rp);
      pss.set_evac_failure_closure(&evac_failure_cl);
  
      G1ParScanExtRootClosure        only_scan_root_cl(_g1h, &pss, rp);
      G1ParScanMetadataClosure       only_scan_metadata_cl(_g1h, &pss, rp);
  
      G1ParScanAndMarkExtRootClosure scan_mark_root_cl(_g1h, &pss, rp);
*** 5454,5472 ****
    ResourceMark rm;
    HandleMark   hm;
  
    G1STWIsAliveClosure is_alive(_g1h);
  
!   G1ParScanThreadState            pss(_g1h, worker_id);
- 
-   G1ParScanHeapEvacClosure        scan_evac_cl(_g1h, &pss, NULL);
    G1ParScanHeapEvacFailureClosure evac_failure_cl(_g1h, &pss, NULL);
-   G1ParScanPartialArrayClosure    partial_scan_cl(_g1h, &pss, NULL);
- 
-   pss.set_evac_closure(&scan_evac_cl);
    pss.set_evac_failure_closure(&evac_failure_cl);
-   pss.set_partial_scan_closure(&partial_scan_cl);
  
    G1ParScanExtRootClosure        only_copy_non_heap_cl(_g1h, &pss, NULL);
    G1ParScanMetadataClosure       only_copy_metadata_cl(_g1h, &pss, NULL);
  
    G1ParScanAndMarkExtRootClosure copy_mark_non_heap_cl(_g1h, &pss, NULL);
--- 5397,5411 ----
    ResourceMark rm;
    HandleMark   hm;
  
    G1STWIsAliveClosure is_alive(_g1h);
  
!   G1ParScanThreadState            pss(_g1h, worker_id, NULL);
    G1ParScanHeapEvacFailureClosure evac_failure_cl(_g1h, &pss, NULL);
    pss.set_evac_failure_closure(&evac_failure_cl);
  
    G1ParScanExtRootClosure        only_copy_non_heap_cl(_g1h, &pss, NULL);
    G1ParScanMetadataClosure       only_copy_metadata_cl(_g1h, &pss, NULL);
  
    G1ParScanAndMarkExtRootClosure copy_mark_non_heap_cl(_g1h, &pss, NULL);
*** 5566,5583 ****
    void work(uint worker_id) {
      ResourceMark rm;
      HandleMark   hm;
  
!     G1ParScanThreadState            pss(_g1h, worker_id);
!     G1ParScanHeapEvacClosure        scan_evac_cl(_g1h, &pss, NULL);
      G1ParScanHeapEvacFailureClosure evac_failure_cl(_g1h, &pss, NULL);
-     G1ParScanPartialArrayClosure    partial_scan_cl(_g1h, &pss, NULL);
- 
-     pss.set_evac_closure(&scan_evac_cl);
      pss.set_evac_failure_closure(&evac_failure_cl);
-     pss.set_partial_scan_closure(&partial_scan_cl);
  
      assert(pss.refs()->is_empty(), "both queue and overflow should be empty");
  
      G1ParScanExtRootClosure        only_copy_non_heap_cl(_g1h, &pss, NULL);
--- 5505,5518 ----
    void work(uint worker_id) {
      ResourceMark rm;
      HandleMark   hm;
  
!     G1ParScanThreadState            pss(_g1h, worker_id, NULL);
      G1ParScanHeapEvacFailureClosure evac_failure_cl(_g1h, &pss, NULL);
      pss.set_evac_failure_closure(&evac_failure_cl);
  
      assert(pss.refs()->is_empty(), "both queue and overflow should be empty");
  
      G1ParScanExtRootClosure        only_copy_non_heap_cl(_g1h, &pss, NULL);
*** 5692,5713 ****
    // of JNI refs is serial and performed serially by the current thread
    // rather than by a worker. The following PSS will be used for processing
    // JNI refs.
  
    // Use only a single queue for this PSS.
!   G1ParScanThreadState            pss(this, 0);
  
    // We do not embed a reference processor in the copying/scanning
    // closures while we're actually processing the discovered
    // reference objects.
-   G1ParScanHeapEvacClosure        scan_evac_cl(this, &pss, NULL);
    G1ParScanHeapEvacFailureClosure evac_failure_cl(this, &pss, NULL);
-   G1ParScanPartialArrayClosure    partial_scan_cl(this, &pss, NULL);
- 
-   pss.set_evac_closure(&scan_evac_cl);
    pss.set_evac_failure_closure(&evac_failure_cl);
-   pss.set_partial_scan_closure(&partial_scan_cl);
  
    assert(pss.refs()->is_empty(), "pre-condition");
  
    G1ParScanExtRootClosure        only_copy_non_heap_cl(this, &pss, NULL);
    G1ParScanMetadataClosure       only_copy_metadata_cl(this, &pss, NULL);
--- 5627,5644 ----
    // of JNI refs is serial and performed serially by the current thread
    // rather than by a worker. The following PSS will be used for processing
    // JNI refs.
  
    // Use only a single queue for this PSS.
!   G1ParScanThreadState            pss(this, 0, NULL);
  
    // We do not embed a reference processor in the copying/scanning
    // closures while we're actually processing the discovered
    // reference objects.
    G1ParScanHeapEvacFailureClosure evac_failure_cl(this, &pss, NULL);
    pss.set_evac_failure_closure(&evac_failure_cl);
  
    assert(pss.refs()->is_empty(), "pre-condition");
  
    G1ParScanExtRootClosure        only_copy_non_heap_cl(this, &pss, NULL);
    G1ParScanMetadataClosure       only_copy_metadata_cl(this, &pss, NULL);