/*
 * Copyright (c) 2012, 2015, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc/g1/concurrentMark.inline.hpp"
#include "gc/g1/dirtyCardQueue.hpp"
#include "gc/g1/g1CollectedHeap.inline.hpp"
#include "gc/g1/g1CollectorState.hpp"
#include "gc/g1/g1EvacFailure.hpp"
#include "gc/g1/g1OopClosures.inline.hpp"
#include "gc/g1/g1_globals.hpp"
#include "gc/g1/heapRegion.hpp"
#include "gc/g1/heapRegionRemSet.hpp"

class UpdateRSetDeferred : public OopsInHeapRegionClosure {
private:
  G1CollectedHeap* _g1;
  DirtyCardQueue* _dcq;
  G1SATBCardTableModRefBS* _ct_bs;

public:
  UpdateRSetDeferred(DirtyCardQueue* dcq) :
    _g1(G1CollectedHeap::heap()), _dcq(dcq), _ct_bs(_g1->g1_barrier_set()) {}

  virtual void do_oop(narrowOop* p) { do_oop_work(p); }
  virtual void do_oop(      oop* p) { do_oop_work(p); }
  template <class T> void do_oop_work(T* p) {
    assert(_from->is_in_reserved(p), "paranoia");
    assert(!_from->is_survivor(), "Unexpected evac failure in survivor region");

    if (!_from->is_in_reserved(oopDesc::load_decode_heap_oop(p))) {
      size_t card_index = _ct_bs->index_for(p);
      if (_ct_bs->mark_card_deferred(card_index)) {
        _dcq->enqueue((jbyte*)_ct_bs->byte_for_index(card_index));
      }
    }
  }
};

class RemoveSelfForwardPtrObjClosure: public ObjectClosure {
private:
  G1CollectedHeap* _g1;
  ConcurrentMark* _cm;
  HeapRegion* _hr;
  size_t _marked_bytes;
  OopsInHeapRegionClosure* _update_rset_cl;
  bool _during_initial_mark;
  uint _worker_id;
  HeapWord* _last_forwarded_object_end;

public:
  RemoveSelfForwardPtrObjClosure(HeapRegion* hr,
                                 OopsInHeapRegionClosure* update_rset_cl,
                                 bool during_initial_mark,
                                 uint worker_id) :
    _g1(G1CollectedHeap::heap()),
    _cm(_g1->concurrent_mark()),
    _hr(hr),
    _marked_bytes(0),
    _update_rset_cl(update_rset_cl),
    _during_initial_mark(during_initial_mark),
    _worker_id(worker_id),
    _last_forwarded_object_end(hr->bottom()) { }

  size_t marked_bytes() { return _marked_bytes; }

  // Iterate over the live objects in the region to find self-forwarded objects
  // that need to be kept live. We need to update the remembered sets of these
  // objects, and further update the BOT and marks.
  // The remaining heap contents can be coalesced and overwritten with dummy
  // objects, since they are either already dead or have been evacuated (and the
  // evacuated copies are unreferenced here, i.e. dead too).
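  // (Note: a "self-forwarded" object is one whose forwarding pointer was
  // installed to point back at the object itself because evacuation of that
  // object failed; the check obj->is_forwarded() && obj->forwardee() == obj
  // below identifies exactly those objects.)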
  void do_object(oop obj) {
    HeapWord* obj_addr = (HeapWord*) obj;
    assert(_hr->is_in(obj_addr), "sanity");
    size_t obj_size = obj->size();
    HeapWord* obj_end = obj_addr + obj_size;

    if (obj->is_forwarded() && obj->forwardee() == obj) {
      // The object failed to move.

      zap_dead_objects(_last_forwarded_object_end, obj_addr);
      // We consider all objects that we find self-forwarded to be
      // live. We update the prev marking info so that they are all
      // under PTAMS and explicitly marked.
      if (!_cm->isPrevMarked(obj)) {
        _cm->markPrev(obj);
      }
      if (_during_initial_mark) {
        // For the next marking info we only mark the self-forwarded
        // objects explicitly during an initial-mark pause (since,
        // normally, we only mark objects pointed to by roots if we
        // succeed in copying them). By marking all self-forwarded
        // objects we ensure that we mark any that are still pointed
        // to by roots. During concurrent marking, and after
        // initial-mark, we don't need to mark any objects explicitly
        // and all objects in the CSet are considered (implicitly)
        // live. So, we won't mark them explicitly and we'll leave
        // them above NTAMS.
        _cm->grayRoot(obj, obj_size, _worker_id, _hr);
      }
      _marked_bytes += (obj_size * HeapWordSize);
      obj->set_mark(markOopDesc::prototype());

      // While we were processing RSet buffers during the collection,
      // we actually didn't scan any cards in the collection set,
      // since we didn't want to update remembered sets with entries
      // that point into the collection set, given that live objects
      // from the collection set are about to move and such entries
      // will be stale very soon.
      // This change also dealt with a reliability issue which
      // involved scanning a card in the collection set and coming
      // across an array that was being chunked and looking malformed.
      // The problem is that, if evacuation fails, we might have
      // remembered set entries missing given that we skipped cards in
      // the collection set. So, we'll recreate such entries now.
      obj->oop_iterate(_update_rset_cl);

      _last_forwarded_object_end = obj_end;
      _hr->cross_threshold(obj_addr, obj_end);
    }
  }

  // Fill the memory area from start to end with filler objects, and update the
  // BOT and the mark bitmap accordingly.
  void zap_dead_objects(HeapWord* start, HeapWord* end) {
    if (start == end) {
      return;
    }

    size_t gap_size = pointer_delta(end, start);
    MemRegion mr(start, gap_size);
    if (gap_size >= CollectedHeap::min_fill_size()) {
      CollectedHeap::fill_with_objects(start, gap_size);

      HeapWord* end_first_obj = start + ((oop)start)->size();
      _hr->cross_threshold(start, end_first_obj);
      // fill_with_objects() may have created multiple (i.e. two)
      // objects, as max_fill_size() is half a region.
      // After updating the BOT for the first object, also update the
      // BOT for the second object to make the BOT complete.
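      // (If the gap was covered by a single filler object, end_first_obj is
      // equal to end and no further BOT update is needed; otherwise the
      // second filler covers the range [end_first_obj, end).)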
      if (end_first_obj != end) {
        _hr->cross_threshold(end_first_obj, end);
#ifdef ASSERT
        size_t size_second_obj = ((oop)end_first_obj)->size();
        HeapWord* end_of_second_obj = end_first_obj + size_second_obj;
        assert(end == end_of_second_obj,
               "More than two objects were used to fill the area from " PTR_FORMAT " to " PTR_FORMAT ", "
               "second object of size " SIZE_FORMAT " ends at " PTR_FORMAT,
               p2i(start), p2i(end), size_second_obj, p2i(end_of_second_obj));
#endif
      }
    }
    _cm->clearRangePrevBitmap(mr);
  }

  void zap_remainder() {
    zap_dead_objects(_last_forwarded_object_end, _hr->top());
  }
};

class RemoveSelfForwardPtrHRClosure: public HeapRegionClosure {
  G1CollectedHeap* _g1h;
  uint _worker_id;
  HeapRegionClaimer* _hrclaimer;

  DirtyCardQueue _dcq;
  UpdateRSetDeferred _update_rset_cl;

public:
  RemoveSelfForwardPtrHRClosure(uint worker_id,
                                HeapRegionClaimer* hrclaimer) :
    _g1h(G1CollectedHeap::heap()),
    _worker_id(worker_id),
    _hrclaimer(hrclaimer),
    _dcq(&_g1h->dirty_card_queue_set()),
    _update_rset_cl(&_dcq) {
  }

  size_t remove_self_forward_ptr_by_walking_hr(HeapRegion* hr,
                                               bool during_initial_mark) {
    RemoveSelfForwardPtrObjClosure rspc(hr,
                                        &_update_rset_cl,
                                        during_initial_mark,
                                        _worker_id);
    _update_rset_cl.set_region(hr);
    hr->object_iterate(&rspc);
    // Need to zap the remainder area of the processed region.
    rspc.zap_remainder();

    return rspc.marked_bytes();
  }

  bool doHeapRegion(HeapRegion* hr) {
    bool during_initial_mark = _g1h->collector_state()->during_initial_mark_pause();
    bool during_conc_mark = _g1h->collector_state()->mark_in_progress();

    assert(!hr->is_pinned(), "Unexpected pinned region at index %u", hr->hrm_index());
    assert(hr->in_collection_set(), "bad CS");

    if (_hrclaimer->claim_region(hr->hrm_index())) {
      if (hr->evacuation_failed()) {
        hr->note_self_forwarding_removal_start(during_initial_mark,
                                               during_conc_mark);
        _g1h->check_bitmaps("Self-Forwarding Ptr Removal", hr);

        // In the common case (i.e. when there is no evacuation
        // failure) we make sure that the following is done when
        // the region is freed so that it is "ready-to-go" when it's
        // re-allocated. However, when evacuation failure happens, a
        // region will remain in the heap and might ultimately be added
        // to a CSet in the future. So we have to be careful here and
        // make sure the region's RSet is ready for parallel iteration
        // whenever this might be required in the future.
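        // (The BOT is also reset below; it is rebuilt by
        // remove_self_forward_ptr_by_walking_hr(), which calls
        // cross_threshold() for every self-forwarded object and every
        // filler object it lays down while walking the region.)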
        hr->rem_set()->reset_for_par_iteration();
        hr->reset_bot();

        size_t live_bytes = remove_self_forward_ptr_by_walking_hr(hr, during_initial_mark);

        hr->rem_set()->clean_strong_code_roots(hr);

        hr->note_self_forwarding_removal_end(during_initial_mark,
                                             during_conc_mark,
                                             live_bytes);
      }
    }
    return false;
  }
};

G1ParRemoveSelfForwardPtrsTask::G1ParRemoveSelfForwardPtrsTask() :
  AbstractGangTask("G1 Remove Self-forwarding Pointers"),
  _g1h(G1CollectedHeap::heap()),
  _hrclaimer(_g1h->workers()->active_workers()) { }

void G1ParRemoveSelfForwardPtrsTask::work(uint worker_id) {
  RemoveSelfForwardPtrHRClosure rsfp_cl(worker_id, &_hrclaimer);

  HeapRegion* hr = _g1h->start_cset_region_for_worker(worker_id);
  _g1h->collection_set_iterate_from(hr, &rsfp_cl);
}

G1RestorePreservedMarksTask::G1RestorePreservedMarksTask(OopAndMarkOopStack* preserved_objs) :
  AbstractGangTask("G1 Restore Preserved Marks"),
  _preserved_objs(preserved_objs) {}

void G1RestorePreservedMarksTask::work(uint worker_id) {
  OopAndMarkOopStack& cur = _preserved_objs[worker_id];
  while (!cur.is_empty()) {
    OopAndMarkOop elem = cur.pop();
    elem.set_mark();
  }
  cur.clear(true);
}
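// (Note: both tasks above derive from AbstractGangTask and are expected to be
// run on the GC work gang, e.g. via workers()->run_task(&task), after an
// evacuation pause in which some objects failed to copy; the exact call sites
// live outside this file.)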