/*
 * Copyright (c) 2001, 2015, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc/g1/g1CollectedHeap.inline.hpp"
#include "gc/g1/g1SATBCardTableModRefBS.hpp"
#include "gc/g1/heapRegion.hpp"
#include "gc/g1/satbQueue.hpp"
#include "gc/shared/memset_with_concurrent_readers.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/atomic.inline.hpp"
#include "runtime/mutexLocker.hpp"
#include "runtime/orderAccess.inline.hpp"
#include "runtime/thread.inline.hpp"

// Card-table based barrier set for G1: combines a card table (for the
// post-write barrier / remembered sets) with SATB (snapshot-at-the-beginning)
// pre-write barrier queuing. The tag distinguishes this subtype in the
// BarrierSet pseudo-RTTI hierarchy.
G1SATBCardTableModRefBS::G1SATBCardTableModRefBS(
  MemRegion whole_heap,
  const BarrierSet::FakeRtti& fake_rtti) :
  CardTableModRefBS(whole_heap, fake_rtti.add_tag(BarrierSet::G1SATBCT))
{ }

// SATB pre-barrier slow path: record the about-to-be-overwritten value
// (pre_val) so concurrent marking sees the snapshot. Java threads append to
// their thread-local SATB queue; other threads use the shared queue, which
// requires the lock. A no-op when SATB queuing is inactive (no concurrent
// marking in progress).
void G1SATBCardTableModRefBS::enqueue(oop pre_val) {
  // Nulls should have been already filtered.
  assert(pre_val->is_oop(true), "Error");

  if (!JavaThread::satb_mark_queue_set().is_active()) return;
  Thread* thr = Thread::current();
  if (thr->is_Java_thread()) {
    JavaThread* jt = (JavaThread*)thr;
    jt->satb_mark_queue().enqueue(pre_val);
  } else {
    MutexLockerEx x(Shared_SATB_Q_lock, Mutex::_no_safepoint_check_flag);
    JavaThread::satb_mark_queue_set().shared_satb_queue()->enqueue(pre_val);
  }
}

// Array-store pre-barrier: enqueue every non-null element currently in
// [dst, dst+count) before the destination range is overwritten. Instantiated
// for both oop* and narrowOop* via the wrappers below.
template <class T> void
G1SATBCardTableModRefBS::write_ref_array_pre_work(T* dst, int count) {
  if (!JavaThread::satb_mark_queue_set().is_active()) return;
  T* elem_ptr = dst;
  for (int i = 0; i < count; i++, elem_ptr++) {
    T heap_oop = oopDesc::load_heap_oop(elem_ptr);
    if (!oopDesc::is_null(heap_oop)) {
      enqueue(oopDesc::decode_heap_oop_not_null(heap_oop));
    }
  }
}

// No pre-barrier needed when the destination is uninitialized: there are no
// old values to snapshot.
void G1SATBCardTableModRefBS::write_ref_array_pre(oop* dst, int count, bool dest_uninitialized) {
  if (!dest_uninitialized) {
    write_ref_array_pre_work(dst, count);
  }
}
void G1SATBCardTableModRefBS::write_ref_array_pre(narrowOop* dst, int count, bool dest_uninitialized) {
  if (!dest_uninitialized) {
    write_ref_array_pre_work(dst, count);
  }
}

// Try to mark the card at card_index "deferred". Returns false if no marking
// is needed (already deferred, or the card covers a young-gen region);
// returns true otherwise, i.e. when this caller should treat the card as
// newly deferred.
bool G1SATBCardTableModRefBS::mark_card_deferred(size_t card_index) {
  jbyte val = _byte_map[card_index];
  // It's already processed
  if ((val & (clean_card_mask_val() | deferred_card_val())) == deferred_card_val()) {
    return false;
  }

  if (val == g1_young_gen) {
    // the card is for a young gen region. We don't need to keep track of all pointers into young
    return false;
  }

  // Cached bit can be installed either on a clean card or on a claimed card.
  jbyte new_val = val;
  if (val == clean_card_val()) {
    new_val = (jbyte)deferred_card_val();
  } else {
    if (val & claimed_card_val()) {
      new_val = val | (jbyte)deferred_card_val();
    }
  }
  if (new_val != val) {
    // The cmpxchg result is deliberately ignored: if the CAS loses, a
    // concurrent thread changed the card — presumably installing a value
    // that makes re-marking unnecessary (NOTE(review): racy-update semantics
    // inferred from the ignored return; confirm against callers).
    Atomic::cmpxchg(new_val, &_byte_map[card_index], val);
  }
  return true;
}

// Tag every card covering mr with the young-gen value so barriers can filter
// out writes into young regions. Uses the concurrent-reader-safe memset
// because other threads may be reading these cards while we write them.
void G1SATBCardTableModRefBS::g1_mark_as_young(const MemRegion& mr) {
  jbyte *const first = byte_for(mr.start());
  jbyte *const last = byte_after(mr.last());

  memset_with_concurrent_readers(first, g1_young_gen, last - first);
}

#ifndef PRODUCT
// Debug-only check that all cards covering mr carry the young-gen value.
void G1SATBCardTableModRefBS::verify_g1_young_region(MemRegion mr) {
  verify_region(mr, g1_young_gen, true);
}
#endif

// Mapping-changed callback: reset the card-table entries for newly committed
// heap regions to the clean value.
void G1SATBCardTableLoggingModRefBSChangedListener::on_commit(uint start_idx, size_t num_regions, bool zero_filled) {
  // Default value for a clean card on the card table is -1. So we cannot take advantage of the zero_filled parameter.
  MemRegion mr(G1CollectedHeap::heap()->bottom_addr_for_region(start_idx), num_regions * HeapRegion::GrainWords);
  _card_table->clear(mr);
}

// "Logging" variant: in addition to dirtying cards, the post-barrier logs
// dirtied cards into dirty-card queues for concurrent refinement.
G1SATBCardTableLoggingModRefBS::
G1SATBCardTableLoggingModRefBS(MemRegion whole_heap) :
  G1SATBCardTableModRefBS(whole_heap, BarrierSet::FakeRtti(G1SATBCTLogging)),
  _dcqs(JavaThread::dirty_card_queue_set()),
  _listener()
{
  _listener.set_card_table(this);
}

// Lay the card-table byte map over the memory reserved by the mapper, and
// register for commit/uncommit notifications. Establishes the usual
// card-table invariants (guard card at the end, byte_map_base biased so that
// byte_for(addr) == byte_map_base + (addr >> card_shift)).
void G1SATBCardTableLoggingModRefBS::initialize(G1RegionToSpaceMapper* mapper) {
  mapper->set_mapping_changed_listener(&_listener);

  _byte_map_size = mapper->reserved().byte_size();

  _guard_index = cards_required(_whole_heap.word_size()) - 1;
  _last_valid_index = _guard_index - 1;

  HeapWord* low_bound = _whole_heap.start();
  HeapWord* high_bound = _whole_heap.end();

  // G1 covers the whole heap with a single covered region.
  _cur_covered_regions = 1;
  _covered[0] = _whole_heap;

  _byte_map = (jbyte*) mapper->reserved().start();
  byte_map_base = _byte_map - (uintptr_t(low_bound) >> card_shift);
  assert(byte_for(low_bound) == &_byte_map[0], "Checking start of map");
  assert(byte_for(high_bound-1) <= &_byte_map[_last_valid_index], "Checking end of map");

  if (TraceCardTableModRefBS) {
    gclog_or_tty->print_cr("G1SATBCardTableModRefBS::G1SATBCardTableModRefBS: ");
    gclog_or_tty->print_cr("  "
                  "  &_byte_map[0]: " INTPTR_FORMAT
                  "  &_byte_map[_last_valid_index]: " INTPTR_FORMAT,
                  p2i(&_byte_map[0]),
                  p2i(&_byte_map[_last_valid_index]));
    gclog_or_tty->print_cr("  "
                  "  byte_map_base: " INTPTR_FORMAT,
                  p2i(byte_map_base));
  }
}

// Post-write barrier slow path for a single reference field. Young cards are
// filtered out (pointers into young need no remembered-set entry). The
// storeload fence orders the field store (done by the caller) before the
// second read of the card, so a card concurrently cleaned by refinement is
// re-dirtied and re-enqueued rather than lost.
void
G1SATBCardTableLoggingModRefBS::write_ref_field_work(void* field,
                                                     oop new_val,
                                                     bool release) {
  volatile jbyte* byte = byte_for(field);
  if (*byte == g1_young_gen) {
    return;
  }
  OrderAccess::storeload();
  if (*byte != dirty_card) {
    *byte = dirty_card;
    Thread* thr = Thread::current();
    if (thr->is_Java_thread()) {
      JavaThread* jt = (JavaThread*)thr;
      jt->dirty_card_queue().enqueue(byte);
    } else {
      // Non-Java threads share one dirty-card queue, protected by the lock.
      MutexLockerEx x(Shared_DirtyCardQ_lock,
                      Mutex::_no_safepoint_check_flag);
      _dcqs.shared_dirty_card_queue()->enqueue(byte);
    }
  }
}

// Static entry point for the post-barrier. Filters same-region writes (field
// and new_val in the same heap region never need a remembered-set entry — the
// XOR of the two addresses shifted by the region size is zero) and NULL
// stores, then delegates to the heap's barrier set.
void
G1SATBCardTableLoggingModRefBS::write_ref_field_static(void* field,
                                                       oop new_val) {
  uintptr_t field_uint = (uintptr_t)field;
  uintptr_t new_val_uint = cast_from_oop<uintptr_t>(new_val);
  uintptr_t comb = field_uint ^ new_val_uint;
  comb = comb >> HeapRegion::LogOfHRGrainBytes;
  if (comb == 0) return;
  if (new_val == NULL) return;
  // Otherwise, log it.
  G1SATBCardTableLoggingModRefBS* g1_bs =
    barrier_set_cast<G1SATBCardTableLoggingModRefBS>(G1CollectedHeap::heap()->barrier_set());
  g1_bs->write_ref_field_work(field, new_val, false);
}

// Dirty all cards covering mr. For the whole-heap case every card is dirtied
// unconditionally with no queuing; otherwise young cards are skipped and each
// newly dirtied card is enqueued for refinement, mirroring
// write_ref_field_work.
void
G1SATBCardTableLoggingModRefBS::invalidate(MemRegion mr, bool whole_heap) {
  volatile jbyte* byte = byte_for(mr.start());
  jbyte* last_byte = byte_for(mr.last());
  Thread* thr = Thread::current();
  if (whole_heap) {
    while (byte <= last_byte) {
      *byte = dirty_card;
      byte++;
    }
  } else {
    // skip all consecutive young cards
    for (; byte <= last_byte && *byte == g1_young_gen; byte++);

    if (byte <= last_byte) {
      OrderAccess::storeload();
      // Enqueue if necessary.
      if (thr->is_Java_thread()) {
        JavaThread* jt = (JavaThread*)thr;
        for (; byte <= last_byte; byte++) {
          if (*byte == g1_young_gen) {
            continue;
          }
          if (*byte != dirty_card) {
            *byte = dirty_card;
            jt->dirty_card_queue().enqueue(byte);
          }
        }
      } else {
        // Non-Java thread: single lock acquisition covers the whole range.
        MutexLockerEx x(Shared_DirtyCardQ_lock,
                        Mutex::_no_safepoint_check_flag);
        for (; byte <= last_byte; byte++) {
          if (*byte == g1_young_gen) {
            continue;
          }
          if (*byte != dirty_card) {
            *byte = dirty_card;
            _dcqs.shared_dirty_card_queue()->enqueue(byte);
          }
        }
      }
    }
  }
}

// After an nmethod stores a non-null oop at dst, record the nmethod as a
// strong code root of the region containing the referenced object.
void G1SATBCardTableModRefBS::write_ref_nmethod_post(oop* dst, nmethod* nm) {
  oop obj = oopDesc::load_heap_oop(dst);
  if (obj != NULL) {
    G1CollectedHeap* g1h = G1CollectedHeap::heap();
    HeapRegion* hr = g1h->heap_region_containing(obj);
    hr->add_strong_code_root(nm);
  }
}

// Closure used by write_ref_nmethod_pre: determines whether the oop slot _dst
// is the LAST reference from the nmethod into region _hr. value() stays true
// unless another slot (p != _dst) referencing the same region is found.
class G1EnsureLastRefToRegion : public OopClosure {
  G1CollectedHeap* _g1h;
  HeapRegion* _hr;
  oop* _dst;

  bool _value;
public:
  G1EnsureLastRefToRegion(G1CollectedHeap* g1h, HeapRegion* hr, oop* dst) :
    _g1h(g1h), _hr(hr), _dst(dst), _value(true) {}

  void do_oop(oop* p) {
    // Stop scanning work once a second reference is found (_value false).
    if (_value && p != _dst) {
      oop obj = oopDesc::load_heap_oop(p);
      if (obj != NULL) {
        HeapRegion* hr = _g1h->heap_region_containing(obj);
        if (hr == _hr) {
          // Another reference to the same region.
          _value = false;
        }
      }
    }
  }
  // nmethod oop slots are full-width oops; narrow oops are not expected here.
  void do_oop(narrowOop* p) { ShouldNotReachHere(); }
  bool value() const { return _value; }
};

// Before an nmethod's oop at dst is overwritten: if dst holds the nmethod's
// only reference into its region, drop the nmethod from that region's strong
// code root set (write_ref_nmethod_post will re-add it for the new value's
// region).
void G1SATBCardTableModRefBS::write_ref_nmethod_pre(oop* dst, nmethod* nm) {
  oop obj = oopDesc::load_heap_oop(dst);
  if (obj != NULL) {
    G1CollectedHeap* g1h = G1CollectedHeap::heap();
    HeapRegion* hr = g1h->heap_region_containing(obj);
    G1EnsureLastRefToRegion ensure_last_ref(g1h, hr, dst);
    nm->oops_do(&ensure_last_ref);
    if (ensure_last_ref.value()) {
      // Last reference to this region, remove the nmethod from the rset.
      hr->remove_strong_code_root(nm);
    }
  }
}