/*
 * Copyright (c) 2001, 2017, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc/g1/concurrentG1Refine.hpp"
#include "gc/g1/dirtyCardQueue.hpp"
#include "gc/g1/g1BlockOffsetTable.inline.hpp"
#include "gc/g1/g1CollectedHeap.inline.hpp"
#include "gc/g1/g1FromCardCache.hpp"
#include "gc/g1/g1GCPhaseTimes.hpp"
#include "gc/g1/g1HotCardCache.hpp"
#include "gc/g1/g1OopClosures.inline.hpp"
#include "gc/g1/g1RemSet.inline.hpp"
#include "gc/g1/g1SATBCardTableModRefBS.inline.hpp"
#include "gc/g1/heapRegion.inline.hpp"
#include "gc/g1/heapRegionManager.inline.hpp"
#include "gc/g1/heapRegionRemSet.hpp"
#include "gc/g1/suspendibleThreadSet.hpp"
#include "gc/shared/gcTraceTime.inline.hpp"
#include "memory/iterator.hpp"
#include "memory/resourceArea.hpp"
#include "oops/oop.inline.hpp"
#include "utilities/align.hpp"
#include "utilities/globalDefinitions.hpp"
#include "utilities/intHisto.hpp"
#include "utilities/stack.inline.hpp"

// Collects information about the overall remembered set scan progress during an evacuation.
class G1RemSetScanState : public CHeapObj<mtGC> {
private:
  class G1ClearCardTableTask : public AbstractGangTask {
    G1CollectedHeap* _g1h;
    uint* _dirty_region_list;
    size_t _num_dirty_regions;
    size_t _chunk_length;

    size_t volatile _cur_dirty_regions;
  public:
    G1ClearCardTableTask(G1CollectedHeap* g1h,
                         uint* dirty_region_list,
                         size_t num_dirty_regions,
                         size_t chunk_length) :
      AbstractGangTask("G1 Clear Card Table Task"),
      _g1h(g1h),
      _dirty_region_list(dirty_region_list),
      _num_dirty_regions(num_dirty_regions),
      _chunk_length(chunk_length),
      _cur_dirty_regions(0) {

      assert(chunk_length > 0, "must be");
    }

    static size_t chunk_size() { return M; }

    void work(uint worker_id) {
      G1SATBCardTableModRefBS* ct_bs = _g1h->g1_barrier_set();

      while (_cur_dirty_regions < _num_dirty_regions) {
        size_t next = Atomic::add(_chunk_length, &_cur_dirty_regions) - _chunk_length;
        size_t max = MIN2(next + _chunk_length, _num_dirty_regions);

        for (size_t i = next; i < max; i++) {
          HeapRegion* r = _g1h->region_at(_dirty_region_list[i]);
          if (!r->is_survivor()) {
            ct_bs->clear(MemRegion(r->bottom(), r->end()));
          }
        }
      }
    }
  };
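
  // A sketch of the claiming idiom used by work() above (illustrative only;
  // "cursor" and "limit" stand in for _cur_dirty_regions and _num_dirty_regions).
  // Atomic::add returns the post-add value, so subtracting the addend again
  // yields the start of the chunk this worker just reserved; no two workers
  // ever receive overlapping ranges.
  //
  //   size_t start = Atomic::add(chunk_len, &cursor) - chunk_len;
  //   size_t end   = MIN2(start + chunk_len, limit);
  //   for (size_t i = start; i < end; i++) {
  //     clear_card_table_for(region_list[i]);  // hypothetical helper
  //   }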
  size_t _max_regions;

  // Scan progress for the remembered set of a single region. Transitions from
  // Unclaimed -> Claimed -> Complete.
  // At each of the transitions the thread that does the transition needs to perform
  // some special action once. This is the reason for the extra "Claimed" state.
  typedef jint G1RemsetIterState;

  static const G1RemsetIterState Unclaimed = 0; // The remembered set has not been scanned yet.
  static const G1RemsetIterState Claimed = 1;   // The remembered set is currently being scanned.
  static const G1RemsetIterState Complete = 2;  // The remembered set has been completely scanned.

  G1RemsetIterState volatile* _iter_states;
  // The current location where the next thread should continue scanning in a region's
  // remembered set.
  size_t volatile* _iter_claims;

  // Temporary buffer holding the regions we used to store remembered set scan duplicate
  // information. These are also called "dirty". Valid entries are from [0.._cur_dirty_region).
  uint* _dirty_region_buffer;

  typedef jbyte IsDirtyRegionState;
  static const IsDirtyRegionState Clean = 0;
  static const IsDirtyRegionState Dirty = 1;
  // Holds a flag for every region whether it is in the _dirty_region_buffer already
  // to avoid duplicates. Uses jbyte since there are no atomic instructions for bools.
  IsDirtyRegionState* _in_dirty_region_buffer;
  size_t _cur_dirty_region;

  // Creates a snapshot of the current _top values at the start of collection to
  // filter out card marks that we do not want to scan.
  class G1ResetScanTopClosure : public HeapRegionClosure {
  private:
    HeapWord** _scan_top;
  public:
    G1ResetScanTopClosure(HeapWord** scan_top) : _scan_top(scan_top) { }

    virtual bool doHeapRegion(HeapRegion* r) {
      uint hrm_index = r->hrm_index();
      if (!r->in_collection_set() && r->is_old_or_humongous()) {
        _scan_top[hrm_index] = r->top();
      } else {
        _scan_top[hrm_index] = r->bottom();
      }
      return false;
    }
  };

  // For each region, contains the maximum top() value to be used during this garbage
  // collection. Subsumes common checks like filtering out everything but old and
  // humongous regions outside the collection set.
  // This is valid because we are not interested in scanning stray remembered set
  // entries from free or archive regions.
  HeapWord** _scan_top;
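
  // Example of the effect of this snapshot: an old region outside the
  // collection set with top() == t gets _scan_top == t, so card marks at or
  // above t are ignored during scan; free, young and collection set regions
  // get _scan_top == bottom(), which filters out all of their cards.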
public:
  G1RemSetScanState() :
    _max_regions(0),
    _iter_states(NULL),
    _iter_claims(NULL),
    _dirty_region_buffer(NULL),
    _in_dirty_region_buffer(NULL),
    _cur_dirty_region(0),
    _scan_top(NULL) {
  }

  ~G1RemSetScanState() {
    if (_iter_states != NULL) {
      FREE_C_HEAP_ARRAY(G1RemsetIterState, _iter_states);
    }
    if (_iter_claims != NULL) {
      FREE_C_HEAP_ARRAY(size_t, _iter_claims);
    }
    if (_dirty_region_buffer != NULL) {
      FREE_C_HEAP_ARRAY(uint, _dirty_region_buffer);
    }
    if (_in_dirty_region_buffer != NULL) {
      FREE_C_HEAP_ARRAY(IsDirtyRegionState, _in_dirty_region_buffer);
    }
    if (_scan_top != NULL) {
      FREE_C_HEAP_ARRAY(HeapWord*, _scan_top);
    }
  }

  void initialize(uint max_regions) {
    assert(_iter_states == NULL, "Must not be initialized twice");
    assert(_iter_claims == NULL, "Must not be initialized twice");
    _max_regions = max_regions;
    _iter_states = NEW_C_HEAP_ARRAY(G1RemsetIterState, max_regions, mtGC);
    _iter_claims = NEW_C_HEAP_ARRAY(size_t, max_regions, mtGC);
    _dirty_region_buffer = NEW_C_HEAP_ARRAY(uint, max_regions, mtGC);
    _in_dirty_region_buffer = NEW_C_HEAP_ARRAY(IsDirtyRegionState, max_regions, mtGC);
    _scan_top = NEW_C_HEAP_ARRAY(HeapWord*, max_regions, mtGC);
  }

  void reset() {
    for (uint i = 0; i < _max_regions; i++) {
      _iter_states[i] = Unclaimed;
    }

    G1ResetScanTopClosure cl(_scan_top);
    G1CollectedHeap::heap()->heap_region_iterate(&cl);

    memset((void*)_iter_claims, 0, _max_regions * sizeof(size_t));
    memset(_in_dirty_region_buffer, Clean, _max_regions * sizeof(IsDirtyRegionState));
    _cur_dirty_region = 0;
  }

  // Attempt to claim the remembered set of the region for iteration. Returns true
  // if this call caused the transition from Unclaimed to Claimed.
  inline bool claim_iter(uint region) {
    assert(region < _max_regions, "Tried to access invalid region %u", region);
    if (_iter_states[region] != Unclaimed) {
      return false;
    }
    jint res = Atomic::cmpxchg(Claimed, (jint*)(&_iter_states[region]), Unclaimed);
    return (res == Unclaimed);
  }

  // Tries to atomically set the iteration state to "complete". Returns true for the
  // thread that caused the transition.
  inline bool set_iter_complete(uint region) {
    if (iter_is_complete(region)) {
      return false;
    }
    jint res = Atomic::cmpxchg(Complete, (jint*)(&_iter_states[region]), Claimed);
    return (res == Claimed);
  }

  // Returns true if the region's iteration is complete.
  inline bool iter_is_complete(uint region) const {
    assert(region < _max_regions, "Tried to access invalid region %u", region);
    return _iter_states[region] == Complete;
  }

  // The current position within the remembered set of the given region.
  inline size_t iter_claimed(uint region) const {
    assert(region < _max_regions, "Tried to access invalid region %u", region);
    return _iter_claims[region];
  }
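
  // Typical use of the claim protocol above, as in
  // G1ScanRSForRegionClosure::doHeapRegion() further down (sketch):
  //
  //   if (claim_iter(region)) {
  //     ...   // exactly one thread runs the once-per-region setup
  //   }
  //   ...     // all threads cooperatively scan the remembered set
  //   if (set_iter_complete(region)) {
  //     ...   // exactly one thread runs the once-per-region completion work
  //   }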
  // Claim the next block of cards within the remembered set of the region with
  // the given step size.
  inline size_t iter_claimed_next(uint region, size_t step) {
    return Atomic::add(step, &_iter_claims[region]) - step;
  }

  void add_dirty_region(uint region) {
    if (_in_dirty_region_buffer[region] == Dirty) {
      return;
    }

    bool marked_as_dirty = Atomic::cmpxchg(Dirty, &_in_dirty_region_buffer[region], Clean) == Clean;
    if (marked_as_dirty) {
      size_t allocated = Atomic::add(1, &_cur_dirty_region) - 1;
      _dirty_region_buffer[allocated] = region;
    }
  }

  HeapWord* scan_top(uint region_idx) const {
    return _scan_top[region_idx];
  }

  // Clear the card table of "dirty" regions.
  void clear_card_table(WorkGang* workers) {
    if (_cur_dirty_region == 0) {
      return;
    }

    size_t const num_chunks = align_up(_cur_dirty_region * HeapRegion::CardsPerRegion, G1ClearCardTableTask::chunk_size()) / G1ClearCardTableTask::chunk_size();
    uint const num_workers = (uint)MIN2(num_chunks, (size_t)workers->active_workers());
    size_t const chunk_length = G1ClearCardTableTask::chunk_size() / HeapRegion::CardsPerRegion;

    // Iterate over the dirty region list.
    G1ClearCardTableTask cl(G1CollectedHeap::heap(), _dirty_region_buffer, _cur_dirty_region, chunk_length);

    log_debug(gc, ergo)("Running %s using %u workers for " SIZE_FORMAT " "
                        "units of work for " SIZE_FORMAT " regions.",
                        cl.name(), num_workers, num_chunks, _cur_dirty_region);
    workers->run_task(&cl, num_workers);

#ifndef PRODUCT
    // Need to synchronize with concurrent cleanup since it needs to
    // finish its card table clearing before we can verify.
    G1CollectedHeap::heap()->wait_while_free_regions_coming();
    G1CollectedHeap::heap()->verifier()->verify_card_table_cleanup();
#endif
  }
};
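
// Worked example for the chunking arithmetic in clear_card_table() above,
// assuming 1 MB regions and 512 byte cards (so HeapRegion::CardsPerRegion == 2048):
// chunk_size() is M cards, hence chunk_length == M / 2048 == 512 regions per
// chunk. With e.g. 1000 dirty regions, the 1000 * 2048 cards round up to two
// full chunks, so at most two workers are activated.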
G1RemSet::G1RemSet(G1CollectedHeap* g1,
                   CardTableModRefBS* ct_bs,
                   G1HotCardCache* hot_card_cache) :
  _g1(g1),
  _scan_state(new G1RemSetScanState()),
  _num_conc_refined_cards(0),
  _ct_bs(ct_bs),
  _g1p(_g1->g1_policy()),
  _hot_card_cache(hot_card_cache),
  _prev_period_summary()
{
}

G1RemSet::~G1RemSet() {
  if (_scan_state != NULL) {
    delete _scan_state;
  }
}

uint G1RemSet::num_par_rem_sets() {
  return MAX2(DirtyCardQueueSet::num_par_ids() + ConcurrentG1Refine::thread_num(), ParallelGCThreads);
}

void G1RemSet::initialize(size_t capacity, uint max_regions) {
  G1FromCardCache::initialize(num_par_rem_sets(), max_regions);
  _scan_state->initialize(max_regions);
  {
    GCTraceTime(Debug, gc, marking)("Initialize Card Live Data");
    _card_live_data.initialize(capacity, max_regions);
  }
  if (G1PretouchAuxiliaryMemory) {
    GCTraceTime(Debug, gc, marking)("Pre-Touch Card Live Data");
    _card_live_data.pretouch();
  }
}

G1ScanRSForRegionClosure::G1ScanRSForRegionClosure(G1RemSetScanState* scan_state,
                                                   G1ScanObjsDuringScanRSClosure* scan_obj_on_card,
                                                   CodeBlobClosure* code_root_cl,
                                                   uint worker_i) :
  _scan_state(scan_state),
  _scan_objs_on_card_cl(scan_obj_on_card),
  _code_root_cl(code_root_cl),
  _strong_code_root_scan_time_sec(0.0),
  _cards_claimed(0),
  _cards_scanned(0),
  _cards_skipped(0),
  _worker_i(worker_i) {
  _g1h = G1CollectedHeap::heap();
  _bot = _g1h->bot();
  _ct_bs = _g1h->g1_barrier_set();
}

void G1ScanRSForRegionClosure::scan_card(MemRegion mr, uint region_idx_for_card) {
  HeapRegion* const card_region = _g1h->region_at(region_idx_for_card);
  _scan_objs_on_card_cl->set_region(card_region);
  card_region->oops_on_card_seq_iterate_careful<true>(mr, _scan_objs_on_card_cl);
  _cards_scanned++;
}

void G1ScanRSForRegionClosure::scan_strong_code_roots(HeapRegion* r) {
  double scan_start = os::elapsedTime();
  r->strong_code_roots_do(_code_root_cl);
  _strong_code_root_scan_time_sec += (os::elapsedTime() - scan_start);
}

void G1ScanRSForRegionClosure::claim_card(size_t card_index, const uint region_idx_for_card) {
  _ct_bs->set_card_claimed(card_index);
  _scan_state->add_dirty_region(region_idx_for_card);
}

bool G1ScanRSForRegionClosure::doHeapRegion(HeapRegion* r) {
  assert(r->in_collection_set(), "should only be called on elements of CS.");
  uint region_idx = r->hrm_index();

  if (_scan_state->iter_is_complete(region_idx)) {
    return false;
  }
  if (_scan_state->claim_iter(region_idx)) {
    // If we ever free the collection set concurrently, we should also
    // clear the card table concurrently; then we would not need to
    // add collection set regions to the dirty region list.
    _scan_state->add_dirty_region(region_idx);
  }

  // We claim cards in blocks to reduce contention.
  size_t const block_size = G1RSetScanBlockSize;

  HeapRegionRemSetIterator iter(r->rem_set());
  size_t card_index;

  size_t claimed_card_block = _scan_state->iter_claimed_next(region_idx, block_size);
  for (size_t current_card = 0; iter.has_next(card_index); current_card++) {
    if (current_card >= claimed_card_block + block_size) {
      claimed_card_block = _scan_state->iter_claimed_next(region_idx, block_size);
    }
    if (current_card < claimed_card_block) {
      _cards_skipped++;
      continue;
    }
    _cards_claimed++;

    // If the card is dirty, then G1 will scan it during Update RS.
    if (_ct_bs->is_card_claimed(card_index) || _ct_bs->is_card_dirty(card_index)) {
      continue;
    }

    HeapWord* const card_start = _g1h->bot()->address_for_index(card_index);
    uint const region_idx_for_card = _g1h->addr_to_region(card_start);

    assert(_g1h->region_at(region_idx_for_card)->is_in_reserved(card_start),
           "Card start " PTR_FORMAT " to scan outside of region %u", p2i(card_start), _g1h->region_at(region_idx_for_card)->hrm_index());
    HeapWord* const top = _scan_state->scan_top(region_idx_for_card);
    if (card_start >= top) {
      continue;
    }

    // We claim lazily (so races are possible but they're benign), which reduces the
    // number of duplicate scans (the rsets of the regions in the cset can intersect).
    // Claim the card after checking bounds above: the remembered set may contain
    // random cards into current survivor, and we would then have an incorrectly
    // claimed card in survivor space. Card table clear does not reset the card table
    // of survivor space regions.
    claim_card(card_index, region_idx_for_card);

    MemRegion const mr(card_start, MIN2(card_start + BOTConstants::N_words, top));

    scan_card(mr, region_idx_for_card);
  }
  if (_scan_state->set_iter_complete(region_idx)) {
    // Scan the strong code root list attached to the current region.
    scan_strong_code_roots(r);
  }
  return false;
}
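
// Note on the block claiming in the loop above: HeapRegionRemSetIterator yields
// the region's cards in the same order for every thread, so each thread walks the
// full sequence but only scans positions inside blocks it claimed via
// iter_claimed_next(), fast-forwarding (counted as "skipped" cards) through
// blocks owned by other threads.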
void G1RemSet::scan_rem_set(G1ParScanThreadState* pss,
                            CodeBlobClosure* heap_region_codeblobs,
                            uint worker_i) {
  double rs_time_start = os::elapsedTime();

  G1ScanObjsDuringScanRSClosure scan_cl(_g1, pss);
  G1ScanRSForRegionClosure cl(_scan_state, &scan_cl, heap_region_codeblobs, worker_i);
  _g1->collection_set_iterate_from(&cl, worker_i);

  double scan_rs_time_sec = (os::elapsedTime() - rs_time_start) -
                            cl.strong_code_root_scan_time_sec();

  G1GCPhaseTimes* p = _g1p->phase_times();

  p->record_time_secs(G1GCPhaseTimes::ScanRS, worker_i, scan_rs_time_sec);
  p->record_thread_work_item(G1GCPhaseTimes::ScanRS, worker_i, cl.cards_scanned(), G1GCPhaseTimes::ScanRSScannedCards);
  p->record_thread_work_item(G1GCPhaseTimes::ScanRS, worker_i, cl.cards_claimed(), G1GCPhaseTimes::ScanRSClaimedCards);
  p->record_thread_work_item(G1GCPhaseTimes::ScanRS, worker_i, cl.cards_skipped(), G1GCPhaseTimes::ScanRSSkippedCards);

  p->record_time_secs(G1GCPhaseTimes::CodeRoots, worker_i, cl.strong_code_root_scan_time_sec());
}

// Closure used for updating rem sets. Only called during an evacuation pause.
class G1RefineCardClosure: public CardTableEntryClosure {
  G1RemSet* _g1rs;
  G1ScanObjsDuringUpdateRSClosure* _update_rs_cl;

  size_t _cards_scanned;
  size_t _cards_skipped;
public:
  G1RefineCardClosure(G1CollectedHeap* g1h, G1ScanObjsDuringUpdateRSClosure* update_rs_cl) :
    _g1rs(g1h->g1_rem_set()), _update_rs_cl(update_rs_cl), _cards_scanned(0), _cards_skipped(0)
  {}

  bool do_card_ptr(jbyte* card_ptr, uint worker_i) {
    assert(SafepointSynchronize::is_at_safepoint(), "not during an evacuation pause");

    bool card_scanned = _g1rs->refine_card_during_gc(card_ptr, _update_rs_cl);

    if (card_scanned) {
      _cards_scanned++;
    } else {
      _cards_skipped++;
    }
    return true;
  }

  size_t cards_scanned() const { return _cards_scanned; }
  size_t cards_skipped() const { return _cards_skipped; }
};

void G1RemSet::update_rem_set(G1ParScanThreadState* pss, uint worker_i) {
  G1ScanObjsDuringUpdateRSClosure update_rs_cl(_g1, pss, worker_i);
  G1RefineCardClosure refine_card_cl(_g1, &update_rs_cl);

  G1GCParPhaseTimesTracker x(_g1p->phase_times(), G1GCPhaseTimes::UpdateRS, worker_i);
  if (G1HotCardCache::default_use_cache()) {
    // Apply the closure to the entries of the hot card cache.
    G1GCParPhaseTimesTracker y(_g1p->phase_times(), G1GCPhaseTimes::ScanHCC, worker_i);
    _g1->iterate_hcc_closure(&refine_card_cl, worker_i);
  }
  // Apply the closure to all remaining log entries.
  _g1->iterate_dirty_card_closure(&refine_card_cl, worker_i);

  G1GCPhaseTimes* p = _g1p->phase_times();
  p->record_thread_work_item(G1GCPhaseTimes::UpdateRS, worker_i, refine_card_cl.cards_scanned(), G1GCPhaseTimes::UpdateRSScannedCards);
  p->record_thread_work_item(G1GCPhaseTimes::UpdateRS, worker_i, refine_card_cl.cards_skipped(), G1GCPhaseTimes::UpdateRSSkippedCards);
}
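
// Informal sketch of how cards arrive here: mutator post-barriers dirty a card
// and enqueue it on a dirty card queue; entries that concurrent refinement has
// not processed before the pause, plus the contents of the hot card cache, are
// drained in update_rem_set() above via iterate_hcc_closure() and
// iterate_dirty_card_closure(), each card being passed to
// G1RefineCardClosure::do_card_ptr().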
void G1RemSet::cleanupHRRS() {
  HeapRegionRemSet::cleanup();
}

void G1RemSet::oops_into_collection_set_do(G1ParScanThreadState* pss,
                                           CodeBlobClosure* heap_region_codeblobs,
                                           uint worker_i) {
  update_rem_set(pss, worker_i);
  scan_rem_set(pss, heap_region_codeblobs, worker_i);
}

void G1RemSet::prepare_for_oops_into_collection_set_do() {
  DirtyCardQueueSet& dcqs = JavaThread::dirty_card_queue_set();
  dcqs.concatenate_logs();

  _scan_state->reset();
}

void G1RemSet::cleanup_after_oops_into_collection_set_do() {
  G1GCPhaseTimes* phase_times = _g1->g1_policy()->phase_times();

  // Set all cards back to clean.
  double start = os::elapsedTime();
  _scan_state->clear_card_table(_g1->workers());
  phase_times->record_clear_ct_time((os::elapsedTime() - start) * 1000.0);
}
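
// The functions above form the per-pause protocol, roughly as driven by
// G1CollectedHeap (sketch):
//
//   prepare_for_oops_into_collection_set_do();      // once, before evacuation
//   oops_into_collection_set_do(pss, blobs, i);     // per worker, in parallel
//   cleanup_after_oops_into_collection_set_do();    // once, after evacuation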
class G1ScrubRSClosure: public HeapRegionClosure {
  G1CollectedHeap* _g1h;
  G1CardLiveData* _live_data;
public:
  G1ScrubRSClosure(G1CardLiveData* live_data) :
    _g1h(G1CollectedHeap::heap()),
    _live_data(live_data) { }

  bool doHeapRegion(HeapRegion* r) {
    if (!r->is_continues_humongous()) {
      r->rem_set()->scrub(_live_data);
    }
    return false;
  }
};

void G1RemSet::scrub(uint worker_num, HeapRegionClaimer* hrclaimer) {
  G1ScrubRSClosure scrub_cl(&_card_live_data);
  _g1->heap_region_par_iterate(&scrub_cl, worker_num, hrclaimer);
}

inline void check_card_ptr(jbyte* card_ptr, CardTableModRefBS* ct_bs) {
#ifdef ASSERT
  G1CollectedHeap* g1 = G1CollectedHeap::heap();
  assert(g1->is_in_exact(ct_bs->addr_for(card_ptr)),
         "Card at " PTR_FORMAT " index " SIZE_FORMAT " representing heap at " PTR_FORMAT " (%u) must be in committed heap",
         p2i(card_ptr),
         ct_bs->index_for(ct_bs->addr_for(card_ptr)),
         p2i(ct_bs->addr_for(card_ptr)),
         g1->addr_to_region(ct_bs->addr_for(card_ptr)));
#endif
}

void G1RemSet::refine_card_concurrently(jbyte* card_ptr,
                                        uint worker_i) {
  assert(!_g1->is_gc_active(), "Only call concurrently");

  check_card_ptr(card_ptr, _ct_bs);

  // If the card is no longer dirty, nothing to do.
  if (*card_ptr != CardTableModRefBS::dirty_card_val()) {
    return;
  }

  // Construct the region representing the card.
  HeapWord* start = _ct_bs->addr_for(card_ptr);
  // And find the region containing it.
  HeapRegion* r = _g1->heap_region_containing(start);

  // This check is needed for some uncommon cases where we should
  // ignore the card.
  //
  // The region could be young. Cards for young regions are
  // distinctly marked (set to g1_young_gen), so the post-barrier will
  // filter them out. However, that marking is performed
  // concurrently. A write to a young object could occur before the
  // card has been marked young, slipping past the filter.
  //
  // The card could be stale, because the region has been freed since
  // the card was recorded. In this case the region type could be
  // anything. If (still) free or (reallocated) young, just ignore
  // it. If (reallocated) old or humongous, the later card trimming
  // and additional checks in iteration may detect staleness. At
  // worst, we end up processing a stale card unnecessarily.
  //
  // In the normal (non-stale) case, the synchronization between the
  // enqueueing of the card and processing it here will have ensured
  // we see the up-to-date region type here.
  if (!r->is_old_or_humongous()) {
    return;
  }

  // While we are processing RSet buffers during the collection, we
  // actually don't want to scan any cards on the collection set,
  // since we don't want to update remembered sets with entries that
  // point into the collection set, given that live objects from the
  // collection set are about to move and such entries will be stale
  // very soon. This change also deals with a reliability issue which
  // involves scanning a card in the collection set and coming across
  // an array that was being chunked and looking malformed. Note,
  // however, that if evacuation fails, we have to scan any objects
  // that were not moved and create any missing entries.
  if (r->in_collection_set()) {
    return;
  }

  // The result from the hot card cache insert call is either:
  //   * pointer to the current card
  //     (implying that the current card is not 'hot'),
  //   * null
  //     (meaning we had inserted the card ptr into the "hot" card cache,
  //     which had some headroom),
  //   * a pointer to a "hot" card that was evicted from the "hot" cache.

  if (_hot_card_cache->use_cache()) {
    assert(!SafepointSynchronize::is_at_safepoint(), "sanity");

    const jbyte* orig_card_ptr = card_ptr;
    card_ptr = _hot_card_cache->insert(card_ptr);
    if (card_ptr == NULL) {
      // There was no eviction. Nothing to do.
      return;
    } else if (card_ptr != orig_card_ptr) {
      // Original card was inserted and an old card was evicted.
      start = _ct_bs->addr_for(card_ptr);
      r = _g1->heap_region_containing(start);

      // Check whether the region formerly in the cache should be
      // ignored, as discussed earlier for the original card. The
      // region could have been freed while in the cache. The cset is
      // not relevant here, since we're in concurrent phase.
      if (!r->is_old_or_humongous()) {
        return;
      }
    } // Else we still have the original card.
  }
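
  // Decision summary for the hot card cache interaction above (illustrative):
  //
  //   card_ptr == NULL           -> card absorbed by the cache; refined later, done now
  //   card_ptr == orig_card_ptr  -> card was not hot; keep refining it now
  //   otherwise                  -> refine the evicted, formerly hot card instead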
  // Trim the region designated by the card to what's been allocated
  // in the region. The card could be stale, or the card could cover
  // (part of) an object at the end of the allocated space and extend
  // beyond the end of allocation.

  // Non-humongous objects are only allocated in the old-gen during
  // GC, so if region is old then top is stable. Humongous object
  // allocation sets top last; if top has not yet been set, this is
  // a stale card and we'll end up with an empty intersection. If
  // this is not a stale card, the synchronization between the
  // enqueuing of the card and processing it here will have ensured
  // we see the up-to-date top here.
  HeapWord* scan_limit = r->top();

  if (scan_limit <= start) {
    // If the trimmed region is empty, the card must be stale.
    return;
  }

  // Okay to clean and process the card now. There are still some
  // stale card cases that may be detected by iteration and dealt with
  // as iteration failure.
  *const_cast<volatile jbyte*>(card_ptr) = CardTableModRefBS::clean_card_val();

  // This fence serves two purposes. First, the card must be cleaned
  // before processing the contents. Second, we can't proceed with
  // processing until after the read of top, for synchronization with
  // possibly concurrent humongous object allocation. It's okay that
  // reading top and reading type were racy with respect to each other.
  // We need both set, in any order, to proceed.
  OrderAccess::fence();

  // Don't use addr_for(card_ptr + 1) which can ask for
  // a card beyond the heap.
  HeapWord* end = start + CardTableModRefBS::card_size_in_words;
  MemRegion dirty_region(start, MIN2(scan_limit, end));
  assert(!dirty_region.is_empty(), "sanity");

  G1ConcurrentRefineOopClosure conc_refine_cl(_g1, worker_i);

  bool card_processed =
    r->oops_on_card_seq_iterate_careful<false>(dirty_region, &conc_refine_cl);

  // If unable to process the card then we encountered an unparsable
  // part of the heap (e.g. a partially allocated object) while
  // processing a stale card. Despite the card being stale, redirty
  // and re-enqueue, because we've already cleaned the card. Without
  // this we could incorrectly discard a non-stale card.
  if (!card_processed) {
    // The card might have gotten re-dirtied and re-enqueued while we
    // worked. (In fact, it's pretty likely.)
    if (*card_ptr != CardTableModRefBS::dirty_card_val()) {
      *card_ptr = CardTableModRefBS::dirty_card_val();
      MutexLockerEx x(Shared_DirtyCardQ_lock,
                      Mutex::_no_safepoint_check_flag);
      DirtyCardQueue* sdcq =
        JavaThread::dirty_card_queue_set().shared_dirty_card_queue();
      sdcq->enqueue(card_ptr);
    }
  } else {
    _num_conc_refined_cards++; // Unsynchronized update, only used for logging.
  }
}
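
// Why refine_card_concurrently() cleans the card before scanning it (informal):
// the G1 post-barrier only enqueues a card when it transitions to dirty. If we
// scanned first and cleaned afterwards, a mutator write racing with the scan
// would find the card still dirty, skip the enqueue, and our subsequent clean
// would then hide that update forever. Cleaning first ensures any concurrent
// write re-dirties and re-enqueues the card, so it is refined again later.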
bool G1RemSet::refine_card_during_gc(jbyte* card_ptr,
                                     G1ScanObjsDuringUpdateRSClosure* update_rs_cl) {
  assert(_g1->is_gc_active(), "Only call during GC");

  check_card_ptr(card_ptr, _ct_bs);

  // If the card is no longer dirty, nothing to do. This covers cards that were already
  // scanned as parts of the remembered sets.
  if (*card_ptr != CardTableModRefBS::dirty_card_val()) {
    return false;
  }

  // During GC we can immediately clean the card since we will not re-enqueue stale
  // cards as we know they can be disregarded.
  *card_ptr = CardTableModRefBS::clean_card_val();

  // Construct the region representing the card.
  HeapWord* card_start = _ct_bs->addr_for(card_ptr);
  // And find the region containing it.
  HeapRegion* r = _g1->heap_region_containing(card_start);

  HeapWord* scan_limit = _scan_state->scan_top(r->hrm_index());
  if (scan_limit <= card_start) {
    // If the card starts above the area in the region containing objects to scan, skip it.
    return false;
  }

  // Don't use addr_for(card_ptr + 1) which can ask for
  // a card beyond the heap.
  HeapWord* card_end = card_start + CardTableModRefBS::card_size_in_words;
  MemRegion dirty_region(card_start, MIN2(scan_limit, card_end));
  assert(!dirty_region.is_empty(), "sanity");

  update_rs_cl->set_region(r);
  bool card_processed = r->oops_on_card_seq_iterate_careful<true>(dirty_region, update_rs_cl);
  assert(card_processed, "must be");
  return true;
}

void G1RemSet::print_periodic_summary_info(const char* header, uint period_count) {
  if ((G1SummarizeRSetStatsPeriod > 0) && log_is_enabled(Trace, gc, remset) &&
      (period_count % G1SummarizeRSetStatsPeriod == 0)) {

    G1RemSetSummary current(this);
    _prev_period_summary.subtract_from(&current);

    Log(gc, remset) log;
    log.trace("%s", header);
    ResourceMark rm;
    _prev_period_summary.print_on(log.trace_stream());

    _prev_period_summary.set(&current);
  }
}

void G1RemSet::print_summary_info() {
  Log(gc, remset, exit) log;
  if (log.is_trace()) {
    log.trace(" Cumulative RS summary");
    G1RemSetSummary current(this);
    ResourceMark rm;
    current.print_on(log.trace_stream());
  }
}

void G1RemSet::create_card_live_data(WorkGang* workers, G1CMBitMap* mark_bitmap) {
  _card_live_data.create(workers, mark_bitmap);
}

void G1RemSet::finalize_card_live_data(WorkGang* workers, G1CMBitMap* mark_bitmap) {
  _card_live_data.finalize(workers, mark_bitmap);
}

void G1RemSet::verify_card_live_data(WorkGang* workers, G1CMBitMap* bitmap) {
  _card_live_data.verify(workers, bitmap);
}

void G1RemSet::clear_card_live_data(WorkGang* workers) {
  _card_live_data.clear(workers);
}

#ifdef ASSERT
void G1RemSet::verify_card_live_data_is_clear() {
  _card_live_data.verify_is_clear();
}
#endif
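
// Card live data lifecycle (sketch): create_card_live_data() and
// finalize_card_live_data() are driven from the concurrent marking cycle;
// scrub() then uses the result to remove remembered set entries that refer to
// dead cards, and clear_card_live_data() resets the data for the next cycle.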