rev 49539 : imported patch 8178105-switch-at-remark
rev 49542 : imported patch 8178105-8200371-assert-problem
/*
 * Copyright (c) 2001, 2018, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc/g1/dirtyCardQueue.hpp"
#include "gc/g1/g1BlockOffsetTable.inline.hpp"
#include "gc/g1/g1CardTable.inline.hpp"
#include "gc/g1/g1CollectedHeap.inline.hpp"
#include "gc/g1/g1ConcurrentRefine.hpp"
#include "gc/g1/g1FromCardCache.hpp"
#include "gc/g1/g1GCPhaseTimes.hpp"
#include "gc/g1/g1HotCardCache.hpp"
#include "gc/g1/g1OopClosures.inline.hpp"
#include "gc/g1/g1RemSet.hpp"
#include "gc/g1/heapRegion.inline.hpp"
#include "gc/g1/heapRegionManager.inline.hpp"
#include "gc/g1/heapRegionRemSet.hpp"
#include "gc/shared/gcTraceTime.inline.hpp"
#include "gc/shared/suspendibleThreadSet.hpp"
#include "memory/iterator.hpp"
#include "memory/resourceArea.hpp"
#include "oops/access.inline.hpp"
#include "oops/oop.inline.hpp"
#include "utilities/align.hpp"
#include "utilities/globalDefinitions.hpp"
#include "utilities/intHisto.hpp"
#include "utilities/stack.inline.hpp"
#include "utilities/ticks.inline.hpp"

// Collects information about the overall remembered set scan progress during an evacuation.
class G1RemSetScanState : public CHeapObj<mtGC> {
private:
  class G1ClearCardTableTask : public AbstractGangTask {
    G1CollectedHeap* _g1h;
    uint* _dirty_region_list;
    size_t _num_dirty_regions;
    size_t _chunk_length;

    size_t volatile _cur_dirty_regions;
  public:
    G1ClearCardTableTask(G1CollectedHeap* g1h,
                         uint* dirty_region_list,
                         size_t num_dirty_regions,
                         size_t chunk_length) :
      AbstractGangTask("G1 Clear Card Table Task"),
      _g1h(g1h),
      _dirty_region_list(dirty_region_list),
      _num_dirty_regions(num_dirty_regions),
      _chunk_length(chunk_length),
      _cur_dirty_regions(0) {

      assert(chunk_length > 0, "must be");
    }

    static size_t chunk_size() { return M; }

    void work(uint worker_id) {
      while (_cur_dirty_regions < _num_dirty_regions) {
        size_t next = Atomic::add(_chunk_length, &_cur_dirty_regions) - _chunk_length;
        size_t max = MIN2(next + _chunk_length, _num_dirty_regions);

        for (size_t i = next; i < max; i++) {
          HeapRegion* r = _g1h->region_at(_dirty_region_list[i]);
          if (!r->is_survivor()) {
            r->clear_cardtable();
          }
        }
      }
    }
  };

  size_t _max_regions;

  // Scan progress for the remembered set of a single region. Transitions from
  // Unclaimed -> Claimed -> Complete.
  // At each of the transitions the thread that does the transition needs to perform
  // some special action once. This is the reason for the extra "Claimed" state.
  typedef jint G1RemsetIterState;

  static const G1RemsetIterState Unclaimed = 0; // The remembered set has not been scanned yet.
  static const G1RemsetIterState Claimed = 1;   // The remembered set is currently being scanned.
  static const G1RemsetIterState Complete = 2;  // The remembered set has been completely scanned.

  G1RemsetIterState volatile* _iter_states;
  // The current location where the next thread should continue scanning in a region's
  // remembered set.
  size_t volatile* _iter_claims;

  // Temporary buffer holding the regions we used to store remembered set scan duplicate
  // information. These are also called "dirty". Valid entries are from [0.._cur_dirty_region)
  uint* _dirty_region_buffer;

  typedef jbyte IsDirtyRegionState;
  static const IsDirtyRegionState Clean = 0;
  static const IsDirtyRegionState Dirty = 1;
  // Holds a flag for every region whether it is in the _dirty_region_buffer already
  // to avoid duplicates. Uses jbyte since there are no atomic instructions for bools.
  IsDirtyRegionState* _in_dirty_region_buffer;
  size_t _cur_dirty_region;

  // Creates a snapshot of the current _top values at the start of collection to
  // filter out card marks that we do not want to scan.
  class G1ResetScanTopClosure : public HeapRegionClosure {
  private:
    HeapWord** _scan_top;
  public:
    G1ResetScanTopClosure(HeapWord** scan_top) : _scan_top(scan_top) { }

    virtual bool do_heap_region(HeapRegion* r) {
      uint hrm_index = r->hrm_index();
      if (!r->in_collection_set() && r->is_old_or_humongous()) {
        _scan_top[hrm_index] = r->top();
      } else {
        _scan_top[hrm_index] = r->bottom();
      }
      return false;
    }
  };

  // For each region, contains the maximum top() value to be used during this garbage
  // collection. Subsumes common checks like filtering out everything but old and
  // humongous regions outside the collection set.
  // This is valid because we are not interested in scanning stray remembered set
  // entries from free or archive regions.
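  // Note: this array is consulted through scan_top() both when scanning remembered
  // sets (G1ScanRSForRegionClosure::do_heap_region()) and when refining cards during
  // the pause (refine_card_during_gc()); any card at or above the recorded value is
  // skipped.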
  HeapWord** _scan_top;
public:
  G1RemSetScanState() :
    _max_regions(0),
    _iter_states(NULL),
    _iter_claims(NULL),
    _dirty_region_buffer(NULL),
    _in_dirty_region_buffer(NULL),
    _cur_dirty_region(0),
    _scan_top(NULL) {
  }

  ~G1RemSetScanState() {
    if (_iter_states != NULL) {
      FREE_C_HEAP_ARRAY(G1RemsetIterState, _iter_states);
    }
    if (_iter_claims != NULL) {
      FREE_C_HEAP_ARRAY(size_t, _iter_claims);
    }
    if (_dirty_region_buffer != NULL) {
      FREE_C_HEAP_ARRAY(uint, _dirty_region_buffer);
    }
    if (_in_dirty_region_buffer != NULL) {
      FREE_C_HEAP_ARRAY(IsDirtyRegionState, _in_dirty_region_buffer);
    }
    if (_scan_top != NULL) {
      FREE_C_HEAP_ARRAY(HeapWord*, _scan_top);
    }
  }

  void initialize(uint max_regions) {
    assert(_iter_states == NULL, "Must not be initialized twice");
    assert(_iter_claims == NULL, "Must not be initialized twice");
    _max_regions = max_regions;
    _iter_states = NEW_C_HEAP_ARRAY(G1RemsetIterState, max_regions, mtGC);
    _iter_claims = NEW_C_HEAP_ARRAY(size_t, max_regions, mtGC);
    _dirty_region_buffer = NEW_C_HEAP_ARRAY(uint, max_regions, mtGC);
    _in_dirty_region_buffer = NEW_C_HEAP_ARRAY(IsDirtyRegionState, max_regions, mtGC);
    _scan_top = NEW_C_HEAP_ARRAY(HeapWord*, max_regions, mtGC);
  }

  void reset() {
    for (uint i = 0; i < _max_regions; i++) {
      _iter_states[i] = Unclaimed;
    }

    G1ResetScanTopClosure cl(_scan_top);
    G1CollectedHeap::heap()->heap_region_iterate(&cl);

    memset((void*)_iter_claims, 0, _max_regions * sizeof(size_t));
    memset(_in_dirty_region_buffer, Clean, _max_regions * sizeof(IsDirtyRegionState));
    _cur_dirty_region = 0;
  }

  // Attempt to claim the remembered set of the region for iteration. Returns true
  // if this call caused the transition from Unclaimed to Claimed.
  inline bool claim_iter(uint region) {
    assert(region < _max_regions, "Tried to access invalid region %u", region);
    if (_iter_states[region] != Unclaimed) {
      return false;
    }
    G1RemsetIterState res = Atomic::cmpxchg(Claimed, &_iter_states[region], Unclaimed);
    return (res == Unclaimed);
  }

  // Tries to atomically set the iteration state to "complete". Returns true for the
  // thread that caused the transition.
  inline bool set_iter_complete(uint region) {
    if (iter_is_complete(region)) {
      return false;
    }
    G1RemsetIterState res = Atomic::cmpxchg(Complete, &_iter_states[region], Claimed);
    return (res == Claimed);
  }

  // Returns true if the region's iteration is complete.
  inline bool iter_is_complete(uint region) const {
    assert(region < _max_regions, "Tried to access invalid region %u", region);
    return _iter_states[region] == Complete;
  }

  // The current position within the remembered set of the given region.
  inline size_t iter_claimed(uint region) const {
    assert(region < _max_regions, "Tried to access invalid region %u", region);
    return _iter_claims[region];
  }

  // Claim the next block of cards within the remembered set of the region with
  // the given step size.
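  // Note: claim_iter(), iter_claimed_next() and set_iter_complete() are used together
  // by G1ScanRSForRegionClosure::do_heap_region(): claim_iter() elects the thread that
  // registers the region as dirty, iter_claimed_next() hands out blocks of
  // G1RSetScanBlockSize cards to scan, and set_iter_complete() elects the thread that
  // scans the region's strong code roots.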
  inline size_t iter_claimed_next(uint region, size_t step) {
    return Atomic::add(step, &_iter_claims[region]) - step;
  }

  void add_dirty_region(uint region) {
    if (_in_dirty_region_buffer[region] == Dirty) {
      return;
    }

    bool marked_as_dirty = Atomic::cmpxchg(Dirty, &_in_dirty_region_buffer[region], Clean) == Clean;
    if (marked_as_dirty) {
      size_t allocated = Atomic::add(1u, &_cur_dirty_region) - 1;
      _dirty_region_buffer[allocated] = region;
    }
  }

  HeapWord* scan_top(uint region_idx) const {
    return _scan_top[region_idx];
  }

  // Clear the card table of "dirty" regions.
  void clear_card_table(WorkGang* workers) {
    if (_cur_dirty_region == 0) {
      return;
    }

    size_t const num_chunks = align_up(_cur_dirty_region * HeapRegion::CardsPerRegion, G1ClearCardTableTask::chunk_size()) / G1ClearCardTableTask::chunk_size();
    uint const num_workers = (uint)MIN2(num_chunks, (size_t)workers->active_workers());
    size_t const chunk_length = G1ClearCardTableTask::chunk_size() / HeapRegion::CardsPerRegion;

    // Iterate over the dirty cards region list.
    G1ClearCardTableTask cl(G1CollectedHeap::heap(), _dirty_region_buffer, _cur_dirty_region, chunk_length);

    log_debug(gc, ergo)("Running %s using %u workers for " SIZE_FORMAT " "
                        "units of work for " SIZE_FORMAT " regions.",
                        cl.name(), num_workers, num_chunks, _cur_dirty_region);
    workers->run_task(&cl, num_workers);

#ifndef PRODUCT
    G1CollectedHeap::heap()->verifier()->verify_card_table_cleanup();
#endif
  }
};

G1RemSet::G1RemSet(G1CollectedHeap* g1,
                   G1CardTable* ct,
                   G1HotCardCache* hot_card_cache) :
  _g1(g1),
  _scan_state(new G1RemSetScanState()),
  _num_conc_refined_cards(0),
  _ct(ct),
  _g1p(_g1->g1_policy()),
  _hot_card_cache(hot_card_cache),
  _prev_period_summary() {
}

G1RemSet::~G1RemSet() {
  if (_scan_state != NULL) {
    delete _scan_state;
  }
}

uint G1RemSet::num_par_rem_sets() {
  return DirtyCardQueueSet::num_par_ids() + G1ConcurrentRefine::max_num_threads() + MAX2(ConcGCThreads, ParallelGCThreads);
}

void G1RemSet::initialize(size_t capacity, uint max_regions) {
  G1FromCardCache::initialize(num_par_rem_sets(), max_regions);
  _scan_state->initialize(max_regions);
}

G1ScanRSForRegionClosure::G1ScanRSForRegionClosure(G1RemSetScanState* scan_state,
                                                   G1ScanObjsDuringScanRSClosure* scan_obj_on_card,
                                                   CodeBlobClosure* code_root_cl,
                                                   uint worker_i) :
  _scan_state(scan_state),
  _scan_objs_on_card_cl(scan_obj_on_card),
  _code_root_cl(code_root_cl),
  _strong_code_root_scan_time_sec(0.0),
  _cards_claimed(0),
  _cards_scanned(0),
  _cards_skipped(0),
  _worker_i(worker_i) {
  _g1h = G1CollectedHeap::heap();
  _bot = _g1h->bot();
  _ct = _g1h->card_table();
}

void G1ScanRSForRegionClosure::scan_card(MemRegion mr, uint region_idx_for_card) {
  HeapRegion* const card_region = _g1h->region_at(region_idx_for_card);
  _scan_objs_on_card_cl->set_region(card_region);
  card_region->oops_on_card_seq_iterate_careful<true>(mr, _scan_objs_on_card_cl);
  _cards_scanned++;
}

void G1ScanRSForRegionClosure::scan_strong_code_roots(HeapRegion* r) {
  double scan_start = os::elapsedTime();
  r->strong_code_roots_do(_code_root_cl);
  _strong_code_root_scan_time_sec += (os::elapsedTime() - scan_start);
}

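// Claims the given card by marking it in the card table and records the card's
// region in the dirty region buffer, so that the region's card table can be
// cleared again after the pause.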
void G1ScanRSForRegionClosure::claim_card(size_t card_index, const uint region_idx_for_card) {
  _ct->set_card_claimed(card_index);
  _scan_state->add_dirty_region(region_idx_for_card);
}

bool G1ScanRSForRegionClosure::do_heap_region(HeapRegion* r) {
  assert(r->in_collection_set(), "should only be called on elements of CS.");
  uint region_idx = r->hrm_index();

  if (_scan_state->iter_is_complete(region_idx)) {
    return false;
  }
  if (_scan_state->claim_iter(region_idx)) {
    // If we ever free the collection set concurrently, we should also
    // clear the card table concurrently; then we won't need to
    // add regions of the collection set to the dirty region list.
    _scan_state->add_dirty_region(region_idx);
  }

  // We claim cards in blocks so as to reduce the contention.
  size_t const block_size = G1RSetScanBlockSize;

  HeapRegionRemSetIterator iter(r->rem_set());
  size_t card_index;

  size_t claimed_card_block = _scan_state->iter_claimed_next(region_idx, block_size);
  for (size_t current_card = 0; iter.has_next(card_index); current_card++) {
    if (current_card >= claimed_card_block + block_size) {
      claimed_card_block = _scan_state->iter_claimed_next(region_idx, block_size);
    }
    if (current_card < claimed_card_block) {
      _cards_skipped++;
      continue;
    }
    _cards_claimed++;

    // If the card is dirty, then G1 will scan it during Update RS.
    if (_ct->is_card_claimed(card_index) || _ct->is_card_dirty(card_index)) {
      continue;
    }

    HeapWord* const card_start = _g1h->bot()->address_for_index(card_index);
    uint const region_idx_for_card = _g1h->addr_to_region(card_start);

    assert(_g1h->region_at(region_idx_for_card)->is_in_reserved(card_start),
           "Card start " PTR_FORMAT " to scan outside of region %u", p2i(card_start), _g1h->region_at(region_idx_for_card)->hrm_index());
    HeapWord* const top = _scan_state->scan_top(region_idx_for_card);
    if (card_start >= top) {
      continue;
    }

    // We claim lazily (so races are possible but they're benign), which reduces the
    // number of duplicate scans (the rsets of the regions in the cset can intersect).
    // Claim the card after checking bounds above: the remembered set may contain
    // stray cards pointing into the current survivor space, and we would then have an
    // incorrectly claimed card in survivor space. Card table clearing does not reset
    // the card table of survivor space regions.
    claim_card(card_index, region_idx_for_card);

    MemRegion const mr(card_start, MIN2(card_start + BOTConstants::N_words, top));

    scan_card(mr, region_idx_for_card);
  }
  if (_scan_state->set_iter_complete(region_idx)) {
    // Scan the strong code root list attached to the current region
    scan_strong_code_roots(r);
  }
  return false;
}

void G1RemSet::scan_rem_set(G1ParScanThreadState* pss,
                            CodeBlobClosure* heap_region_codeblobs,
                            uint worker_i) {
  double rs_time_start = os::elapsedTime();

  G1ScanObjsDuringScanRSClosure scan_cl(_g1, pss);
  G1ScanRSForRegionClosure cl(_scan_state, &scan_cl, heap_region_codeblobs, worker_i);
  _g1->collection_set_iterate_from(&cl, worker_i);

  double scan_rs_time_sec = (os::elapsedTime() - rs_time_start) -
                            cl.strong_code_root_scan_time_sec();

  G1GCPhaseTimes* p = _g1p->phase_times();

  p->record_time_secs(G1GCPhaseTimes::ScanRS, worker_i, scan_rs_time_sec);
  p->record_thread_work_item(G1GCPhaseTimes::ScanRS, worker_i, cl.cards_scanned(), G1GCPhaseTimes::ScanRSScannedCards);
  p->record_thread_work_item(G1GCPhaseTimes::ScanRS, worker_i, cl.cards_claimed(), G1GCPhaseTimes::ScanRSClaimedCards);
  p->record_thread_work_item(G1GCPhaseTimes::ScanRS, worker_i, cl.cards_skipped(), G1GCPhaseTimes::ScanRSSkippedCards);

  p->record_time_secs(G1GCPhaseTimes::CodeRoots, worker_i, cl.strong_code_root_scan_time_sec());
}

// Closure used for updating rem sets. Only called during an evacuation pause.
class G1RefineCardClosure: public CardTableEntryClosure {
  G1RemSet* _g1rs;
  G1ScanObjsDuringUpdateRSClosure* _update_rs_cl;

  size_t _cards_scanned;
  size_t _cards_skipped;
public:
  G1RefineCardClosure(G1CollectedHeap* g1h, G1ScanObjsDuringUpdateRSClosure* update_rs_cl) :
    _g1rs(g1h->g1_rem_set()), _update_rs_cl(update_rs_cl), _cards_scanned(0), _cards_skipped(0)
  {}

  bool do_card_ptr(jbyte* card_ptr, uint worker_i) {
    // The only time we care about recording cards that
    // contain references that point into the collection set
    // is during RSet updating within an evacuation pause.
    // In this case worker_i should be the id of a GC worker thread.
    assert(SafepointSynchronize::is_at_safepoint(), "not during an evacuation pause");

    bool card_scanned = _g1rs->refine_card_during_gc(card_ptr, _update_rs_cl);

    if (card_scanned) {
      _cards_scanned++;
    } else {
      _cards_skipped++;
    }
    return true;
  }

  size_t cards_scanned() const { return _cards_scanned; }
  size_t cards_skipped() const { return _cards_skipped; }
};

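// Updates the remembered sets by applying G1RefineCardClosure first to the entries
// of the hot card cache (when enabled) and then to all remaining buffered dirty
// card queue entries; the work is timed and counted under the UpdateRS phase.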
void G1RemSet::update_rem_set(G1ParScanThreadState* pss, uint worker_i) {
  G1ScanObjsDuringUpdateRSClosure update_rs_cl(_g1, pss, worker_i);
  G1RefineCardClosure refine_card_cl(_g1, &update_rs_cl);

  G1GCParPhaseTimesTracker x(_g1p->phase_times(), G1GCPhaseTimes::UpdateRS, worker_i);
  if (G1HotCardCache::default_use_cache()) {
    // Apply the closure to the entries of the hot card cache.
    G1GCParPhaseTimesTracker y(_g1p->phase_times(), G1GCPhaseTimes::ScanHCC, worker_i);
    _g1->iterate_hcc_closure(&refine_card_cl, worker_i);
  }
  // Apply the closure to all remaining log entries.
  _g1->iterate_dirty_card_closure(&refine_card_cl, worker_i);

  G1GCPhaseTimes* p = _g1p->phase_times();
  p->record_thread_work_item(G1GCPhaseTimes::UpdateRS, worker_i, refine_card_cl.cards_scanned(), G1GCPhaseTimes::UpdateRSScannedCards);
  p->record_thread_work_item(G1GCPhaseTimes::UpdateRS, worker_i, refine_card_cl.cards_skipped(), G1GCPhaseTimes::UpdateRSSkippedCards);
}

void G1RemSet::cleanupHRRS() {
  HeapRegionRemSet::cleanup();
}

void G1RemSet::oops_into_collection_set_do(G1ParScanThreadState* pss,
                                           CodeBlobClosure* heap_region_codeblobs,
                                           uint worker_i) {
  update_rem_set(pss, worker_i);
  scan_rem_set(pss, heap_region_codeblobs, worker_i);
}

void G1RemSet::prepare_for_oops_into_collection_set_do() {
  DirtyCardQueueSet& dcqs = JavaThread::dirty_card_queue_set();
  dcqs.concatenate_logs();

  _scan_state->reset();
}

void G1RemSet::cleanup_after_oops_into_collection_set_do() {
  G1GCPhaseTimes* phase_times = _g1->g1_policy()->phase_times();

  // Set all cards back to clean.
  double start = os::elapsedTime();
  _scan_state->clear_card_table(_g1->workers());
  phase_times->record_clear_ct_time((os::elapsedTime() - start) * 1000.0);
}

inline void check_card_ptr(jbyte* card_ptr, G1CardTable* ct) {
#ifdef ASSERT
  G1CollectedHeap* g1 = G1CollectedHeap::heap();
  assert(g1->is_in_exact(ct->addr_for(card_ptr)),
         "Card at " PTR_FORMAT " index " SIZE_FORMAT " representing heap at " PTR_FORMAT " (%u) must be in committed heap",
         p2i(card_ptr),
         ct->index_for(ct->addr_for(card_ptr)),
         p2i(ct->addr_for(card_ptr)),
         g1->addr_to_region(ct->addr_for(card_ptr)));
#endif
}

void G1RemSet::refine_card_concurrently(jbyte* card_ptr,
                                        uint worker_i) {
  assert(!_g1->is_gc_active(), "Only call concurrently");

  check_card_ptr(card_ptr, _ct);

  // If the card is no longer dirty, nothing to do.
  if (*card_ptr != G1CardTable::dirty_card_val()) {
    return;
  }

  // Construct the region representing the card.
  HeapWord* start = _ct->addr_for(card_ptr);
  // And find the region containing it.
  HeapRegion* r = _g1->heap_region_containing(start);

  // This check is needed for some uncommon cases where we should
  // ignore the card.
  //
  // The region could be young. Cards for young regions are
  // distinctly marked (set to g1_young_gen), so the post-barrier will
  // filter them out. However, that marking is performed
  // concurrently. A write to a young object could occur before the
  // card has been marked young, slipping past the filter.
  //
  // The card could be stale, because the region has been freed since
  // the card was recorded. In this case the region type could be
  // anything. If (still) free or (reallocated) young, just ignore
  // it. If (reallocated) old or humongous, the later card trimming
  // and additional checks in iteration may detect staleness. At
  // worst, we end up processing a stale card unnecessarily.
  //
  // In the normal (non-stale) case, the synchronization between the
  // enqueueing of the card and processing it here will have ensured
  // we see the up-to-date region type here.
  if (!r->is_old_or_humongous()) {
    return;
  }

  // The result from the hot card cache insert call is either:
  //   * pointer to the current card
  //     (implying that the current card is not 'hot'),
  //   * null
  //     (meaning we had inserted the card ptr into the "hot" card cache,
  //     which had some headroom),
  //   * a pointer to a "hot" card that was evicted from the "hot" cache.
  //

  if (_hot_card_cache->use_cache()) {
    assert(!SafepointSynchronize::is_at_safepoint(), "sanity");

    const jbyte* orig_card_ptr = card_ptr;
    card_ptr = _hot_card_cache->insert(card_ptr);
    if (card_ptr == NULL) {
      // There was no eviction. Nothing to do.
      return;
    } else if (card_ptr != orig_card_ptr) {
      // Original card was inserted and an old card was evicted.
      start = _ct->addr_for(card_ptr);
      r = _g1->heap_region_containing(start);

      // Check whether the region formerly in the cache should be
      // ignored, as discussed earlier for the original card. The
      // region could have been freed while in the cache.
      if (!r->is_old_or_humongous()) {
        return;
      }
    } // Else we still have the original card.
  }

  // Trim the region designated by the card to what's been allocated
  // in the region. The card could be stale, or the card could cover
  // (part of) an object at the end of the allocated space and extend
  // beyond the end of allocation.

  // Non-humongous objects are only allocated in the old-gen during
  // GC, so if region is old then top is stable. Humongous object
  // allocation sets top last; if top has not yet been set, this is
  // a stale card and we'll end up with an empty intersection. If
  // this is not a stale card, the synchronization between the
  // enqueuing of the card and processing it here will have ensured
  // we see the up-to-date top here.
  HeapWord* scan_limit = r->top();

  if (scan_limit <= start) {
    // If the trimmed region is empty, the card must be stale.
    return;
  }

  // Okay to clean and process the card now. There are still some
  // stale card cases that may be detected by iteration and dealt with
  // as iteration failure.
  *const_cast<volatile jbyte*>(card_ptr) = G1CardTable::clean_card_val();

  // This fence serves two purposes. First, the card must be cleaned
  // before processing the contents. Second, we can't proceed with
  // processing until after the read of top, for synchronization with
  // possibly concurrent humongous object allocation. It's okay that
  // reading top and reading the region type were racy with respect to
  // each other. We need both set, in any order, to proceed.
  OrderAccess::fence();

  // Don't use addr_for(card_ptr + 1) which can ask for
  // a card beyond the heap.
  HeapWord* end = start + G1CardTable::card_size_in_words;
  MemRegion dirty_region(start, MIN2(scan_limit, end));
  assert(!dirty_region.is_empty(), "sanity");

  G1ConcurrentRefineOopClosure conc_refine_cl(_g1, worker_i);

  bool card_processed =
    r->oops_on_card_seq_iterate_careful<false>(dirty_region, &conc_refine_cl);

  // If unable to process the card then we encountered an unparsable
  // part of the heap (e.g. a partially allocated object) while
  // processing a stale card. Despite the card being stale, redirty
  // and re-enqueue, because we've already cleaned the card. Without
  // this we could incorrectly discard a non-stale card.
  if (!card_processed) {
    // The card might have gotten re-dirtied and re-enqueued while we
    // worked. (In fact, it's pretty likely.)
    if (*card_ptr != G1CardTable::dirty_card_val()) {
      *card_ptr = G1CardTable::dirty_card_val();
      MutexLockerEx x(Shared_DirtyCardQ_lock,
                      Mutex::_no_safepoint_check_flag);
      DirtyCardQueue* sdcq =
        JavaThread::dirty_card_queue_set().shared_dirty_card_queue();
      sdcq->enqueue(card_ptr);
    }
  } else {
    _num_conc_refined_cards++; // Unsynchronized update, only used for logging.
  }
}

bool G1RemSet::refine_card_during_gc(jbyte* card_ptr,
                                     G1ScanObjsDuringUpdateRSClosure* update_rs_cl) {
  assert(_g1->is_gc_active(), "Only call during GC");

  check_card_ptr(card_ptr, _ct);

  // If the card is no longer dirty, nothing to do. This covers cards that were already
  // scanned as parts of the remembered sets.
  if (*card_ptr != G1CardTable::dirty_card_val()) {
    return false;
  }

  // We claim lazily (so races are possible but they're benign), which reduces the
  // number of potential duplicate scans (multiple threads may enqueue the same card twice).
  *card_ptr = G1CardTable::clean_card_val() | G1CardTable::claimed_card_val();

  // Construct the region representing the card.
  HeapWord* card_start = _ct->addr_for(card_ptr);
  // And find the region containing it.
  uint const card_region_idx = _g1->addr_to_region(card_start);

  _scan_state->add_dirty_region(card_region_idx);
  HeapWord* scan_limit = _scan_state->scan_top(card_region_idx);
  if (scan_limit <= card_start) {
    // If the card starts above the area in the region containing objects to scan, skip it.
    return false;
  }

  // Don't use addr_for(card_ptr + 1) which can ask for
  // a card beyond the heap.
  HeapWord* card_end = card_start + G1CardTable::card_size_in_words;
  MemRegion dirty_region(card_start, MIN2(scan_limit, card_end));
  assert(!dirty_region.is_empty(), "sanity");

  HeapRegion* const card_region = _g1->region_at(card_region_idx);
  update_rs_cl->set_region(card_region);
  bool card_processed = card_region->oops_on_card_seq_iterate_careful<true>(dirty_region, update_rs_cl);
  assert(card_processed, "must be");
  return true;
}

void G1RemSet::print_periodic_summary_info(const char* header, uint period_count) {
  if ((G1SummarizeRSetStatsPeriod > 0) && log_is_enabled(Trace, gc, remset) &&
      (period_count % G1SummarizeRSetStatsPeriod == 0)) {

    G1RemSetSummary current(this);
    _prev_period_summary.subtract_from(&current);

    Log(gc, remset) log;
    log.trace("%s", header);
    ResourceMark rm;
    LogStream ls(log.trace());
    _prev_period_summary.print_on(&ls);

    _prev_period_summary.set(&current);
  }
}

void G1RemSet::print_summary_info() {
  Log(gc, remset, exit) log;
  if (log.is_trace()) {
    log.trace(" Cumulative RS summary");
    G1RemSetSummary current(this);
    ResourceMark rm;
    LogStream ls(log.trace());
    current.print_on(&ls);
  }
}

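// Rebuilds remembered sets for regions that have a top-at-rebuild-start (TARS) value
// set. Each worker joins the SuspendibleThreadSet and claims regions through the
// HeapRegionClaimer; regions are processed in chunks of G1RebuildRemSetChunkSize
// bytes, with a yield check after every chunk, and TARS is re-read after each chunk
// because e.g. eager reclaim may reset it to NULL in the meantime.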
class G1RebuildRemSetTask: public AbstractGangTask {
  // Aggregate the counting data that was constructed concurrently
  // with marking.
  class G1RebuildRemSetHeapRegionClosure : public HeapRegionClosure {
    G1ConcurrentMark* _cm;
    G1RebuildRemSetClosure _update_cl;

    // Applies _update_cl to the references of the given object, limiting objArrays
    // to the given MemRegion. Returns the number of words actually scanned.
    size_t scan_for_references(oop const obj, MemRegion mr) {
      size_t const obj_size = obj->size();
      // All non-objArrays and objArrays completely within the mr
      // can be scanned without passing the mr.
      if (!obj->is_objArray() || mr.contains(MemRegion((HeapWord*)obj, obj_size))) {
        obj->oop_iterate(&_update_cl);
        return obj_size;
      }
      // This path is for objArrays crossing the given MemRegion. Only scan the
      // area within the MemRegion.
      obj->oop_iterate(&_update_cl, mr);
      return mr.intersection(MemRegion((HeapWord*)obj, obj_size)).word_size();
    }

    // A humongous object is live (with respect to the scanning) if either
    // a) it is marked on the bitmap as such, or
    // b) its TARS is larger than TAMS, i.e. it has been allocated during marking.
    bool is_humongous_live(oop const humongous_obj, const G1CMBitMap* const bitmap, HeapWord* tams, HeapWord* tars) const {
      return bitmap->is_marked(humongous_obj) || (tars > tams);
    }

    // Iterator over the live objects within the given MemRegion.
    class LiveObjIterator : public StackObj {
      const G1CMBitMap* const _bitmap;
      const HeapWord* _tams;
      const MemRegion _mr;
      HeapWord* _current;

      bool is_below_tams() const {
        return _current < _tams;
      }

      bool is_live(HeapWord* obj) const {
        return !is_below_tams() || _bitmap->is_marked(obj);
      }

      HeapWord* bitmap_limit() const {
        return MIN2(const_cast<HeapWord*>(_tams), _mr.end());
      }

      void move_if_below_tams() {
        if (is_below_tams() && has_next()) {
          _current = _bitmap->get_next_marked_addr(_current, bitmap_limit());
        }
      }
    public:
      LiveObjIterator(const G1CMBitMap* const bitmap, const HeapWord* tams, const MemRegion mr, HeapWord* first_oop_into_mr) :
        _bitmap(bitmap),
        _tams(tams),
        _mr(mr),
        _current(first_oop_into_mr) {

        assert(_current <= _mr.start(),
               "First oop " PTR_FORMAT " should extend into mr [" PTR_FORMAT ", " PTR_FORMAT ")",
               p2i(first_oop_into_mr), p2i(mr.start()), p2i(mr.end()));

        // Step to the next live object within the MemRegion if needed.
        if (is_live(_current)) {
          // Non-objArrays were scanned by the previous part of that region.
          if (_current < mr.start() && !oop(_current)->is_objArray()) {
            _current += oop(_current)->size();
            // We might have positioned _current on a non-live object. Reposition to the next
            // live one if needed.
            move_if_below_tams();
          }
        } else {
          // The object at _current can only be dead if below TAMS, so we can use the
          // bitmap immediately.
          _current = _bitmap->get_next_marked_addr(_current, bitmap_limit());
          assert(_current == _mr.end() || is_live(_current),
                 "Current " PTR_FORMAT " should be live (%s) or beyond the end of the MemRegion (" PTR_FORMAT ")",
                 p2i(_current), BOOL_TO_STR(is_live(_current)), p2i(_mr.end()));
        }
      }

      void move_to_next() {
        _current += next()->size();
        move_if_below_tams();
      }

      oop next() const {
        oop result = oop(_current);
        assert(is_live(_current),
               "Object " PTR_FORMAT " must be live TAMS " PTR_FORMAT " below %d mr " PTR_FORMAT " " PTR_FORMAT " outside %d",
               p2i(_current), p2i(_tams), _tams > _current, p2i(_mr.start()), p2i(_mr.end()), _mr.contains(result));
        return result;
      }

      bool has_next() const {
        return _current < _mr.end();
      }
    };

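    // rebuild_rem_set_in_region() below uses LiveObjIterator to visit only the objects
    // that need scanning: below TAMS the iterator follows the marking bitmap, while
    // objects at or above TAMS are treated as live.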
    // Rebuild remembered sets in the part of the region specified by mr and hr.
    // Objects between the bottom of the region and the TAMS are checked for liveness
    // using the given bitmap. Objects between TAMS and TARS are assumed to be live.
    // Returns the number of live bytes below TAMS within the given MemRegion.
    size_t rebuild_rem_set_in_region(const G1CMBitMap* const bitmap,
                                     HeapWord* const top_at_mark_start,
                                     HeapWord* const top_at_rebuild_start,
                                     HeapRegion* hr,
                                     MemRegion mr) {
      size_t marked_words = 0;

      if (hr->is_humongous()) {
        oop const humongous_obj = oop(hr->humongous_start_region()->bottom());
        if (is_humongous_live(humongous_obj, bitmap, top_at_mark_start, top_at_rebuild_start)) {
          // We need to scan both [bottom, TAMS) and [TAMS, top_at_rebuild_start);
          // however in case of humongous objects it is sufficient to scan the encompassing
          // area (top_at_rebuild_start is always larger than or equal to TAMS) as one of the
          // two areas will be zero sized. I.e. TAMS is either
          // the same as bottom or top(_at_rebuild_start). There is no way TAMS has a different
          // value: this would mean that TAMS points somewhere into the object.
          assert(hr->top() == top_at_mark_start || hr->top() == top_at_rebuild_start,
                 "More than one object in the humongous region?");
          humongous_obj->oop_iterate(&_update_cl, mr);
          return top_at_mark_start != hr->bottom() ? mr.byte_size() : 0;
        } else {
          return 0;
        }
      }

      for (LiveObjIterator it(bitmap, top_at_mark_start, mr, hr->block_start(mr.start())); it.has_next(); it.move_to_next()) {
        oop obj = it.next();
        size_t scanned_size = scan_for_references(obj, mr);
        if ((HeapWord*)obj < top_at_mark_start) {
          marked_words += scanned_size;
        }
      }

      return marked_words * HeapWordSize;
    }
  public:
    G1RebuildRemSetHeapRegionClosure(G1CollectedHeap* g1h,
                                     G1ConcurrentMark* cm,
                                     uint worker_id) :
      HeapRegionClosure(),
      _cm(cm),
      _update_cl(g1h, worker_id) { }

    bool do_heap_region(HeapRegion* hr) {
      if (_cm->has_aborted()) {
        return true;
      }

      uint const region_idx = hr->hrm_index();
      DEBUG_ONLY(HeapWord* const top_at_rebuild_start_check = _cm->top_at_rebuild_start(region_idx);)
      assert(top_at_rebuild_start_check == NULL ||
             top_at_rebuild_start_check > hr->bottom(),
             "A TARS (" PTR_FORMAT ") == bottom() (" PTR_FORMAT ") indicates the old region %u is empty (%s)",
             p2i(top_at_rebuild_start_check), p2i(hr->bottom()), region_idx, hr->get_type_str());

      size_t total_marked_bytes = 0;
      size_t const chunk_size_in_words = G1RebuildRemSetChunkSize / HeapWordSize;

      HeapWord* const top_at_mark_start = hr->next_top_at_mark_start();

      HeapWord* cur = hr->bottom();
      while (cur < hr->end()) {
        // After every iteration (yield point) we need to check whether the region's
        // TARS changed due to e.g. eager reclaim.
        HeapWord* const top_at_rebuild_start = _cm->top_at_rebuild_start(region_idx);
        if (top_at_rebuild_start == NULL) {
          return false;
        }

        MemRegion next_chunk = MemRegion(hr->bottom(), top_at_rebuild_start).intersection(MemRegion(cur, chunk_size_in_words));
        if (next_chunk.is_empty()) {
          break;
        }

        const Ticks start = Ticks::now();
        size_t marked_bytes = rebuild_rem_set_in_region(_cm->next_mark_bitmap(),
                                                        top_at_mark_start,
                                                        top_at_rebuild_start,
                                                        hr,
                                                        next_chunk);
        Tickspan time = Ticks::now() - start;

        log_trace(gc, remset, tracking)("Rebuilt region %u "
                                        "live " SIZE_FORMAT " "
                                        "time %.3fms "
                                        "marked bytes " SIZE_FORMAT " "
                                        "bot " PTR_FORMAT " "
                                        "TAMS " PTR_FORMAT " "
                                        "TARS " PTR_FORMAT,
                                        region_idx,
                                        _cm->liveness(region_idx) * HeapWordSize,
                                        TicksToTimeHelper::seconds(time) * 1000.0,
                                        marked_bytes,
                                        p2i(hr->bottom()),
                                        p2i(top_at_mark_start),
                                        p2i(top_at_rebuild_start));

        if (marked_bytes > 0) {
          hr->add_to_marked_bytes(marked_bytes);
          total_marked_bytes += marked_bytes;
        }
        cur += chunk_size_in_words;

        _cm->do_yield_check();
        if (_cm->has_aborted()) {
          return true;
        }
      }
      // In the final iteration of the loop the region might have been eagerly reclaimed.
      // Simply filter out those regions. We cannot just use the region type because there
      // might already have been new allocations into these regions.
      DEBUG_ONLY(HeapWord* const top_at_rebuild_start = _cm->top_at_rebuild_start(region_idx);)
      assert(!hr->is_old() ||
             top_at_rebuild_start == NULL ||
             total_marked_bytes == _cm->liveness(region_idx) * HeapWordSize,
             "Marked bytes " SIZE_FORMAT " for region %u (%s) in [bottom, TAMS) do not match liveness during mark " SIZE_FORMAT " "
             "(" PTR_FORMAT " " PTR_FORMAT " " PTR_FORMAT ")",
             total_marked_bytes, hr->hrm_index(), hr->get_type_str(), _cm->liveness(region_idx) * HeapWordSize,
             p2i(hr->bottom()), p2i(top_at_mark_start), p2i(top_at_rebuild_start));
      // Abort state may have changed after the yield check.
      return _cm->has_aborted();
    }
  };

  HeapRegionClaimer _hr_claimer;
  G1ConcurrentMark* _cm;

  uint _worker_id_offset;
public:
  G1RebuildRemSetTask(G1ConcurrentMark* cm,
                      uint n_workers,
                      uint worker_id_offset) :
    AbstractGangTask("G1 Rebuild Remembered Set"),
    _cm(cm),
    _hr_claimer(n_workers),
    _worker_id_offset(worker_id_offset) {
  }

  void work(uint worker_id) {
    SuspendibleThreadSetJoiner sts_join;

    G1CollectedHeap* g1h = G1CollectedHeap::heap();

    G1RebuildRemSetHeapRegionClosure cl(g1h, _cm, _worker_id_offset + worker_id);
    g1h->heap_region_par_iterate_from_worker_offset(&cl, &_hr_claimer, worker_id);
  }
};

void G1RemSet::rebuild_rem_set(G1ConcurrentMark* cm,
                               WorkGang* workers,
                               uint worker_id_offset) {
  uint num_workers = workers->active_workers();

  G1RebuildRemSetTask cl(cm,
                         num_workers,
                         worker_id_offset);
  workers->run_task(&cl, num_workers);
}