/*
 * Copyright (c) 2001, 2015, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc/g1/concurrentG1Refine.hpp"
#include "gc/g1/concurrentG1RefineThread.hpp"
#include "gc/g1/dirtyCardQueue.hpp"
#include "gc/g1/g1BlockOffsetTable.inline.hpp"
#include "gc/g1/g1CollectedHeap.inline.hpp"
#include "gc/g1/g1CollectorPolicy.hpp"
#include "gc/g1/g1FromCardCache.hpp"
#include "gc/g1/g1GCPhaseTimes.hpp"
#include "gc/g1/g1HotCardCache.hpp"
#include "gc/g1/g1OopClosures.inline.hpp"
#include "gc/g1/g1RemSet.inline.hpp"
#include "gc/g1/heapRegion.inline.hpp"
#include "gc/g1/heapRegionManager.inline.hpp"
#include "gc/g1/heapRegionRemSet.hpp"
#include "memory/iterator.hpp"
#include "oops/oop.inline.hpp"
#include "utilities/globalDefinitions.hpp"
#include "utilities/intHisto.hpp"
#include "utilities/stack.inline.hpp"

G1RemSet::G1RemSet(G1CollectedHeap* g1, CardTableModRefBS* ct_bs) :
  _g1(g1),
  _conc_refine_cards(0),
  _ct_bs(ct_bs),
  _g1p(_g1->g1_policy()),
  _cg1r(g1->concurrent_g1_refine()),
  _cset_rs_update_cl(NULL),
  _prev_period_summary(),
  _into_cset_dirty_card_queue_set(false)
{
  _cset_rs_update_cl = NEW_C_HEAP_ARRAY(G1ParPushHeapRSClosure*, n_workers(), mtGC);
  for (uint i = 0; i < n_workers(); i++) {
    _cset_rs_update_cl[i] = NULL;
  }
  if (log_is_enabled(Trace, gc, remset)) {
    _prev_period_summary.initialize(this);
  }
  // Initialize the card queue set used to hold cards containing
  // references into the collection set.
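  // Note: this queue set is never processed by refinement threads; its
  // buffers are either merged into the regular dirty card queue set on
  // evacuation failure or discarded wholesale (see
  // cleanup_after_oops_into_collection_set_do()), hence the NULL closure
  // and the -1 "never trigger"/"no limit" arguments below.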
  _into_cset_dirty_card_queue_set.initialize(NULL, // Should never be called by the Java code
                                             DirtyCardQ_CBL_mon,
                                             DirtyCardQ_FL_lock,
                                             -1, // never trigger processing
                                             -1, // no limit on length
                                             Shared_DirtyCardQ_lock,
                                             &JavaThread::dirty_card_queue_set());
}

G1RemSet::~G1RemSet() {
  for (uint i = 0; i < n_workers(); i++) {
    assert(_cset_rs_update_cl[i] == NULL, "it should be");
  }
  FREE_C_HEAP_ARRAY(G1ParPushHeapRSClosure*, _cset_rs_update_cl);
}

uint G1RemSet::num_par_rem_sets() {
  return MAX2(DirtyCardQueueSet::num_par_ids() + ConcurrentG1Refine::thread_num(), ParallelGCThreads);
}

void G1RemSet::initialize(uint max_regions) {
  G1FromCardCache::initialize(num_par_rem_sets(), max_regions);
}

ScanRSClosure::ScanRSClosure(G1ParPushHeapRSClosure* oc,
                             CodeBlobClosure* code_root_cl,
                             uint worker_i) :
  _oc(oc),
  _code_root_cl(code_root_cl),
  _strong_code_root_scan_time_sec(0.0),
  _cards(0),
  _cards_done(0),
  _worker_i(worker_i),
  _try_claimed(false) {
  _g1h = G1CollectedHeap::heap();
  _bot = _g1h->bot();
  _ct_bs = _g1h->g1_barrier_set();
  _block_size = MAX2<size_t>(G1RSetScanBlockSize, 1);
}

void ScanRSClosure::scanCard(size_t index, HeapRegion* r) {
  // Stack allocate the DirtyCardToOopClosure instance
  HeapRegionDCTOC cl(_g1h, r, _oc,
                     CardTableModRefBS::Precise);

  // Set the "from" region in the closure.
  _oc->set_region(r);
  MemRegion card_region(_bot->address_for_index(index), G1BlockOffsetTable::N_words);
  MemRegion pre_gc_allocated(r->bottom(), r->scan_top());
  MemRegion mr = pre_gc_allocated.intersection(card_region);
  if (!mr.is_empty() && !_ct_bs->is_card_claimed(index)) {
    // We mark the card as "claimed" lazily (so races are possible
    // but they're benign), which reduces the number of duplicate
    // scans (the rsets of the regions in the cset can intersect).
    _ct_bs->set_card_claimed(index);
    _cards_done++;
    cl.do_MemRegion(mr);
  }
}

void ScanRSClosure::scan_strong_code_roots(HeapRegion* r) {
  double scan_start = os::elapsedTime();
  r->strong_code_roots_do(_code_root_cl);
  _strong_code_root_scan_time_sec += (os::elapsedTime() - scan_start);
}

bool ScanRSClosure::doHeapRegion(HeapRegion* r) {
  assert(r->in_collection_set(), "should only be called on elements of CS.");
  HeapRegionRemSet* hrrs = r->rem_set();
  if (hrrs->iter_is_complete()) return false; // All done.
  if (!_try_claimed && !hrrs->claim_iter()) return false;
  // If we ever free the collection set concurrently, we should also
  // clear the card table concurrently, and therefore we won't need to
  // add regions of the collection set to the dirty cards region.
  _g1h->push_dirty_cards_region(r);
  // If we didn't return above, then
  //   _try_claimed || hrrs->claim_iter()
  // is true: either we're supposed to work on claimed-but-not-complete
  // regions, or we successfully claimed the region.

  HeapRegionRemSetIterator iter(hrrs);
  size_t card_index;

  // We claim cards in blocks to reduce contention. The block size is
  // determined by the G1RSetScanBlockSize parameter.
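  // Assumption (from the loop below): iter_claimed_next(n) atomically bumps a
  // shared claim counter by n and returns its previous value, so this worker
  // scans the card range [jump_to_card, jump_to_card + _block_size), and cards
  // below jump_to_card are skipped as already claimed by another worker.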
  size_t jump_to_card = hrrs->iter_claimed_next(_block_size);
  for (size_t current_card = 0; iter.has_next(card_index); current_card++) {
    if (current_card >= jump_to_card + _block_size) {
      jump_to_card = hrrs->iter_claimed_next(_block_size);
    }
    if (current_card < jump_to_card) continue;
    HeapWord* card_start = _g1h->bot()->address_for_index(card_index);

    HeapRegion* card_region = _g1h->heap_region_containing(card_start);
    _cards++;

    if (!card_region->is_on_dirty_cards_region_list()) {
      _g1h->push_dirty_cards_region(card_region);
    }

    // If the card is dirty, then we will scan it during updateRS.
    if (!card_region->in_collection_set() &&
        !_ct_bs->is_card_dirty(card_index)) {
      scanCard(card_index, card_region);
    }
  }
  if (!_try_claimed) {
    // Scan the strong code root list attached to the current region
    scan_strong_code_roots(r);

    hrrs->set_iter_complete();
  }
  return false;
}

size_t G1RemSet::scanRS(G1ParPushHeapRSClosure* oc,
                        CodeBlobClosure* heap_region_codeblobs,
                        uint worker_i) {
  double rs_time_start = os::elapsedTime();

  HeapRegion* startRegion = _g1->start_cset_region_for_worker(worker_i);

  ScanRSClosure scanRScl(oc, heap_region_codeblobs, worker_i);

  _g1->collection_set_iterate_from(startRegion, &scanRScl);
  scanRScl.set_try_claimed();
  _g1->collection_set_iterate_from(startRegion, &scanRScl);

  double scan_rs_time_sec = (os::elapsedTime() - rs_time_start)
                            - scanRScl.strong_code_root_scan_time_sec();

  _g1p->phase_times()->record_time_secs(G1GCPhaseTimes::ScanRS, worker_i, scan_rs_time_sec);
  _g1p->phase_times()->record_time_secs(G1GCPhaseTimes::CodeRoots, worker_i, scanRScl.strong_code_root_scan_time_sec());

  return scanRScl.cards_done();
}

// Closure used for updating RSets and recording references that
// point into the collection set. Only called during an
// evacuation pause.

class RefineRecordRefsIntoCSCardTableEntryClosure: public CardTableEntryClosure {
  G1RemSet* _g1rs;
  DirtyCardQueue* _into_cset_dcq;
public:
  RefineRecordRefsIntoCSCardTableEntryClosure(G1CollectedHeap* g1h,
                                              DirtyCardQueue* into_cset_dcq) :
    _g1rs(g1h->g1_rem_set()), _into_cset_dcq(into_cset_dcq)
  {}

  bool do_card_ptr(jbyte* card_ptr, uint worker_i) {
    // The only time we care about recording cards that
    // contain references that point into the collection set
    // is during RSet updating within an evacuation pause.
    // In this case worker_i should be the id of a GC worker thread.
    assert(SafepointSynchronize::is_at_safepoint(), "not during an evacuation pause");
    assert(worker_i < ParallelGCThreads, "should be a GC worker");

    if (_g1rs->refine_card(card_ptr, worker_i, true)) {
      // 'card_ptr' contains references that point into the collection
      // set. We need to record the card in the DCQS
      // (_into_cset_dirty_card_queue_set) that's used for that purpose.
      //
      // Enqueue the card
      _into_cset_dcq->enqueue(card_ptr);
    }
    return true;
  }
};

void G1RemSet::updateRS(DirtyCardQueue* into_cset_dcq, uint worker_i) {
  RefineRecordRefsIntoCSCardTableEntryClosure into_cset_update_rs_cl(_g1, into_cset_dcq);

  G1GCParPhaseTimesTracker x(_g1p->phase_times(), G1GCPhaseTimes::UpdateRS, worker_i);
  {
    // Apply the closure to the entries of the hot card cache.
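    // The nested tracker scopes the hot card cache drain under the ScanHCC
    // sub-phase, so its cost is reported separately from the processing of
    // the remaining log entries below.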
    G1GCParPhaseTimesTracker y(_g1p->phase_times(), G1GCPhaseTimes::ScanHCC, worker_i);
    _g1->iterate_hcc_closure(&into_cset_update_rs_cl, worker_i);
  }
  // Apply the closure to all remaining log entries.
  _g1->iterate_dirty_card_closure(&into_cset_update_rs_cl, worker_i);
}

void G1RemSet::cleanupHRRS() {
  HeapRegionRemSet::cleanup();
}

size_t G1RemSet::oops_into_collection_set_do(G1ParPushHeapRSClosure* oc,
                                             CodeBlobClosure* heap_region_codeblobs,
                                             uint worker_i) {
  // We cache the value of the 'oc' closure in the appropriate slot of
  // the _cset_rs_update_cl array for this worker.
  assert(worker_i < n_workers(), "sanity");
  _cset_rs_update_cl[worker_i] = oc;

  // A DirtyCardQueue that is used to hold cards containing references
  // that point into the collection set. This DCQ is associated with a
  // special DirtyCardQueueSet (see g1CollectedHeap.hpp). Under normal
  // circumstances (i.e. the pause successfully completes), these cards
  // are just discarded (there's no need to update the RSets of regions
  // that were in the collection set - after the pause these regions
  // are wholly 'free' of live objects). In the event of an evacuation
  // failure the cards/buffers in this queue set are passed to the
  // DirtyCardQueueSet that is used to manage RSet updates.
  DirtyCardQueue into_cset_dcq(&_into_cset_dirty_card_queue_set);

  updateRS(&into_cset_dcq, worker_i);
  size_t cards_scanned = scanRS(oc, heap_region_codeblobs, worker_i);

  // We now clear the cached value of _cset_rs_update_cl for this worker
  _cset_rs_update_cl[worker_i] = NULL;
  return cards_scanned;
}

void G1RemSet::prepare_for_oops_into_collection_set_do() {
  _g1->set_refine_cte_cl_concurrency(false);
  DirtyCardQueueSet& dcqs = JavaThread::dirty_card_queue_set();
  dcqs.concatenate_logs();
}

void G1RemSet::cleanup_after_oops_into_collection_set_do() {
  // Cleanup after copy
  _g1->set_refine_cte_cl_concurrency(true);
  // Set all cards back to clean.
  _g1->cleanUpCardTable();

  DirtyCardQueueSet& into_cset_dcqs = _into_cset_dirty_card_queue_set;
  int into_cset_n_buffers = into_cset_dcqs.completed_buffers_num();

  if (_g1->evacuation_failed()) {
    double restore_remembered_set_start = os::elapsedTime();

    // Restore remembered sets for the regions pointing into the collection set.
    // We just need to transfer the completed buffers from the DirtyCardQueueSet
    // used to hold cards that contain references that point into the collection set
    // to the DCQS used to hold the deferred RS updates.
    _g1->dirty_card_queue_set().merge_bufferlists(&into_cset_dcqs);
    _g1->g1_policy()->phase_times()->record_evac_fail_restore_remsets((os::elapsedTime() - restore_remembered_set_start) * 1000.0);
  }

  // Free any completed buffers in the DirtyCardQueueSet used to hold cards
  // which contain references that point into the collection set.
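  // If the pause completed successfully, nothing was merged above, so this
  // simply discards the recorded cards: the collection set regions those
  // references point into have been freed, so no remembered set updates
  // are needed.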
  _into_cset_dirty_card_queue_set.clear();
  assert(_into_cset_dirty_card_queue_set.completed_buffers_num() == 0,
         "all buffers should be freed");
  _into_cset_dirty_card_queue_set.clear_n_completed_buffers();
}

class ScrubRSClosure: public HeapRegionClosure {
  G1CollectedHeap* _g1h;
  BitMap* _region_bm;
  BitMap* _card_bm;
  CardTableModRefBS* _ctbs;
public:
  ScrubRSClosure(BitMap* region_bm, BitMap* card_bm) :
    _g1h(G1CollectedHeap::heap()),
    _region_bm(region_bm), _card_bm(card_bm),
    _ctbs(_g1h->g1_barrier_set()) {}

  bool doHeapRegion(HeapRegion* r) {
    if (!r->is_continues_humongous()) {
      r->rem_set()->scrub(_ctbs, _region_bm, _card_bm);
    }
    return false;
  }
};

void G1RemSet::scrub(BitMap* region_bm, BitMap* card_bm, uint worker_num, HeapRegionClaimer* hrclaimer) {
  ScrubRSClosure scrub_cl(region_bm, card_bm);
  _g1->heap_region_par_iterate(&scrub_cl, worker_num, hrclaimer);
}

G1TriggerClosure::G1TriggerClosure() :
  _triggered(false) { }

G1InvokeIfNotTriggeredClosure::G1InvokeIfNotTriggeredClosure(G1TriggerClosure* t_cl,
                                                             OopClosure* oop_cl) :
  _trigger_cl(t_cl), _oop_cl(oop_cl) { }

G1Mux2Closure::G1Mux2Closure(OopClosure* c1, OopClosure* c2) :
  _c1(c1), _c2(c2) { }

G1UpdateRSOrPushRefOopClosure::
G1UpdateRSOrPushRefOopClosure(G1CollectedHeap* g1h,
                              G1RemSet* rs,
                              G1ParPushHeapRSClosure* push_ref_cl,
                              bool record_refs_into_cset,
                              uint worker_i) :
  _g1(g1h), _g1_rem_set(rs), _from(NULL),
  _record_refs_into_cset(record_refs_into_cset),
  _push_ref_cl(push_ref_cl), _worker_i(worker_i) { }

// Returns true if the given card contains references that point
// into the collection set, if we're checking for such references;
// false otherwise.

bool G1RemSet::refine_card(jbyte* card_ptr, uint worker_i,
                           bool check_for_refs_into_cset) {
  assert(_g1->is_in_exact(_ct_bs->addr_for(card_ptr)),
         "Card at " PTR_FORMAT " index " SIZE_FORMAT " representing heap at " PTR_FORMAT " (%u) must be in committed heap",
         p2i(card_ptr),
         _ct_bs->index_for(_ct_bs->addr_for(card_ptr)),
         p2i(_ct_bs->addr_for(card_ptr)),
         _g1->addr_to_region(_ct_bs->addr_for(card_ptr)));

  // If the card is no longer dirty, nothing to do.
  if (*card_ptr != CardTableModRefBS::dirty_card_val()) {
    // No need to return that this card contains refs that point
    // into the collection set.
    return false;
  }

  // Construct the region representing the card.
  HeapWord* start = _ct_bs->addr_for(card_ptr);
  // And find the region containing it.
  HeapRegion* r = _g1->heap_region_containing(start);

  // Why do we have to check here whether a card is on a young region,
  // given that we dirty young regions and, as a result, the
  // post-barrier is supposed to filter them out and never to enqueue
  // them? When we allocate a new region as the "allocation region" we
  // actually dirty its cards after we release the lock, since card
  // dirtying while holding the lock was a performance bottleneck. So,
  // as a result, it is possible for other threads to actually
  // allocate objects in the region (after they acquire the lock)
  // before all the cards on the region are dirtied. This is unlikely,
  // and it doesn't happen often, but it can happen. So, the extra
  // check below filters out those cards.
  if (r->is_young()) {
    return false;
  }

  // While we are processing RSet buffers during the collection, we
  // actually don't want to scan any cards in the collection set,
  // since we don't want to update remembered sets with entries that
  // point into the collection set, given that live objects from the
  // collection set are about to move and such entries will be stale
  // very soon. This change also deals with a reliability issue which
  // involves scanning a card in the collection set and coming across
  // an array that was being chunked and looking malformed. Note,
  // however, that if evacuation fails, we have to scan any objects
  // that were not moved and create any missing entries.
  if (r->in_collection_set()) {
    return false;
  }

  // The result from the hot card cache insert call is either:
  //   * pointer to the current card
  //     (implying that the current card is not 'hot'),
  //   * null
  //     (meaning we had inserted the card ptr into the "hot" card cache,
  //     which had some headroom),
  //   * a pointer to a "hot" card that was evicted from the "hot" cache.

  G1HotCardCache* hot_card_cache = _cg1r->hot_card_cache();
  if (hot_card_cache->use_cache()) {
    assert(!check_for_refs_into_cset, "sanity");
    assert(!SafepointSynchronize::is_at_safepoint(), "sanity");

    card_ptr = hot_card_cache->insert(card_ptr);
    if (card_ptr == NULL) {
      // There was no eviction. Nothing to do.
      return false;
    }

    start = _ct_bs->addr_for(card_ptr);
    r = _g1->heap_region_containing(start);

    // Checking whether the region we got back from the cache
    // is young here is inappropriate. The region could have been
    // freed, reallocated and tagged as young while in the cache.
    // Hence we could see its young type change at any time.
  }

  // Don't use addr_for(card_ptr + 1) which can ask for
  // a card beyond the heap. This is not safe without a perm
  // gen at the upper end of the heap.
  HeapWord* end = start + CardTableModRefBS::card_size_in_words;
  MemRegion dirtyRegion(start, end);

  G1ParPushHeapRSClosure* oops_in_heap_closure = NULL;
  if (check_for_refs_into_cset) {
    // ConcurrentG1RefineThreads have worker numbers larger than what
    // _cset_rs_update_cl[] is set up to handle. But those threads should
    // only be active outside of a collection, which means that when they
    // reach here they should have check_for_refs_into_cset == false.
    assert((size_t)worker_i < n_workers(), "index of worker larger than _cset_rs_update_cl[].length");
    oops_in_heap_closure = _cset_rs_update_cl[worker_i];
  }
  G1UpdateRSOrPushRefOopClosure update_rs_oop_cl(_g1,
                                                 _g1->g1_rem_set(),
                                                 oops_in_heap_closure,
                                                 check_for_refs_into_cset,
                                                 worker_i);
  update_rs_oop_cl.set_from(r);

  G1TriggerClosure trigger_cl;
  FilterIntoCSClosure into_cs_cl(_g1, &trigger_cl);
  G1InvokeIfNotTriggeredClosure invoke_cl(&trigger_cl, &into_cs_cl);
  G1Mux2Closure mux(&invoke_cl, &update_rs_oop_cl);

  FilterOutOfRegionClosure filter_then_update_rs_oop_cl(r,
                                                        (check_for_refs_into_cset ?
                                                         (OopClosure*)&mux :
                                                         (OopClosure*)&update_rs_oop_cl));

  // The region for the current card may be a young region. The
  // current card may have been a card that was evicted from the
  // card cache. When the card was inserted into the cache, we had
  // determined that its region was non-young.
  // While in the cache, the region may have been freed during a cleanup
  // pause, reallocated and tagged as young.
  //
  // We wish to filter out cards for such a region but the current
  // thread, if we're running concurrently, may "see" the young type
  // change at any time (so an earlier "is_young" check may pass or
  // fail arbitrarily). We tell the iteration code to perform this
  // filtering when it has been determined that there has been an
  // actual allocation in this region and it is therefore safe to
  // check the young type.
  bool filter_young = true;

  HeapWord* stop_point =
    r->oops_on_card_seq_iterate_careful(dirtyRegion,
                                        &filter_then_update_rs_oop_cl,
                                        filter_young,
                                        card_ptr);

  // If stop_point is non-null, then we encountered an unallocated region
  // (perhaps the unfilled portion of a TLAB.) For now, we'll dirty the
  // card and re-enqueue: if we put off the card until a GC pause, then the
  // unallocated portion will be filled in. Alternatively, we might try
  // the full complexity of the technique used in "regular" precleaning.
  if (stop_point != NULL) {
    // The card might have gotten re-dirtied and re-enqueued while we
    // worked. (In fact, it's pretty likely.)
    if (*card_ptr != CardTableModRefBS::dirty_card_val()) {
      *card_ptr = CardTableModRefBS::dirty_card_val();
      MutexLockerEx x(Shared_DirtyCardQ_lock,
                      Mutex::_no_safepoint_check_flag);
      DirtyCardQueue* sdcq =
        JavaThread::dirty_card_queue_set().shared_dirty_card_queue();
      sdcq->enqueue(card_ptr);
    }
  } else {
    _conc_refine_cards++;
  }

  // This gets set to true if the card being refined has
  // references that point into the collection set.
  bool has_refs_into_cset = trigger_cl.triggered();

  // We should only be detecting that the card contains references
  // that point into the collection set if the current thread is
  // a GC worker thread.
  assert(!has_refs_into_cset || SafepointSynchronize::is_at_safepoint(),
         "invalid result at non safepoint");

  return has_refs_into_cset;
}

void G1RemSet::print_periodic_summary_info(const char* header, uint period_count) {
  if ((G1SummarizeRSetStatsPeriod > 0) && log_is_enabled(Trace, gc, remset) &&
      (period_count % G1SummarizeRSetStatsPeriod == 0)) {

    if (!_prev_period_summary.initialized()) {
      _prev_period_summary.initialize(this);
    }

    G1RemSetSummary current;
    current.initialize(this);
    _prev_period_summary.subtract_from(&current);

    LogHandle(gc, remset) log;
    log.trace("%s", header);
    ResourceMark rm;
    _prev_period_summary.print_on(log.trace_stream());

    _prev_period_summary.set(&current);
  }
}

void G1RemSet::print_summary_info() {
  LogHandle(gc, remset, exit) log;
  if (log.is_trace()) {
    log.trace(" Cumulative RS summary");
    G1RemSetSummary current;
    current.initialize(this);
    ResourceMark rm;
    current.print_on(log.trace_stream());
  }
}

void G1RemSet::prepare_for_verify() {
  if (G1HRRSFlushLogBuffersOnVerify &&
      (VerifyBeforeGC || VerifyAfterGC) &&
      (!_g1->collector_state()->full_collection() || G1VerifyRSetsDuringFullGC)) {
    cleanupHRRS();
    _g1->set_refine_cte_cl_concurrency(false);
    if (SafepointSynchronize::is_at_safepoint()) {
      DirtyCardQueueSet& dcqs = JavaThread::dirty_card_queue_set();
      dcqs.concatenate_logs();
    }

    G1HotCardCache* hot_card_cache = _cg1r->hot_card_cache();
    bool use_hot_card_cache = hot_card_cache->use_cache();
    hot_card_cache->set_use_cache(false);

    DirtyCardQueue into_cset_dcq(&_into_cset_dirty_card_queue_set);
    updateRS(&into_cset_dcq, 0);
    _into_cset_dirty_card_queue_set.clear();

    hot_card_cache->set_use_cache(use_hot_card_cache);
    assert(JavaThread::dirty_card_queue_set().completed_buffers_num() == 0, "All should be consumed");
  }
}