/*
 * Copyright (c) 2001, 2016, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc/g1/concurrentG1Refine.hpp"
#include "gc/g1/g1BlockOffsetTable.inline.hpp"
#include "gc/g1/g1CollectedHeap.inline.hpp"
#include "gc/g1/heapRegionManager.inline.hpp"
#include "gc/g1/heapRegionRemSet.hpp"
#include "gc/shared/space.inline.hpp"
#include "memory/allocation.hpp"
#include "memory/padded.inline.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/atomic.inline.hpp"
#include "utilities/bitMap.inline.hpp"
#include "utilities/globalDefinitions.hpp"
#include "utilities/growableArray.hpp"

class PerRegionTable: public CHeapObj<mtGC> {
  friend class OtherRegionsTable;
  friend class HeapRegionRemSetIterator;

  HeapRegion*     _hr;
  BitMap          _bm;
  jint            _occupied;

  // next pointer for free/allocated 'all' list
  PerRegionTable* _next;

  // prev pointer for the allocated 'all' list
  PerRegionTable* _prev;

  // next pointer in collision list
  PerRegionTable* _collision_list_next;

  // Global free list of PRTs
  static PerRegionTable* _free_list;

protected:
  // We need access in order to union things into the base table.
  BitMap* bm() { return &_bm; }

  void recount_occupied() {
    _occupied = (jint) bm()->count_one_bits();
  }

  PerRegionTable(HeapRegion* hr) :
    _hr(hr),
    _occupied(0),
    _bm(HeapRegion::CardsPerRegion, false /* in-resource-area */),
    _collision_list_next(NULL), _next(NULL), _prev(NULL)
  {}

  void add_card_work(CardIdx_t from_card, bool par) {
    if (!_bm.at(from_card)) {
      if (par) {
        if (_bm.par_at_put(from_card, 1)) {
          Atomic::inc(&_occupied);
        }
      } else {
        _bm.at_put(from_card, 1);
        _occupied++;
      }
    }
  }

  void add_reference_work(OopOrNarrowOopStar from, bool par) {
    // Must make this robust in case "from" is not in "_hr", because of
    // concurrency.

    HeapRegion* loc_hr = hr();
    // If the test below fails, then this table was reused concurrently
    // with this operation. This is OK, since the old table was coarsened,
    // and adding a bit to the new table is never incorrect.
    if (loc_hr->is_in_reserved(from)) {
      size_t hw_offset = pointer_delta((HeapWord*)from, loc_hr->bottom());
      CardIdx_t from_card = (CardIdx_t)
          hw_offset >> (CardTableModRefBS::card_shift - LogHeapWordSize);

      assert(0 <= from_card && (size_t)from_card < HeapRegion::CardsPerRegion,
             "Must be in range.");
      add_card_work(from_card, par);
    }
  }
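
  // Worked example for the card index computation above (a sketch, assuming
  // the usual 512-byte cards, i.e. CardTableModRefBS::card_shift == 9, and
  // 64-bit heap words, i.e. LogHeapWordSize == 3): each card covers
  // 1 << (9 - 3) == 64 heap words, so a reference 200 heap words past the
  // region bottom gives hw_offset == 200 and from_card == 200 >> 6 == 3.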

public:

  HeapRegion* hr() const { return _hr; }

  jint occupied() const {
    // Overkill, but if we ever need it...
    // guarantee(_occupied == _bm.count_one_bits(), "Check");
    return _occupied;
  }

  void init(HeapRegion* hr, bool clear_links_to_all_list) {
    if (clear_links_to_all_list) {
      set_next(NULL);
      set_prev(NULL);
    }
    _hr = hr;
    _collision_list_next = NULL;
    _occupied = 0;
    _bm.clear();
  }

  void add_reference(OopOrNarrowOopStar from) {
    add_reference_work(from, /*parallel*/ true);
  }

  void seq_add_reference(OopOrNarrowOopStar from) {
    add_reference_work(from, /*parallel*/ false);
  }

  void scrub(CardTableModRefBS* ctbs, BitMap* card_bm) {
    HeapWord* hr_bot = hr()->bottom();
    size_t hr_first_card_index = ctbs->index_for(hr_bot);
    bm()->set_intersection_at_offset(*card_bm, hr_first_card_index);
    recount_occupied();
  }

  void add_card(CardIdx_t from_card_index) {
    add_card_work(from_card_index, /*parallel*/ true);
  }

  void seq_add_card(CardIdx_t from_card_index) {
    add_card_work(from_card_index, /*parallel*/ false);
  }

  // (Destructively) union the bitmap of the current table into the given
  // bitmap (which is assumed to be of the same size).
  void union_bitmap_into(BitMap* bm) {
    bm->set_union(_bm);
  }

  // Mem size in bytes.
  size_t mem_size() const {
    return sizeof(PerRegionTable) + _bm.size_in_words() * HeapWordSize;
  }

  // Requires "from" to be in "hr()".
  bool contains_reference(OopOrNarrowOopStar from) const {
    assert(hr()->is_in_reserved(from), "Precondition.");
    size_t card_ind = pointer_delta(from, hr()->bottom(),
                                    CardTableModRefBS::card_size);
    return _bm.at(card_ind);
  }

  // Bulk-free the PRTs from prt to last, assumes that they are
  // linked together using their _next field.
  static void bulk_free(PerRegionTable* prt, PerRegionTable* last) {
    while (true) {
      PerRegionTable* fl = _free_list;
      last->set_next(fl);
      PerRegionTable* res = (PerRegionTable*) Atomic::cmpxchg_ptr(prt, &_free_list, fl);
      if (res == fl) {
        return;
      }
    }
    ShouldNotReachHere();
  }

  static void free(PerRegionTable* prt) {
    bulk_free(prt, prt);
  }
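
  // Note on the CAS loops above and below: Atomic::cmpxchg_ptr(new_value,
  // dest, compare_value) returns the value that was found in *dest; the
  // exchange succeeded iff that value equals compare_value. Both the
  // free-list push (bulk_free) and the pop (alloc) therefore simply retry
  // until no other thread has raced on _free_list in between.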

  // Returns an initialized PerRegionTable instance.
  static PerRegionTable* alloc(HeapRegion* hr) {
    PerRegionTable* fl = _free_list;
    while (fl != NULL) {
      PerRegionTable* nxt = fl->next();
      PerRegionTable* res =
        (PerRegionTable*) Atomic::cmpxchg_ptr(nxt, &_free_list, fl);
      if (res == fl) {
        fl->init(hr, true);
        return fl;
      } else {
        fl = _free_list;
      }
    }
    assert(fl == NULL, "Loop condition.");
    return new PerRegionTable(hr);
  }

  PerRegionTable* next() const { return _next; }
  void set_next(PerRegionTable* next) { _next = next; }
  PerRegionTable* prev() const { return _prev; }
  void set_prev(PerRegionTable* prev) { _prev = prev; }

  // Accessor and modification routines for the pointer for the
  // singly linked collision list that links the PRTs within the
  // OtherRegionsTable::_fine_grain_regions hash table.
  //
  // It might be useful to also make the collision list doubly linked
  // to avoid iteration over the collision list during scrubbing/deletion.
  // OTOH there might not be many collisions.

  PerRegionTable* collision_list_next() const {
    return _collision_list_next;
  }

  void set_collision_list_next(PerRegionTable* next) {
    _collision_list_next = next;
  }

  PerRegionTable** collision_list_next_addr() {
    return &_collision_list_next;
  }

  static size_t fl_mem_size() {
    PerRegionTable* cur = _free_list;
    size_t res = 0;
    while (cur != NULL) {
      res += cur->mem_size();
      cur = cur->next();
    }
    return res;
  }

  static void test_fl_mem_size();
};

PerRegionTable* PerRegionTable::_free_list = NULL;

size_t OtherRegionsTable::_max_fine_entries = 0;
size_t OtherRegionsTable::_mod_max_fine_entries_mask = 0;
size_t OtherRegionsTable::_fine_eviction_stride = 0;
size_t OtherRegionsTable::_fine_eviction_sample_size = 0;

OtherRegionsTable::OtherRegionsTable(HeapRegion* hr, Mutex* m) :
  _g1h(G1CollectedHeap::heap()),
  _hr(hr), _m(m),
  _coarse_map(G1CollectedHeap::heap()->max_regions(),
              false /* in-resource-area */),
  _fine_grain_regions(NULL),
  _first_all_fine_prts(NULL), _last_all_fine_prts(NULL),
  _n_fine_entries(0), _n_coarse_entries(0),
  _fine_eviction_start(0),
  _sparse_table(hr)
{
  typedef PerRegionTable* PerRegionTablePtr;

  if (_max_fine_entries == 0) {
    assert(_mod_max_fine_entries_mask == 0, "Both or none.");
    size_t max_entries_log = (size_t)log2_long((jlong)G1RSetRegionEntries);
    _max_fine_entries = (size_t)1 << max_entries_log;
    _mod_max_fine_entries_mask = _max_fine_entries - 1;

    assert(_fine_eviction_sample_size == 0
           && _fine_eviction_stride == 0, "All init at same time.");
    _fine_eviction_sample_size = MAX2((size_t)4, max_entries_log);
    _fine_eviction_stride = _max_fine_entries / _fine_eviction_sample_size;
  }

  _fine_grain_regions = NEW_C_HEAP_ARRAY3(PerRegionTablePtr, _max_fine_entries,
                                          mtGC, CURRENT_PC, AllocFailStrategy::RETURN_NULL);

  if (_fine_grain_regions == NULL) {
    vm_exit_out_of_memory(sizeof(void*)*_max_fine_entries, OOM_MALLOC_ERROR,
                          "Failed to allocate _fine_grain_regions.");
  }

  for (size_t i = 0; i < _max_fine_entries; i++) {
    _fine_grain_regions[i] = NULL;
  }
}
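
// Worked example for the sizing above (a sketch, assuming G1RSetRegionEntries
// is 256): max_entries_log == 8, so _max_fine_entries == 256 and
// _mod_max_fine_entries_mask == 255; _fine_eviction_sample_size ==
// MAX2(4, 8) == 8 and _fine_eviction_stride == 256 / 8 == 32, i.e. an
// eviction samples eight collision chains spaced 32 buckets apart.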

void OtherRegionsTable::link_to_all(PerRegionTable* prt) {
  // We always add new entries at the head of the list for convenience;
  // the order of entries in this list does not matter.
  if (_first_all_fine_prts != NULL) {
    assert(_first_all_fine_prts->prev() == NULL, "invariant");
    _first_all_fine_prts->set_prev(prt);
    prt->set_next(_first_all_fine_prts);
  } else {
    // this is the first element we insert. Adjust the "last" pointer
    _last_all_fine_prts = prt;
    assert(prt->next() == NULL, "just checking");
  }
  // the new element is always the first element without a predecessor
  prt->set_prev(NULL);
  _first_all_fine_prts = prt;

  assert(prt->prev() == NULL, "just checking");
  assert(_first_all_fine_prts == prt, "just checking");
  assert((_first_all_fine_prts == NULL && _last_all_fine_prts == NULL) ||
         (_first_all_fine_prts != NULL && _last_all_fine_prts != NULL),
         "just checking");
  assert(_last_all_fine_prts == NULL || _last_all_fine_prts->next() == NULL,
         "just checking");
  assert(_first_all_fine_prts == NULL || _first_all_fine_prts->prev() == NULL,
         "just checking");
}

void OtherRegionsTable::unlink_from_all(PerRegionTable* prt) {
  if (prt->prev() != NULL) {
    assert(_first_all_fine_prts != prt, "just checking");
    prt->prev()->set_next(prt->next());
    // removing the last element in the list?
    if (_last_all_fine_prts == prt) {
      _last_all_fine_prts = prt->prev();
    }
  } else {
    assert(_first_all_fine_prts == prt, "just checking");
    _first_all_fine_prts = prt->next();
    // list is empty now?
    if (_first_all_fine_prts == NULL) {
      _last_all_fine_prts = NULL;
    }
  }

  if (prt->next() != NULL) {
    prt->next()->set_prev(prt->prev());
  }

  prt->set_next(NULL);
  prt->set_prev(NULL);

  assert((_first_all_fine_prts == NULL && _last_all_fine_prts == NULL) ||
         (_first_all_fine_prts != NULL && _last_all_fine_prts != NULL),
         "just checking");
  assert(_last_all_fine_prts == NULL || _last_all_fine_prts->next() == NULL,
         "just checking");
  assert(_first_all_fine_prts == NULL || _first_all_fine_prts->prev() == NULL,
         "just checking");
}

void OtherRegionsTable::add_reference(OopOrNarrowOopStar from, uint tid) {
  uint cur_hrm_ind = _hr->hrm_index();

  int from_card = (int)(uintptr_t(from) >> CardTableModRefBS::card_shift);

  if (G1FromCardCache::contains_or_replace(tid, cur_hrm_ind, from_card)) {
    assert(contains_reference(from), "We just added it!");
    return;
  }

  // Note that this may be a continued H region.
  HeapRegion* from_hr = _g1h->heap_region_containing(from);
  RegionIdx_t from_hrm_ind = (RegionIdx_t) from_hr->hrm_index();

  // If the region is already coarsened, return.
  if (_coarse_map.at(from_hrm_ind)) {
    assert(contains_reference(from), "We just added it!");
    return;
  }

  // Otherwise find a per-region table to add it to.
  size_t ind = from_hrm_ind & _mod_max_fine_entries_mask;
  PerRegionTable* prt = find_region_table(ind, from_hr);
  if (prt == NULL) {
    MutexLockerEx x(_m, Mutex::_no_safepoint_check_flag);
    // Confirm that it's really not there...
    prt = find_region_table(ind, from_hr);
    if (prt == NULL) {

      uintptr_t from_hr_bot_card_index =
        uintptr_t(from_hr->bottom()) >> CardTableModRefBS::card_shift;
      CardIdx_t card_index = from_card - from_hr_bot_card_index;
      assert(0 <= card_index && (size_t)card_index < HeapRegion::CardsPerRegion,
             "Must be in range.");
      if (G1HRRSUseSparseTable &&
          _sparse_table.add_card(from_hrm_ind, card_index)) {
        assert(contains_reference_locked(from), "We just added it!");
        return;
      }

      if (_n_fine_entries == _max_fine_entries) {
        prt = delete_region_table();
        // There is no need to clear the links to the 'all' list here:
        // prt will be reused immediately, i.e. remain in the 'all' list.
        prt->init(from_hr, false /* clear_links_to_all_list */);
      } else {
        prt = PerRegionTable::alloc(from_hr);
        link_to_all(prt);
      }

      PerRegionTable* first_prt = _fine_grain_regions[ind];
      prt->set_collision_list_next(first_prt);
      // The assignment into _fine_grain_regions allows the prt to
      // start being used concurrently. In addition to
      // collision_list_next which must be visible (else concurrent
      // parsing of the list, if any, may fail to see other entries),
      // the content of the prt must be visible (else for instance
      // some mark bits may not yet seem cleared or a 'later' update
      // performed by a concurrent thread could be undone when the
      // zeroing becomes visible). This requires store ordering.
      OrderAccess::release_store_ptr((volatile PerRegionTable*)&_fine_grain_regions[ind], prt);
      _n_fine_entries++;

      if (G1HRRSUseSparseTable) {
        // Transfer from sparse to fine-grain.
        SparsePRTEntry* sprt_entry = _sparse_table.get_entry(from_hrm_ind);
        assert(sprt_entry != NULL, "There should have been an entry");
        for (int i = 0; i < SparsePRTEntry::cards_num(); i++) {
          CardIdx_t c = sprt_entry->card(i);
          if (c != SparsePRTEntry::NullEntry) {
            prt->add_card(c);
          }
        }
        // Now we can delete the sparse entry.
        bool res = _sparse_table.delete_entry(from_hrm_ind);
        assert(res, "It should have been there.");
      }
    }
    assert(prt != NULL && prt->hr() == from_hr, "consequence");
  }
  // Note that we can't assert "prt->hr() == from_hr", because of the
  // possibility of concurrent reuse. But see head comment of
  // OtherRegionsTable for why this is OK.
  assert(prt != NULL, "Inv");

  prt->add_reference(from);
  assert(contains_reference(from), "We just added it!");
}
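
// To summarize add_reference() above: a reference is recorded at increasing
// granularity as the remembered set grows. It is first filtered through the
// from-card cache, then stored as an individual card in the sparse table;
// once the sparse entry for its region overflows, a fine-grain
// PerRegionTable (one bit per card) takes over; and once all fine-grain
// slots are in use, evicting a table sets a single bit in the coarse map,
// which from then on stands for every card in that region.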

PerRegionTable*
OtherRegionsTable::find_region_table(size_t ind, HeapRegion* hr) const {
  assert(ind < _max_fine_entries, "Preconditions.");
  PerRegionTable* prt = _fine_grain_regions[ind];
  while (prt != NULL && prt->hr() != hr) {
    prt = prt->collision_list_next();
  }
  // Loop postcondition is the method postcondition.
  return prt;
}

jint OtherRegionsTable::_n_coarsenings = 0;

PerRegionTable* OtherRegionsTable::delete_region_table() {
  assert(_m->owned_by_self(), "Precondition");
  assert(_n_fine_entries == _max_fine_entries, "Precondition");
  PerRegionTable* max = NULL;
  jint max_occ = 0;
  PerRegionTable** max_prev = NULL;
  size_t max_ind;

  size_t i = _fine_eviction_start;
  for (size_t k = 0; k < _fine_eviction_sample_size; k++) {
    size_t ii = i;
    // Make sure we get a non-NULL sample.
    while (_fine_grain_regions[ii] == NULL) {
      ii++;
      if (ii == _max_fine_entries) ii = 0;
      guarantee(ii != i, "We must find one.");
    }
    PerRegionTable** prev = &_fine_grain_regions[ii];
    PerRegionTable* cur = *prev;
    while (cur != NULL) {
      jint cur_occ = cur->occupied();
      if (max == NULL || cur_occ > max_occ) {
        max = cur;
        max_prev = prev;
        max_ind = i;
        max_occ = cur_occ;
      }
      prev = cur->collision_list_next_addr();
      cur = cur->collision_list_next();
    }
    i = i + _fine_eviction_stride;
    if (i >= _n_fine_entries) i = i - _n_fine_entries;
  }

  _fine_eviction_start++;

  if (_fine_eviction_start >= _n_fine_entries) {
    _fine_eviction_start -= _n_fine_entries;
  }

  guarantee(max != NULL, "Since _n_fine_entries > 0");
  guarantee(max_prev != NULL, "Since max != NULL.");

  // Set the corresponding coarse bit.
  size_t max_hrm_index = (size_t) max->hr()->hrm_index();
  if (!_coarse_map.at(max_hrm_index)) {
    _coarse_map.at_put(max_hrm_index, true);
    _n_coarse_entries++;
  }

  // Unsplice.
  *max_prev = max->collision_list_next();
  Atomic::inc(&_n_coarsenings);
  _n_fine_entries--;
  return max;
}
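
// delete_region_table() samples _fine_eviction_sample_size collision chains
// and coarsens the most-occupied table it finds; presumably a nearly-full
// table is the cheapest victim because its bitmap is already close to the
// "all cards in the region" over-approximation that the coarse bit imposes.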

void OtherRegionsTable::scrub(CardTableModRefBS* ctbs,
                              BitMap* region_bm, BitMap* card_bm) {
  // First eliminate garbage regions from the coarse map.
  log_develop_trace(gc, remset, scrub)("Scrubbing region %u:", _hr->hrm_index());

  assert(_coarse_map.size() == region_bm->size(), "Precondition");
  log_develop_trace(gc, remset, scrub)(" Coarse map: before = " SIZE_FORMAT "...", _n_coarse_entries);
  _coarse_map.set_intersection(*region_bm);
  _n_coarse_entries = _coarse_map.count_one_bits();
  log_develop_trace(gc, remset, scrub)(" after = " SIZE_FORMAT ".", _n_coarse_entries);

  // Now do the fine-grained maps.
  for (size_t i = 0; i < _max_fine_entries; i++) {
    PerRegionTable* cur = _fine_grain_regions[i];
    PerRegionTable** prev = &_fine_grain_regions[i];
    while (cur != NULL) {
      PerRegionTable* nxt = cur->collision_list_next();
      // If the entire region is dead, eliminate.
      log_develop_trace(gc, remset, scrub)(" For other region %u:", cur->hr()->hrm_index());
      if (!region_bm->at((size_t) cur->hr()->hrm_index())) {
        *prev = nxt;
        cur->set_collision_list_next(NULL);
        _n_fine_entries--;
        log_develop_trace(gc, remset, scrub)(" deleted via region map.");
        unlink_from_all(cur);
        PerRegionTable::free(cur);
      } else {
        // Do fine-grain elimination.
        log_develop_trace(gc, remset, scrub)(" occ: before = %4d.", cur->occupied());
        cur->scrub(ctbs, card_bm);
        log_develop_trace(gc, remset, scrub)(" after = %4d.", cur->occupied());
        // Did that empty the table completely?
        if (cur->occupied() == 0) {
          *prev = nxt;
          cur->set_collision_list_next(NULL);
          _n_fine_entries--;
          unlink_from_all(cur);
          PerRegionTable::free(cur);
        } else {
          prev = cur->collision_list_next_addr();
        }
      }
      cur = nxt;
    }
  }
  // Since we may have deleted a from_card_cache entry from the RS, clear
  // the FCC.
  clear_fcc();
}

bool OtherRegionsTable::occupancy_less_or_equal_than(size_t limit) const {
  if (limit <= (size_t)G1RSetSparseRegionEntries) {
    return occ_coarse() == 0 && _first_all_fine_prts == NULL && occ_sparse() <= limit;
  } else {
    // Current uses of this method may only use values less than G1RSetSparseRegionEntries
    // for the limit. The obvious alternative, comparing against occupied(),
    // would be too slow at this time.
    Unimplemented();
    return false;
  }
}

bool OtherRegionsTable::is_empty() const {
  return occ_sparse() == 0 && occ_coarse() == 0 && _first_all_fine_prts == NULL;
}

size_t OtherRegionsTable::occupied() const {
  size_t sum = occ_fine();
  sum += occ_sparse();
  sum += occ_coarse();
  return sum;
}

size_t OtherRegionsTable::occ_fine() const {
  size_t sum = 0;

  size_t num = 0;
  PerRegionTable* cur = _first_all_fine_prts;
  while (cur != NULL) {
    sum += cur->occupied();
    cur = cur->next();
    num++;
  }
  guarantee(num == _n_fine_entries, "just checking");
  return sum;
}

size_t OtherRegionsTable::occ_coarse() const {
  return (_n_coarse_entries * HeapRegion::CardsPerRegion);
}

size_t OtherRegionsTable::occ_sparse() const {
  return _sparse_table.occupied();
}
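
// Note that occ_coarse() counts every card of a coarsened region as
// occupied. For example, with 1 MB regions and 512-byte cards,
// HeapRegion::CardsPerRegion == 2048, so each coarse entry contributes 2048
// to occupied() (this is the "+ 2048" expected by HeapRegionRemSet::test()
// at the end of this file).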

size_t OtherRegionsTable::mem_size() const {
  size_t sum = 0;
  // all PRTs are of the same size so it is sufficient to query only one of them.
  if (_first_all_fine_prts != NULL) {
    assert(_last_all_fine_prts != NULL &&
           _first_all_fine_prts->mem_size() == _last_all_fine_prts->mem_size(), "check that mem_size() is constant");
    sum += _first_all_fine_prts->mem_size() * _n_fine_entries;
  }
  sum += (sizeof(PerRegionTable*) * _max_fine_entries);
  sum += (_coarse_map.size_in_words() * HeapWordSize);
  sum += (_sparse_table.mem_size());
  sum += sizeof(OtherRegionsTable) - sizeof(_sparse_table); // Avoid double counting above.
  return sum;
}

size_t OtherRegionsTable::static_mem_size() {
  return G1FromCardCache::static_mem_size();
}

size_t OtherRegionsTable::fl_mem_size() {
  return PerRegionTable::fl_mem_size();
}

void OtherRegionsTable::clear_fcc() {
  G1FromCardCache::clear(_hr->hrm_index());
}

void OtherRegionsTable::clear() {
  // if there are no entries, skip this step
  if (_first_all_fine_prts != NULL) {
    guarantee(_first_all_fine_prts != NULL && _last_all_fine_prts != NULL, "just checking");
    PerRegionTable::bulk_free(_first_all_fine_prts, _last_all_fine_prts);
    memset(_fine_grain_regions, 0, _max_fine_entries * sizeof(_fine_grain_regions[0]));
  } else {
    guarantee(_first_all_fine_prts == NULL && _last_all_fine_prts == NULL, "just checking");
  }

  _first_all_fine_prts = _last_all_fine_prts = NULL;
  _sparse_table.clear();
  _coarse_map.clear();
  _n_fine_entries = 0;
  _n_coarse_entries = 0;

  clear_fcc();
}

bool OtherRegionsTable::contains_reference(OopOrNarrowOopStar from) const {
  // Cast away const in this case.
  MutexLockerEx x((Mutex*)_m, Mutex::_no_safepoint_check_flag);
  return contains_reference_locked(from);
}

bool OtherRegionsTable::contains_reference_locked(OopOrNarrowOopStar from) const {
  HeapRegion* hr = _g1h->heap_region_containing(from);
  RegionIdx_t hr_ind = (RegionIdx_t) hr->hrm_index();
  // Is this region in the coarse map?
  if (_coarse_map.at(hr_ind)) return true;

  PerRegionTable* prt = find_region_table(hr_ind & _mod_max_fine_entries_mask,
                                          hr);
  if (prt != NULL) {
    return prt->contains_reference(from);
  } else {
    uintptr_t from_card =
      (uintptr_t(from) >> CardTableModRefBS::card_shift);
    uintptr_t hr_bot_card_index =
      uintptr_t(hr->bottom()) >> CardTableModRefBS::card_shift;
    assert(from_card >= hr_bot_card_index, "Inv");
    CardIdx_t card_index = from_card - hr_bot_card_index;
    assert(0 <= card_index && (size_t)card_index < HeapRegion::CardsPerRegion,
           "Must be in range.");
    return _sparse_table.contains_card(hr_ind, card_index);
  }
}

void
OtherRegionsTable::do_cleanup_work(HRRSCleanupTask* hrrs_cleanup_task) {
  _sparse_table.do_cleanup_work(hrrs_cleanup_task);
}

HeapRegionRemSet::HeapRegionRemSet(G1BlockOffsetTable* bot,
                                   HeapRegion* hr)
  : _bot(bot),
    _m(Mutex::leaf, FormatBuffer<128>("HeapRegionRemSet lock #%u", hr->hrm_index()), true, Monitor::_safepoint_check_never),
    _code_roots(), _other_regions(hr, &_m), _iter_state(Unclaimed), _iter_claimed(0) {
  reset_for_par_iteration();
}

void HeapRegionRemSet::setup_remset_size() {
  // Set up the sparse and fine-grain table sizes.
  //   table_size = base * (log(region_size / 1M) + 1)
  const int LOG_M = 20;
  int region_size_log_mb = MAX2(HeapRegion::LogOfHRGrainBytes - LOG_M, 0);
  if (FLAG_IS_DEFAULT(G1RSetSparseRegionEntries)) {
    G1RSetSparseRegionEntries = G1RSetSparseRegionEntriesBase * (region_size_log_mb + 1);
  }
  if (FLAG_IS_DEFAULT(G1RSetRegionEntries)) {
    G1RSetRegionEntries = G1RSetRegionEntriesBase * (region_size_log_mb + 1);
  }
  guarantee(G1RSetSparseRegionEntries > 0 && G1RSetRegionEntries > 0, "Sanity");
}
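
// Worked example for the sizing above (a sketch, assuming the bases keep
// their default values of 4 and 256): with 1 MB regions,
// region_size_log_mb == 0, giving 4 sparse and 256 fine entries; with 32 MB
// regions (LogOfHRGrainBytes == 25), region_size_log_mb == 5, giving
// 4 * 6 == 24 sparse and 256 * 6 == 1536 fine entries.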

bool HeapRegionRemSet::claim_iter() {
  if (_iter_state != Unclaimed) return false;
  jint res = Atomic::cmpxchg(Claimed, (jint*)(&_iter_state), Unclaimed);
  return (res == Unclaimed);
}

void HeapRegionRemSet::set_iter_complete() {
  _iter_state = Complete;
}

bool HeapRegionRemSet::iter_is_complete() {
  return _iter_state == Complete;
}
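
// A sketch of the intended claiming protocol (the iterating caller lives
// elsewhere, e.g. in the remembered set scanning code): exactly one thread
// wins the Unclaimed -> Claimed transition and performs the iteration,
// roughly
//
//   if (hrrs->claim_iter()) {
//     HeapRegionRemSetIterator iter(hrrs);
//     size_t card_index;
//     while (iter.has_next(card_index)) {
//       ...
//     }
//     hrrs->set_iter_complete();
//   }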

#ifndef PRODUCT
void HeapRegionRemSet::print() {
  HeapRegionRemSetIterator iter(this);
  size_t card_index;
  while (iter.has_next(card_index)) {
    HeapWord* card_start = _bot->address_for_index(card_index);
    tty->print_cr(" Card " PTR_FORMAT, p2i(card_start));
  }
  if (iter.n_yielded() != occupied()) {
    tty->print_cr("Yielded disagrees with occupied:");
    tty->print_cr(" " SIZE_FORMAT_W(6) " yielded (" SIZE_FORMAT_W(6)
                  " coarse, " SIZE_FORMAT_W(6) " fine).",
                  iter.n_yielded(),
                  iter.n_yielded_coarse(), iter.n_yielded_fine());
    tty->print_cr(" " SIZE_FORMAT_W(6) " occ (" SIZE_FORMAT_W(6)
                  " coarse, " SIZE_FORMAT_W(6) " fine).",
                  occupied(), occ_coarse(), occ_fine());
  }
  guarantee(iter.n_yielded() == occupied(),
            "We should have yielded all the represented cards.");
}
#endif

void HeapRegionRemSet::cleanup() {
  SparsePRT::cleanup_all();
}

void HeapRegionRemSet::clear() {
  MutexLockerEx x(&_m, Mutex::_no_safepoint_check_flag);
  clear_locked();
}

void HeapRegionRemSet::clear_locked() {
  _code_roots.clear();
  _other_regions.clear();
  assert(occupied_locked() == 0, "Should be clear.");
  reset_for_par_iteration();
}

void HeapRegionRemSet::reset_for_par_iteration() {
  _iter_state = Unclaimed;
  _iter_claimed = 0;
  // It's good to check this to make sure that the two methods are in sync.
  assert(verify_ready_for_par_iteration(), "post-condition");
}

void HeapRegionRemSet::scrub(CardTableModRefBS* ctbs,
                             BitMap* region_bm, BitMap* card_bm) {
  _other_regions.scrub(ctbs, region_bm, card_bm);
}

// Code roots support
//
// The code root set is protected by two separate locking schemes.
// When at a safepoint the per-hrrs lock must be held during modifications,
// except when doing a full GC.
// When not at a safepoint the CodeCache_lock must be held during modifications.
// When concurrent readers access the contains() function
// (during the evacuation phase) no removals are allowed.

void HeapRegionRemSet::add_strong_code_root(nmethod* nm) {
  assert(nm != NULL, "sanity");
  assert((!CodeCache_lock->owned_by_self() || SafepointSynchronize::is_at_safepoint()),
         "should call add_strong_code_root_locked instead");
  // Optimistic unlocked contains-check
  if (!_code_roots.contains(nm)) {
    MutexLockerEx ml(&_m, Mutex::_no_safepoint_check_flag);
    add_strong_code_root_locked(nm);
  }
}

void HeapRegionRemSet::add_strong_code_root_locked(nmethod* nm) {
  assert(nm != NULL, "sanity");
  assert((CodeCache_lock->owned_by_self() ||
          (SafepointSynchronize::is_at_safepoint() &&
           (_m.owned_by_self() || Thread::current()->is_VM_thread()))),
         "not safely locked");
  _code_roots.add(nm);
}

void HeapRegionRemSet::remove_strong_code_root(nmethod* nm) {
  assert(nm != NULL, "sanity");
  assert_locked_or_safepoint(CodeCache_lock);

  MutexLockerEx ml(CodeCache_lock->owned_by_self() ? NULL : &_m, Mutex::_no_safepoint_check_flag);
  _code_roots.remove(nm);

  // Check that there were no duplicates
  guarantee(!_code_roots.contains(nm), "duplicate entry found");
}

void HeapRegionRemSet::strong_code_roots_do(CodeBlobClosure* blk) const {
  _code_roots.nmethods_do(blk);
}

void HeapRegionRemSet::clean_strong_code_roots(HeapRegion* hr) {
  _code_roots.clean(hr);
}

size_t HeapRegionRemSet::strong_code_roots_mem_size() {
  return _code_roots.mem_size();
}

HeapRegionRemSetIterator::HeapRegionRemSetIterator(HeapRegionRemSet* hrrs) :
  _hrrs(hrrs),
  _g1h(G1CollectedHeap::heap()),
  _coarse_map(&hrrs->_other_regions._coarse_map),
  _bot(hrrs->_bot),
  _is(Sparse),
  // Set these values so that we increment to the first region.
  _coarse_cur_region_index(-1),
  _coarse_cur_region_cur_card(HeapRegion::CardsPerRegion-1),
  _cur_card_in_prt(HeapRegion::CardsPerRegion),
  _fine_cur_prt(NULL),
  _n_yielded_coarse(0),
  _n_yielded_fine(0),
  _n_yielded_sparse(0),
  _sparse_iter(&hrrs->_other_regions._sparse_table) {}

bool HeapRegionRemSetIterator::coarse_has_next(size_t& card_index) {
  if (_hrrs->_other_regions._n_coarse_entries == 0) return false;
  // Go to the next card.
  _coarse_cur_region_cur_card++;
  // Was that the last card in the current region?
  if (_coarse_cur_region_cur_card == HeapRegion::CardsPerRegion) {
    // Yes: find the next region. This may leave _coarse_cur_region_index
    // set to the last index, in which case there are no more coarse
    // regions.
    _coarse_cur_region_index =
      (int) _coarse_map->get_next_one_offset(_coarse_cur_region_index + 1);
    if ((size_t)_coarse_cur_region_index < _coarse_map->size()) {
      _coarse_cur_region_cur_card = 0;
      HeapWord* r_bot =
        _g1h->region_at((uint) _coarse_cur_region_index)->bottom();
      _cur_region_card_offset = _bot->index_for(r_bot);
    } else {
      return false;
    }
  }
  // If we didn't return false above, then we can yield a card.
  card_index = _cur_region_card_offset + _coarse_cur_region_cur_card;
  return true;
}

bool HeapRegionRemSetIterator::fine_has_next(size_t& card_index) {
  if (fine_has_next()) {
    _cur_card_in_prt =
      _fine_cur_prt->_bm.get_next_one_offset(_cur_card_in_prt + 1);
  }
  if (_cur_card_in_prt == HeapRegion::CardsPerRegion) {
    // _fine_cur_prt may still be NULL if there are no PRTs at all for
    // the remembered set.
    if (_fine_cur_prt == NULL || _fine_cur_prt->next() == NULL) {
      return false;
    }
    PerRegionTable* next_prt = _fine_cur_prt->next();
    switch_to_prt(next_prt);
    _cur_card_in_prt = _fine_cur_prt->_bm.get_next_one_offset(_cur_card_in_prt + 1);
  }

  card_index = _cur_region_card_offset + _cur_card_in_prt;
  guarantee(_cur_card_in_prt < HeapRegion::CardsPerRegion,
            "Card index " SIZE_FORMAT " must be within the region", _cur_card_in_prt);
  return true;
}

bool HeapRegionRemSetIterator::fine_has_next() {
  return _cur_card_in_prt != HeapRegion::CardsPerRegion;
}

void HeapRegionRemSetIterator::switch_to_prt(PerRegionTable* prt) {
  assert(prt != NULL, "Cannot switch to NULL prt");
  _fine_cur_prt = prt;

  HeapWord* r_bot = _fine_cur_prt->hr()->bottom();
  _cur_region_card_offset = _bot->index_for(r_bot);

  // The bitmap scan for the PRT always scans from _cur_card_in_prt + 1.
  // To avoid special-casing this start case, and not miss the first bitmap
  // entry, initialize _cur_card_in_prt with -1 instead of 0.
  _cur_card_in_prt = (size_t)-1;
}
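
// has_next() below drives the three sub-iterators in a fixed order: sparse
// entries first, then fine-grain PRTs, then coarsened regions, falling
// through to the next phase when one is exhausted. Typical single-threaded
// use (as in print() above and test() below):
//
//   HeapRegionRemSetIterator iter(hrrs);
//   size_t card_index;
//   while (iter.has_next(card_index)) {
//     HeapWord* card_start = bot->address_for_index(card_index);
//     // ... process the card ...
//   }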

bool HeapRegionRemSetIterator::has_next(size_t& card_index) {
  switch (_is) {
  case Sparse: {
    if (_sparse_iter.has_next(card_index)) {
      _n_yielded_sparse++;
      return true;
    }
    // Otherwise, deliberate fall-through
    _is = Fine;
    PerRegionTable* initial_fine_prt = _hrrs->_other_regions._first_all_fine_prts;
    if (initial_fine_prt != NULL) {
      switch_to_prt(_hrrs->_other_regions._first_all_fine_prts);
    }
  }
  case Fine:
    if (fine_has_next(card_index)) {
      _n_yielded_fine++;
      return true;
    }
    // Otherwise, deliberate fall-through
    _is = Coarse;
  case Coarse:
    if (coarse_has_next(card_index)) {
      _n_yielded_coarse++;
      return true;
    }
    // Otherwise...
    break;
  }
  assert(ParallelGCThreads > 1 ||
         n_yielded() == _hrrs->occupied(),
         "Should have yielded all the cards in the rem set "
         "(in the non-par case).");
  return false;
}

void HeapRegionRemSet::reset_for_cleanup_tasks() {
  SparsePRT::reset_for_cleanup_tasks();
}

void HeapRegionRemSet::do_cleanup_work(HRRSCleanupTask* hrrs_cleanup_task) {
  _other_regions.do_cleanup_work(hrrs_cleanup_task);
}

void
HeapRegionRemSet::finish_cleanup_task(HRRSCleanupTask* hrrs_cleanup_task) {
  SparsePRT::finish_cleanup_task(hrrs_cleanup_task);
}

#ifndef PRODUCT
void HeapRegionRemSet::test() {
  os::sleep(Thread::current(), (jlong)5000, false);
  G1CollectedHeap* g1h = G1CollectedHeap::heap();

  // Run with "-XX:G1RSetRegionEntries=4", so that the fine-grain table has
  // four buckets and regions 1 and 5 end up in the same hash bucket.
  HeapRegion* hr0 = g1h->region_at(0);
  HeapRegion* hr1 = g1h->region_at(1);
  HeapRegion* hr2 = g1h->region_at(5);
  HeapRegion* hr3 = g1h->region_at(6);
  HeapRegion* hr4 = g1h->region_at(7);
  HeapRegion* hr5 = g1h->region_at(8);

  HeapWord* hr1_start = hr1->bottom();
  HeapWord* hr1_mid = hr1_start + HeapRegion::GrainWords/2;
  HeapWord* hr1_last = hr1->end() - 1;

  HeapWord* hr2_start = hr2->bottom();
  HeapWord* hr2_mid = hr2_start + HeapRegion::GrainWords/2;
  HeapWord* hr2_last = hr2->end() - 1;

  HeapWord* hr3_start = hr3->bottom();
  HeapWord* hr3_mid = hr3_start + HeapRegion::GrainWords/2;
  HeapWord* hr3_last = hr3->end() - 1;

  HeapRegionRemSet* hrrs = hr0->rem_set();

  // Make three references from region 0x101...
  hrrs->add_reference((OopOrNarrowOopStar)hr1_start);
  hrrs->add_reference((OopOrNarrowOopStar)hr1_mid);
  hrrs->add_reference((OopOrNarrowOopStar)hr1_last);

  hrrs->add_reference((OopOrNarrowOopStar)hr2_start);
  hrrs->add_reference((OopOrNarrowOopStar)hr2_mid);
  hrrs->add_reference((OopOrNarrowOopStar)hr2_last);

  hrrs->add_reference((OopOrNarrowOopStar)hr3_start);
  hrrs->add_reference((OopOrNarrowOopStar)hr3_mid);
  hrrs->add_reference((OopOrNarrowOopStar)hr3_last);

  // Now cause a coarsening.
  hrrs->add_reference((OopOrNarrowOopStar)hr4->bottom());
  hrrs->add_reference((OopOrNarrowOopStar)hr5->bottom());

  // Now, does iteration yield all the cards we inserted?
  HeapRegionRemSetIterator iter(hrrs);
  size_t sum = 0;
  size_t card_index;
  while (iter.has_next(card_index)) {
    HeapWord* card_start =
      G1CollectedHeap::heap()->bot()->address_for_index(card_index);
    tty->print_cr(" Card " PTR_FORMAT ".", p2i(card_start));
    sum++;
  }
  guarantee(sum == 11 - 3 + 2048, "Failure");
  guarantee(sum == hrrs->occupied(), "Failure");
}
#endif