/*
 * Copyright (c) 2013, 2015, Red Hat, Inc. and/or its affiliates.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "memory/allocation.hpp"
#include "gc/g1/heapRegionBounds.inline.hpp"

#include "gc/shared/gcTimer.hpp"
#include "gc/shared/gcTraceTime.inline.hpp"
#include "gc/shared/parallelCleaning.hpp"

#include "gc/shenandoah/brooksPointer.hpp"
#include "gc/shenandoah/shenandoahBarrierSet.hpp"
#include "gc/shenandoah/shenandoahCollectionSet.hpp"
#include "gc/shenandoah/shenandoahCollectorPolicy.hpp"
#include "gc/shenandoah/shenandoahConcurrentMark.hpp"
#include "gc/shenandoah/shenandoahConcurrentThread.hpp"
#include "gc/shenandoah/shenandoahFreeSet.hpp"
#include "gc/shenandoah/shenandoahHeap.inline.hpp"
#include "gc/shenandoah/shenandoahHeapRegion.hpp"
#include "gc/shenandoah/shenandoahHeapRegionSet.hpp"
#include "gc/shenandoah/shenandoahHumongous.hpp"
#include "gc/shenandoah/shenandoahMarkCompact.hpp"
#include "gc/shenandoah/shenandoahMonitoringSupport.hpp"
#include "gc/shenandoah/shenandoahRootProcessor.hpp"
#include "gc/shenandoah/vm_operations_shenandoah.hpp"

#include "runtime/vmThread.hpp"
#include "services/mallocTracker.hpp"

const char* ShenandoahHeap::name() const {
  return "Shenandoah";
}

void ShenandoahHeap::print_heap_locations(HeapWord* start, HeapWord* end) {
  for (HeapWord* cur = start; cur < end; cur++) {
    tty->print_cr(PTR_FORMAT" : "PTR_FORMAT, p2i(cur), p2i(*((HeapWord**) cur)));
  }
}

class PrintHeapRegionsClosure : public ShenandoahHeapRegionClosure {
private:
  outputStream* _st;
public:
  PrintHeapRegionsClosure() : _st(tty) {}
  PrintHeapRegionsClosure(outputStream* st) : _st(st) {}

  bool doHeapRegion(ShenandoahHeapRegion* r) {
    r->print_on(_st);
    return false;
  }
};

jint ShenandoahHeap::initialize() {
  CollectedHeap::pre_initialize();

  size_t init_byte_size = collector_policy()->initial_heap_byte_size();
  size_t max_byte_size = collector_policy()->max_heap_byte_size();
  if (ShenandoahGCVerbose) {
    tty->print_cr("init_byte_size = "SIZE_FORMAT","SIZE_FORMAT_HEX" max_byte_size = "SIZE_FORMAT","SIZE_FORMAT_HEX,
                  init_byte_size, init_byte_size, max_byte_size, max_byte_size);
  }

  Universe::check_alignment(max_byte_size,
                            ShenandoahHeapRegion::RegionSizeBytes,
                            "shenandoah heap");
  Universe::check_alignment(init_byte_size,
                            ShenandoahHeapRegion::RegionSizeBytes,
                            "shenandoah heap");
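  // Editor's note: both -Xms and -Xmx must be multiples of the region size,
  // as checked above. Illustrative numbers only: with a 1 MB region size,
  // -Xms512m -Xmx1g yields 512 initial regions and a maximum of 1024.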
  ReservedSpace heap_rs = Universe::reserve_heap(max_byte_size,
                                                 Arguments::conservative_max_heap_alignment());
  initialize_reserved_region((HeapWord*) heap_rs.base(), (HeapWord*) (heap_rs.base() + heap_rs.size()));

  set_barrier_set(new ShenandoahBarrierSet(this));
  ReservedSpace pgc_rs = heap_rs.first_part(max_byte_size);
  _storage.initialize(pgc_rs, init_byte_size);
  if (ShenandoahGCVerbose) {
    tty->print_cr("Calling initialize on reserved space base = "PTR_FORMAT" end = "PTR_FORMAT,
                  p2i(pgc_rs.base()), p2i(pgc_rs.base() + pgc_rs.size()));
  }

  _num_regions = init_byte_size / ShenandoahHeapRegion::RegionSizeBytes;
  _max_regions = max_byte_size / ShenandoahHeapRegion::RegionSizeBytes;
  _initialSize = _num_regions * ShenandoahHeapRegion::RegionSizeBytes;
  size_t regionSizeWords = ShenandoahHeapRegion::RegionSizeBytes / HeapWordSize;
  assert(init_byte_size == _initialSize, "tautology");
  _ordered_regions = new ShenandoahHeapRegionSet(_max_regions);
  _sorted_regions = new ShenandoahHeapRegionSet(_max_regions);
  _collection_set = new ShenandoahCollectionSet(_max_regions);
  _free_regions = new ShenandoahFreeSet(_max_regions);

  for (size_t i = 0; i < _num_regions; i++) {
    ShenandoahHeapRegion* current = new ShenandoahHeapRegion();
    current->initialize_heap_region((HeapWord*) pgc_rs.base() +
                                    regionSizeWords * i, regionSizeWords, i);
    _free_regions->add_region(current);
    _ordered_regions->add_region(current);
    _sorted_regions->add_region(current);
  }
  assert(((size_t) _ordered_regions->active_regions()) == _num_regions, "");
  _first_region = _ordered_regions->get(0);
  _first_region_bottom = _first_region->bottom();
  assert((((size_t) _first_region_bottom) &
          (ShenandoahHeapRegion::RegionSizeBytes - 1)) == 0,
         "misaligned heap: "PTR_FORMAT, p2i(_first_region_bottom));

  _numAllocs = 0;

  if (ShenandoahGCVerbose) {
    tty->print("All Regions\n");
    print_heap_regions();
    tty->print("Free Regions\n");
    _free_regions->print();
  }

  // The call below uses stuff (the SATB* things) that are in G1, but probably
  // belong into a shared location.
  JavaThread::satb_mark_queue_set().initialize(SATB_Q_CBL_mon,
                                               SATB_Q_FL_lock,
                                               20 /* G1SATBProcessCompletedThreshold */,
                                               Shared_SATB_Q_lock);

  // Reserve space for prev and next bitmap.
  size_t bitmap_size = CMBitMap::compute_size(heap_rs.size());
  MemRegion heap_region = MemRegion((HeapWord*) heap_rs.base(), heap_rs.size() / HeapWordSize);

  ReservedSpace bitmap0(ReservedSpace::allocation_align_size_up(bitmap_size));
  os::commit_memory_or_exit(bitmap0.base(), bitmap0.size(), false, "couldn't allocate mark bitmap");
  MemTracker::record_virtual_memory_type(bitmap0.base(), mtGC);
  MemRegion bitmap_region0 = MemRegion((HeapWord*) bitmap0.base(), bitmap0.size() / HeapWordSize);
  _mark_bit_map0.initialize(heap_region, bitmap_region0);
  _prev_mark_bit_map = &_mark_bit_map0;

  ReservedSpace bitmap1(ReservedSpace::allocation_align_size_up(bitmap_size));
  os::commit_memory_or_exit(bitmap1.base(), bitmap1.size(), false, "couldn't allocate mark bitmap");
  MemTracker::record_virtual_memory_type(bitmap1.base(), mtGC);
  MemRegion bitmap_region1 = MemRegion((HeapWord*) bitmap1.base(), bitmap1.size() / HeapWordSize);
  _mark_bit_map1.initialize(heap_region, bitmap_region1);
  _next_mark_bit_map = &_mark_bit_map1;

  // Initialize fast collection set test structure.
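  // Editor's sketch: _in_cset_fast_test is a byte-per-region table biased
  // by the heap base, so membership can be tested with plain address
  // arithmetic, without first normalizing to a region index:
  //
  //   bool in_cset(HeapWord* addr) {
  //     return _in_cset_fast_test[(uintx) addr >> ShenandoahHeapRegion::RegionSizeShift];
  //   }
  //
  // Subtracting (heap_base >> RegionSizeShift) from the array base below is
  // what makes direct indexing by (addr >> RegionSizeShift) work.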
  _in_cset_fast_test_length = _max_regions;
  _in_cset_fast_test_base =
    NEW_C_HEAP_ARRAY(bool, (size_t) _in_cset_fast_test_length, mtGC);
  _in_cset_fast_test = _in_cset_fast_test_base -
    ((uintx) pgc_rs.base() >> ShenandoahHeapRegion::RegionSizeShift);
  clear_cset_fast_test();

  _top_at_mark_starts_base =
    NEW_C_HEAP_ARRAY(HeapWord*, _max_regions, mtGC);
  _top_at_mark_starts = _top_at_mark_starts_base -
    ((uintx) pgc_rs.base() >> ShenandoahHeapRegion::RegionSizeShift);

  for (size_t i = 0; i < _num_regions; i++) {
    _in_cset_fast_test_base[i] = false; // Not in cset
    _top_at_mark_starts_base[i] = _ordered_regions->get(i)->bottom();
  }

  _monitoring_support = new ShenandoahMonitoringSupport(this);

  _concurrent_gc_thread = new ShenandoahConcurrentThread();

  ShenandoahMarkCompact::initialize();

  return JNI_OK;
}

ShenandoahHeap::ShenandoahHeap(ShenandoahCollectorPolicy* policy) :
  CollectedHeap(),
  _shenandoah_policy(policy),
  _concurrent_mark_in_progress(false),
  _evacuation_in_progress(false),
  _full_gc_in_progress(false),
  _free_regions(NULL),
  _collection_set(NULL),
  _bytes_allocated_since_cm(0),
  _bytes_allocated_during_cm(0),
  _max_allocated_gc(0),
  _allocated_last_gc(0),
  _used_start_gc(0),
  _max_conc_workers((int) MAX2((uint) ConcGCThreads, 1U)),
  _max_parallel_workers((int) MAX2((uint) ParallelGCThreads, 1U)),
  _ref_processor(NULL),
  _in_cset_fast_test(NULL),
  _in_cset_fast_test_base(NULL),
  _top_at_mark_starts(NULL),
  _top_at_mark_starts_base(NULL),
  _mark_bit_map0(),
  _mark_bit_map1(),
  _cancelled_concgc(false),
  _need_update_refs(false),
  _need_reset_bitmaps(false),
  _growing_heap(0),
  _gc_timer(new (ResourceObj::C_HEAP, mtGC) ConcurrentGCTimer()) {

  if (ShenandoahLogConfig) {
    tty->print_cr("Parallel GC threads: "UINT32_FORMAT, ParallelGCThreads);
    tty->print_cr("Concurrent GC threads: "UINT32_FORMAT, ConcGCThreads);
    tty->print_cr("Parallel reference processing enabled: %s", BOOL_TO_STR(ParallelRefProcEnabled));
  }
  _scm = new ShenandoahConcurrentMark();
  _used = 0;
  // This is odd. They are concurrent gc threads, but they are also task threads.
  // Framework doesn't allow both.
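  // Editor's note: both gangs below are therefore created as GC task
  // threads (are_GC_task_threads = true, are_ConcurrentGC_threads = false),
  // even though _conc_workers runs the concurrent phases; only the gang
  // names distinguish the two.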
  _workers = new WorkGang("Parallel GC Threads", ParallelGCThreads,
                          /* are_GC_task_threads */ true,
                          /* are_ConcurrentGC_threads */ false);
  _conc_workers = new WorkGang("Concurrent GC Threads", ConcGCThreads,
                               /* are_GC_task_threads */ true,
                               /* are_ConcurrentGC_threads */ false);
  if ((_workers == NULL) || (_conc_workers == NULL)) {
    vm_exit_during_initialization("Failed necessary allocation.");
  } else {
    _workers->initialize_workers();
    _conc_workers->initialize_workers();
  }
}

class ResetBitmapTask : public AbstractGangTask {
private:
  ShenandoahHeapRegionSet* _regions;

public:
  ResetBitmapTask(ShenandoahHeapRegionSet* regions) :
    AbstractGangTask("Parallel Reset Bitmap Task"),
    _regions(regions) {
    _regions->clear_current_index();
  }

  void work(uint worker_id) {
    ShenandoahHeapRegion* region = _regions->claim_next();
    ShenandoahHeap* heap = ShenandoahHeap::heap();
    while (region != NULL) {
      HeapWord* bottom = region->bottom();
      HeapWord* top = region->top_prev_mark_bitmap();
      region->set_top_prev_mark_bitmap(region->top_at_prev_mark_start());
      if (top > bottom) {
        heap->reset_mark_bitmap_range(bottom, top);
      }
      region = _regions->claim_next();
    }
  }
};

void ShenandoahHeap::reset_mark_bitmap() {
  GCTraceTime(Info, gc, phases) time("Concurrent reset bitmaps", gc_timer(), GCCause::_no_gc);

  ResetBitmapTask task = ResetBitmapTask(_ordered_regions);
  conc_workers()->run_task(&task);
}

void ShenandoahHeap::reset_mark_bitmap_range(HeapWord* from, HeapWord* to) {
  _next_mark_bit_map->clear_range(MemRegion(from, to));
}

bool ShenandoahHeap::is_bitmap_clear() {
  HeapWord* start = _ordered_regions->bottom();
  HeapWord* end = _ordered_regions->end();
  return _next_mark_bit_map->getNextMarkedWordAddress(start, end) == end;
}

void ShenandoahHeap::print_on(outputStream* st) const {
  st->print("Shenandoah Heap");
  st->print(" total = " SIZE_FORMAT " K, used " SIZE_FORMAT " K ", capacity() / K, used() / K);
  st->print("Region size = " SIZE_FORMAT "K ", ShenandoahHeapRegion::RegionSizeBytes / K);
  if (_concurrent_mark_in_progress) {
    st->print("marking ");
  }
  if (_evacuation_in_progress) {
    st->print("evacuating ");
  }
  if (_cancelled_concgc) {
    st->print("cancelled ");
  }
  st->print("\n");

  if (Verbose) {
    print_heap_regions(st);
  }
}

class InitGCLABClosure : public ThreadClosure {
public:
  void do_thread(Thread* thread) {
    thread->gclab().initialize(true);
  }
};

void ShenandoahHeap::post_initialize() {
  if (UseTLAB) {
    InitGCLABClosure init_gclabs;
    for (JavaThread* thread = Threads::first(); thread != NULL; thread = thread->next()) {
      init_gclabs.do_thread(thread);
    }
    gc_threads_do(&init_gclabs);
  }
  _scm->initialize();

  ref_processing_init();

  _max_workers = MAX2(_max_parallel_workers, _max_conc_workers);
}

class CalculateUsedRegionClosure : public ShenandoahHeapRegionClosure {
  size_t sum;
public:
  CalculateUsedRegionClosure() {
    sum = 0;
  }

  bool doHeapRegion(ShenandoahHeapRegion* r) {
    sum = sum + r->used();
    return false;
  }

  size_t getResult() { return sum; }
};

size_t ShenandoahHeap::calculateUsed() {
  CalculateUsedRegionClosure cl;
  heap_region_iterate(&cl);
  return cl.getResult();
}
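// Editor's sketch: heap_region_iterate() plus a small closure is the
// general pattern for per-region aggregates, as in CalculateUsedRegionClosure
// above. The closure below is a hypothetical illustration only, not used
// anywhere in this file:
//
//   class CountHumongousRegionsClosure : public ShenandoahHeapRegionClosure {
//     size_t _count;
//   public:
//     CountHumongousRegionsClosure() : _count(0) {}
//     bool doHeapRegion(ShenandoahHeapRegion* r) {
//       if (r->is_humongous()) _count++;
//       return false; // false means: continue iterating
//     }
//     size_t count() { return _count; }
//   };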
void ShenandoahHeap::verify_heap_size_consistency() {
  assert(calculateUsed() == used(),
         "heap used size must be consistent heap-used: "SIZE_FORMAT" regions-used: "SIZE_FORMAT,
         used(), calculateUsed());
}

size_t ShenandoahHeap::used() const {
  OrderAccess::acquire();
  return _used;
}

void ShenandoahHeap::increase_used(size_t bytes) {
  Atomic::add(bytes, &_used);
}

void ShenandoahHeap::set_used(size_t bytes) {
  _used = bytes;
  OrderAccess::release();
}

void ShenandoahHeap::decrease_used(size_t bytes) {
  assert(_used >= bytes, "never decrease used below zero");
  Atomic::add(-bytes, &_used);
}

size_t ShenandoahHeap::capacity() const {
  return _num_regions * ShenandoahHeapRegion::RegionSizeBytes;
}

bool ShenandoahHeap::is_maximal_no_gc() const {
  Unimplemented();
  return true;
}

size_t ShenandoahHeap::max_capacity() const {
  return _max_regions * ShenandoahHeapRegion::RegionSizeBytes;
}

size_t ShenandoahHeap::min_capacity() const {
  return _initialSize;
}

VirtualSpace* ShenandoahHeap::storage() const {
  return (VirtualSpace*) &_storage;
}

bool ShenandoahHeap::is_in(const void* p) const {
  HeapWord* first_region_bottom = _first_region->bottom();
  HeapWord* last_region_end = first_region_bottom + (ShenandoahHeapRegion::RegionSizeBytes / HeapWordSize) * _num_regions;
  return p >= _first_region_bottom && p < last_region_end;
}

bool ShenandoahHeap::is_scavengable(const void* p) {
  return true;
}

HeapWord* ShenandoahHeap::allocate_from_gclab_slow(Thread* thread, size_t size) {
  // Retain tlab and allocate object in shared space if
  // the amount free in the tlab is too large to discard.
  if (thread->gclab().free() > thread->gclab().refill_waste_limit()) {
    thread->gclab().record_slow_allocation(size);
    return NULL;
  }

  // Discard gclab and allocate a new one.
  // To minimize fragmentation, the last GCLAB may be smaller than the rest.
  size_t new_gclab_size = thread->gclab().compute_size(size);

  thread->gclab().clear_before_allocation();

  if (new_gclab_size == 0) {
    return NULL;
  }

  // Allocate a new GCLAB...
  HeapWord* obj = allocate_new_gclab(new_gclab_size);
  if (obj == NULL) {
    return NULL;
  }

  if (ZeroTLAB) {
    // ...and clear it.
    Copy::zero_to_words(obj, new_gclab_size);
  } else {
    // ...and zap just allocated object.
#ifdef ASSERT
    // Skip mangling the space corresponding to the object header to
    // ensure that the returned space is not considered parsable by
    // any concurrent GC thread.
    size_t hdr_size = oopDesc::header_size();
    Copy::fill_to_words(obj + hdr_size, new_gclab_size - hdr_size, badHeapWordVal);
#endif // ASSERT
  }
  thread->gclab().fill(obj, obj + size, new_gclab_size);
  return obj;
}

HeapWord* ShenandoahHeap::allocate_new_tlab(size_t word_size) {
  return allocate_new_tlab(word_size, false);
}

HeapWord* ShenandoahHeap::allocate_new_gclab(size_t word_size) {
  return allocate_new_tlab(word_size, true);
}
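// Editor's note: the retain-vs-refill decision in allocate_from_gclab_slow()
// above mirrors the regular TLAB policy. Illustrative example (numbers
// hypothetical): with a 256 KB GCLAB and a refill waste limit of 4 KB, a
// failed allocation into a GCLAB that still has 16 KB free keeps the GCLAB
// and falls back to a shared allocation; with only 2 KB free, the GCLAB is
// retired and a fresh one is allocated.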
HeapWord* ShenandoahHeap::allocate_new_tlab(size_t word_size, bool evacuating) {
  HeapWord* result = allocate_memory(word_size, evacuating);

  if (result != NULL) {
    assert(! heap_region_containing(result)->is_in_collection_set(),
           "Never allocate in dirty region");
    _bytes_allocated_since_cm += word_size * HeapWordSize;

#ifdef ASSERT
    if (ShenandoahTraceTLabs) {
      tty->print_cr("allocating new tlab of size "SIZE_FORMAT" at addr "PTR_FORMAT, word_size, p2i(result));
    }
#endif
  }
  return result;
}

ShenandoahHeap* ShenandoahHeap::heap() {
  CollectedHeap* heap = Universe::heap();
  assert(heap != NULL, "Uninitialized access to ShenandoahHeap::heap()");
  assert(heap->kind() == CollectedHeap::ShenandoahHeap, "not a shenandoah heap");
  return (ShenandoahHeap*) heap;
}

ShenandoahHeap* ShenandoahHeap::heap_no_check() {
  CollectedHeap* heap = Universe::heap();
  return (ShenandoahHeap*) heap;
}

HeapWord* ShenandoahHeap::allocate_memory(size_t word_size, bool evacuating) {
  HeapWord* result = allocate_memory_work(word_size);

  if (result == NULL) {
    bool retry;
    do {
      // Try to grow the heap.
      retry = check_grow_heap();
      result = allocate_memory_work(word_size);
    } while (retry && result == NULL);
  }

  if (result == NULL && ! evacuating) { // Allocation failed, try full-GC, then retry allocation.
    // tty->print_cr("failed to allocate "SIZE_FORMAT" bytes, free regions:", word_size * HeapWordSize);
    // _free_regions->print();
    collect(GCCause::_allocation_failure);
    result = allocate_memory_work(word_size);
  }

  // Only update monitoring counters when not calling from a write-barrier.
  // Otherwise we might attempt to grab the Service_lock, which we must
  // not do when coming from a write-barrier (because the thread might
  // already hold the Compile_lock).
  if (! evacuating) {
    monitoring_support()->update_counters();
  }

  return result;
}

bool ShenandoahHeap::call_from_write_barrier(bool evacuating) {
  return evacuating && Thread::current()->is_Java_thread();
}

bool ShenandoahHeap::check_grow_heap() {
  assert(_free_regions->max_regions() >= _free_regions->active_regions(), "don't get negative");

  size_t available = _max_regions - _num_regions;
  if (available == 0) {
    return false; // Don't retry.
  }

  jbyte growing = Atomic::cmpxchg(1, &_growing_heap, 0);
  if (growing == 0) {
    // Only one thread succeeds this, and this one gets
    // to grow the heap. All other threads can continue
    // to allocate from the reserve.
    grow_heap_by(MIN2(available, ShenandoahAllocReserveRegions));

    // Reset it back to 0, so that other threads can take it again.
    Atomic::store(0, &_growing_heap);
    return true;
  } else {
    // Let other threads work, then try again.
    os::naked_yield();
    return true;
  }
}
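// Editor's note on the latch above: Atomic::cmpxchg(1, &_growing_heap, 0)
// returns the previous value, so exactly one thread observes 0 and becomes
// the grower; everyone else sees 1, yields, and retries allocation. A
// minimal sketch of the same pattern (hypothetical names):
//
//   jbyte prev = Atomic::cmpxchg((jbyte) 1, &_latch, (jbyte) 0);
//   if (prev == 0) {
//     do_exclusive_work();
//     Atomic::store((jbyte) 0, &_latch);  // release the latch
//   }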
HeapWord* ShenandoahHeap::allocate_memory_work(size_t word_size) {
  if (word_size * HeapWordSize > ShenandoahHeapRegion::RegionSizeBytes) {
    return allocate_large_memory(word_size);
  }

  jlong current_idx = _free_regions->current_index();
  assert(current_idx >= 0, "expect >= 0");
  ShenandoahHeapRegion* my_current_region = _free_regions->get(current_idx);

  if (my_current_region == NULL) {
    return NULL; // No more room to make a new region. OOM.
  }
  assert(my_current_region != NULL, "should have a region at this point");

#ifdef ASSERT
  if (my_current_region->is_in_collection_set()) {
    print_heap_regions();
  }
#endif
  assert(! my_current_region->is_in_collection_set(), "never get targeted regions in free-lists");
  assert(! my_current_region->is_humongous(), "never attempt to allocate from humongous object regions");

  HeapWord* result = my_current_region->par_allocate(word_size);

  while (result == NULL && my_current_region != NULL) {
    // 2nd attempt. Try next region.
    size_t remaining = my_current_region->free();
    current_idx = _free_regions->par_claim_next(current_idx);
    my_current_region = _free_regions->get(current_idx);

    if (my_current_region == NULL) {
      return NULL; // No more room to make a new region. OOM.
    }
    // _free_regions->increase_used(remaining);
    assert(my_current_region != NULL, "should have a region at this point");
    assert(! my_current_region->is_in_collection_set(), "never get targeted regions in free-lists");
    assert(! my_current_region->is_humongous(), "never attempt to allocate from humongous object regions");
    result = my_current_region->par_allocate(word_size);
  }

  if (result != NULL) {
    my_current_region->increase_live_data(word_size * HeapWordSize);
    increase_used(word_size * HeapWordSize);
    _free_regions->increase_used(word_size * HeapWordSize);
  }
  return result;
}

HeapWord* ShenandoahHeap::allocate_large_memory(size_t words) {
  uint required_regions = ShenandoahHumongous::required_regions(words * HeapWordSize);
  assert(required_regions <= _max_regions, "sanity check");

  ShenandoahHeapRegion* r = _free_regions->claim_contiguous(required_regions);

  HeapWord* result = NULL;

  if (r != NULL) {
    result = r->bottom();

    if (ShenandoahTraceHumongous) {
      tty->print_cr("allocating humongous object of size: "SIZE_FORMAT" KB at location "PTR_FORMAT" in start region "SIZE_FORMAT,
                    (words * HeapWordSize) / K, p2i(result), r->region_number());
    }
  } else {
    if (ShenandoahTraceHumongous) {
      tty->print_cr("allocating humongous object of size: "SIZE_FORMAT" KB at location "PTR_FORMAT" failed",
                    (words * HeapWordSize) / K, p2i(result));
    }
  }

  return result;
}
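// Editor's note: required_regions() above is a ceiling division by the
// region size. Illustrative example (region size hypothetical): with 1 MB
// regions, an object of 1 MB plus one word needs 2 contiguous regions, the
// second of which is almost entirely wasted; this is the usual cost of
// humongous allocation in a region-based heap.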
HeapWord* ShenandoahHeap::mem_allocate(size_t size,
                                       bool* gc_overhead_limit_was_exceeded) {
#ifdef ASSERT
  if (ShenandoahVerify && _numAllocs > 1000000) {
    _numAllocs = 0;
  }
  _numAllocs++;
#endif
  HeapWord* filler = allocate_memory(BrooksPointer::word_size() + size, false);
  HeapWord* result = filler + BrooksPointer::word_size();
  if (filler != NULL) {
    initialize_brooks_ptr(oop(result));
    _bytes_allocated_since_cm += size * HeapWordSize;
#ifdef ASSERT
    if (ShenandoahTraceAllocations) {
      if (*gc_overhead_limit_was_exceeded) {
        tty->print("gc_overhead_limit_was_exceeded");
      }
      tty->print_cr("mem_allocate object of size "SIZE_FORMAT" at addr "PTR_FORMAT" by thread %d ",
                    size, p2i(result), Thread::current()->osthread()->thread_id());
    }
#endif

    assert(! heap_region_containing(result)->is_in_collection_set(), "never allocate in targeted region");
    return result;
  } else {
    /*
    tty->print_cr("Out of memory. Requested number of words: "SIZE_FORMAT" used heap: "INT64_FORMAT", bytes allocated since last CM: "INT64_FORMAT,
                  size, used(), _bytes_allocated_since_cm);
    {
      print_heap_regions();
      tty->print("Printing "SIZE_FORMAT" free regions:\n", _free_regions->count());
      _free_regions->print();
    }
    */
    return NULL;
  }
}

class ParallelEvacuateRegionObjectClosure : public ObjectClosure {
private:
  ShenandoahHeap* _heap;
  Thread* _thread;
public:
  ParallelEvacuateRegionObjectClosure(ShenandoahHeap* heap) :
    _heap(heap), _thread(Thread::current()) {
  }

  void do_object(oop p) {
#ifdef ASSERT
    if (ShenandoahTraceEvacuations) {
      tty->print_cr("Calling ParallelEvacuateRegionObjectClosure on "PTR_FORMAT" of size %d\n", p2i((HeapWord*) p), p->size());
    }
#endif

    assert(_heap->is_marked_prev(p), "expect only marked objects");
    if (oopDesc::unsafe_equals(p, ShenandoahBarrierSet::resolve_oop_static_not_null(p))) {
      _heap->evacuate_object(p, _thread);
    }
  }
};

#ifdef ASSERT
class VerifyEvacuatedObjectClosure : public ObjectClosure {
public:
  void do_object(oop p) {
    if (ShenandoahHeap::heap()->is_marked_prev(p)) {
      oop p_prime = oopDesc::bs()->read_barrier(p);
      assert(! oopDesc::unsafe_equals(p, p_prime), "Should point to evacuated copy");
      if (p->klass() != p_prime->klass()) {
        tty->print_cr("copy has different class than original:");
        p->klass()->print_on(tty);
        p_prime->klass()->print_on(tty);
      }
      assert(p->klass() == p_prime->klass(), "Should have the same class p: "PTR_FORMAT", p_prime: "PTR_FORMAT,
             p2i((HeapWord*) p), p2i((HeapWord*) p_prime));
      // assert(p->mark() == p_prime->mark(), "Should have the same mark");
      assert(p->size() == p_prime->size(), "Should be the same size");
      assert(oopDesc::unsafe_equals(p_prime, oopDesc::bs()->read_barrier(p_prime)), "Only forwarded once");
    }
  }
};

void ShenandoahHeap::verify_evacuated_region(ShenandoahHeapRegion* from_region) {
  if (ShenandoahGCVerbose) {
    tty->print("Verifying From Region\n");
    from_region->print();
  }

  VerifyEvacuatedObjectClosure verify_evacuation;
  from_region->marked_object_iterate(&verify_evacuation);
}
#endif

void ShenandoahHeap::parallel_evacuate_region(ShenandoahHeapRegion* from_region) {
  assert(from_region->getLiveData() > 0, "all-garbage regions are reclaimed earlier");

  ParallelEvacuateRegionObjectClosure evacuate_region(this);

#ifdef ASSERT
  if (ShenandoahGCVerbose) {
    tty->print_cr("parallel_evacuate_region starting from_region "SIZE_FORMAT": free_regions = "SIZE_FORMAT,
                  from_region->region_number(), _free_regions->count());
  }
#endif

  marked_object_iterate(from_region, &evacuate_region);

#ifdef ASSERT
  if (ShenandoahVerify && ! cancelled_concgc()) {
    verify_evacuated_region(from_region);
  }
  if (ShenandoahGCVerbose) {
    tty->print_cr("parallel_evacuate_region after from_region = "SIZE_FORMAT": free_regions = "SIZE_FORMAT,
                  from_region->region_number(), _free_regions->count());
  }
#endif
}

class ParallelEvacuationTask : public AbstractGangTask {
private:
  ShenandoahHeap* _sh;
  ShenandoahCollectionSet* _cs;

public:
  ParallelEvacuationTask(ShenandoahHeap* sh,
                         ShenandoahCollectionSet* cs) :
    AbstractGangTask("Parallel Evacuation Task"),
    _sh(sh),
    _cs(cs) {}

  void work(uint worker_id) {
    ShenandoahHeapRegion* from_hr = _cs->claim_next();

    while (from_hr != NULL) {
      if (ShenandoahGCVerbose) {
        tty->print_cr("Thread "INT32_FORMAT" claimed Heap Region "SIZE_FORMAT,
                      worker_id,
                      from_hr->region_number());
        from_hr->print();
      }

      assert(from_hr->getLiveData() > 0, "all-garbage regions are reclaimed early");
      _sh->parallel_evacuate_region(from_hr);

      if (_sh->cancelled_concgc()) {
        // tty->print("We cancelled concgc while working on region %d\n", from_hr->region_number());
        // from_hr->print();
        break;
      }
      from_hr = _cs->claim_next();
    }
  }
};

class RecycleDirtyRegionsClosure : public ShenandoahHeapRegionClosure {
private:
  ShenandoahHeap* _heap;
  size_t _bytes_reclaimed;
public:
  RecycleDirtyRegionsClosure() : _heap(ShenandoahHeap::heap()), _bytes_reclaimed(0) {}

  bool doHeapRegion(ShenandoahHeapRegion* r) {
    if (_heap->cancelled_concgc()) {
      // The aborted marking bitmap needs to be cleared at the end of cycle.
      // Setup the top-marker for this.
      r->set_top_prev_mark_bitmap(r->top_at_mark_start());
      return false;
    }

    r->swap_top_at_mark_start();

    if (r->is_in_collection_set()) {
      // tty->print_cr("recycling region "INT32_FORMAT":", r->region_number());
      // r->print_on(tty);
      _heap->decrease_used(r->used());
      _bytes_reclaimed += r->used();
      r->recycle();
      _heap->free_regions()->add_region(r);
    }

    return false;
  }

  size_t bytes_reclaimed() { return _bytes_reclaimed; }
  void clear_bytes_reclaimed() { _bytes_reclaimed = 0; }
};
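// Editor's note: claim_next() (used by ParallelEvacuationTask above) hands
// collection-set regions to workers one at a time, so load balancing is
// per-region: a worker that finishes a sparse region simply claims the next
// one, with no up-front partitioning of the collection set.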
void ShenandoahHeap::recycle_dirty_regions() {
  RecycleDirtyRegionsClosure cl;
  cl.clear_bytes_reclaimed();

  heap_region_iterate(&cl);

  _shenandoah_policy->record_bytes_reclaimed(cl.bytes_reclaimed());
  if (! cancelled_concgc()) {
    clear_cset_fast_test();
  }
}

ShenandoahFreeSet* ShenandoahHeap::free_regions() {
  return _free_regions;
}

void ShenandoahHeap::print_heap_regions(outputStream* st) const {
  PrintHeapRegionsClosure pc1(st);
  heap_region_iterate(&pc1);
}

class PrintAllRefsOopClosure : public ExtendedOopClosure {
private:
  int _index;
  const char* _prefix;

public:
  PrintAllRefsOopClosure(const char* prefix) : _index(0), _prefix(prefix) {}

private:
  template <class T>
  inline void do_oop_work(T* p) {
    oop o = oopDesc::load_decode_heap_oop(p);
    if (o != NULL) {
      if (ShenandoahHeap::heap()->is_in(o) && o->is_oop()) {
        tty->print_cr("%s "INT32_FORMAT" ("PTR_FORMAT")-> "PTR_FORMAT" (marked: %s) (%s "PTR_FORMAT")",
                      _prefix, _index, p2i(p), p2i((HeapWord*) o),
                      BOOL_TO_STR(ShenandoahHeap::heap()->is_marked_current(o)),
                      o->klass()->internal_name(), p2i(o->klass()));
      } else {
        tty->print_cr("%s "INT32_FORMAT" ("PTR_FORMAT" dirty -> "PTR_FORMAT" (not in heap, possibly corrupted or dirty)",
                      _prefix, _index, p2i(p), p2i((HeapWord*) o));
      }
    } else {
      tty->print_cr("%s "INT32_FORMAT" ("PTR_FORMAT") -> "PTR_FORMAT, _prefix, _index, p2i(p), p2i((HeapWord*) o));
    }
    _index++;
  }

public:
  void do_oop(oop* p) {
    do_oop_work(p);
  }

  void do_oop(narrowOop* p) {
    do_oop_work(p);
  }
};

class PrintAllRefsObjectClosure : public ObjectClosure {
  const char* _prefix;

public:
  PrintAllRefsObjectClosure(const char* prefix) : _prefix(prefix) {}

  void do_object(oop p) {
    if (ShenandoahHeap::heap()->is_in(p)) {
      tty->print_cr("%s object "PTR_FORMAT" (marked: %s) (%s "PTR_FORMAT") refers to:",
                    _prefix, p2i((HeapWord*) p),
                    BOOL_TO_STR(ShenandoahHeap::heap()->is_marked_current(p)),
                    p->klass()->internal_name(), p2i(p->klass()));
      PrintAllRefsOopClosure cl(_prefix);
      p->oop_iterate(&cl);
    }
  }
};

void ShenandoahHeap::print_all_refs(const char* prefix) {
  tty->print_cr("printing all references in the heap");
  tty->print_cr("root references:");

  ensure_parsability(false);

  PrintAllRefsOopClosure cl(prefix);
  roots_iterate(&cl);

  tty->print_cr("heap references:");
  PrintAllRefsObjectClosure cl2(prefix);
  object_iterate(&cl2);
}

class VerifyAfterMarkingOopClosure : public ExtendedOopClosure {
private:
  ShenandoahHeap* _heap;

public:
  VerifyAfterMarkingOopClosure() :
    _heap(ShenandoahHeap::heap()) { }

private:
  template <class T>
  inline void do_oop_work(T* p) {
    oop o = oopDesc::load_decode_heap_oop(p);
    if (o != NULL) {
      if (! _heap->is_marked_prev(o)) {
        _heap->print_heap_regions();
        _heap->print_all_refs("post-mark");
        tty->print_cr("oop not marked, although referrer is marked: "PTR_FORMAT": in_heap: %s, is_marked: %s",
                      p2i((HeapWord*) o), BOOL_TO_STR(_heap->is_in(o)), BOOL_TO_STR(_heap->is_marked_prev(o)));
        _heap->print_heap_locations((HeapWord*) o, (HeapWord*) o + o->size());

        tty->print_cr("oop class: %s", o->klass()->internal_name());
        if (_heap->is_in(p)) {
          oop referrer = oop(_heap->heap_region_containing(p)->block_start_const(p));
          tty->print_cr("Referrer starts at addr "PTR_FORMAT, p2i((HeapWord*) referrer));
          referrer->print();
          _heap->print_heap_locations((HeapWord*) referrer, (HeapWord*) referrer + referrer->size());
        }
        tty->print_cr("heap region containing object:");
        _heap->heap_region_containing(o)->print();
        tty->print_cr("heap region containing referrer:");
        _heap->heap_region_containing(p)->print();
        tty->print_cr("heap region containing forwardee:");
        _heap->heap_region_containing(oopDesc::bs()->read_barrier(o))->print();
      }
      assert(o->is_oop(), "oop must be an oop");
      assert(Metaspace::contains(o->klass()), "klass pointer must go to metaspace");
      if (! oopDesc::unsafe_equals(o, oopDesc::bs()->read_barrier(o))) {
        tty->print_cr("oop has forwardee: p: "PTR_FORMAT" (%s), o = "PTR_FORMAT" (%s), new-o: "PTR_FORMAT" (%s)",
                      p2i(p), BOOL_TO_STR(_heap->heap_region_containing(p)->is_in_collection_set()),
                      p2i((HeapWord*) o), BOOL_TO_STR(_heap->heap_region_containing(o)->is_in_collection_set()),
                      p2i((HeapWord*) oopDesc::bs()->read_barrier(o)),
                      BOOL_TO_STR(_heap->heap_region_containing(oopDesc::bs()->read_barrier(o))->is_in_collection_set()));
        tty->print_cr("oop class: %s", o->klass()->internal_name());
      }
      assert(oopDesc::unsafe_equals(o, oopDesc::bs()->read_barrier(o)), "oops must not be forwarded");
      assert(! _heap->heap_region_containing(o)->is_in_collection_set(), "references must not point to dirty heap regions");
      assert(_heap->is_marked_prev(o), "live oops must be marked");
    }
  }

public:
  void do_oop(oop* p) {
    do_oop_work(p);
  }

  void do_oop(narrowOop* p) {
    do_oop_work(p);
  }
};

class IterateMarkedCurrentObjectsClosure : public ObjectClosure {
private:
  ShenandoahHeap* _heap;
  ExtendedOopClosure* _cl;
public:
  IterateMarkedCurrentObjectsClosure(ExtendedOopClosure* cl) :
    _heap(ShenandoahHeap::heap()), _cl(cl) {}

  void do_object(oop p) {
    if (_heap->is_marked_current(p)) {
      p->oop_iterate(_cl);
    }
  }
};

void ShenandoahHeap::verify_heap_after_marking() {
  verify_heap_size_consistency();

  if (ShenandoahGCVerbose) {
    tty->print("verifying heap after marking\n");
  }
  VerifyAfterMarkingOopClosure cl;
  roots_iterate(&cl);

  IterateMarkedCurrentObjectsClosure marked_oops(&cl);
  object_iterate(&marked_oops);
}

void ShenandoahHeap::reclaim_humongous_region_at(ShenandoahHeapRegion* r) {
  assert(r->is_humongous_start(), "reclaim regions starting with the first one");

  oop humongous_obj = oop(r->bottom() + BrooksPointer::word_size());
  size_t size = humongous_obj->size() + BrooksPointer::word_size();
  uint required_regions = ShenandoahHumongous::required_regions(size * HeapWordSize);
  uint index = r->region_number();

  assert(r->getLiveData() == 0, "liveness must be zero");

  for (size_t i = 0; i < required_regions; i++) {
    ShenandoahHeapRegion* region = _ordered_regions->get(index++);

    assert((region->is_humongous_start() || region->is_humongous_continuation()),
           "expect correct humongous start or continuation");

    if (ShenandoahTraceHumongous) {
      tty->print_cr("reclaiming "UINT32_FORMAT" humongous regions for object of size: "SIZE_FORMAT" words",
                    required_regions, size);
      region->print();
    }

    region->reset();
    ShenandoahHeap::heap()->decrease_used(ShenandoahHeapRegion::RegionSizeBytes);
  }
}

class ShenandoahReclaimHumongousRegionsClosure : public ShenandoahHeapRegionClosure {
  bool doHeapRegion(ShenandoahHeapRegion* r) {
    ShenandoahHeap* heap = ShenandoahHeap::heap();

    if (r->is_humongous_start()) {
      oop humongous_obj = oop(r->bottom() + BrooksPointer::word_size());
      if (! heap->is_marked_current(humongous_obj)) {
        heap->reclaim_humongous_region_at(r);
      }
    }
    return false;
  }
};

#ifdef ASSERT
class CheckCollectionSetClosure : public ShenandoahHeapRegionClosure {
  bool doHeapRegion(ShenandoahHeapRegion* r) {
    assert(! r->is_in_collection_set(), "Should have been cleared by now");
    return false;
  }
};
#endif

void ShenandoahHeap::prepare_for_concurrent_evacuation() {
  assert(_ordered_regions->get(0)->region_number() == 0, "FIXME CHF");
  /*
  tty->print("Thread %d started prepare_for_concurrent_evacuation\n",
             Thread::current()->osthread()->thread_id());
  */
  if (! cancelled_concgc()) {
    recycle_dirty_regions();

    ensure_parsability(true);

#ifdef ASSERT
    if (ShenandoahVerify) {
      verify_heap_after_marking();
    }
#endif

    // NOTE: This needs to be done during a stop-the-world pause, because
    // putting regions into the collection set concurrently with Java threads
    // will create a race. In particular, acmp could fail because when we
    // resolve the first operand, the containing region might not yet be in
    // the collection set, and thus return the original oop. When the 2nd
    // operand gets resolved, the region could be in the collection set
    // and the oop gets evacuated. If both operands have originally been
    // the same, we get false negatives.
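    // Editor's example of that race (hypothetical Java snippet):
    //
    //   if (a == b) { ... }   // acmp of two references
    //
    // A thread resolves 'a' before region R is added to the collection set
    // and gets the from-space address; 'b' is resolved after R was added
    // and after the object moved, yielding the to-space address. Although
    // 'a' and 'b' name the same object, the address compare fails.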
    _collection_set->clear();
    _free_regions->clear();

    ShenandoahReclaimHumongousRegionsClosure reclaim;
    heap_region_iterate(&reclaim);

#ifdef ASSERT
    CheckCollectionSetClosure ccsc;
    _ordered_regions->heap_region_iterate(&ccsc);
#endif

    _shenandoah_policy->choose_collection_set(_collection_set);

    _shenandoah_policy->choose_free_set(_free_regions);

    /*
    tty->print("Sorted free regions\n");
    _free_regions->print();
    */

    if (_collection_set->count() == 0) {
      cancel_concgc();
    }

    _bytes_allocated_since_cm = 0;

    Universe::update_heap_info_at_gc();
  }
}

class RetireTLABClosure : public ThreadClosure {
private:
  bool _retire;

public:
  RetireTLABClosure(bool retire) : _retire(retire) {
  }

  void do_thread(Thread* thread) {
    thread->gclab().make_parsable(_retire);
  }
};

void ShenandoahHeap::ensure_parsability(bool retire_tlabs) {
  if (UseTLAB) {
    CollectedHeap::ensure_parsability(retire_tlabs);

    RetireTLABClosure cl(retire_tlabs);
    for (JavaThread* thread = Threads::first(); thread != NULL; thread = thread->next()) {
      cl.do_thread(thread);
    }
    gc_threads_do(&cl);
  }
}
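// Editor's note: the root-evacuation closure below implements the standard
// "evacuate and update in place" step: for each root slot, if the referent
// is in the collection set, resolve it through the forwarding pointer,
// copy it on first touch (evacuate_object), and store the to-space address
// back into the slot, so roots never retain from-space references.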
class ShenandoahEvacuateUpdateRootsClosure : public ExtendedOopClosure {
private:
  ShenandoahHeap* _heap;
  Thread* _thread;
public:
  ShenandoahEvacuateUpdateRootsClosure() :
    _heap(ShenandoahHeap::heap()), _thread(Thread::current()) {
  }

private:
  template <class T>
  void do_oop_work(T* p) {
    assert(_heap->is_evacuation_in_progress(), "Only do this when evacuation is in progress");

    T o = oopDesc::load_heap_oop(p);
    if (! oopDesc::is_null(o)) {
      oop obj = oopDesc::decode_heap_oop_not_null(o);
      if (_heap->in_cset_fast_test((HeapWord*) obj)) {
        assert(_heap->is_marked_prev(obj), "only evacuate marked objects %d %d",
               _heap->is_marked_prev(obj), _heap->is_marked_prev(ShenandoahBarrierSet::resolve_oop_static_not_null(obj)));
        oop resolved = ShenandoahBarrierSet::resolve_oop_static_not_null(obj);
        if (oopDesc::unsafe_equals(resolved, obj)) {
          resolved = _heap->evacuate_object(obj, _thread);
        }
        oopDesc::encode_store_heap_oop(p, resolved);
      }
    }
  }

public:
  void do_oop(oop* p) {
    do_oop_work(p);
  }
  void do_oop(narrowOop* p) {
    do_oop_work(p);
  }
};

class ShenandoahEvacuateUpdateRootsTask : public AbstractGangTask {
  ShenandoahRootProcessor* _rp;
public:
  ShenandoahEvacuateUpdateRootsTask(ShenandoahRootProcessor* rp) :
    AbstractGangTask("Shenandoah evacuate and update roots"),
    _rp(rp) {
    // Nothing else to do.
  }

  void work(uint worker_id) {
    ShenandoahEvacuateUpdateRootsClosure cl;
    MarkingCodeBlobClosure blobsCl(&cl, CodeBlobToOopClosure::FixRelocations);

    _rp->process_evacuate_roots(&cl, &blobsCl, worker_id);
  }
};

void ShenandoahHeap::evacuate_and_update_roots() {
  COMPILER2_PRESENT(DerivedPointerTable::clear());

  if (ShenandoahVerifyReadsToFromSpace) {
    set_from_region_protection(false);
  }

  assert(SafepointSynchronize::is_at_safepoint(), "Only iterate roots while world is stopped");
  ClassLoaderDataGraph::clear_claimed_marks();

  {
    ShenandoahRootProcessor rp(this, _max_parallel_workers, ShenandoahCollectorPolicy::evac_thread_roots);
    ShenandoahEvacuateUpdateRootsTask roots_task(&rp);
    workers()->run_task(&roots_task);
  }

  if (ShenandoahVerifyReadsToFromSpace) {
    set_from_region_protection(true);
  }

  COMPILER2_PRESENT(DerivedPointerTable::update_pointers());
}

void ShenandoahHeap::do_evacuation() {
  parallel_evacuate();

  if (ShenandoahVerify && ! cancelled_concgc()) {
    VM_ShenandoahVerifyHeapAfterEvacuation verify_after_evacuation;
    if (Thread::current()->is_VM_thread()) {
      verify_after_evacuation.doit();
    } else {
      VMThread::execute(&verify_after_evacuation);
    }
  }
}
void ShenandoahHeap::parallel_evacuate() {
  if (! cancelled_concgc()) {
    if (ShenandoahGCVerbose) {
      tty->print_cr("starting parallel_evacuate");
    }

    _shenandoah_policy->record_phase_start(ShenandoahCollectorPolicy::conc_evac);

    if (ShenandoahGCVerbose) {
      tty->print("Printing all available regions");
      print_heap_regions();
    }

    if (ShenandoahPrintCollectionSet) {
      tty->print("Printing collection set which contains "SIZE_FORMAT" regions:\n", _collection_set->count());
      _collection_set->print();

      tty->print("Printing free set which contains "SIZE_FORMAT" regions:\n", _free_regions->count());
      _free_regions->print();
    }

    ParallelEvacuationTask evacuationTask = ParallelEvacuationTask(this, _collection_set);

    conc_workers()->run_task(&evacuationTask);

    if (ShenandoahGCVerbose) {
      tty->print("Printing postgc collection set which contains "SIZE_FORMAT" regions:\n",
                 _collection_set->count());
      _collection_set->print();

      tty->print("Printing postgc free regions which contain "SIZE_FORMAT" free regions:\n",
                 _free_regions->count());
      _free_regions->print();

      tty->print_cr("finished parallel_evacuate");

      tty->print_cr("all regions after evacuation:");
      print_heap_regions();
    }

    _shenandoah_policy->record_phase_end(ShenandoahCollectorPolicy::conc_evac);

    if (cancelled_concgc()) {
      concurrent_thread()->schedule_full_gc();
    }
  }
}

class VerifyEvacuationClosure : public ExtendedOopClosure {
private:
  ShenandoahHeap* _heap;
  ShenandoahHeapRegion* _from_region;

public:
  VerifyEvacuationClosure(ShenandoahHeapRegion* from_region) :
    _heap(ShenandoahHeap::heap()), _from_region(from_region) { }

private:
  template <class T>
  inline void do_oop_work(T* p) {
    oop heap_oop = oopDesc::load_decode_heap_oop(p);
    if (! oopDesc::is_null(heap_oop)) {
      guarantee(! _from_region->is_in(heap_oop),
                "no references to from-region allowed after evacuation: "PTR_FORMAT,
                p2i((HeapWord*) heap_oop));
    }
  }

public:
  void do_oop(oop* p) {
    do_oop_work(p);
  }

  void do_oop(narrowOop* p) {
    do_oop_work(p);
  }
};

void ShenandoahHeap::roots_iterate(OopClosure* cl) {
  assert(SafepointSynchronize::is_at_safepoint(), "Only iterate roots while world is stopped");

  CodeBlobToOopClosure blobsCl(cl, false);
  CLDToOopClosure cldCl(cl);

  ClassLoaderDataGraph::clear_claimed_marks();

  ShenandoahRootProcessor rp(this, 1);
  rp.process_all_roots(cl, NULL, &cldCl, &blobsCl, 0);
}

void ShenandoahHeap::verify_evacuation(ShenandoahHeapRegion* from_region) {
  VerifyEvacuationClosure rootsCl(from_region);
  roots_iterate(&rootsCl);
}

bool ShenandoahHeap::supports_tlab_allocation() const {
  return true;
}

size_t ShenandoahHeap::unsafe_max_tlab_alloc(Thread* thread) const {
  jlong idx = _free_regions->current_index();
  ShenandoahHeapRegion* current = _free_regions->get(idx);
  if (current == NULL) {
    return 0;
  } else if (current->free() > MinTLABSize) {
    return current->free();
  } else {
    return MinTLABSize;
  }
}
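// Editor's note, a plausible reading of the fallback above: returning 0
// would tell callers that TLAB allocation is impossible, whereas reporting
// MinTLABSize lets the next TLAB request proceed, fail in the nearly-full
// region, and thereby advance the free-set to a fresh region.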
size_t ShenandoahHeap::max_tlab_size() const {
  return ShenandoahHeapRegion::RegionSizeBytes;
}

class ResizeGCLABClosure : public ThreadClosure {
public:
  void do_thread(Thread* thread) {
    thread->gclab().resize();
  }
};

void ShenandoahHeap::resize_all_tlabs() {
  CollectedHeap::resize_all_tlabs();

  ResizeGCLABClosure cl;
  for (JavaThread* thread = Threads::first(); thread != NULL; thread = thread->next()) {
    cl.do_thread(thread);
  }
  gc_threads_do(&cl);
}

class AccumulateStatisticsGCLABClosure : public ThreadClosure {
public:
  void do_thread(Thread* thread) {
    thread->gclab().accumulate_statistics();
    thread->gclab().initialize_statistics();
  }
};

void ShenandoahHeap::accumulate_statistics_all_gclabs() {
  AccumulateStatisticsGCLABClosure cl;
  for (JavaThread* thread = Threads::first(); thread != NULL; thread = thread->next()) {
    cl.do_thread(thread);
  }
  gc_threads_do(&cl);
}

bool ShenandoahHeap::can_elide_tlab_store_barriers() const {
  return true;
}

oop ShenandoahHeap::new_store_pre_barrier(JavaThread* thread, oop new_obj) {
  // Overridden to do nothing.
  return new_obj;
}

bool ShenandoahHeap::can_elide_initializing_store_barrier(oop new_obj) {
  return true;
}

bool ShenandoahHeap::card_mark_must_follow_store() const {
  return false;
}

void ShenandoahHeap::collect(GCCause::Cause cause) {
  assert(cause != GCCause::_gc_locker, "no JNI critical callback");
  if (GCCause::is_user_requested_gc(cause)) {
    if (! DisableExplicitGC) {
      cancel_concgc();
      _concurrent_gc_thread->do_full_gc(cause);
    }
  } else if (cause == GCCause::_allocation_failure) {
    cancel_concgc();
    collector_policy()->set_should_clear_all_soft_refs(true);
    _concurrent_gc_thread->do_full_gc(cause);
  }
}

void ShenandoahHeap::do_full_collection(bool clear_all_soft_refs) {
  // assert(false, "Shouldn't need to do full collections");
}

AdaptiveSizePolicy* ShenandoahHeap::size_policy() {
  Unimplemented();
  return NULL;
}

CollectorPolicy* ShenandoahHeap::collector_policy() const {
  return _shenandoah_policy;
}

HeapWord* ShenandoahHeap::block_start(const void* addr) const {
  Space* sp = heap_region_containing(addr);
  if (sp != NULL) {
    return sp->block_start(addr);
  }
  return NULL;
}

size_t ShenandoahHeap::block_size(const HeapWord* addr) const {
  Space* sp = heap_region_containing(addr);
  assert(sp != NULL, "block_size of address outside of heap");
  return sp->block_size(addr);
}

bool ShenandoahHeap::block_is_obj(const HeapWord* addr) const {
  Space* sp = heap_region_containing(addr);
  return sp->block_is_obj(addr);
}

jlong ShenandoahHeap::millis_since_last_gc() {
  return 0;
}

void ShenandoahHeap::prepare_for_verify() {
  if (SafepointSynchronize::is_at_safepoint() || ! UseTLAB) {
    ensure_parsability(false);
  }
}

void ShenandoahHeap::print_gc_threads_on(outputStream* st) const {
  workers()->print_worker_threads_on(st);
  conc_workers()->print_worker_threads_on(st);
}

void ShenandoahHeap::gc_threads_do(ThreadClosure* tcl) const {
  workers()->threads_do(tcl);
  conc_workers()->threads_do(tcl);
}

void ShenandoahHeap::print_tracing_info() const {
  if (ShenandoahPrintGCDetails) {
    _shenandoah_policy->print_tracing_info();
  }
}

class ShenandoahVerifyRootsClosure : public ExtendedOopClosure {
private:
  ShenandoahHeap* _heap;
  VerifyOption _vo;
  bool _failures;
public:
  // _vo == UsePrevMarking -> use "prev" marking information,
  // _vo == UseNextMarking -> use "next" marking information,
  // _vo == UseMarkWord    -> use mark word from object header.
  ShenandoahVerifyRootsClosure(VerifyOption vo) :
    _heap(ShenandoahHeap::heap()),
    _vo(vo),
    _failures(false) { }

  bool failures() { return _failures; }

private:
  template <class T>
  inline void do_oop_work(T* p) {
    oop obj = oopDesc::load_decode_heap_oop(p);
    if (! oopDesc::is_null(obj) && ! obj->is_oop()) {
      { // Just for debugging.
1513 tty->print_cr("Root location "PTR_FORMAT 1514 "verified "PTR_FORMAT, p2i(p), p2i((void*) obj)); 1515 // obj->print_on(tty); 1516 } 1517 } 1518 guarantee(obj->is_oop_or_null(), "is oop or null"); 1519 } 1520 1521 public: 1522 void do_oop(oop* p) { 1523 do_oop_work(p); 1524 } 1525 1526 void do_oop(narrowOop* p) { 1527 do_oop_work(p); 1528 } 1529 1530 }; 1531 1532 class ShenandoahVerifyHeapClosure: public ObjectClosure { 1533 private: 1534 ShenandoahVerifyRootsClosure _rootsCl; 1535 public: 1536 ShenandoahVerifyHeapClosure(ShenandoahVerifyRootsClosure rc) : 1537 _rootsCl(rc) {}; 1538 1539 void do_object(oop p) { 1540 _rootsCl.do_oop(&p); 1541 } 1542 }; 1543 1544 class ShenandoahVerifyKlassClosure: public KlassClosure { 1545 OopClosure *_oop_closure; 1546 public: 1547 ShenandoahVerifyKlassClosure(OopClosure* cl) : _oop_closure(cl) {} 1548 void do_klass(Klass* k) { 1549 k->oops_do(_oop_closure); 1550 } 1551 }; 1552 1553 void ShenandoahHeap::verify(VerifyOption vo) { 1554 if (SafepointSynchronize::is_at_safepoint() || ! UseTLAB) { 1555 1556 ShenandoahVerifyRootsClosure rootsCl(vo); 1557 1558 assert(Thread::current()->is_VM_thread(), 1559 "Expected to be executed serially by the VM thread at this point"); 1560 1561 roots_iterate(&rootsCl); 1562 1563 bool failures = rootsCl.failures(); 1564 if (ShenandoahGCVerbose) 1565 tty->print("verify failures: %s", BOOL_TO_STR(failures)); 1566 1567 ShenandoahVerifyHeapClosure heapCl(rootsCl); 1568 1569 object_iterate(&heapCl); 1570 // TODO: Implement rest of it. 1571 #ifdef ASSERT_DISABLED 1572 verify_live(); 1573 #endif 1574 } else { 1575 tty->print("(SKIPPING roots, heapRegions, remset) "); 1576 } 1577 } 1578 size_t ShenandoahHeap::tlab_capacity(Thread *thr) const { 1579 return _free_regions->capacity(); 1580 } 1581 1582 class ShenandoahIterateObjectClosureRegionClosure: public ShenandoahHeapRegionClosure { 1583 ObjectClosure* _cl; 1584 public: 1585 ShenandoahIterateObjectClosureRegionClosure(ObjectClosure* cl) : _cl(cl) {} 1586 bool doHeapRegion(ShenandoahHeapRegion* r) { 1587 ShenandoahHeap::heap()->marked_object_iterate(r, _cl); 1588 return false; 1589 } 1590 }; 1591 1592 void ShenandoahHeap::object_iterate(ObjectClosure* cl) { 1593 ShenandoahIterateObjectClosureRegionClosure blk(cl); 1594 heap_region_iterate(&blk, false, true); 1595 } 1596 1597 void ShenandoahHeap::safe_object_iterate(ObjectClosure* cl) { 1598 Unimplemented(); 1599 } 1600 1601 void ShenandoahHeap::marked_object_iterate(ShenandoahHeapRegion* region, ObjectClosure* cl) { 1602 marked_object_iterate(region, cl, region->bottom(), region->top()); 1603 } 1604 1605 void ShenandoahHeap::marked_object_iterate(ShenandoahHeapRegion* region, ObjectClosure* cl, 1606 HeapWord* addr, HeapWord* limit) { 1607 addr += BrooksPointer::word_size(); 1608 HeapWord* last_addr = NULL; 1609 size_t last_size = 0; 1610 HeapWord* top_at_mark_start = region->top_at_prev_mark_start(); 1611 HeapWord* heap_end = _ordered_regions->end(); 1612 while (addr < limit) { 1613 if (addr < top_at_mark_start) { 1614 HeapWord* end = top_at_mark_start + BrooksPointer::word_size(); 1615 end = MIN2(end, heap_end); 1616 addr = _prev_mark_bit_map->getNextMarkedWordAddress(addr, end); 1617 } 1618 if (addr < limit) { 1619 oop obj = oop(addr); 1620 assert(is_marked_prev(obj), "object expected to be marked"); 1621 cl->do_object(obj); 1622 last_addr = addr; 1623 last_size = obj->size(); 1624 addr += obj->size() + BrooksPointer::word_size(); 1625 } else { 1626 break; 1627 } 1628 } 1629 } 1630 1631 // Apply blk->doHeapRegion() on all 
// Apply blk->doHeapRegion() on all committed regions in address order,
// terminating the iteration early if doHeapRegion() returns true.
void ShenandoahHeap::heap_region_iterate(ShenandoahHeapRegionClosure* blk, bool skip_dirty_regions, bool skip_humongous_continuation) const {
  for (size_t i = 0; i < _num_regions; i++) {
    ShenandoahHeapRegion* current = _ordered_regions->get(i);
    if (skip_humongous_continuation && current->is_humongous_continuation()) {
      continue;
    }
    if (skip_dirty_regions && current->is_in_collection_set()) {
      continue;
    }
    if (blk->doHeapRegion(current)) {
      return;
    }
  }
}

class ClearLivenessClosure : public ShenandoahHeapRegionClosure {
  ShenandoahHeap* sh;
public:
  ClearLivenessClosure(ShenandoahHeap* heap) : sh(heap) { }

  bool doHeapRegion(ShenandoahHeapRegion* r) {
    r->clearLiveData();
    r->init_top_at_mark_start();
    return false;
  }
};

void ShenandoahHeap::start_concurrent_marking() {
  shenandoahPolicy()->record_phase_start(ShenandoahCollectorPolicy::accumulate_stats);
  accumulate_statistics_all_tlabs();
  shenandoahPolicy()->record_phase_end(ShenandoahCollectorPolicy::accumulate_stats);

  set_concurrent_mark_in_progress(true);
  // We need to reset all TLABs because we'd lose marks on all objects allocated in them.
  if (UseTLAB) {
    shenandoahPolicy()->record_phase_start(ShenandoahCollectorPolicy::make_parsable);
    ensure_parsability(true);
    shenandoahPolicy()->record_phase_end(ShenandoahCollectorPolicy::make_parsable);
  }

  _shenandoah_policy->record_bytes_allocated(_bytes_allocated_since_cm);
  _used_start_gc = used();

#ifdef ASSERT
  if (ShenandoahDumpHeapBeforeConcurrentMark) {
    ensure_parsability(false);
    print_all_refs("pre-mark");
  }
#endif

  shenandoahPolicy()->record_phase_start(ShenandoahCollectorPolicy::clear_liveness);
  ClearLivenessClosure clc(this);
  heap_region_iterate(&clc);
  shenandoahPolicy()->record_phase_end(ShenandoahCollectorPolicy::clear_liveness);

  // Make above changes visible to worker threads.
  OrderAccess::fence();

  shenandoahPolicy()->record_phase_start(ShenandoahCollectorPolicy::scan_roots);
  concurrentMark()->init_mark_roots();
  shenandoahPolicy()->record_phase_end(ShenandoahCollectorPolicy::scan_roots);
}
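// Editor's note on the TAMS protocol above: init_top_at_mark_start()
// records each region's current top before marking begins. During the
// cycle, objects below that mark must be proven live via the mark bitmap,
// while anything allocated above it is treated as implicitly live; this is
// why allocations during marking need no mark-bitmap updates.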
class ClearLivenessClosure : public ShenandoahHeapRegionClosure {
  ShenandoahHeap* sh;
public:
  ClearLivenessClosure(ShenandoahHeap* heap) : sh(heap) { }

  bool doHeapRegion(ShenandoahHeapRegion* r) {
    r->clearLiveData();
    r->init_top_at_mark_start();
    return false;
  }
};

void ShenandoahHeap::start_concurrent_marking() {
  shenandoahPolicy()->record_phase_start(ShenandoahCollectorPolicy::accumulate_stats);
  accumulate_statistics_all_tlabs();
  shenandoahPolicy()->record_phase_end(ShenandoahCollectorPolicy::accumulate_stats);

  set_concurrent_mark_in_progress(true);
  // We need to reset all TLABs because we'd lose marks on all objects allocated in them.
  if (UseTLAB) {
    shenandoahPolicy()->record_phase_start(ShenandoahCollectorPolicy::make_parsable);
    ensure_parsability(true);
    shenandoahPolicy()->record_phase_end(ShenandoahCollectorPolicy::make_parsable);
  }

  _shenandoah_policy->record_bytes_allocated(_bytes_allocated_since_cm);
  _used_start_gc = used();

#ifdef ASSERT
  if (ShenandoahDumpHeapBeforeConcurrentMark) {
    ensure_parsability(false);
    print_all_refs("pre-mark");
  }
#endif

  shenandoahPolicy()->record_phase_start(ShenandoahCollectorPolicy::clear_liveness);
  ClearLivenessClosure clc(this);
  heap_region_iterate(&clc);
  shenandoahPolicy()->record_phase_end(ShenandoahCollectorPolicy::clear_liveness);

  // print_all_refs("pre -mark");

  // oopDesc::_debug = true;

  // Make the above changes visible to worker threads.
  OrderAccess::fence();

  shenandoahPolicy()->record_phase_start(ShenandoahCollectorPolicy::scan_roots);
  concurrentMark()->init_mark_roots();
  shenandoahPolicy()->record_phase_end(ShenandoahCollectorPolicy::scan_roots);

  // print_all_refs("pre-mark2");
}

class VerifyLivenessClosure : public ExtendedOopClosure {

  ShenandoahHeap* _sh;

public:
  VerifyLivenessClosure() : _sh(ShenandoahHeap::heap()) {}

  template<class T> void do_oop_nv(T* p) {
    T heap_oop = oopDesc::load_heap_oop(p);
    if (!oopDesc::is_null(heap_oop)) {
      oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
      guarantee(_sh->heap_region_containing(obj)->is_in_collection_set() ==
                (! oopDesc::unsafe_equals(obj, oopDesc::bs()->read_barrier(obj))),
                "forwarded objects can only exist in dirty (from-space) regions. is_dirty: %s, is_forwarded: %s",
                BOOL_TO_STR(_sh->heap_region_containing(obj)->is_in_collection_set()),
                BOOL_TO_STR(! oopDesc::unsafe_equals(obj, oopDesc::bs()->read_barrier(obj))));
      obj = oopDesc::bs()->read_barrier(obj);
      guarantee(! _sh->heap_region_containing(obj)->is_in_collection_set(),
                "forwarded oops must not point to dirty regions");
      guarantee(obj->is_oop(), "is_oop");
      ShenandoahHeap* sh = (ShenandoahHeap*) Universe::heap();
      if (! sh->is_marked_current(obj)) {
        sh->print_on(tty);
      }
      assert(sh->is_marked_current(obj),
             "Referenced objects should be marked. obj: "PTR_FORMAT", marked: %s, is_in_heap: %s",
             p2i((HeapWord*) obj), BOOL_TO_STR(sh->is_marked_current(obj)), BOOL_TO_STR(sh->is_in(obj)));
    }
  }

  void do_oop(oop* p)       { do_oop_nv(p); }
  void do_oop(narrowOop* p) { do_oop_nv(p); }
};

void ShenandoahHeap::verify_live() {
  VerifyLivenessClosure cl;
  roots_iterate(&cl);

  IterateMarkedCurrentObjectsClosure marked_oops(&cl);
  object_iterate(&marked_oops);
}

class VerifyAfterEvacuationClosure : public ExtendedOopClosure {

  ShenandoahHeap* _sh;

public:
  VerifyAfterEvacuationClosure() : _sh(ShenandoahHeap::heap()) {}

  template<class T> void do_oop_nv(T* p) {
    T heap_oop = oopDesc::load_heap_oop(p);
    if (!oopDesc::is_null(heap_oop)) {
      oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
      guarantee(_sh->heap_region_containing(obj)->is_in_collection_set() ==
                (! oopDesc::unsafe_equals(obj, oopDesc::bs()->read_barrier(obj))),
                "forwarded objects can only exist in dirty (from-space) regions. is_dirty: %s, is_forwarded: %s, obj-klass: %s, marked: %s",
                BOOL_TO_STR(_sh->heap_region_containing(obj)->is_in_collection_set()),
                BOOL_TO_STR(! oopDesc::unsafe_equals(obj, oopDesc::bs()->read_barrier(obj))),
                obj->klass()->external_name(),
                BOOL_TO_STR(_sh->is_marked_current(obj)));
      obj = oopDesc::bs()->read_barrier(obj);
      guarantee(! _sh->heap_region_containing(obj)->is_in_collection_set(),
                "forwarded oops must not point to dirty regions");
      guarantee(obj->is_oop(), "is_oop");
      guarantee(Metaspace::contains(obj->klass()), "klass pointer must go to metaspace");
    }
  }

  void do_oop(oop* p)       { do_oop_nv(p); }
  void do_oop(narrowOop* p) { do_oop_nv(p); }
};
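// Illustrative note on the pattern used by the verification closures above
// and below: an object is forwarded iff its Brooks pointer no longer points
// at the object itself, which is exactly what the read barrier exposes:
//
//   oop fwd = oopDesc::bs()->read_barrier(obj);
//   bool forwarded = ! oopDesc::unsafe_equals(obj, fwd);
//
// The invariant being checked is that forwarded objects exist only in
// collection-set (from-space) regions, and that a forwardee is never in the
// collection set itself.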
class VerifyAfterUpdateRefsClosure : public ExtendedOopClosure {

  ShenandoahHeap* _sh;

public:
  VerifyAfterUpdateRefsClosure() : _sh(ShenandoahHeap::heap()) {}

  template<class T> void do_oop_nv(T* p) {
    T heap_oop = oopDesc::load_heap_oop(p);
    if (!oopDesc::is_null(heap_oop)) {
      oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
      guarantee(! _sh->heap_region_containing(obj)->is_in_collection_set(),
                "no live reference must point to from-space, is_marked: %s",
                BOOL_TO_STR(_sh->is_marked_current(obj)));
      if (! oopDesc::unsafe_equals(obj, oopDesc::bs()->read_barrier(obj)) && _sh->is_in(p)) {
        tty->print_cr("top-limit: "PTR_FORMAT", p: "PTR_FORMAT,
                      p2i(_sh->heap_region_containing(p)->concurrent_iteration_safe_limit()), p2i(p));
      }
      guarantee(oopDesc::unsafe_equals(obj, oopDesc::bs()->read_barrier(obj)),
                "no live reference must point to forwarded object");
      guarantee(obj->is_oop(), "is_oop");
      guarantee(Metaspace::contains(obj->klass()), "klass pointer must go to metaspace");
    }
  }

  void do_oop(oop* p)       { do_oop_nv(p); }
  void do_oop(narrowOop* p) { do_oop_nv(p); }
};

void ShenandoahHeap::verify_heap_after_evacuation() {
  verify_heap_size_consistency();

  ensure_parsability(false);

  VerifyAfterEvacuationClosure cl;
  roots_iterate(&cl);

  IterateMarkedCurrentObjectsClosure marked_oops(&cl);
  object_iterate(&marked_oops);
}

class VerifyRegionsAfterUpdateRefsClosure : public ShenandoahHeapRegionClosure {
public:
  bool doHeapRegion(ShenandoahHeapRegion* r) {
    assert(! r->is_in_collection_set(), "no region must be in collection set");
    assert(! ShenandoahHeap::heap()->in_cset_fast_test(r->bottom()), "no region must be in collection set");
    return false;
  }
};

void ShenandoahHeap::swap_mark_bitmaps() {
  CMBitMap* tmp = _prev_mark_bit_map;
  _prev_mark_bit_map = _next_mark_bit_map;
  _next_mark_bit_map = tmp;
}

void ShenandoahHeap::stop_concurrent_marking() {
  assert(concurrent_mark_in_progress(), "How else could we get here?");
  if (! cancelled_concgc()) {
    // If concurrent marking was cancelled, we keep need_update_refs set so
    // that the interrupted reference updating can be finished later. On
    // normal completion we clear it and swap the bitmaps, making the
    // just-completed marking the "previous" marking.
    set_need_update_refs(false);
    swap_mark_bitmaps();
  }
  set_concurrent_mark_in_progress(false);
  if (ShenandoahGCVerbose) {
    print_heap_regions();
  }
}

void ShenandoahHeap::set_concurrent_mark_in_progress(bool in_progress) {
  _concurrent_mark_in_progress = in_progress;
  // Activate (or deactivate) the SATB queues of all Java threads; the second
  // argument is the expected current activation state.
  JavaThread::satb_mark_queue_set().set_active_all_threads(in_progress, ! in_progress);
}

void ShenandoahHeap::set_evacuation_in_progress(bool in_progress) {
  JavaThread::set_evacuation_in_progress_all_threads(in_progress);
  _evacuation_in_progress = in_progress;
  // Make the flag update visible to all threads.
  OrderAccess::fence();
}

void ShenandoahHeap::verify_copy(oop p, oop c) {
  assert(! oopDesc::unsafe_equals(p, oopDesc::bs()->read_barrier(p)), "forwarded correctly");
  assert(oopDesc::unsafe_equals(oopDesc::bs()->read_barrier(p), c), "verify pointer is correct");
  if (p->klass() != c->klass()) {
    print_heap_regions();
  }
  assert(p->klass() == c->klass(), "verify class p-size: "INT32_FORMAT" c-size: "INT32_FORMAT, p->size(), c->size());
  assert(p->size() == c->size(), "verify size");
  // Object may have been locked between copy and verification.
  // assert(p->mark() == c->mark(), "verify mark");
  assert(oopDesc::unsafe_equals(c, oopDesc::bs()->read_barrier(c)), "verify only forwarded once");
}
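// Illustrative only: verify_copy() above is meant to run right after an
// evacuation copy has been published. Hypothetical call site (the helper
// name evacuate_object() is made up for this sketch):
//
//   oop copy = evacuate_object(obj);  // copies obj and installs forwarding
//   verify_copy(obj, copy);           // obj must now forward (once) to copy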
void ShenandoahHeap::oom_during_evacuation() {
  // We ran out of memory during evacuation. Cancel evacuation, and schedule a full GC.
  collector_policy()->set_should_clear_all_soft_refs(true);
  concurrent_thread()->schedule_full_gc();
  cancel_concgc();

  if ((! Thread::current()->is_GC_task_thread()) && (! Thread::current()->is_ConcurrentGC_thread())) {
    if (ShenandoahWarnings) {
      tty->print_cr("OOM during evacuation. Let the Java thread wait until evacuation has settled.");
    }
    while (_evacuation_in_progress) { // wait.
      Thread::current()->_ParkEvent->park(1);
    }
  }
}

HeapWord* ShenandoahHeap::tlab_post_allocation_setup(HeapWord* obj) {
  // Step over the Brooks pointer word that precedes every object and
  // initialize it to point at the object itself.
  HeapWord* result = obj + BrooksPointer::word_size();
  initialize_brooks_ptr(oop(result));
  return result;
}

uint ShenandoahHeap::oop_extra_words() {
  return BrooksPointer::word_size();
}

void ShenandoahHeap::grow_heap_by(size_t num_regions) {
  size_t base = _num_regions;
  ensure_new_regions(num_regions);

  // Use a C-heap array rather than a variable-length stack array, which is
  // not standard C++.
  ShenandoahHeapRegion** regions = NEW_C_HEAP_ARRAY(ShenandoahHeapRegion*, num_regions, mtGC);
  for (size_t i = 0; i < num_regions; i++) {
    ShenandoahHeapRegion* new_region = new ShenandoahHeapRegion();
    size_t new_region_index = i + base;
    HeapWord* start = _first_region_bottom + (ShenandoahHeapRegion::RegionSizeBytes / HeapWordSize) * new_region_index;
    new_region->initialize_heap_region(start, ShenandoahHeapRegion::RegionSizeBytes / HeapWordSize, new_region_index);
    if (ShenandoahGCVerbose) {
      tty->print_cr("allocating new region at index: "SIZE_FORMAT, new_region_index);
      new_region->print();
    }

    assert(_ordered_regions->active_regions() == new_region->region_number(), "must match");
    _ordered_regions->add_region(new_region);
    _sorted_regions->add_region(new_region);
    _in_cset_fast_test_base[new_region_index] = false; // Not in cset.
    _top_at_mark_starts_base[new_region_index] = new_region->bottom();

    regions[i] = new_region;
  }
  _free_regions->par_add_regions(regions, 0, num_regions, num_regions);
  FREE_C_HEAP_ARRAY(ShenandoahHeapRegion*, regions);
}

void ShenandoahHeap::ensure_new_regions(size_t new_regions) {
  size_t num_regions = _num_regions;
  size_t new_num_regions = num_regions + new_regions;
  assert(new_num_regions <= _max_regions, "we checked this earlier");

  size_t expand_size = new_regions * ShenandoahHeapRegion::RegionSizeBytes;
  if (ShenandoahGCVerbose) {
    tty->print_cr("expanding storage by "SIZE_FORMAT_HEX" bytes, for "SIZE_FORMAT" new regions", expand_size, new_regions);
  }
  bool success = _storage.expand_by(expand_size);
  assert(success, "should always be able to expand by requested size");

  _num_regions = new_num_regions;
}
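// Illustrative only: the address arithmetic in grow_heap_by() above. With a
// hypothetical region size of 8 MB (1M words on a 64-bit VM) and base = 100
// pre-existing regions, new region i starts at
//
//   _first_region_bottom + (1M words) * (100 + i)
//
// i.e. regions are laid out contiguously from the bottom of the heap, and a
// region's index doubles as its offset in region-size units.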
ShenandoahIsAliveClosure::ShenandoahIsAliveClosure() :
  _heap(ShenandoahHeap::heap_no_check()) {
}

void ShenandoahIsAliveClosure::init(ShenandoahHeap* heap) {
  _heap = heap;
}

bool ShenandoahIsAliveClosure::do_object_b(oop obj) {
  assert(_heap != NULL, "sanity");
#ifdef ASSERT
  if (_heap->concurrent_mark_in_progress()) {
    assert(oopDesc::unsafe_equals(obj, ShenandoahBarrierSet::resolve_oop_static_not_null(obj)), "only query to-space");
  }
#endif
  assert(!oopDesc::is_null(obj), "null");
  return _heap->is_marked_current(obj);
}

ShenandoahForwardedIsAliveClosure::ShenandoahForwardedIsAliveClosure() :
  _heap(ShenandoahHeap::heap_no_check()) {
}

void ShenandoahForwardedIsAliveClosure::init(ShenandoahHeap* heap) {
  _heap = heap;
}

bool ShenandoahForwardedIsAliveClosure::do_object_b(oop obj) {
  assert(_heap != NULL, "sanity");
  // Resolve the Brooks pointer first, so callers may pass from-space oops.
  obj = ShenandoahBarrierSet::resolve_oop_static_not_null(obj);
#ifdef ASSERT
  if (_heap->concurrent_mark_in_progress()) {
    assert(oopDesc::unsafe_equals(obj, ShenandoahBarrierSet::resolve_oop_static_not_null(obj)), "only query to-space");
  }
#endif
  assert(!oopDesc::is_null(obj), "null");
  return _heap->is_marked_current(obj);
}

void ShenandoahHeap::ref_processing_init() {
  MemRegion mr = reserved_region();

  isAlive.init(ShenandoahHeap::heap());
  _ref_processor =
    new ReferenceProcessor(mr,                      // span
                           ParallelRefProcEnabled,  // mt processing
                           (int) ConcGCThreads,     // degree of mt processing
                           true,                    // mt discovery
                           (int) ConcGCThreads,     // degree of mt discovery
                           false,                   // reference discovery is not atomic
                           &isAlive);
}

#ifdef ASSERT
void ShenandoahHeap::set_from_region_protection(bool protect) {
  for (size_t i = 0; i < _num_regions; i++) {
    ShenandoahHeapRegion* region = _ordered_regions->get(i);
    if (region != NULL && region->is_in_collection_set()) {
      if (protect) {
        region->memProtectionOn();
      } else {
        region->memProtectionOff();
      }
    }
  }
}
#endif

size_t ShenandoahHeap::num_regions() {
  return _num_regions;
}

size_t ShenandoahHeap::max_regions() {
  return _max_regions;
}

GCTracer* ShenandoahHeap::tracer() {
  return shenandoahPolicy()->tracer();
}

size_t ShenandoahHeap::tlab_used(Thread* thread) const {
  return _free_regions->used();
}

void ShenandoahHeap::cancel_concgc() {
  // Only report it once.
  if (!_cancelled_concgc) {
    log_info(gc)("Cancelling GC");
    _cancelled_concgc = true;
    OrderAccess::fence();
    _shenandoah_policy->report_concgc_cancelled();
  }
}

void ShenandoahHeap::clear_cancelled_concgc() {
  _cancelled_concgc = false;
}

int ShenandoahHeap::max_workers() {
  return _max_workers;
}

int ShenandoahHeap::max_parallel_workers() {
  return _max_parallel_workers;
}

int ShenandoahHeap::max_conc_workers() {
  return _max_conc_workers;
}
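// Illustrative only: how the two is-alive closures above differ. The plain
// ShenandoahIsAliveClosure must only be queried with to-space oops, while the
// forwarded variant resolves the Brooks pointer first and therefore also
// accepts oops that still point into from-space. Hypothetical usage:
//
//   ShenandoahForwardedIsAliveClosure is_alive;
//   is_alive.init(ShenandoahHeap::heap());
//   if (is_alive.do_object_b(obj)) {
//     // obj (or its forwardee) was marked in the current marking.
//   }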
void ShenandoahHeap::stop() {
  // We set the cancellation flag early here, to let GC threads terminate
  // before we ask the concurrent thread to terminate, which would otherwise
  // block until all GC threads come to finish normally.
  _cancelled_concgc = true;
  _concurrent_gc_thread->stop();
  cancel_concgc();
}

void ShenandoahHeap::unlink_string_and_symbol_table(BoolObjectClosure* is_alive, bool process_strings, bool process_symbols) {
  StringSymbolTableUnlinkTask shenandoah_unlink_task(is_alive, process_strings, process_symbols);
  workers()->run_task(&shenandoah_unlink_task);

  // if (G1StringDedup::is_enabled()) {
  //   G1StringDedup::unlink(is_alive);
  // }
}

void ShenandoahHeap::set_need_update_refs(bool need_update_refs) {
  _need_update_refs = need_update_refs;
}

// FIXME: this should live in ShenandoahHeapRegionSet.
ShenandoahHeapRegion* ShenandoahHeap::next_compaction_region(const ShenandoahHeapRegion* r) {
  size_t region_idx = r->region_number() + 1;
  ShenandoahHeapRegion* next = _ordered_regions->get(region_idx);
  guarantee(next->region_number() == region_idx, "region number must match");
  // Skip over humongous regions; they are not compacted.
  while (next->is_humongous()) {
    region_idx = next->region_number() + 1;
    next = _ordered_regions->get(region_idx);
    guarantee(next->region_number() == region_idx, "region number must match");
  }
  return next;
}

bool ShenandoahHeap::is_in_collection_set(const void* p) {
  return heap_region_containing(p)->is_in_collection_set();
}

ShenandoahMonitoringSupport* ShenandoahHeap::monitoring_support() {
  return _monitoring_support;
}

bool ShenandoahHeap::is_obj_dead(const oop obj, const ShenandoahHeapRegion* r) const {
  // Dead means: not allocated after the previous mark start, and not marked
  // in the previous marking.
  return ! r->allocated_after_prev_mark_start((HeapWord*) obj) &&
         ! is_marked_prev(obj, r);
}

CMBitMap* ShenandoahHeap::prev_mark_bit_map() {
  return _prev_mark_bit_map;
}

CMBitMap* ShenandoahHeap::next_mark_bit_map() {
  return _next_mark_bit_map;
}

void ShenandoahHeap::add_free_region(ShenandoahHeapRegion* r) {
  _free_regions->add_region(r);
}

void ShenandoahHeap::clear_free_regions() {
  _free_regions->clear();
}

void ShenandoahHeap::register_region_with_in_cset_fast_test(ShenandoahHeapRegion* r) {
  assert(_in_cset_fast_test_base != NULL, "sanity");
  assert(r->is_in_collection_set(), "invariant");
  uint index = r->region_number();
  assert(index < _in_cset_fast_test_length, "invariant");
  assert(!_in_cset_fast_test_base[index], "invariant");
  _in_cset_fast_test_base[index] = true;
}

address ShenandoahHeap::in_cset_fast_test_addr() {
  return (address) (ShenandoahHeap::heap()->_in_cset_fast_test);
}

void ShenandoahHeap::clear_cset_fast_test() {
  assert(_in_cset_fast_test_base != NULL, "sanity");
  memset(_in_cset_fast_test_base, false,
         (size_t) _in_cset_fast_test_length * sizeof(bool));
}

size_t ShenandoahHeap::conservative_max_heap_alignment() {
  return HeapRegionBounds::max_size();
}

size_t ShenandoahHeap::bytes_allocated_since_cm() {
  return _bytes_allocated_since_cm;
}

void ShenandoahHeap::set_bytes_allocated_since_cm(size_t bytes) {
  _bytes_allocated_since_cm = bytes;
}

size_t ShenandoahHeap::max_allocated_gc() {
  return _max_allocated_gc;
}

void ShenandoahHeap::set_top_at_mark_start(HeapWord* region_base, HeapWord* addr) {
  uintx index = ((uintx) region_base) >> ShenandoahHeapRegion::RegionSizeShift;
  _top_at_mark_starts[index] = addr;
}
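// Illustrative only: the index arithmetic in set_top_at_mark_start() above.
// Assuming a hypothetical RegionSizeShift of 23 (8 MB regions), a region
// based at address 0x712800000 maps to slot
//
//   0x712800000 >> 23 = 0xE25 (3621)
//
// i.e. the _top_at_mark_starts table is indexed directly by the region's
// address divided by the region size.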
void ShenandoahHeap::set_full_gc_in_progress(bool in_progress) {
  _full_gc_in_progress = in_progress;
}

bool ShenandoahHeap::is_full_gc_in_progress() const {
  return _full_gc_in_progress;
}

bool ShenandoahHeap::needs_reference_pending_list_locker_thread() const {
  return true;
}

class NMethodOopInitializer : public OopClosure {
private:
  ShenandoahHeap* _heap;
public:
  NMethodOopInitializer() : _heap(ShenandoahHeap::heap()) {
  }

private:
  template <class T>
  inline void do_oop_work(T* p) {
    // Update embedded oops to their to-space copies via the write barrier.
    T o = oopDesc::load_heap_oop(p);
    if (! oopDesc::is_null(o)) {
      oop obj1 = oopDesc::decode_heap_oop_not_null(o);
      oop obj2 = oopDesc::bs()->write_barrier(obj1);
      if (! oopDesc::unsafe_equals(obj1, obj2)) {
        oopDesc::encode_store_heap_oop(p, obj2);
      }
    }
  }

public:
  void do_oop(oop* o) {
    do_oop_work(o);
  }
  void do_oop(narrowOop* o) {
    do_oop_work(o);
  }
};

void ShenandoahHeap::register_nmethod(nmethod* nm) {
  NMethodOopInitializer init;
  nm->oops_do(&init);
  nm->fix_oop_relocations();
}

void ShenandoahHeap::unregister_nmethod(nmethod* nm) {
}

void ShenandoahHeap::enter_critical(oop o) {
  heap_region_containing(o)->enter_critical();
}

void ShenandoahHeap::exit_critical(oop o) {
  heap_region_containing(o)->exit_critical();
}

GCTimer* ShenandoahHeap::gc_timer() const {
  return _gc_timer;
}
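// Illustrative only: enter_critical()/exit_critical() above pin the region
// containing an object so it cannot be recycled while raw pointers into it
// are live. Hypothetical usage:
//
//   ShenandoahHeap* heap = ShenandoahHeap::heap();
//   heap->enter_critical(obj);
//   // ... access obj's payload through raw pointers ...
//   heap->exit_critical(obj);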