/*
 * Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc_implementation/parallelScavenge/adjoiningGenerations.hpp"
#include "gc_implementation/parallelScavenge/adjoiningVirtualSpaces.hpp"
#include "gc_implementation/parallelScavenge/cardTableExtension.hpp"
#include "gc_implementation/parallelScavenge/gcTaskManager.hpp"
#include "gc_implementation/parallelScavenge/generationSizer.hpp"
#include "gc_implementation/parallelScavenge/parallelScavengeHeap.inline.hpp"
#include "gc_implementation/parallelScavenge/psAdaptiveSizePolicy.hpp"
#include "gc_implementation/parallelScavenge/psMarkSweep.hpp"
#include "gc_implementation/parallelScavenge/psParallelCompact.hpp"
#include "gc_implementation/parallelScavenge/psPromotionManager.hpp"
#include "gc_implementation/parallelScavenge/psScavenge.hpp"
#include "gc_implementation/parallelScavenge/vmPSOperations.hpp"
#include "memory/gcLocker.inline.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/java.hpp"
#include "runtime/vmThread.hpp"
#include "utilities/vmError.hpp"

PSYoungGen*                 ParallelScavengeHeap::_young_gen = NULL;
PSOldGen*                   ParallelScavengeHeap::_old_gen = NULL;
PSPermGen*                  ParallelScavengeHeap::_perm_gen = NULL;
PSAdaptiveSizePolicy*       ParallelScavengeHeap::_size_policy = NULL;
PSGCAdaptivePolicyCounters* ParallelScavengeHeap::_gc_policy_counters = NULL;
ParallelScavengeHeap*       ParallelScavengeHeap::_psh = NULL;
GCTaskManager*              ParallelScavengeHeap::_gc_task_manager = NULL;

static void trace_gen_sizes(const char* const str,
                            size_t pg_min, size_t pg_max,
                            size_t og_min, size_t og_max,
                            size_t yg_min, size_t yg_max)
{
  if (TracePageSizes) {
    tty->print_cr("%s: " SIZE_FORMAT "," SIZE_FORMAT " "
                  SIZE_FORMAT "," SIZE_FORMAT " "
                  SIZE_FORMAT "," SIZE_FORMAT " "
                  SIZE_FORMAT,
                  str, pg_min / K, pg_max / K,
                  og_min / K, og_max / K,
                  yg_min / K, yg_max / K,
                  (pg_max + og_max + yg_max) / K);
  }
}
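
// Illustrative only (hypothetical sizes; the layout follows the format
// string above): with -XX:+TracePageSizes, trace_gen_sizes() prints the
// min,max pair for the perm, old, and young gens, then the total of the
// maxima, all in KB. A run might produce a line such as
//
//   ps heap raw: 12288,65536 24576,131072 8192,43008 239616
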
jint ParallelScavengeHeap::initialize() {
  CollectedHeap::pre_initialize();

  // Cannot be initialized until after the flags are parsed
  // GenerationSizer flag_parser;
  _collector_policy = new GenerationSizer();

  size_t yg_min_size = _collector_policy->min_young_gen_size();
  size_t yg_max_size = _collector_policy->max_young_gen_size();
  size_t og_min_size = _collector_policy->min_old_gen_size();
  size_t og_max_size = _collector_policy->max_old_gen_size();
  // Why isn't there a min_perm_gen_size()?
  size_t pg_min_size = _collector_policy->perm_gen_size();
  size_t pg_max_size = _collector_policy->max_perm_gen_size();

  trace_gen_sizes("ps heap raw",
                  pg_min_size, pg_max_size,
                  og_min_size, og_max_size,
                  yg_min_size, yg_max_size);

  // The ReservedSpace ctor used below requires that the page size for the perm
  // gen is <= the page size for the rest of the heap (young + old gens).
  const size_t og_page_sz = os::page_size_for_region(yg_min_size + og_min_size,
                                                     yg_max_size + og_max_size,
                                                     8);
  const size_t pg_page_sz = MIN2(os::page_size_for_region(pg_min_size,
                                                          pg_max_size, 16),
                                 og_page_sz);

  const size_t pg_align = set_alignment(_perm_gen_alignment,  pg_page_sz);
  const size_t og_align = set_alignment(_old_gen_alignment,   og_page_sz);
  const size_t yg_align = set_alignment(_young_gen_alignment, og_page_sz);

  // Update sizes to reflect the selected page size(s).
  //
  // NEEDS_CLEANUP. The default TwoGenerationCollectorPolicy uses NewRatio; it
  // should check UseAdaptiveSizePolicy. Changes from generationSizer could
  // move to the common code.
  yg_min_size = align_size_up(yg_min_size, yg_align);
  yg_max_size = align_size_up(yg_max_size, yg_align);
  size_t yg_cur_size =
    align_size_up(_collector_policy->young_gen_size(), yg_align);
  yg_cur_size = MAX2(yg_cur_size, yg_min_size);

  og_min_size = align_size_up(og_min_size, og_align);
  // Align old gen size down to preserve specified heap size.
  assert(og_align == yg_align, "sanity");
  size_t og_size = align_size_down(og_max_size, og_align);
  if (og_size < og_min_size) {
    og_max_size = og_min_size;
  } else {
    og_max_size = og_size;
  }
  size_t og_cur_size =
    align_size_up(_collector_policy->old_gen_size(), og_align);
  og_cur_size = MAX2(og_cur_size, og_min_size);

  pg_min_size = align_size_up(pg_min_size, pg_align);
  pg_max_size = align_size_up(pg_max_size, pg_align);
  size_t pg_cur_size = pg_min_size;

  trace_gen_sizes("ps heap rnd",
                  pg_min_size, pg_max_size,
                  og_min_size, og_max_size,
                  yg_min_size, yg_max_size);

  const size_t total_reserved = pg_max_size + og_max_size + yg_max_size;
  char* addr = Universe::preferred_heap_base(total_reserved, Universe::UnscaledNarrowOop);

  // The main part of the heap (old gen + young gen) can often use a larger page
  // size than is needed or wanted for the perm gen. Use the "compound
  // alignment" ReservedSpace ctor to avoid having to use the same page size for
  // all gens.

  ReservedHeapSpace heap_rs(pg_max_size, pg_align, og_max_size + yg_max_size,
                            og_align, addr);

  if (UseCompressedOops) {
    if (addr != NULL && !heap_rs.is_reserved()) {
      // Failed to reserve at specified address - the requested memory
      // region is taken already, for example, by the 'java' launcher.
      // Try again to reserve the heap higher.
      addr = Universe::preferred_heap_base(total_reserved, Universe::ZeroBasedNarrowOop);
      ReservedHeapSpace heap_rs0(pg_max_size, pg_align, og_max_size + yg_max_size,
                                 og_align, addr);
      if (addr != NULL && !heap_rs0.is_reserved()) {
        // Failed to reserve at specified address again - give up.
        addr = Universe::preferred_heap_base(total_reserved, Universe::HeapBasedNarrowOop);
        assert(addr == NULL, "");
        ReservedHeapSpace heap_rs1(pg_max_size, pg_align, og_max_size + yg_max_size,
                                   og_align, addr);
        heap_rs = heap_rs1;
      } else {
        heap_rs = heap_rs0;
      }
    }
  }
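
  // Summary of the fallback above: with compressed oops the reservation is
  // attempted at up to three preferred bases, in decreasing order of
  // oop-decode efficiency -- UnscaledNarrowOop (the narrow oop is the
  // address as-is), ZeroBasedNarrowOop (decoding needs only a shift, no
  // base add), and finally HeapBasedNarrowOop, for which
  // preferred_heap_base() is expected to return NULL so the OS chooses
  // the address.
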
  os::trace_page_sizes("ps perm", pg_min_size, pg_max_size, pg_page_sz,
                       heap_rs.base(), pg_max_size);
  os::trace_page_sizes("ps main", og_min_size + yg_min_size,
                       og_max_size + yg_max_size, og_page_sz,
                       heap_rs.base() + pg_max_size,
                       heap_rs.size() - pg_max_size);
  if (!heap_rs.is_reserved()) {
    vm_shutdown_during_initialization(
      "Could not reserve enough space for object heap");
    return JNI_ENOMEM;
  }

  _reserved = MemRegion((HeapWord*)heap_rs.base(),
                        (HeapWord*)(heap_rs.base() + heap_rs.size()));

  CardTableExtension* const barrier_set = new CardTableExtension(_reserved, 3);
  _barrier_set = barrier_set;
  oopDesc::set_bs(_barrier_set);
  if (_barrier_set == NULL) {
    vm_shutdown_during_initialization(
      "Could not reserve enough space for barrier set");
    return JNI_ENOMEM;
  }

  // Initial young gen size is 4 MB.
  //
  // XXX - what about flag_parser.young_gen_size()?
  const size_t init_young_size = align_size_up(4 * M, yg_align);
  yg_cur_size = MAX2(MIN2(init_young_size, yg_max_size), yg_cur_size);

  // Split the reserved space into perm gen and the main heap (everything else).
  // The main heap uses a different alignment.
  ReservedSpace perm_rs = heap_rs.first_part(pg_max_size);
  ReservedSpace main_rs = heap_rs.last_part(pg_max_size, og_align);

  // Make up the generations.
  //
  // Calculate the maximum size that a generation can grow. This includes
  // growth into the other generation. Note that _max_gen_size is kept as
  // the maximum size of each generation as the boundaries currently stand.
  double max_gc_pause_sec = ((double) MaxGCPauseMillis)/1000.0;
  double max_gc_minor_pause_sec = ((double) MaxGCMinorPauseMillis)/1000.0;

  _gens = new AdjoiningGenerations(main_rs,
                                   og_cur_size,
                                   og_min_size,
                                   og_max_size,
                                   yg_cur_size,
                                   yg_min_size,
                                   yg_max_size,
                                   yg_align);

  _old_gen = _gens->old_gen();
  _young_gen = _gens->young_gen();

  const size_t eden_capacity = _young_gen->eden_space()->capacity_in_bytes();
  const size_t old_capacity = _old_gen->capacity_in_bytes();
  const size_t initial_promo_size = MIN2(eden_capacity, old_capacity);
  _size_policy =
    new PSAdaptiveSizePolicy(eden_capacity,
                             initial_promo_size,
                             young_gen()->to_space()->capacity_in_bytes(),
                             intra_heap_alignment(),
                             max_gc_pause_sec,
                             max_gc_minor_pause_sec,
                             GCTimeRatio
                             );

  _perm_gen = new PSPermGen(perm_rs,
                            pg_align,
                            pg_cur_size,
                            pg_cur_size,
                            pg_max_size,
                            "perm", 2);

  assert(!UseAdaptiveGCBoundary ||
         (old_gen()->virtual_space()->high_boundary() ==
          young_gen()->virtual_space()->low_boundary()),
         "Boundaries must meet");
  // Initialize the policy counters - 2 collectors, 3 generations.
  _gc_policy_counters =
    new PSGCAdaptivePolicyCounters("ParScav:MSC", 2, 3, _size_policy);
  _psh = this;

  // Set up the GCTaskManager
  _gc_task_manager = GCTaskManager::create(ParallelGCThreads);

  if (UseParallelOldGC && !PSParallelCompact::initialize()) {
    return JNI_ENOMEM;
  }

  return JNI_OK;
}

void ParallelScavengeHeap::post_initialize() {
  // Need to init the tenuring threshold
  PSScavenge::initialize();
  if (UseParallelOldGC) {
    PSParallelCompact::post_initialize();
  } else {
    PSMarkSweep::initialize();
  }
  PSPromotionManager::initialize();
}

void ParallelScavengeHeap::update_counters() {
  young_gen()->update_counters();
  old_gen()->update_counters();
  perm_gen()->update_counters();
}

size_t ParallelScavengeHeap::capacity() const {
  size_t value = young_gen()->capacity_in_bytes() + old_gen()->capacity_in_bytes();
  return value;
}

size_t ParallelScavengeHeap::used() const {
  size_t value = young_gen()->used_in_bytes() + old_gen()->used_in_bytes();
  return value;
}

bool ParallelScavengeHeap::is_maximal_no_gc() const {
  return old_gen()->is_maximal_no_gc() && young_gen()->is_maximal_no_gc();
}

size_t ParallelScavengeHeap::permanent_capacity() const {
  return perm_gen()->capacity_in_bytes();
}

size_t ParallelScavengeHeap::permanent_used() const {
  return perm_gen()->used_in_bytes();
}

size_t ParallelScavengeHeap::max_capacity() const {
  size_t estimated = reserved_region().byte_size();
  estimated -= perm_gen()->reserved().byte_size();
  if (UseAdaptiveSizePolicy) {
    estimated -= _size_policy->max_survivor_size(young_gen()->max_size());
  } else {
    estimated -= young_gen()->to_space()->capacity_in_bytes();
  }
  return MAX2(estimated, capacity());
}
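
// Notes on the two membership tests below: is_in() asks whether p points
// into the currently allocated part of one of the three generations, while
// is_in_reserved() asks only whether p falls somewhere in their reserved
// address ranges. Both simply delegate to each generation in turn.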
bool ParallelScavengeHeap::is_in(const void* p) const {
  if (young_gen()->is_in(p)) {
    return true;
  }

  if (old_gen()->is_in(p)) {
    return true;
  }

  if (perm_gen()->is_in(p)) {
    return true;
  }

  return false;
}

bool ParallelScavengeHeap::is_in_reserved(const void* p) const {
  if (young_gen()->is_in_reserved(p)) {
    return true;
  }

  if (old_gen()->is_in_reserved(p)) {
    return true;
  }

  if (perm_gen()->is_in_reserved(p)) {
    return true;
  }

  return false;
}

// There are two levels of allocation policy here.
//
// When an allocation request fails, the requesting thread must invoke a VM
// operation, transfer control to the VM thread, and await the results of a
// garbage collection. That is quite expensive, and we should avoid doing it
// multiple times if possible.
//
// To accomplish this, we have a basic allocation policy, and also a
// failed allocation policy.
//
// The basic allocation policy controls how memory is allocated without
// attempting garbage collection. It is okay to grab locks and expand the
// heap, as long as that can be done without coming to a safepoint. The
// basic allocation policy is not likely to be very aggressive.
//
// The failed allocation policy is invoked from the VM thread after the
// basic allocation policy is unable to satisfy a mem_allocate request.
// This policy needs to cover the entire range of collection, heap
// expansion, and out-of-memory conditions. It should make every attempt
// to allocate the requested memory.

// Basic allocation policy. Should never be called at a safepoint, or
// from the VM thread.
//
// This method must handle cases where many mem_allocate requests fail
// simultaneously. When that happens, only one VM operation will succeed,
// and the rest will not be executed. For that reason, this method loops
// during failed allocation attempts. If the java heap becomes exhausted,
// we rely on the size_policy object to force a bail-out.
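//
// Illustrative sketch of the protocol implemented below (pseudocode, not
// the exact control flow):
//
//   while (result == NULL) {
//     { MutexLocker ml(Heap_lock);
//       gc_count = total_collections();        // snapshot under the lock
//       retry young-gen allocation; for large non-TLAB requests also try
//       old_gen; stall or return NULL if GC_locker is active;
//     }
//     VM_ParallelGCFailedAllocation op(size, is_tlab, gc_count);
//     VMThread::execute(&op);                  // collects at a safepoint;
//                                              // skipped if gc_count is stale
//   }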
HeapWord* ParallelScavengeHeap::mem_allocate(
                                     size_t size,
                                     bool is_noref,
                                     bool is_tlab,
                                     bool* gc_overhead_limit_was_exceeded) {
  assert(!SafepointSynchronize::is_at_safepoint(), "should not be at safepoint");
  assert(Thread::current() != (Thread*)VMThread::vm_thread(), "should not be in vm thread");
  assert(!Heap_lock->owned_by_self(), "this thread should not own the Heap_lock");

  // In general gc_overhead_limit_was_exceeded should be false, so
  // set it so here and reset it to true only if the gc time
  // limit is being exceeded as checked below.
  *gc_overhead_limit_was_exceeded = false;

  HeapWord* result = young_gen()->allocate(size, is_tlab);

  uint loop_count = 0;
  uint gc_count = 0;

  while (result == NULL) {
    // We don't want to have multiple collections for a single filled generation.
    // To prevent this, each thread tracks the total_collections() value, and if
    // the count has changed, does not do a new collection.
    //
    // The collection count must be read only while holding the heap lock. VM
    // operations also hold the heap lock during collections. There is a lock
    // contention case where thread A blocks waiting on the Heap_lock, while
    // thread B is holding it doing a collection. When thread A gets the lock,
    // the collection count has already changed. To prevent duplicate collections,
    // the policy MUST attempt allocations during the same period it reads the
    // total_collections() value!
    {
      MutexLocker ml(Heap_lock);
      gc_count = Universe::heap()->total_collections();

      result = young_gen()->allocate(size, is_tlab);

      // (1) If the requested object is too large to easily fit in the
      //     young_gen, or
      // (2) if GC is locked out via GCLocker, the young gen is full and
      //     the need for a GC has already been signalled to GCLocker
      //     (done at a safepoint),
      // ... then, rather than force a safepoint and (a potentially futile)
      // collection (attempt) for each allocation, try allocation directly
      // in old_gen. For case (2) above, we may in the future allow
      // TLAB allocation directly in the old gen.
      if (result != NULL) {
        return result;
      }
      if (!is_tlab &&
          size >= (young_gen()->eden_space()->capacity_in_words(Thread::current()) / 2)) {
        result = old_gen()->allocate(size, is_tlab);
        if (result != NULL) {
          return result;
        }
      }
      if (GC_locker::is_active_and_needs_gc()) {
        // GC is locked out. If this is a TLAB allocation,
        // return NULL; the requestor will retry allocation
        // of an individual object at a time.
        if (is_tlab) {
          return NULL;
        }

        // If this thread is not in a jni critical section, we stall
        // the requestor until the critical section has cleared and
        // GC is allowed. When the critical section clears, a GC is
        // initiated by the last thread exiting the critical section; so
        // we retry the allocation sequence from the beginning of the loop,
        // rather than causing more, now probably unnecessary, GC attempts.
        JavaThread* jthr = JavaThread::current();
        if (!jthr->in_critical()) {
          MutexUnlocker mul(Heap_lock);
          GC_locker::stall_until_clear();
          continue;
        } else {
          if (CheckJNICalls) {
            fatal("Possible deadlock due to allocating while"
                  " in jni critical section");
          }
          return NULL;
        }
      }
    }

    if (result == NULL) {
      // Generate a VM operation.
      VM_ParallelGCFailedAllocation op(size, is_tlab, gc_count);
      VMThread::execute(&op);

      // Did the VM operation execute? If so, return the result directly.
      // This prevents us from looping until time out on requests that can
      // not be satisfied.
      if (op.prologue_succeeded()) {
        assert(Universe::heap()->is_in_or_null(op.result()),
               "result not in heap");

        // If GC was locked out during the VM operation, then retry the
        // allocation and/or stall as necessary.
        if (op.gc_locked()) {
          assert(op.result() == NULL, "must be NULL if gc_locked() is true");
          continue;  // retry and/or stall as necessary
        }

        // Exit the loop if the gc time limit has been exceeded.
        // The allocation must have failed above ("result" guarding
        // this path is NULL) and the most recent collection has exceeded the
        // gc overhead limit (although enough may have been collected to
        // satisfy the allocation). Exit the loop so that an out-of-memory
        // will be thrown (return a NULL, ignoring the contents of
        // op.result()), but clear gc_overhead_limit_exceeded so that the
        // next collection starts with a clean slate (i.e., forgets about
        // previous overhead excesses). Fill op.result() with a filler
        // object so that the heap remains parsable.
        const bool limit_exceeded = size_policy()->gc_overhead_limit_exceeded();
        const bool softrefs_clear = collector_policy()->all_soft_refs_clear();
        assert(!limit_exceeded || softrefs_clear, "Should have been cleared");
        if (limit_exceeded && softrefs_clear) {
          *gc_overhead_limit_was_exceeded = true;
          size_policy()->set_gc_overhead_limit_exceeded(false);
          if (PrintGCDetails && Verbose) {
            gclog_or_tty->print_cr("ParallelScavengeHeap::mem_allocate: "
              "return NULL because gc_overhead_limit_exceeded is set");
          }
          if (op.result() != NULL) {
            CollectedHeap::fill_with_object(op.result(), size);
          }
          return NULL;
        }

        return op.result();
      }
    }

    // The policy object will prevent us from looping forever. If the
    // time spent in gc crosses a threshold, we will bail out.
    loop_count++;
    if ((result == NULL) && (QueuedAllocationWarningCount > 0) &&
        (loop_count % QueuedAllocationWarningCount == 0)) {
      warning("ParallelScavengeHeap::mem_allocate retries %d times \n\t"
              " size=%d %s", loop_count, size, is_tlab ? "(TLAB)" : "");
    }
  }

  return result;
}
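
// Informal summary of failed_mem_allocate() below, derived from its level
// comments: scavenge; then at most one full GC (skipped if the scavenge
// already escalated to one); then old-gen allocation; then a more complete
// full GC; then old-gen allocation once more.
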
// Failed allocation policy. Must be called from the VM thread, and
// only at a safepoint! Note that this method sets policy for allocation
// flow only, NOT collection policy. In particular, we do not check here
// whether the time spent in gc is over its limit; that is the
// responsibility of the heap-specific collection methods. This method
// decides where to attempt allocations and when to attempt collections,
// but contains no collection-specific policy.
HeapWord* ParallelScavengeHeap::failed_mem_allocate(size_t size, bool is_tlab) {
  assert(SafepointSynchronize::is_at_safepoint(), "should be at safepoint");
  assert(Thread::current() == (Thread*)VMThread::vm_thread(), "should be in vm thread");
  assert(!Universe::heap()->is_gc_active(), "not reentrant");
  assert(!Heap_lock->owned_by_self(), "this thread should not own the Heap_lock");

  size_t mark_sweep_invocation_count = total_invocations();

  // We assume (and assert!) that an allocation at this point will fail
  // unless we collect.

  // First level allocation failure: scavenge and allocate in young gen.
  GCCauseSetter gccs(this, GCCause::_allocation_failure);
  PSScavenge::invoke();
  HeapWord* result = young_gen()->allocate(size, is_tlab);

  // Second level allocation failure:
  // mark sweep and allocate in young generation.
  if (result == NULL) {
    // There is some chance the scavenge method decided to invoke mark_sweep.
    // Don't mark sweep twice if so.
    if (mark_sweep_invocation_count == total_invocations()) {
      invoke_full_gc(false);
      result = young_gen()->allocate(size, is_tlab);
    }
  }

  // Third level allocation failure:
  // after mark sweep and young generation allocation failure,
  // allocate in old generation.
  if (result == NULL && !is_tlab) {
    result = old_gen()->allocate(size, is_tlab);
  }

  // Fourth level allocation failure: we're running out of memory.
  // Do a more complete mark sweep and allocate in young generation.
  if (result == NULL) {
    invoke_full_gc(true);
    result = young_gen()->allocate(size, is_tlab);
  }

  // Fifth level allocation failure:
  // after the more complete mark sweep, allocate in old generation.
  if (result == NULL && !is_tlab) {
    result = old_gen()->allocate(size, is_tlab);
  }

  return result;
}
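
// Background (not stated in this file): in this generation of HotSpot the
// permanent generation holds VM metadata such as class data and interned
// strings, so the allocation path below is typically driven by class
// loading rather than by ordinary object allocation.
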
//
// This is the policy loop for allocating in the permanent generation.
// If the initial allocation fails, we create a vm operation which will
// cause a collection.
HeapWord* ParallelScavengeHeap::permanent_mem_allocate(size_t size) {
  assert(!SafepointSynchronize::is_at_safepoint(), "should not be at safepoint");
  assert(Thread::current() != (Thread*)VMThread::vm_thread(), "should not be in vm thread");
  assert(!Heap_lock->owned_by_self(), "this thread should not own the Heap_lock");

  HeapWord* result;

  uint loop_count = 0;
  uint gc_count = 0;
  uint full_gc_count = 0;

  do {
    // We don't want to have multiple collections for a single filled generation.
    // To prevent this, each thread tracks the total_collections() value, and if
    // the count has changed, does not do a new collection.
    //
    // The collection count must be read only while holding the heap lock. VM
    // operations also hold the heap lock during collections. There is a lock
    // contention case where thread A blocks waiting on the Heap_lock, while
    // thread B is holding it doing a collection. When thread A gets the lock,
    // the collection count has already changed. To prevent duplicate collections,
    // the policy MUST attempt allocations during the same period it reads the
    // total_collections() value!
    {
      MutexLocker ml(Heap_lock);
      gc_count      = Universe::heap()->total_collections();
      full_gc_count = Universe::heap()->total_full_collections();

      result = perm_gen()->allocate_permanent(size);

      if (result != NULL) {
        return result;
      }

      if (GC_locker::is_active_and_needs_gc()) {
        // If this thread is not in a jni critical section, we stall
        // the requestor until the critical section has cleared and
        // GC is allowed. When the critical section clears, a GC is
        // initiated by the last thread exiting the critical section; so
        // we retry the allocation sequence from the beginning of the loop,
        // rather than causing more, now probably unnecessary, GC attempts.
        JavaThread* jthr = JavaThread::current();
        if (!jthr->in_critical()) {
          MutexUnlocker mul(Heap_lock);
          GC_locker::stall_until_clear();
          continue;
        } else {
          if (CheckJNICalls) {
            fatal("Possible deadlock due to allocating while"
                  " in jni critical section");
          }
          return NULL;
        }
      }
    }

    if (result == NULL) {
      // Exit the loop if the gc time limit has been exceeded.
      // The allocation must have failed above (result must be NULL),
      // and the most recent collection must have exceeded the
      // gc time limit. Exit the loop so that an out-of-memory
      // will be thrown (returning a NULL will do that), but
      // clear gc_overhead_limit_exceeded so that the next collection
      // can succeed if the application decides to handle the
      // out-of-memory and tries to go on.
      const bool limit_exceeded = size_policy()->gc_overhead_limit_exceeded();
      if (limit_exceeded) {
        size_policy()->set_gc_overhead_limit_exceeded(false);
        if (PrintGCDetails && Verbose) {
          gclog_or_tty->print_cr("ParallelScavengeHeap::permanent_mem_allocate:"
            " return NULL because gc_overhead_limit_exceeded is set");
        }
        assert(result == NULL, "Allocation did not fail");
        return NULL;
      }

      // Generate a VM operation.
      VM_ParallelGCFailedPermanentAllocation op(size, gc_count, full_gc_count);
      VMThread::execute(&op);

      // Did the VM operation execute? If so, return the result directly.
      // This prevents us from looping until time out on requests that can
      // not be satisfied.
      if (op.prologue_succeeded()) {
        assert(Universe::heap()->is_in_permanent_or_null(op.result()),
               "result not in heap");
        // If GC was locked out during the VM operation, then retry the
        // allocation and/or stall as necessary.
        if (op.gc_locked()) {
          assert(op.result() == NULL, "must be NULL if gc_locked() is true");
          continue;  // retry and/or stall as necessary
        }
        // If a NULL result is being returned, an out-of-memory
        // will be thrown now. Clear the gc_overhead_limit_exceeded
        // flag to avoid the following situation:
        //   gc_overhead_limit_exceeded is set during a collection;
        //   the collection fails to return enough space and an OOM is thrown;
        //   a subsequent GC prematurely throws an out-of-memory because
        //   the gc_overhead_limit_exceeded counts did not start
        //   again from 0.
        if (op.result() == NULL) {
          size_policy()->reset_gc_overhead_limit_count();
        }
        return op.result();
      }
    }

    // The policy object will prevent us from looping forever. If the
    // time spent in gc crosses a threshold, we will bail out.
    loop_count++;
    if ((QueuedAllocationWarningCount > 0) &&
        (loop_count % QueuedAllocationWarningCount == 0)) {
      warning("ParallelScavengeHeap::permanent_mem_allocate retries %d times \n\t"
              " size=%d", loop_count, size);
    }
  } while (result == NULL);

  return result;
}

//
// This is the policy code for permanent allocations which have failed
// and require a collection. Note that just as in failed_mem_allocate,
// we do not set collection policy, only where & when to allocate and
// collect.
HeapWord* ParallelScavengeHeap::failed_permanent_mem_allocate(size_t size) {
  assert(SafepointSynchronize::is_at_safepoint(), "should be at safepoint");
  assert(Thread::current() == (Thread*)VMThread::vm_thread(), "should be in vm thread");
  assert(!Universe::heap()->is_gc_active(), "not reentrant");
  assert(!Heap_lock->owned_by_self(), "this thread should not own the Heap_lock");
  assert(size > perm_gen()->free_in_words(), "Allocation should fail");

  // We assume (and assert!) that an allocation at this point will fail
  // unless we collect.

  // First level allocation failure: mark-sweep and allocate in perm gen.
  GCCauseSetter gccs(this, GCCause::_allocation_failure);
  invoke_full_gc(false);
  HeapWord* result = perm_gen()->allocate_permanent(size);

  // Second level allocation failure: we're running out of memory.
  if (result == NULL) {
    invoke_full_gc(true);
    result = perm_gen()->allocate_permanent(size);
  }

  return result;
}
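
// Note on ensure_parsability() below: retiring TLABs fills their unused
// tails with filler objects so that the spaces can afterwards be walked
// object by object (as heap verification and heap inspection require).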
void ParallelScavengeHeap::ensure_parsability(bool retire_tlabs) {
  CollectedHeap::ensure_parsability(retire_tlabs);
  young_gen()->eden_space()->ensure_parsability();
}

size_t ParallelScavengeHeap::unsafe_max_alloc() {
  return young_gen()->eden_space()->free_in_bytes();
}

size_t ParallelScavengeHeap::tlab_capacity(Thread* thr) const {
  return young_gen()->eden_space()->tlab_capacity(thr);
}

size_t ParallelScavengeHeap::unsafe_max_tlab_alloc(Thread* thr) const {
  return young_gen()->eden_space()->unsafe_max_tlab_alloc(thr);
}

HeapWord* ParallelScavengeHeap::allocate_new_tlab(size_t size) {
  return young_gen()->allocate(size, true);
}

void ParallelScavengeHeap::accumulate_statistics_all_tlabs() {
  CollectedHeap::accumulate_statistics_all_tlabs();
}

void ParallelScavengeHeap::resize_all_tlabs() {
  CollectedHeap::resize_all_tlabs();
}

bool ParallelScavengeHeap::can_elide_initializing_store_barrier(oop new_obj) {
  // We don't need barriers for stores to objects in the
  // young gen and, a fortiori, for initializing stores to
  // objects therein.
  return is_in_young(new_obj);
}

// This method is used by System.gc() and JVMTI.
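// The collection counts are snapshotted under the Heap_lock so that the VM
// operation can detect that another collection ran after the counts were
// read and skip a redundant GC (the skip happens in the VM operation's
// prologue, outside this file).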
void ParallelScavengeHeap::collect(GCCause::Cause cause) {
  assert(!Heap_lock->owned_by_self(),
         "this thread should not own the Heap_lock");

  unsigned int gc_count = 0;
  unsigned int full_gc_count = 0;
  {
    MutexLocker ml(Heap_lock);
    // This value is guarded by the Heap_lock
    gc_count = Universe::heap()->total_collections();
    full_gc_count = Universe::heap()->total_full_collections();
  }

  VM_ParallelGCSystemGC op(gc_count, full_gc_count, cause);
  VMThread::execute(&op);
}

// This interface assumes that it's being called by the
// vm thread. It collects the heap assuming that the
// heap lock is already held and that we are executing in
// the context of the vm thread.
void ParallelScavengeHeap::collect_as_vm_thread(GCCause::Cause cause) {
  assert(Thread::current()->is_VM_thread(), "Precondition#1");
  assert(Heap_lock->is_locked(), "Precondition#2");
  GCCauseSetter gcs(this, cause);
  switch (cause) {
    case GCCause::_heap_inspection:
    case GCCause::_heap_dump: {
      HandleMark hm;
      invoke_full_gc(false);
      break;
    }
    default: // XXX FIX ME
      ShouldNotReachHere();
  }
}

void ParallelScavengeHeap::oop_iterate(OopClosure* cl) {
  Unimplemented();
}

void ParallelScavengeHeap::object_iterate(ObjectClosure* cl) {
  young_gen()->object_iterate(cl);
  old_gen()->object_iterate(cl);
  perm_gen()->object_iterate(cl);
}

void ParallelScavengeHeap::permanent_oop_iterate(OopClosure* cl) {
  Unimplemented();
}

void ParallelScavengeHeap::permanent_object_iterate(ObjectClosure* cl) {
  perm_gen()->object_iterate(cl);
}

HeapWord* ParallelScavengeHeap::block_start(const void* addr) const {
  if (young_gen()->is_in_reserved(addr)) {
    assert(young_gen()->is_in(addr),
           "addr should be in allocated part of young gen");
    // Called from os::print_location by find or VMError.
    if (Debugging || VMError::fatal_error_in_progress()) return NULL;
    Unimplemented();
  } else if (old_gen()->is_in_reserved(addr)) {
    assert(old_gen()->is_in(addr),
           "addr should be in allocated part of old gen");
    return old_gen()->start_array()->object_start((HeapWord*)addr);
  } else if (perm_gen()->is_in_reserved(addr)) {
    assert(perm_gen()->is_in(addr),
           "addr should be in allocated part of perm gen");
    return perm_gen()->start_array()->object_start((HeapWord*)addr);
  }
  return 0;
}

size_t ParallelScavengeHeap::block_size(const HeapWord* addr) const {
  return oop(addr)->size();
}

bool ParallelScavengeHeap::block_is_obj(const HeapWord* addr) const {
  return block_start(addr) == addr;
}

jlong ParallelScavengeHeap::millis_since_last_gc() {
  return UseParallelOldGC ?
    PSParallelCompact::millis_since_last_gc() :
    PSMarkSweep::millis_since_last_gc();
}

void ParallelScavengeHeap::prepare_for_verify() {
  ensure_parsability(false);  // no need to retire TLABs for verification
}

void ParallelScavengeHeap::print() const { print_on(tty); }

void ParallelScavengeHeap::print_on(outputStream* st) const {
  young_gen()->print_on(st);
  old_gen()->print_on(st);
  perm_gen()->print_on(st);
}

void ParallelScavengeHeap::gc_threads_do(ThreadClosure* tc) const {
  PSScavenge::gc_task_manager()->threads_do(tc);
}

void ParallelScavengeHeap::print_gc_threads_on(outputStream* st) const {
  PSScavenge::gc_task_manager()->print_threads_on(st);
}
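
// Illustrative only (hypothetical timing; the format comes from the
// print_cr calls below): with -XX:+TraceGen0Time, print_tracing_info()
// emits a line such as
//   [Accumulated GC generation 0 time 2.3456789 secs]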
void ParallelScavengeHeap::print_tracing_info() const {
  if (TraceGen0Time) {
    double time = PSScavenge::accumulated_time()->seconds();
    tty->print_cr("[Accumulated GC generation 0 time %3.7f secs]", time);
  }
  if (TraceGen1Time) {
    double time = PSMarkSweep::accumulated_time()->seconds();
    tty->print_cr("[Accumulated GC generation 1 time %3.7f secs]", time);
  }
}

void ParallelScavengeHeap::verify(bool allow_dirty, bool silent, bool option /* ignored */) {
  // Why do we need the total_collections()-filter below?
  if (total_collections() > 0) {
    if (!silent) {
      gclog_or_tty->print("permanent ");
    }
    perm_gen()->verify(allow_dirty);

    if (!silent) {
      gclog_or_tty->print("tenured ");
    }
    old_gen()->verify(allow_dirty);

    if (!silent) {
      gclog_or_tty->print("eden ");
    }
    young_gen()->verify(allow_dirty);
  }
  if (!silent) {
    gclog_or_tty->print("ref_proc ");
  }
  ReferenceProcessor::verify();
}

void ParallelScavengeHeap::print_heap_change(size_t prev_used) {
  if (PrintGCDetails && Verbose) {
    gclog_or_tty->print(" "  SIZE_FORMAT
                        "->" SIZE_FORMAT
                        "("  SIZE_FORMAT ")",
                        prev_used, used(), capacity());
  } else {
    gclog_or_tty->print(" "  SIZE_FORMAT "K"
                        "->" SIZE_FORMAT "K"
                        "("  SIZE_FORMAT "K)",
                        prev_used / K, used() / K, capacity() / K);
  }
}

ParallelScavengeHeap* ParallelScavengeHeap::heap() {
  assert(_psh != NULL, "Uninitialized access to ParallelScavengeHeap::heap()");
  assert(_psh->kind() == CollectedHeap::ParallelScavengeHeap, "not a parallel scavenge heap");
  return _psh;
}

// Before delegating the resize to the young generation,
// the reserved space for the young and old generations
// may be changed to accommodate the desired resize.
void ParallelScavengeHeap::resize_young_gen(size_t eden_size,
                                            size_t survivor_size) {
  if (UseAdaptiveGCBoundary) {
    if (size_policy()->bytes_absorbed_from_eden() != 0) {
      size_policy()->reset_bytes_absorbed_from_eden();
      return;  // The generation changed size already.
    }
    gens()->adjust_boundary_for_young_gen_needs(eden_size, survivor_size);
  }

  // Delegate the resize to the generation.
  _young_gen->resize(eden_size, survivor_size);
}

// Before delegating the resize to the old generation,
// the reserved space for the young and old generations
// may be changed to accommodate the desired resize.
void ParallelScavengeHeap::resize_old_gen(size_t desired_free_space) {
  if (UseAdaptiveGCBoundary) {
    if (size_policy()->bytes_absorbed_from_eden() != 0) {
      size_policy()->reset_bytes_absorbed_from_eden();
      return;  // The generation changed size already.
    }
    gens()->adjust_boundary_for_old_gen_needs(desired_free_space);
  }

  // Delegate the resize to the generation.
  _old_gen->resize(desired_free_space);
}

ParallelScavengeHeap::ParStrongRootsScope::ParStrongRootsScope() {
  // nothing particular
}

ParallelScavengeHeap::ParStrongRootsScope::~ParStrongRootsScope() {
  // nothing particular
}

#ifndef PRODUCT
void ParallelScavengeHeap::record_gen_tops_before_GC() {
  if (ZapUnusedHeapArea) {
    young_gen()->record_spaces_top();
    old_gen()->record_spaces_top();
    perm_gen()->record_spaces_top();
  }
}

void ParallelScavengeHeap::gen_mangle_unused_area() {
  if (ZapUnusedHeapArea) {
    young_gen()->eden_space()->mangle_unused_area();
    young_gen()->to_space()->mangle_unused_area();
    young_gen()->from_space()->mangle_unused_area();
    old_gen()->object_space()->mangle_unused_area();
    perm_gen()->object_space()->mangle_unused_area();
  }
}
#endif