/*
 * Copyright (c) 2001, 2018, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "classfile/systemDictionary.hpp"
#include "gc/shared/allocTracer.hpp"
#include "gc/shared/barrierSet.hpp"
#include "gc/shared/collectedHeap.hpp"
#include "gc/shared/collectedHeap.inline.hpp"
#include "gc/shared/gcLocker.inline.hpp"
#include "gc/shared/gcHeapSummary.hpp"
#include "gc/shared/gcTrace.hpp"
#include "gc/shared/gcTraceTime.inline.hpp"
#include "gc/shared/gcWhen.hpp"
#include "gc/shared/memAllocator.hpp"
#include "gc/shared/vmGCOperations.hpp"
#include "logging/log.hpp"
#include "memory/metaspace.hpp"
#include "memory/resourceArea.hpp"
#include "oops/instanceMirrorKlass.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/init.hpp"
#include "runtime/thread.inline.hpp"
#include "runtime/threadSMR.hpp"
#include "runtime/vmThread.hpp"
#include "services/heapDumper.hpp"
#include "utilities/align.hpp"
#include "utilities/copy.hpp"

class ClassLoaderData;

#ifdef ASSERT
int CollectedHeap::_fire_out_of_memory_count = 0;
#endif

size_t CollectedHeap::_filler_array_max_size = 0;

template <>
void EventLogBase<GCMessage>::print(outputStream* st, GCMessage& m) {
  st->print_cr("GC heap %s", m.is_before ? "before" : "after");
  st->print_raw(m);
}

void GCHeapLog::log_heap(CollectedHeap* heap, bool before) {
  if (!should_log()) {
    return;
  }

  double timestamp = fetch_timestamp();
  MutexLockerEx ml(&_mutex, Mutex::_no_safepoint_check_flag);
  int index = compute_log_index();
  _records[index].thread = NULL; // It's the GC thread so it's not that interesting.
  _records[index].timestamp = timestamp;
  _records[index].data.is_before = before;
  stringStream st(_records[index].data.buffer(), _records[index].data.size());

  st.print_cr("{Heap %s GC invocations=%u (full %u):",
              before ? "before" : "after",
              heap->total_collections(),
              heap->total_full_collections());

  heap->print_on(&st);
  st.print_cr("}");
}

VirtualSpaceSummary CollectedHeap::create_heap_space_summary() {
  size_t capacity_in_words = capacity() / HeapWordSize;

  return VirtualSpaceSummary(
    reserved_region().start(), reserved_region().start() + capacity_in_words, reserved_region().end());
}

GCHeapSummary CollectedHeap::create_heap_summary() {
  VirtualSpaceSummary heap_space = create_heap_space_summary();
  return GCHeapSummary(heap_space, used());
}

MetaspaceSummary CollectedHeap::create_metaspace_summary() {
  const MetaspaceSizes meta_space(
      MetaspaceUtils::committed_bytes(),
      MetaspaceUtils::used_bytes(),
      MetaspaceUtils::reserved_bytes());
  const MetaspaceSizes data_space(
      MetaspaceUtils::committed_bytes(Metaspace::NonClassType),
      MetaspaceUtils::used_bytes(Metaspace::NonClassType),
      MetaspaceUtils::reserved_bytes(Metaspace::NonClassType));
  const MetaspaceSizes class_space(
      MetaspaceUtils::committed_bytes(Metaspace::ClassType),
      MetaspaceUtils::used_bytes(Metaspace::ClassType),
      MetaspaceUtils::reserved_bytes(Metaspace::ClassType));

  const MetaspaceChunkFreeListSummary& ms_chunk_free_list_summary =
    MetaspaceUtils::chunk_free_list_summary(Metaspace::NonClassType);
  const MetaspaceChunkFreeListSummary& class_chunk_free_list_summary =
    MetaspaceUtils::chunk_free_list_summary(Metaspace::ClassType);

  return MetaspaceSummary(MetaspaceGC::capacity_until_GC(), meta_space, data_space, class_space,
                          ms_chunk_free_list_summary, class_chunk_free_list_summary);
}

void CollectedHeap::print_heap_before_gc() {
  Universe::print_heap_before_gc();
  if (_gc_heap_log != NULL) {
    _gc_heap_log->log_heap_before(this);
  }
}

void CollectedHeap::print_heap_after_gc() {
  Universe::print_heap_after_gc();
  if (_gc_heap_log != NULL) {
    _gc_heap_log->log_heap_after(this);
  }
}

void CollectedHeap::print_on_error(outputStream* st) const {
  st->print_cr("Heap:");
  print_extended_on(st);
  st->cr();

  BarrierSet::barrier_set()->print_on(st);
}

void CollectedHeap::trace_heap(GCWhen::Type when, const GCTracer* gc_tracer) {
  const GCHeapSummary& heap_summary = create_heap_summary();
  gc_tracer->report_gc_heap_summary(when, heap_summary);

  const MetaspaceSummary& metaspace_summary = create_metaspace_summary();
  gc_tracer->report_metaspace_summary(when, metaspace_summary);
}

void CollectedHeap::trace_heap_before_gc(const GCTracer* gc_tracer) {
  trace_heap(GCWhen::BeforeGC, gc_tracer);
}

void CollectedHeap::trace_heap_after_gc(const GCTracer* gc_tracer) {
  trace_heap(GCWhen::AfterGC, gc_tracer);
}

// WhiteBox API support for concurrent collectors.  These are the
// default implementations, for collectors which don't support this
// feature.
bool CollectedHeap::supports_concurrent_phase_control() const {
  return false;
}

const char* const* CollectedHeap::concurrent_phases() const {
  static const char* const result[] = { NULL };
  return result;
}

bool CollectedHeap::request_concurrent_phase(const char* phase) {
  return false;
}

bool CollectedHeap::is_oop(oop object) const {
  if (!check_obj_alignment(object)) {
    return false;
  }

  if (!is_in_reserved(object)) {
    return false;
  }

  if (is_in_reserved(object->klass_or_null())) {
    return false;
  }

  return true;
}

// Memory state functions.


CollectedHeap::CollectedHeap() :
  _is_gc_active(false),
  _total_collections(0),
  _total_full_collections(0),
  _gc_cause(GCCause::_no_gc),
  _gc_lastcause(GCCause::_no_gc)
{
  const size_t max_len = size_t(arrayOopDesc::max_array_length(T_INT));
  const size_t elements_per_word = HeapWordSize / sizeof(jint);
  _filler_array_max_size = align_object_size(filler_array_hdr_size() +
                                             max_len / elements_per_word);

  NOT_PRODUCT(_promotion_failure_alot_count = 0;)
  NOT_PRODUCT(_promotion_failure_alot_gc_number = 0;)

  if (UsePerfData) {
    EXCEPTION_MARK;

    // create the gc cause jvmstat counters
    _perf_gc_cause = PerfDataManager::create_string_variable(SUN_GC, "cause",
                             80, GCCause::to_string(_gc_cause), CHECK);

    _perf_gc_lastcause =
                PerfDataManager::create_string_variable(SUN_GC, "lastCause",
                             80, GCCause::to_string(_gc_lastcause), CHECK);
  }

  // Create the ring log
  if (LogEvents) {
    _gc_heap_log = new GCHeapLog();
  } else {
    _gc_heap_log = NULL;
  }
}

// This interface assumes that it's being called by the
// vm thread. It collects the heap assuming that the
// heap lock is already held and that we are executing in
// the context of the vm thread.
void CollectedHeap::collect_as_vm_thread(GCCause::Cause cause) {
  assert(Thread::current()->is_VM_thread(), "Precondition#1");
  assert(Heap_lock->is_locked(), "Precondition#2");
  GCCauseSetter gcs(this, cause);
  switch (cause) {
    case GCCause::_heap_inspection:
    case GCCause::_heap_dump:
    case GCCause::_metadata_GC_threshold : {
      HandleMark hm;
      do_full_collection(false);        // don't clear all soft refs
      break;
    }
    case GCCause::_metadata_GC_clear_soft_refs: {
      HandleMark hm;
      do_full_collection(true);         // do clear all soft refs
      break;
    }
    default:
      ShouldNotReachHere(); // Unexpected use of this function
  }
}

MetaWord* CollectedHeap::satisfy_failed_metadata_allocation(ClassLoaderData* loader_data,
                                                            size_t word_size,
                                                            Metaspace::MetadataType mdtype) {
  uint loop_count = 0;
  uint gc_count = 0;
  uint full_gc_count = 0;

  assert(!Heap_lock->owned_by_self(), "Should not be holding the Heap_lock");

  do {
    MetaWord* result = loader_data->metaspace_non_null()->allocate(word_size, mdtype);
    if (result != NULL) {
      return result;
    }

    if (GCLocker::is_active_and_needs_gc()) {
      // If the GCLocker is active, just expand and allocate.
      // If that does not succeed, wait if this thread is not
      // in a critical section itself.
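      // (Note: expanding the metaspace does not itself trigger a GC, which is
      // why this path can be taken even while the GCLocker is held.)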
      result = loader_data->metaspace_non_null()->expand_and_allocate(word_size, mdtype);
      if (result != NULL) {
        return result;
      }
      JavaThread* jthr = JavaThread::current();
      if (!jthr->in_critical()) {
        // Wait for JNI critical section to be exited
        GCLocker::stall_until_clear();
        // The GC invoked by the last thread leaving the critical
        // section will be a young collection and a full collection
        // is (currently) needed for unloading classes so continue
        // to the next iteration to get a full GC.
        continue;
      } else {
        if (CheckJNICalls) {
          fatal("Possible deadlock due to allocating while"
                " in jni critical section");
        }
        return NULL;
      }
    }

    {  // Need lock to get self consistent gc_count's
      MutexLocker ml(Heap_lock);
      gc_count      = Universe::heap()->total_collections();
      full_gc_count = Universe::heap()->total_full_collections();
    }

    // Generate a VM operation
    VM_CollectForMetadataAllocation op(loader_data,
                                       word_size,
                                       mdtype,
                                       gc_count,
                                       full_gc_count,
                                       GCCause::_metadata_GC_threshold);
    VMThread::execute(&op);

    // If GC was locked out, try again. Check before checking success because the
    // prologue could have succeeded and the GC still have been locked out.
    if (op.gc_locked()) {
      continue;
    }

    if (op.prologue_succeeded()) {
      return op.result();
    }
    loop_count++;
    if ((QueuedAllocationWarningCount > 0) &&
        (loop_count % QueuedAllocationWarningCount == 0)) {
      log_warning(gc, ergo)("satisfy_failed_metadata_allocation() retries %d times,"
                            " size=" SIZE_FORMAT, loop_count, word_size);
    }
  } while (true);  // Until a GC is done
}

MemoryUsage CollectedHeap::memory_usage() {
  return MemoryUsage(InitialHeapSize, used(), capacity(), max_capacity());
}


#ifndef PRODUCT
void CollectedHeap::check_for_non_bad_heap_word_value(HeapWord* addr, size_t size) {
  if (CheckMemoryInitialization && ZapUnusedHeapArea) {
    for (size_t slot = 0; slot < size; slot += 1) {
      assert((*(intptr_t*) (addr + slot)) == ((intptr_t) badHeapWordVal),
             "Found non badHeapWordValue in pre-allocation check");
    }
  }
}
#endif // PRODUCT

size_t CollectedHeap::max_tlab_size() const {
  // TLABs can't be bigger than we can fill with an int[Integer.MAX_VALUE].
  // This restriction could be removed by enabling filling with multiple arrays.
  // If we compute that the reasonable way as
  //    header_size + ((sizeof(jint) * max_jint) / HeapWordSize)
  // we'll overflow on the multiply, so we do the divide first.
  // We actually lose a little by dividing first,
  // but that just makes the TLAB somewhat smaller than the biggest array,
  // which is fine, since we'll be able to fill that.
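  //
  // For example, assuming a 64-bit VM (HeapWordSize == 8, sizeof(jint) == 4):
  // dividing first gives 4 * (max_jint / 8) = 1,073,741,820 words, a few words
  // short of the exact (4 * max_jint) / 8; that is the small loss noted above.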
  size_t max_int_size = typeArrayOopDesc::header_size(T_INT) +
              sizeof(jint) *
              ((juint) max_jint / (size_t) HeapWordSize);
  return align_down(max_int_size, MinObjAlignment);
}

size_t CollectedHeap::filler_array_hdr_size() {
  return align_object_offset(arrayOopDesc::header_size(T_INT)); // align to Long
}

size_t CollectedHeap::filler_array_min_size() {
  return align_object_size(filler_array_hdr_size()); // align to MinObjAlignment
}

#ifdef ASSERT
void CollectedHeap::fill_args_check(HeapWord* start, size_t words)
{
  assert(words >= min_fill_size(), "too small to fill");
  assert(is_object_aligned(words), "unaligned size");
  assert(Universe::heap()->is_in_reserved(start), "not in heap");
  assert(Universe::heap()->is_in_reserved(start + words - 1), "not in heap");
}

void CollectedHeap::zap_filler_array(HeapWord* start, size_t words, bool zap)
{
  if (ZapFillerObjects && zap) {
    Copy::fill_to_words(start + filler_array_hdr_size(),
                        words - filler_array_hdr_size(), 0XDEAFBABE);
  }
}
#endif // ASSERT

void
CollectedHeap::fill_with_array(HeapWord* start, size_t words, bool zap)
{
  assert(words >= filler_array_min_size(), "too small for an array");
  assert(words <= filler_array_max_size(), "too big for a single object");

  const size_t payload_size = words - filler_array_hdr_size();
  const size_t len = payload_size * HeapWordSize / sizeof(jint);
  assert((int)len >= 0, "size too large " SIZE_FORMAT " becomes %d", words, (int)len);

  ObjArrayAllocator allocator(Universe::intArrayKlassObj(), words, (int)len, /* do_zero */ false);
  allocator.initialize(start);
  DEBUG_ONLY(zap_filler_array(start, words, zap);)
}

void
CollectedHeap::fill_with_object_impl(HeapWord* start, size_t words, bool zap)
{
  assert(words <= filler_array_max_size(), "too big for a single object");

  if (words >= filler_array_min_size()) {
    fill_with_array(start, words, zap);
  } else if (words > 0) {
    assert(words == min_fill_size(), "unaligned size");
    ObjAllocator allocator(SystemDictionary::Object_klass(), words);
    allocator.initialize(start);
  }
}

void CollectedHeap::fill_with_object(HeapWord* start, size_t words, bool zap)
{
  DEBUG_ONLY(fill_args_check(start, words);)
  HandleMark hm;  // Free handles before leaving.
  fill_with_object_impl(start, words, zap);
}

void CollectedHeap::fill_with_objects(HeapWord* start, size_t words, bool zap)
{
  DEBUG_ONLY(fill_args_check(start, words);)
  HandleMark hm;  // Free handles before leaving.

  // Multiple objects may be required depending on the filler array maximum size. Fill
  // the range up to that with objects that are filler_array_max_size sized. The
  // remainder is filled with a single object.
  const size_t min = min_fill_size();
  const size_t max = filler_array_max_size();
  while (words > max) {
    const size_t cur = (words - max) >= min ? max : max - min;
    fill_with_array(start, cur, zap);
    start += cur;
    words -= cur;
  }

  fill_with_object_impl(start, words, zap);
}

void CollectedHeap::fill_with_dummy_object(HeapWord* start, HeapWord* end, bool zap) {
  CollectedHeap::fill_with_object(start, end, zap);
}

HeapWord* CollectedHeap::allocate_new_tlab(size_t min_size,
                                           size_t requested_size,
                                           size_t* actual_size) {
  guarantee(false, "thread-local allocation buffers not supported");
  return NULL;
}

oop CollectedHeap::obj_allocate(Klass* klass, int size, TRAPS) {
  ObjAllocator allocator(klass, size, THREAD);
  return allocator.allocate();
}

oop CollectedHeap::array_allocate(Klass* klass, int size, int length, bool do_zero, TRAPS) {
  ObjArrayAllocator allocator(klass, size, length, do_zero, THREAD);
  return allocator.allocate();
}

oop CollectedHeap::class_allocate(Klass* klass, int size, TRAPS) {
  ClassAllocator allocator(klass, size, THREAD);
  return allocator.allocate();
}

void CollectedHeap::ensure_parsability(bool retire_tlabs) {
  assert(SafepointSynchronize::is_at_safepoint() || !is_init_completed(),
         "Should only be called at a safepoint or at start-up");

  ThreadLocalAllocStats stats;

  for (JavaThreadIteratorWithHandle jtiwh; JavaThread *thread = jtiwh.next();) {
    BarrierSet::barrier_set()->make_parsable(thread);
    if (UseTLAB) {
      if (retire_tlabs) {
        thread->tlab().retire(&stats);
      } else {
        thread->tlab().make_parsable();
      }
    }
  }

  stats.publish();
}

void CollectedHeap::resize_all_tlabs() {
  assert(SafepointSynchronize::is_at_safepoint() || !is_init_completed(),
         "Should only resize tlabs at safepoint");

  if (UseTLAB && ResizeTLAB) {
    for (JavaThreadIteratorWithHandle jtiwh; JavaThread *thread = jtiwh.next(); ) {
      thread->tlab().resize();
    }
  }
}

void CollectedHeap::full_gc_dump(GCTimer* timer, bool before) {
  assert(timer != NULL, "timer is null");
  if ((HeapDumpBeforeFullGC && before) || (HeapDumpAfterFullGC && !before)) {
    GCTraceTime(Info, gc) tm(before ? "Heap Dump (before full gc)" : "Heap Dump (after full gc)", timer);
    HeapDumper::dump_heap();
  }

  LogTarget(Trace, gc, classhisto) lt;
  if (lt.is_enabled()) {
    GCTraceTime(Trace, gc, classhisto) tm(before ? "Class Histogram (before full gc)" : "Class Histogram (after full gc)", timer);
    ResourceMark rm;
    LogStream ls(lt);
    VM_GC_HeapInspection inspector(&ls, false /* ! full gc */);
    inspector.doit();
  }
}

void CollectedHeap::pre_full_gc_dump(GCTimer* timer) {
  full_gc_dump(timer, true);
}

void CollectedHeap::post_full_gc_dump(GCTimer* timer) {
  full_gc_dump(timer, false);
}

void CollectedHeap::initialize_reserved_region(HeapWord *start, HeapWord *end) {
  // It is important to do this in a way such that concurrent readers can't
  // temporarily think something is in the heap.  (Seen this happen in asserts.)
  _reserved.set_word_size(0);
  _reserved.set_start(start);
  _reserved.set_end(end);
}

void CollectedHeap::post_initialize() {
  initialize_serviceability();
}

#ifndef PRODUCT

bool CollectedHeap::promotion_should_fail(volatile size_t* count) {
  // Access to count is not atomic; the value does not have to be exact.
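  // Once PromotionFailureALotInterval collections have elapsed since the last
  // reset, every PromotionFailureALotCount-th call induces an artificial
  // promotion failure.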
  if (PromotionFailureALot) {
    const size_t gc_num = total_collections();
    const size_t elapsed_gcs = gc_num - _promotion_failure_alot_gc_number;
    if (elapsed_gcs >= PromotionFailureALotInterval) {
      // Test for unsigned arithmetic wrap-around.
      if (++*count >= PromotionFailureALotCount) {
        *count = 0;
        return true;
      }
    }
  }
  return false;
}

bool CollectedHeap::promotion_should_fail() {
  return promotion_should_fail(&_promotion_failure_alot_count);
}

void CollectedHeap::reset_promotion_should_fail(volatile size_t* count) {
  if (PromotionFailureALot) {
    _promotion_failure_alot_gc_number = total_collections();
    *count = 0;
  }
}

void CollectedHeap::reset_promotion_should_fail() {
  reset_promotion_should_fail(&_promotion_failure_alot_count);
}

#endif  // #ifndef PRODUCT

bool CollectedHeap::supports_object_pinning() const {
  return false;
}

oop CollectedHeap::pin_object(JavaThread* thread, oop obj) {
  ShouldNotReachHere();
  return NULL;
}

void CollectedHeap::unpin_object(JavaThread* thread, oop obj) {
  ShouldNotReachHere();
}

void CollectedHeap::deduplicate_string(oop str) {
  // Do nothing, unless overridden in subclass.
}

size_t CollectedHeap::obj_size(oop obj) const {
  return obj->size();
}

size_t CollectedHeap::obj_header_size() const {
  return oopDesc::header_size();
}

size_t CollectedHeap::array_header_size(BasicType type) const {
  return arrayOopDesc::header_size(type);
}