/*
 * Copyright (c) 2001, 2020, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "classfile/systemDictionary.hpp"
#include "gc/shared/allocTracer.hpp"
#include "gc/shared/barrierSet.hpp"
#include "gc/shared/collectedHeap.hpp"
#include "gc/shared/collectedHeap.inline.hpp"
#include "gc/shared/gcLocker.inline.hpp"
#include "gc/shared/gcHeapSummary.hpp"
#include "gc/shared/gcTrace.hpp"
#include "gc/shared/gcTraceTime.inline.hpp"
#include "gc/shared/gcVMOperations.hpp"
#include "gc/shared/gcWhen.hpp"
#include "gc/shared/memAllocator.hpp"
#include "logging/log.hpp"
#include "memory/metaspace.hpp"
#include "memory/resourceArea.hpp"
#include "memory/universe.hpp"
#include "oops/instanceMirrorKlass.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/init.hpp"
#include "runtime/thread.inline.hpp"
#include "runtime/threadSMR.hpp"
#include "runtime/vmThread.hpp"
#include "services/heapDumper.hpp"
#include "utilities/align.hpp"
#include "utilities/copy.hpp"

class ClassLoaderData;

size_t CollectedHeap::_filler_array_max_size = 0;

template <>
void EventLogBase<GCMessage>::print(outputStream* st, GCMessage& m) {
  st->print_cr("GC heap %s", m.is_before ? "before" : "after");
  st->print_raw(m);
}

void GCHeapLog::log_heap(CollectedHeap* heap, bool before) {
  if (!should_log()) {
    return;
  }

  double timestamp = fetch_timestamp();
  MutexLocker ml(&_mutex, Mutex::_no_safepoint_check_flag);
  int index = compute_log_index();
  _records[index].thread = NULL; // It's the GC thread, so it's not that interesting.
  _records[index].timestamp = timestamp;
  _records[index].data.is_before = before;
  stringStream st(_records[index].data.buffer(), _records[index].data.size());

  st.print_cr("{Heap %s GC invocations=%u (full %u):",
              before ? "before" : "after",
              heap->total_collections(),
              heap->total_full_collections());

  heap->print_on(&st);
  st.print_cr("}");
}
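
// For reference, a record produced by log_heap() looks roughly like the
// sketch below. The first and last lines follow the format strings above;
// everything in between comes from the collector-specific print_on(), so
// the body shown here is illustrative only:
//
//   {Heap before GC invocations=5 (full 1):
//     <collector-specific region and generation summary>
//   }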
"before" : "after", 78 heap->total_collections(), 79 heap->total_full_collections()); 80 81 heap->print_on(&st); 82 st.print_cr("}"); 83 } 84 85 size_t CollectedHeap::unused() const { 86 MutexLocker ml(Heap_lock); 87 return capacity() - used(); 88 } 89 90 VirtualSpaceSummary CollectedHeap::create_heap_space_summary() { 91 size_t capacity_in_words = capacity() / HeapWordSize; 92 93 return VirtualSpaceSummary( 94 _reserved.start(), _reserved.start() + capacity_in_words, _reserved.end()); 95 } 96 97 GCHeapSummary CollectedHeap::create_heap_summary() { 98 VirtualSpaceSummary heap_space = create_heap_space_summary(); 99 return GCHeapSummary(heap_space, used()); 100 } 101 102 MetaspaceSummary CollectedHeap::create_metaspace_summary() { 103 const MetaspaceSizes meta_space( 104 MetaspaceUtils::committed_bytes(), 105 MetaspaceUtils::used_bytes(), 106 MetaspaceUtils::reserved_bytes()); 107 const MetaspaceSizes data_space( 108 MetaspaceUtils::committed_bytes(Metaspace::NonClassType), 109 MetaspaceUtils::used_bytes(Metaspace::NonClassType), 110 MetaspaceUtils::reserved_bytes(Metaspace::NonClassType)); 111 const MetaspaceSizes class_space( 112 MetaspaceUtils::committed_bytes(Metaspace::ClassType), 113 MetaspaceUtils::used_bytes(Metaspace::ClassType), 114 MetaspaceUtils::reserved_bytes(Metaspace::ClassType)); 115 116 const MetaspaceChunkFreeListSummary& ms_chunk_free_list_summary = 117 MetaspaceUtils::chunk_free_list_summary(Metaspace::NonClassType); 118 const MetaspaceChunkFreeListSummary& class_chunk_free_list_summary = 119 MetaspaceUtils::chunk_free_list_summary(Metaspace::ClassType); 120 121 return MetaspaceSummary(MetaspaceGC::capacity_until_GC(), meta_space, data_space, class_space, 122 ms_chunk_free_list_summary, class_chunk_free_list_summary); 123 } 124 125 void CollectedHeap::run_task_at_safepoint(AbstractGangTask* task, uint num_workers) { 126 assert(SafepointSynchronize::is_at_safepoint(), "Should only be called at a safepoint"); 127 128 WorkGang* gang = get_safepoint_workers(); 129 if (gang == NULL) { 130 // GC doesn't support parallel worker threads. 131 // Execute in this thread with worker id 0. 
void CollectedHeap::print_heap_before_gc() {
  Universe::print_heap_before_gc();
  if (_gc_heap_log != NULL) {
    _gc_heap_log->log_heap_before(this);
  }
}

void CollectedHeap::print_heap_after_gc() {
  Universe::print_heap_after_gc();
  if (_gc_heap_log != NULL) {
    _gc_heap_log->log_heap_after(this);
  }
}

void CollectedHeap::print() const { print_on(tty); }

void CollectedHeap::print_on_error(outputStream* st) const {
  st->print_cr("Heap:");
  print_extended_on(st);
  st->cr();

  BarrierSet* bs = BarrierSet::barrier_set();
  if (bs != NULL) {
    bs->print_on(st);
  }
}

void CollectedHeap::trace_heap(GCWhen::Type when, const GCTracer* gc_tracer) {
  const GCHeapSummary& heap_summary = create_heap_summary();
  gc_tracer->report_gc_heap_summary(when, heap_summary);

  const MetaspaceSummary& metaspace_summary = create_metaspace_summary();
  gc_tracer->report_metaspace_summary(when, metaspace_summary);
}

void CollectedHeap::trace_heap_before_gc(const GCTracer* gc_tracer) {
  trace_heap(GCWhen::BeforeGC, gc_tracer);
}

void CollectedHeap::trace_heap_after_gc(const GCTracer* gc_tracer) {
  trace_heap(GCWhen::AfterGC, gc_tracer);
}

// Default implementation, for collectors that don't support the feature.
bool CollectedHeap::supports_concurrent_gc_breakpoints() const {
  return false;
}

bool CollectedHeap::is_oop(oop object) const {
  if (!is_object_aligned(object)) {
    return false;
  }

  if (!is_in(object)) {
    return false;
  }

  // A valid oop's klass lives in metaspace, never inside the Java heap.
  if (is_in(object->klass_or_null())) {
    return false;
  }

  return true;
}

// Memory state functions.


CollectedHeap::CollectedHeap() :
  _is_gc_active(false),
  _last_whole_heap_examined_time_ns(os::javaTimeNanos()),
  _total_collections(0),
  _total_full_collections(0),
  _gc_cause(GCCause::_no_gc),
  _gc_lastcause(GCCause::_no_gc)
{
  const size_t max_len = size_t(arrayOopDesc::max_array_length(T_INT));
  const size_t elements_per_word = HeapWordSize / sizeof(jint);
  _filler_array_max_size = align_object_size(filler_array_hdr_size() +
                                             max_len / elements_per_word);

  NOT_PRODUCT(_promotion_failure_alot_count = 0;)
  NOT_PRODUCT(_promotion_failure_alot_gc_number = 0;)

  if (UsePerfData) {
    EXCEPTION_MARK;

    // create the gc cause jvmstat counters
    _perf_gc_cause = PerfDataManager::create_string_variable(SUN_GC, "cause",
                             80, GCCause::to_string(_gc_cause), CHECK);

    _perf_gc_lastcause =
                PerfDataManager::create_string_variable(SUN_GC, "lastCause",
                             80, GCCause::to_string(_gc_lastcause), CHECK);
  }

  // Create the ring log
  if (LogEvents) {
    _gc_heap_log = new GCHeapLog();
  } else {
    _gc_heap_log = NULL;
  }
}
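
// Worked example for the filler bound computed in the constructor, assuming
// a 64-bit VM: HeapWordSize == 8 and sizeof(jint) == 4, so elements_per_word
// is 2, and the largest single int[] filler covers
// filler_array_hdr_size() + max_len / 2 HeapWords. Gaps larger than this
// cannot be filled with one array; see fill_with_objects() below for how
// multiple fillers are chained.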
// This interface assumes that it's being called by the
// vm thread. It collects the heap assuming that the
// heap lock is already held and that we are executing in
// the context of the vm thread.
void CollectedHeap::collect_as_vm_thread(GCCause::Cause cause) {
  Thread* thread = Thread::current();
  assert(thread->is_VM_thread(), "Precondition#1");
  assert(Heap_lock->is_locked(), "Precondition#2");
  GCCauseSetter gcs(this, cause);
  switch (cause) {
    case GCCause::_heap_inspection:
    case GCCause::_heap_dump:
    case GCCause::_metadata_GC_threshold : {
      HandleMark hm(thread);
      do_full_collection(false);        // don't clear all soft refs
      break;
    }
    case GCCause::_archive_time_gc:
    case GCCause::_metadata_GC_clear_soft_refs: {
      HandleMark hm(thread);
      do_full_collection(true);         // do clear all soft refs
      break;
    }
    default:
      ShouldNotReachHere(); // Unexpected use of this function
  }
}
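
// Metadata allocation has failed, so the loop below retries under a simple
// protocol: allocate; if the GCLocker is active, expand-and-allocate or
// stall; otherwise schedule a VM_CollectForMetadataAllocation safepoint
// operation and try again. The gc_count/full_gc_count pair read under
// Heap_lock lets the VM operation recognize that another thread already
// completed a GC in the meantime.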
MetaWord* CollectedHeap::satisfy_failed_metadata_allocation(ClassLoaderData* loader_data,
                                                            size_t word_size,
                                                            Metaspace::MetadataType mdtype) {
  uint loop_count = 0;
  uint gc_count = 0;
  uint full_gc_count = 0;

  assert(!Heap_lock->owned_by_self(), "Should not be holding the Heap_lock");

  do {
    MetaWord* result = loader_data->metaspace_non_null()->allocate(word_size, mdtype);
    if (result != NULL) {
      return result;
    }

    if (GCLocker::is_active_and_needs_gc()) {
      // If the GCLocker is active, just expand and allocate.
      // If that does not succeed, wait if this thread is not
      // in a critical section itself.
      result = loader_data->metaspace_non_null()->expand_and_allocate(word_size, mdtype);
      if (result != NULL) {
        return result;
      }
      JavaThread* jthr = JavaThread::current();
      if (!jthr->in_critical()) {
        // Wait for JNI critical section to be exited
        GCLocker::stall_until_clear();
        // The GC invoked by the last thread leaving the critical
        // section will be a young collection and a full collection
        // is (currently) needed for unloading classes so continue
        // to the next iteration to get a full GC.
        continue;
      } else {
        if (CheckJNICalls) {
          fatal("Possible deadlock due to allocating while"
                " in jni critical section");
        }
        return NULL;
      }
    }

    {  // Need lock to get self-consistent gc_counts
      MutexLocker ml(Heap_lock);
      gc_count      = Universe::heap()->total_collections();
      full_gc_count = Universe::heap()->total_full_collections();
    }

    // Generate a VM operation
    VM_CollectForMetadataAllocation op(loader_data,
                                       word_size,
                                       mdtype,
                                       gc_count,
                                       full_gc_count,
                                       GCCause::_metadata_GC_threshold);
    VMThread::execute(&op);

    // If GC was locked out, try again. Check before checking success because the
    // prologue could have succeeded and the GC still have been locked out.
    if (op.gc_locked()) {
      continue;
    }

    if (op.prologue_succeeded()) {
      return op.result();
    }
    loop_count++;
    if ((QueuedAllocationWarningCount > 0) &&
        (loop_count % QueuedAllocationWarningCount == 0)) {
      log_warning(gc, ergo)("satisfy_failed_metadata_allocation() retries %d times,"
                            " size=" SIZE_FORMAT, loop_count, word_size);
    }
  } while (true);  // Until a GC is done
}

MemoryUsage CollectedHeap::memory_usage() {
  return MemoryUsage(InitialHeapSize, used(), capacity(), max_capacity());
}


#ifndef PRODUCT
void CollectedHeap::check_for_non_bad_heap_word_value(HeapWord* addr, size_t size) {
  if (CheckMemoryInitialization && ZapUnusedHeapArea) {
    // Note the mismatch between size (in 32/64-bit words) and ju_addr, which
    // always points to a 32-bit word.
    for (juint* ju_addr = reinterpret_cast<juint*>(addr); ju_addr < reinterpret_cast<juint*>(addr + size); ++ju_addr) {
      assert(*ju_addr == badHeapWordVal, "Found non-badHeapWordValue in pre-allocation check");
    }
  }
}
#endif // PRODUCT

size_t CollectedHeap::max_tlab_size() const {
  // TLABs can't be bigger than we can fill with an int[Integer.MAX_VALUE].
  // This restriction could be removed by enabling filling with multiple arrays.
  // If we compute that the reasonable way as
  //   header_size + ((sizeof(jint) * max_jint) / HeapWordSize)
  // we'll overflow on the multiply, so we do the divide first.
  // We actually lose a little by dividing first,
  // but that just makes the TLAB somewhat smaller than the biggest array,
  // which is fine, since we'll be able to fill that.
  size_t max_int_size = typeArrayOopDesc::header_size(T_INT) +
              sizeof(jint) *
              ((juint) max_jint / (size_t) HeapWordSize);
  return align_down(max_int_size, MinObjAlignment);
}

size_t CollectedHeap::filler_array_hdr_size() {
  return align_object_offset(arrayOopDesc::header_size(T_INT)); // align to Long
}

size_t CollectedHeap::filler_array_min_size() {
  return align_object_size(filler_array_hdr_size()); // align to MinObjAlignment
}

#ifdef ASSERT
void CollectedHeap::fill_args_check(HeapWord* start, size_t words)
{
  assert(words >= min_fill_size(), "too small to fill");
  assert(is_object_aligned(words), "unaligned size");
}

void CollectedHeap::zap_filler_array(HeapWord* start, size_t words, bool zap)
{
  if (ZapFillerObjects && zap) {
    Copy::fill_to_words(start + filler_array_hdr_size(),
                        words - filler_array_hdr_size(), 0XDEAFBABE);
  }
}
#endif // ASSERT

void
CollectedHeap::fill_with_array(HeapWord* start, size_t words, bool zap)
{
  assert(words >= filler_array_min_size(), "too small for an array");
  assert(words <= filler_array_max_size(), "too big for a single object");

  const size_t payload_size = words - filler_array_hdr_size();
  const size_t len = payload_size * HeapWordSize / sizeof(jint);
  assert((int)len >= 0, "size too large " SIZE_FORMAT " becomes %d", words, (int)len);

  ObjArrayAllocator allocator(Universe::intArrayKlassObj(), words, (int)len, /* do_zero */ false);
  allocator.initialize(start);
  DEBUG_ONLY(zap_filler_array(start, words, zap);)
}
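
// A worked instance of the length arithmetic in fill_with_array() above,
// assuming a 64-bit VM with compressed class pointers (int-array header of
// 16 bytes, i.e. 2 HeapWords): filling words == 100 gives
// payload_size == 98 HeapWords and len == 98 * 8 / 4 == 196 jint elements,
// since two jints fit in each HeapWord.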
void
CollectedHeap::fill_with_object_impl(HeapWord* start, size_t words, bool zap)
{
  assert(words <= filler_array_max_size(), "too big for a single object");

  if (words >= filler_array_min_size()) {
    fill_with_array(start, words, zap);
  } else if (words > 0) {
    assert(words == min_fill_size(), "unaligned size");
    ObjAllocator allocator(SystemDictionary::Object_klass(), words);
    allocator.initialize(start);
  }
}

void CollectedHeap::fill_with_object(HeapWord* start, size_t words, bool zap)
{
  DEBUG_ONLY(fill_args_check(start, words);)
  HandleMark hm(Thread::current());  // Free handles before leaving.
  fill_with_object_impl(start, words, zap);
}

void CollectedHeap::fill_with_objects(HeapWord* start, size_t words, bool zap)
{
  DEBUG_ONLY(fill_args_check(start, words);)
  HandleMark hm(Thread::current());  // Free handles before leaving.

  // Multiple objects may be required depending on the filler array maximum size. Fill
  // the range up to that with objects that are filler_array_max_size sized. The
  // remainder is filled with a single object.
  const size_t min = min_fill_size();
  const size_t max = filler_array_max_size();
  while (words > max) {
    // Taking max here could leave a remainder smaller than min, which could
    // not be filled; in that case take max - min so the remainder stays >= min.
    const size_t cur = (words - max) >= min ? max : max - min;
    fill_with_array(start, cur, zap);
    start += cur;
    words -= cur;
  }

  fill_with_object_impl(start, words, zap);
}

void CollectedHeap::fill_with_dummy_object(HeapWord* start, HeapWord* end, bool zap) {
  CollectedHeap::fill_with_object(start, end, zap);
}

size_t CollectedHeap::min_dummy_object_size() const {
  return oopDesc::header_size();
}

size_t CollectedHeap::tlab_alloc_reserve() const {
  size_t min_size = min_dummy_object_size();
  return min_size > (size_t)MinObjAlignment ? align_object_size(min_size) : 0;
}

HeapWord* CollectedHeap::allocate_new_tlab(size_t min_size,
                                           size_t requested_size,
                                           size_t* actual_size) {
  guarantee(false, "thread-local allocation buffers not supported");
  return NULL;
}

void CollectedHeap::ensure_parsability(bool retire_tlabs) {
  assert(SafepointSynchronize::is_at_safepoint() || !is_init_completed(),
         "Should only be called at a safepoint or at start-up");

  ThreadLocalAllocStats stats;

  for (JavaThreadIteratorWithHandle jtiwh; JavaThread *thread = jtiwh.next();) {
    BarrierSet::barrier_set()->make_parsable(thread);
    if (UseTLAB) {
      if (retire_tlabs) {
        thread->tlab().retire(&stats);
      } else {
        thread->tlab().make_parsable();
      }
    }
  }

  stats.publish();
}

void CollectedHeap::resize_all_tlabs() {
  assert(SafepointSynchronize::is_at_safepoint() || !is_init_completed(),
         "Should only resize tlabs at safepoint");

  if (UseTLAB && ResizeTLAB) {
    for (JavaThreadIteratorWithHandle jtiwh; JavaThread *thread = jtiwh.next(); ) {
      thread->tlab().resize();
    }
  }
}

jlong CollectedHeap::millis_since_last_whole_heap_examined() {
  return (os::javaTimeNanos() - _last_whole_heap_examined_time_ns) / NANOSECS_PER_MILLISEC;
}

void CollectedHeap::record_whole_heap_examined_timestamp() {
  _last_whole_heap_examined_time_ns = os::javaTimeNanos();
}
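
// Shared helper behind the HeapDumpBeforeFullGC/HeapDumpAfterFullGC heap
// dumps and the gc+classhisto class histogram; invoked from the
// pre_full_gc_dump()/post_full_gc_dump() wrappers below with before set to
// true and false respectively.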
"Heap Dump (before full gc)" : "Heap Dump (after full gc)", timer); 517 HeapDumper::dump_heap(); 518 } 519 520 LogTarget(Trace, gc, classhisto) lt; 521 if (lt.is_enabled()) { 522 GCTraceTime(Trace, gc, classhisto) tm(before ? "Class Histogram (before full gc)" : "Class Histogram (after full gc)", timer); 523 ResourceMark rm; 524 LogStream ls(lt); 525 VM_GC_HeapInspection inspector(&ls, false /* ! full gc */); 526 inspector.doit(); 527 } 528 } 529 530 void CollectedHeap::pre_full_gc_dump(GCTimer* timer) { 531 full_gc_dump(timer, true); 532 } 533 534 void CollectedHeap::post_full_gc_dump(GCTimer* timer) { 535 full_gc_dump(timer, false); 536 } 537 538 void CollectedHeap::initialize_reserved_region(const ReservedHeapSpace& rs) { 539 // It is important to do this in a way such that concurrent readers can't 540 // temporarily think something is in the heap. (Seen this happen in asserts.) 541 _reserved.set_word_size(0); 542 _reserved.set_start((HeapWord*)rs.base()); 543 _reserved.set_end((HeapWord*)rs.end()); 544 } 545 546 void CollectedHeap::post_initialize() { 547 initialize_serviceability(); 548 } 549 550 #ifndef PRODUCT 551 552 bool CollectedHeap::promotion_should_fail(volatile size_t* count) { 553 // Access to count is not atomic; the value does not have to be exact. 554 if (PromotionFailureALot) { 555 const size_t gc_num = total_collections(); 556 const size_t elapsed_gcs = gc_num - _promotion_failure_alot_gc_number; 557 if (elapsed_gcs >= PromotionFailureALotInterval) { 558 // Test for unsigned arithmetic wrap-around. 559 if (++*count >= PromotionFailureALotCount) { 560 *count = 0; 561 return true; 562 } 563 } 564 } 565 return false; 566 } 567 568 bool CollectedHeap::promotion_should_fail() { 569 return promotion_should_fail(&_promotion_failure_alot_count); 570 } 571 572 void CollectedHeap::reset_promotion_should_fail(volatile size_t* count) { 573 if (PromotionFailureALot) { 574 _promotion_failure_alot_gc_number = total_collections(); 575 *count = 0; 576 } 577 } 578 579 void CollectedHeap::reset_promotion_should_fail() { 580 reset_promotion_should_fail(&_promotion_failure_alot_count); 581 } 582 583 #endif // #ifndef PRODUCT 584 585 bool CollectedHeap::supports_object_pinning() const { 586 return false; 587 } 588 589 oop CollectedHeap::pin_object(JavaThread* thread, oop obj) { 590 ShouldNotReachHere(); 591 return NULL; 592 } 593 594 void CollectedHeap::unpin_object(JavaThread* thread, oop obj) { 595 ShouldNotReachHere(); 596 } 597 598 void CollectedHeap::deduplicate_string(oop str) { 599 // Do nothing, unless overridden in subclass. 600 } 601 602 uint32_t CollectedHeap::hash_oop(oop obj) const { 603 const uintptr_t addr = cast_from_oop<uintptr_t>(obj); 604 return static_cast<uint32_t>(addr >> LogMinObjAlignment); 605 }