/*
 * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "code/codeBlob.hpp"
#include "code/codeCache.hpp"
#include "code/compiledIC.hpp"
#include "code/dependencies.hpp"
#include "code/icBuffer.hpp"
#include "code/nmethod.hpp"
#include "code/pcDesc.hpp"
#include "compiler/compileBroker.hpp"
#include "gc_implementation/shared/markSweep.hpp"
#include "memory/allocation.inline.hpp"
#include "memory/gcLocker.hpp"
#include "memory/iterator.hpp"
#include "memory/resourceArea.hpp"
#include "oops/method.hpp"
#include "oops/objArrayOop.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/arguments.hpp"
#include "runtime/icache.hpp"
#include "runtime/java.hpp"
#include "runtime/mutexLocker.hpp"
#include "services/memoryService.hpp"
#include "trace/tracing.hpp"
#include "utilities/xmlstream.hpp"


// Helper class for printing in CodeCache
class CodeBlob_sizes {
 private:
  int count;
  int total_size;
  int header_size;
  int code_size;
  int stub_size;
  int relocation_size;
  int scopes_oop_size;
  int scopes_metadata_size;
  int scopes_data_size;
  int scopes_pcs_size;

 public:
  CodeBlob_sizes() {
    count                = 0;
    total_size           = 0;
    header_size          = 0;
    code_size            = 0;
    stub_size            = 0;
    relocation_size      = 0;
    scopes_oop_size      = 0;
    scopes_metadata_size = 0;
    scopes_data_size     = 0;
    scopes_pcs_size      = 0;
  }

  int total()     { return total_size; }
  bool is_empty() { return count == 0; }

  void print(const char* title) {
    // Note: one %d%% per size component; 'metadata' was missing from the
    // format string and is required to match the argument list below.
    tty->print_cr(" #%d %s = %dK (hdr %d%%, loc %d%%, code %d%%, stub %d%%, [oops %d%%, metadata %d%%, data %d%%, pcs %d%%])",
                  count,
                  title,
                  total() / K,
                  header_size          * 100 / total_size,
                  relocation_size      * 100 / total_size,
                  code_size            * 100 / total_size,
                  stub_size            * 100 / total_size,
                  scopes_oop_size      * 100 / total_size,
                  scopes_metadata_size * 100 / total_size,
                  scopes_data_size     * 100 / total_size,
                  scopes_pcs_size      * 100 / total_size);
  }

  void add(CodeBlob* cb) {
    count++;
    total_size      += cb->size();
    header_size     += cb->header_size();
    relocation_size += cb->relocation_size();
    if (cb->is_nmethod()) {
      nmethod* nm = cb->as_nmethod_or_null();
      code_size += nm->insts_size();
      stub_size += nm->stub_size();

      scopes_oop_size      += nm->oops_size();
      scopes_metadata_size += nm->metadata_size();
      scopes_data_size     += nm->scopes_data_size();
      scopes_pcs_size      += nm->scopes_pcs_size();
    } else {
      code_size += cb->code_size();
    }
  }
};
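
// Illustrative use of CodeBlob_sizes (a sketch only; the real call site is
// CodeCache::print() near the end of this file, and the FOR_ALL_HEAPS /
// FOR_ALL_BLOBS helpers are defined below):
//
//   CodeBlob_sizes live;
//   FOR_ALL_HEAPS(it) {
//     FOR_ALL_BLOBS(cb, *it) {
//       if (cb->is_alive()) live.add(cb);
//     }
//   }
//   if (!live.is_empty()) live.print("live");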

// Iterate over all CodeHeaps
#define FOR_ALL_HEAPS(it) for (GrowableArrayIterator<CodeHeap*> it = _heaps->begin(); it != _heaps->end(); ++it)
// Iterate over all CodeHeaps containing nmethods
#define FOR_ALL_METHOD_HEAPS(it) for (GrowableArrayFilterIterator<CodeHeap*, IsMethodPredicate> it(_heaps->begin(), IsMethodPredicate()); it != _heaps->end(); ++it)
// Iterate over all CodeBlobs (cb) on the given CodeHeap
#define FOR_ALL_BLOBS(cb, heap) for (CodeBlob* cb = first_blob(heap); cb != NULL; cb = next_blob(heap, cb))
// Iterate over all alive CodeBlobs (cb) on the given CodeHeap
#define FOR_ALL_ALIVE_BLOBS(cb, heap) for (CodeBlob* cb = first_alive_blob(heap); cb != NULL; cb = next_alive_blob(heap, cb))

address CodeCache::_low_bound = 0;
address CodeCache::_high_bound = 0;
int CodeCache::_number_of_blobs = 0;
int CodeCache::_number_of_adapters = 0;
int CodeCache::_number_of_nmethods = 0;
int CodeCache::_number_of_nmethods_with_dependencies = 0;
bool CodeCache::_needs_cache_clean = false;
nmethod* CodeCache::_scavenge_root_nmethods = NULL;
int CodeCache::_codemem_full_count = 0;

// Initialize array of CodeHeaps
GrowableArray<CodeHeap*>* CodeCache::_heaps = new(ResourceObj::C_HEAP, mtCode) GrowableArray<CodeHeap*>(3, true);

void CodeCache::initialize_heaps() {
  // Check if custom ReservedCodeCacheSize is set and adapt CodeHeap sizes accordingly
  if (!FLAG_IS_DEFAULT(ReservedCodeCacheSize) && FLAG_IS_DEFAULT(NonMethodCodeHeapSize)
      && FLAG_IS_DEFAULT(ProfiledCodeHeapSize) && FLAG_IS_DEFAULT(NonProfiledCodeHeapSize)) {
    if (ReservedCodeCacheSize > NonMethodCodeHeapSize) {
      // Use the default value for NonMethodCodeHeapSize and use 2/3 of the
      // remaining size for non-profiled methods and 1/3 for profiled methods
      size_t remaining_size = ReservedCodeCacheSize - NonMethodCodeHeapSize;
      FLAG_SET_DEFAULT(ProfiledCodeHeapSize, remaining_size * (double)1/3);
      FLAG_SET_DEFAULT(NonProfiledCodeHeapSize, remaining_size * (double)2/3);
    } else {
      // Use all space for the non-method heap and set other heaps to minimal size
      FLAG_SET_DEFAULT(NonMethodCodeHeapSize, ReservedCodeCacheSize - os::vm_page_size() * 2);
      FLAG_SET_DEFAULT(ProfiledCodeHeapSize, os::vm_page_size());
      FLAG_SET_DEFAULT(NonProfiledCodeHeapSize, os::vm_page_size());
    }
  }

  // We do not need the profiled CodeHeap, use all space for the non-profiled CodeHeap
  if (!heap_available(CodeBlobType::MethodProfiled)) {
    FLAG_SET_DEFAULT(NonProfiledCodeHeapSize, NonProfiledCodeHeapSize + ProfiledCodeHeapSize);
    FLAG_SET_DEFAULT(ProfiledCodeHeapSize, 0);
  }

  // Size check
  guarantee(NonProfiledCodeHeapSize + ProfiledCodeHeapSize + NonMethodCodeHeapSize <= ReservedCodeCacheSize, "Size check");

  // Align reserved sizes of CodeHeaps
  size_t non_method_size   = ReservedCodeSpace::allocation_align_size_up(NonMethodCodeHeapSize);
  size_t profiled_size     = ReservedCodeSpace::allocation_align_size_up(ProfiledCodeHeapSize);
  size_t non_profiled_size = ReservedCodeSpace::allocation_align_size_up(NonProfiledCodeHeapSize);

  // Compute initial sizes of CodeHeaps
  size_t init_non_method_size   = MIN2(InitialCodeCacheSize, non_method_size);
  size_t init_profiled_size     = MIN2(InitialCodeCacheSize, profiled_size);
  size_t init_non_profiled_size = MIN2(InitialCodeCacheSize, non_profiled_size);

  // Reserve one contiguous chunk of memory for the CodeHeaps and split it into
  // parts for the individual heaps. The memory layout looks like this:
  // ---------- high -----------
  //    Non-profiled nmethods
  //      Profiled nmethods
  //         Non-methods
  // ---------- low ------------
  ReservedCodeSpace rs = reserve_heap_memory(non_profiled_size + profiled_size + non_method_size);
  ReservedSpace non_method_space   = rs.first_part(non_method_size);
  ReservedSpace rest               = rs.last_part(non_method_size);
  ReservedSpace profiled_space     = rest.first_part(profiled_size);
  ReservedSpace non_profiled_space = rest.last_part(profiled_size);

  // Non-methods (stubs, adapters, ...)
  add_heap(non_method_space, "Non-methods", init_non_method_size, CodeBlobType::NonMethod);
  // Tier 2 and tier 3 (profiled) methods
  add_heap(profiled_space, "Profiled nmethods", init_profiled_size, CodeBlobType::MethodProfiled);
  // Tier 1 and tier 4 (non-profiled) methods and native methods
  add_heap(non_profiled_space, "Non-profiled nmethods", init_non_profiled_size, CodeBlobType::MethodNonProfiled);
}
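
// Illustrative arithmetic for the 2/3 : 1/3 split above (example numbers only,
// not the actual flag defaults): with -XX:ReservedCodeCacheSize=240M and a
// hypothetical 24M non-method heap, remaining_size = 216M, giving
//   ProfiledCodeHeapSize    = 216M * 1/3 =  72M
//   NonProfiledCodeHeapSize = 216M * 2/3 = 144M
// and 24M + 72M + 144M == 240M, which satisfies the guarantee() size check.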

ReservedCodeSpace CodeCache::reserve_heap_memory(size_t size) {
  // Determine alignment
  const size_t page_size = os::can_execute_large_page_memory() ?
          os::page_size_for_region(InitialCodeCacheSize, size, 8) :
          os::vm_page_size();
  const size_t granularity = os::vm_allocation_granularity();
  const size_t r_align = MAX2(page_size, granularity);
  const size_t r_size = align_size_up(size, r_align);
  const size_t rs_align = page_size == (size_t) os::vm_page_size() ? 0 :
    MAX2(page_size, granularity);

  ReservedCodeSpace rs(r_size, rs_align, rs_align > 0);

  // Initialize bounds
  _low_bound = (address)rs.base();
  _high_bound = _low_bound + rs.size();
  guarantee(low_bound() < high_bound(), "Bound check");

  return rs;
}

bool CodeCache::heap_available(int code_blob_type) {
  if (TieredCompilation || code_blob_type == CodeBlobType::NonMethod) {
    // Use all heaps for TieredCompilation
    return true;
  } else {
    // Without TieredCompilation we only need the non-profiled heap
    return (code_blob_type == CodeBlobType::MethodNonProfiled);
  }
}

void CodeCache::add_heap(ReservedSpace rs, const char* name, size_t size_initial, int code_blob_type) {
  // Check if heap is needed
  if (!heap_available(code_blob_type)) {
    return;
  }

  // Create CodeHeap
  CodeHeap* heap = new CodeHeap(name, code_blob_type);
  _heaps->append(heap);

  // Reserve space
  size_initial = round_to(size_initial, os::vm_page_size());
  if (!heap->reserve(rs, size_initial, CodeCacheSegmentSize)) {
    vm_exit_during_initialization("Could not reserve enough space for code cache");
  }

  // Register the CodeHeap
  MemoryService::add_code_heap_memory_pool(heap, name);
}

CodeHeap* CodeCache::get_code_heap(int code_blob_type) {
  FOR_ALL_HEAPS(it) {
    if ((*it)->accepts(code_blob_type)) {
      return (*it);
    }
  }
  return NULL;
}

CodeBlob* CodeCache::first_blob(CodeHeap* heap) {
  assert_locked_or_safepoint(CodeCache_lock);
  if (heap != NULL) {
    return (CodeBlob*)heap->first();
  }
  return NULL;
}

CodeBlob* CodeCache::next_blob(CodeHeap* heap, CodeBlob* cb) {
  assert_locked_or_safepoint(CodeCache_lock);
  if (heap != NULL) {
    return (CodeBlob*)heap->next(cb);
  }
  return NULL;
}
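
// Typical shape of a CodeCache walk using the helpers above (a sketch; the
// CodeCache_lock must be held, or the VM must be at a safepoint):
//
//   FOR_ALL_HEAPS(it) {            // every CodeHeap
//     FOR_ALL_BLOBS(cb, *it) {     // every CodeBlob in that heap
//       ...
//     }
//   }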

CodeBlob* CodeCache::first_alive_blob(CodeHeap* heap) {
  assert_locked_or_safepoint(CodeCache_lock);
  CodeBlob* cb = first_blob(heap);
  while (cb != NULL && !cb->is_alive()) {
    cb = next_blob(heap, cb);
  }
  return cb;
}

CodeBlob* CodeCache::next_alive_blob(CodeHeap* heap, CodeBlob* cb) {
  assert_locked_or_safepoint(CodeCache_lock);
  cb = next_blob(heap, cb);
  while (cb != NULL && !cb->is_alive()) {
    cb = next_blob(heap, cb);
  }
  return cb;
}

CodeBlob* CodeCache::allocate(int size, int code_blob_type, bool is_critical) {
  // Do not seize the CodeCache lock here--if the caller has not
  // already done so, we are going to lose bigtime, since the code
  // cache will contain a garbage CodeBlob until the caller can
  // run the constructor for the CodeBlob subclass it is busy
  // instantiating.
  guarantee(size >= 0, "allocation request must be reasonable");
  assert_locked_or_safepoint(CodeCache_lock);
  CodeBlob* cb = NULL;
  _number_of_blobs++;

  // Get CodeHeap for the given CodeBlobType
  CodeHeap* heap = get_code_heap(code_blob_type);
  assert(heap != NULL, "Heap exists");

  while (true) {
    cb = (CodeBlob*)heap->allocate(size, is_critical);
    if (cb != NULL) break;
    if (!heap->expand_by(CodeCacheExpansionSize)) {
      // Expansion failed
      return NULL;
    }
    if (PrintCodeCacheExtension) {
      ResourceMark rm;
      tty->print_cr("CodeHeap '%s' extended to [" INTPTR_FORMAT ", " INTPTR_FORMAT "] (%d bytes)",
                    heap->name(), (intptr_t)heap->low_boundary(), (intptr_t)heap->high(),
                    (address)heap->high() - (address)heap->low_boundary());
    }
  }

  verify_if_often();
  print_trace("allocation", cb, size);

  return cb;
}

void CodeCache::free(CodeBlob* cb, int code_blob_type) {
  assert_locked_or_safepoint(CodeCache_lock);
  verify_if_often();

  print_trace("free", cb);
  if (cb->is_nmethod()) {
    _number_of_nmethods--;
    if (((nmethod*)cb)->has_dependencies()) {
      _number_of_nmethods_with_dependencies--;
    }
  }
  if (cb->is_adapter_blob()) {
    _number_of_adapters--;
  }
  _number_of_blobs--;

  // Get heap for given CodeBlobType and deallocate
  get_code_heap(code_blob_type)->deallocate(cb);

  verify_if_often();
  assert(_number_of_blobs >= 0, "sanity check");
}

void CodeCache::commit(CodeBlob* cb) {
  // this is called by nmethod::nmethod(), which must already own the CodeCache_lock
  assert_locked_or_safepoint(CodeCache_lock);
  if (cb->is_nmethod()) {
    _number_of_nmethods++;
    if (((nmethod*)cb)->has_dependencies()) {
      _number_of_nmethods_with_dependencies++;
    }
  }
  if (cb->is_adapter_blob()) {
    _number_of_adapters++;
  }

  // flush the hardware I-cache
  ICache::invalidate_range(cb->content_begin(), cb->content_size());
}

bool CodeCache::contains(void *p) {
  // It should be ok to call contains without holding a lock
  FOR_ALL_HEAPS(it) {
    if ((*it)->contains(p)) {
      return true;
    }
  }
  return false;
}
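
// Illustrative caller pattern for allocate()/commit() (a sketch only; the real
// callers are the CodeBlob subclass allocation paths and the nmethod factories):
//
//   MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
//   CodeBlob* cb = CodeCache::allocate(size, CodeBlobType::NonMethod, false);
//   if (cb != NULL) {
//     // ... construct the CodeBlob subclass in-place at cb ...
//     CodeCache::commit(cb);   // updates counters, flushes the I-cache
//   }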

// This method is safe to call without holding the CodeCache_lock, as long as a dead CodeBlob is not
// looked up (i.e., one that has been marked for deletion). It only depends on the _segmap to contain
// valid indices, which it will always do, as long as the CodeBlob is not in the process of being recycled.
CodeBlob* CodeCache::find_blob(void* start) {
  CodeBlob* result = find_blob_unsafe(start);
  // We could potentially look up non_entrant methods
  guarantee(result == NULL || !result->is_zombie() || result->is_locked_by_vm() || is_error_reported(), "unsafe access to zombie method");
  return result;
}

// Lookup that does not fail if you lookup a zombie method (if you call this, be sure to know
// what you are doing)
CodeBlob* CodeCache::find_blob_unsafe(void* start) {
  // NMT can walk the stack before code cache is created
  if (_heaps->first() == NULL) return NULL;

  FOR_ALL_HEAPS(it) {
    CodeBlob* result = (CodeBlob*) (*it)->find_start(start);
    if (result != NULL && result->blob_contains((address)start)) {
      return result;
    }
  }
  return NULL;
}

nmethod* CodeCache::find_nmethod(void* start) {
  CodeBlob* cb = find_blob(start);
  assert(cb->is_nmethod(), "did not find an nmethod");
  return (nmethod*)cb;
}

bool CodeCache::contains_nmethod(nmethod* nm) {
  FOR_ALL_METHOD_HEAPS(it) {
    if ((*it)->contains(nm)) {
      return true;
    }
  }
  return false;
}

void CodeCache::blobs_do(void f(CodeBlob* nm)) {
  assert_locked_or_safepoint(CodeCache_lock);
  FOR_ALL_HEAPS(it) {
    FOR_ALL_BLOBS(cb, *it) {
      f(cb);
    }
  }
}

void CodeCache::nmethods_do(void f(nmethod* nm)) {
  assert_locked_or_safepoint(CodeCache_lock);
  FOR_ALL_METHOD_HEAPS(it) {
    FOR_ALL_BLOBS(cb, *it) {
      f((nmethod*)cb);
    }
  }
}

void CodeCache::alive_nmethods_do(void f(nmethod* nm)) {
  assert_locked_or_safepoint(CodeCache_lock);
  FOR_ALL_METHOD_HEAPS(it) {
    FOR_ALL_ALIVE_BLOBS(cb, *it) {
      f((nmethod*)cb);
    }
  }
}

int CodeCache::alignment_unit() {
  return (int)_heaps->first()->alignment_unit();
}

int CodeCache::alignment_offset() {
  return (int)_heaps->first()->alignment_offset();
}

// Mark nmethods for unloading if they contain otherwise unreachable oops.
void CodeCache::do_unloading(BoolObjectClosure* is_alive, bool unloading_occurred) {
  assert_locked_or_safepoint(CodeCache_lock);
  FOR_ALL_METHOD_HEAPS(it) {
    FOR_ALL_ALIVE_BLOBS(cb, *it) {
      nmethod* nm = (nmethod*)cb;
      nm->do_unloading(is_alive, unloading_occurred);
    }
  }
}

void CodeCache::blobs_do(CodeBlobClosure* f) {
  assert_locked_or_safepoint(CodeCache_lock);
  FOR_ALL_HEAPS(it) {
    FOR_ALL_BLOBS(cb, *it) {
      if (cb->is_alive()) {
        f->do_code_blob(cb);

#ifdef ASSERT
        if (cb->is_nmethod()) {
          ((nmethod*)cb)->verify_scavenge_root_oops();
        }
#endif //ASSERT
      }
    }
  }
}

// Walk the list of methods which might contain non-perm oops.
void CodeCache::scavenge_root_nmethods_do(CodeBlobClosure* f) {
  assert_locked_or_safepoint(CodeCache_lock);
  debug_only(mark_scavenge_root_nmethods());

  for (nmethod* cur = scavenge_root_nmethods(); cur != NULL; cur = cur->scavenge_root_link()) {
    debug_only(cur->clear_scavenge_root_marked());
    assert(cur->scavenge_root_not_marked(), "");
    assert(cur->on_scavenge_root_list(), "else shouldn't be on this list");

    bool is_live = (!cur->is_zombie() && !cur->is_unloaded());
#ifndef PRODUCT
    if (TraceScavenge) {
      cur->print_on(tty, is_live ? "scavenge root" : "dead scavenge root"); tty->cr();
    }
#endif //PRODUCT
    if (is_live) {
      // Perform cur->oops_do(f), maybe just once per nmethod.
      f->do_code_blob(cur);
    }
  }

  // Check for stray marks.
  debug_only(verify_perm_nmethods(NULL));
}
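
// The functions below maintain the scavenge-root list: an intrusive, singly
// linked list threaded through nmethod::scavenge_root_link() with
// _scavenge_root_nmethods as its head (descriptive only, no new logic):
//
//   add:   push at the head in O(1)
//   drop:  walk with a trailing 'last' pointer; unlink interior nodes via
//          last->set_scavenge_root_link(next), or set_scavenge_root_nmethods(next)
//          when removing the head
//   prune: same walk, dropping every entry that no longer has scavengable oops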

void CodeCache::add_scavenge_root_nmethod(nmethod* nm) {
  assert_locked_or_safepoint(CodeCache_lock);
  nm->set_on_scavenge_root_list();
  nm->set_scavenge_root_link(_scavenge_root_nmethods);
  set_scavenge_root_nmethods(nm);
  print_trace("add_scavenge_root", nm);
}

void CodeCache::drop_scavenge_root_nmethod(nmethod* nm) {
  assert_locked_or_safepoint(CodeCache_lock);
  print_trace("drop_scavenge_root", nm);
  nmethod* last = NULL;
  nmethod* cur = scavenge_root_nmethods();
  while (cur != NULL) {
    nmethod* next = cur->scavenge_root_link();
    if (cur == nm) {
      if (last != NULL) {
        last->set_scavenge_root_link(next);
      } else {
        set_scavenge_root_nmethods(next);
      }
      nm->set_scavenge_root_link(NULL);
      nm->clear_on_scavenge_root_list();
      return;
    }
    last = cur;
    cur = next;
  }
  assert(false, "should have been on list");
}

void CodeCache::prune_scavenge_root_nmethods() {
  assert_locked_or_safepoint(CodeCache_lock);
  debug_only(mark_scavenge_root_nmethods());

  nmethod* last = NULL;
  nmethod* cur = scavenge_root_nmethods();
  while (cur != NULL) {
    nmethod* next = cur->scavenge_root_link();
    debug_only(cur->clear_scavenge_root_marked());
    assert(cur->scavenge_root_not_marked(), "");
    assert(cur->on_scavenge_root_list(), "else shouldn't be on this list");

    if (!cur->is_zombie() && !cur->is_unloaded()
        && cur->detect_scavenge_root_oops()) {
      // Keep it. Advance 'last' to prevent deletion.
      last = cur;
    } else {
      // Prune it from the list, so we don't have to look at it any more.
      print_trace("prune_scavenge_root", cur);
      cur->set_scavenge_root_link(NULL);
      cur->clear_on_scavenge_root_list();
      if (last != NULL) {
        last->set_scavenge_root_link(next);
      } else {
        set_scavenge_root_nmethods(next);
      }
    }
    cur = next;
  }

  // Check for stray marks.
  debug_only(verify_perm_nmethods(NULL));
}

#ifndef PRODUCT
void CodeCache::asserted_non_scavengable_nmethods_do(CodeBlobClosure* f) {
  // While we are here, verify the integrity of the list.
  mark_scavenge_root_nmethods();
  for (nmethod* cur = scavenge_root_nmethods(); cur != NULL; cur = cur->scavenge_root_link()) {
    assert(cur->on_scavenge_root_list(), "else shouldn't be on this list");
    cur->clear_scavenge_root_marked();
  }
  verify_perm_nmethods(f);
}

// Temporarily mark nmethods that are claimed to be on the non-perm list.
void CodeCache::mark_scavenge_root_nmethods() {
  FOR_ALL_METHOD_HEAPS(it) {
    FOR_ALL_ALIVE_BLOBS(cb, *it) {
      nmethod* nm = (nmethod*)cb;
      assert(nm->scavenge_root_not_marked(), "clean state");
      if (nm->on_scavenge_root_list()) {
        nm->set_scavenge_root_marked();
      }
    }
  }
}
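
// Debug-only handshake (descriptive only): mark_scavenge_root_nmethods() above
// tags every nmethod that claims list membership; the list walks then clear the
// tags, and verify_perm_nmethods() below asserts that no tag survived. A stray
// mark therefore means an nmethod claims to be on the list but is not reachable
// from _scavenge_root_nmethods.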

// If the closure is given, run it on the unlisted nmethods.
// Also make sure that the effects of mark_scavenge_root_nmethods is gone.
void CodeCache::verify_perm_nmethods(CodeBlobClosure* f_or_null) {
  FOR_ALL_METHOD_HEAPS(it) {
    FOR_ALL_ALIVE_BLOBS(cb, *it) {
      nmethod* nm = (nmethod*)cb;
      bool call_f = (f_or_null != NULL);
      assert(nm->scavenge_root_not_marked(), "must be already processed");
      if (nm->on_scavenge_root_list()) {
        call_f = false;  // don't show this one to the client
      }
      nm->verify_scavenge_root_oops();
      if (call_f) f_or_null->do_code_blob(nm);
    }
  }
}
#endif //PRODUCT

void CodeCache::gc_prologue() {
  assert(!nmethod::oops_do_marking_is_active(), "oops_do_marking_epilogue must be called");
}

void CodeCache::gc_epilogue() {
  assert_locked_or_safepoint(CodeCache_lock);
  FOR_ALL_METHOD_HEAPS(it) {
    FOR_ALL_ALIVE_BLOBS(cb, *it) {
      nmethod* nm = (nmethod*)cb;
      assert(!nm->is_unloaded(), "Tautology");
      if (needs_cache_clean()) {
        nm->cleanup_inline_caches();
      }
      DEBUG_ONLY(nm->verify());
      nm->fix_oop_relocations();
    }
  }
  set_needs_cache_clean(false);
  prune_scavenge_root_nmethods();
  assert(!nmethod::oops_do_marking_is_active(), "oops_do_marking_prologue must be called");

#ifdef ASSERT
  // make sure that we aren't leaking icholders
  int count = 0;
  FOR_ALL_METHOD_HEAPS(it) {
    FOR_ALL_BLOBS(cb, *it) {
      RelocIterator iter((nmethod*)cb);
      while (iter.next()) {
        if (iter.type() == relocInfo::virtual_call_type) {
          if (CompiledIC::is_icholder_call_site(iter.virtual_call_reloc())) {
            CompiledIC *ic = CompiledIC_at(iter.reloc());
            if (TraceCompiledIC) {
              tty->print("noticed icholder " INTPTR_FORMAT " ", ic->cached_icholder());
              ic->print();
            }
            assert(ic->cached_icholder() != NULL, "must be non-NULL");
            count++;
          }
        }
      }
    }
  }

  assert(count + InlineCacheBuffer::pending_icholder_count() + CompiledICHolder::live_not_claimed_count() ==
         CompiledICHolder::live_count(), "must agree");
#endif
}

void CodeCache::verify_oops() {
  MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
  VerifyOopClosure voc;
  FOR_ALL_METHOD_HEAPS(it) {
    FOR_ALL_ALIVE_BLOBS(cb, *it) {
      nmethod* nm = (nmethod*)cb;
      nm->oops_do(&voc);
      nm->verify_oop_relocations();
    }
  }
}

size_t CodeCache::capacity() {
  size_t cap = 0;
  FOR_ALL_HEAPS(it) {
    cap += (*it)->capacity();
  }
  return cap;
}

size_t CodeCache::unallocated_capacity() {
  size_t unallocated_cap = 0;
  FOR_ALL_HEAPS(it) {
    unallocated_cap += (*it)->unallocated_capacity();
  }
  return unallocated_cap;
}

size_t CodeCache::max_capacity() {
  size_t max_cap = 0;
  FOR_ALL_HEAPS(it) {
    max_cap += (*it)->max_capacity();
  }
  return max_cap;
}

/**
 * Returns the reverse free ratio. E.g., if 25% (1/4) of the code cache
 * is free, reverse_free_ratio() returns 4.
 */
double CodeCache::reverse_free_ratio(int code_blob_type) {
  CodeHeap* heap = get_code_heap(code_blob_type);
  if (heap == NULL) {
    return 0;
  }
  double unallocated_capacity = (double)(heap->unallocated_capacity() - CodeCacheMinimumFreeSpace);
  double max_capacity = (double)heap->max_capacity();
  return max_capacity / unallocated_capacity;
}
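
// Worked example for reverse_free_ratio() (illustrative numbers only): for a
// heap with max_capacity() of 128M and 32M unallocated (ignoring the small
// CodeCacheMinimumFreeSpace correction), the result is 128/32 = 4, which is
// the "25% free" case from the comment above. The less free space remains,
// the larger the returned ratio.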

void icache_init();

void CodeCache::initialize() {
  assert(CodeCacheSegmentSize >= (uintx)CodeEntryAlignment, "CodeCacheSegmentSize must be large enough to align entry points");
#ifdef COMPILER2
  assert(CodeCacheSegmentSize >= (uintx)OptoLoopAlignment,  "CodeCacheSegmentSize must be large enough to align inner loops");
#endif
  assert(CodeCacheSegmentSize >= sizeof(jdouble),    "CodeCacheSegmentSize must be large enough to align constants");
  // This was originally just a check of the alignment, causing failure. Instead,
  // round the expansion size up to the page size. In particular, Solaris is
  // moving to a larger default page size.
  CodeCacheExpansionSize = round_to(CodeCacheExpansionSize, os::vm_page_size());

  // Reserve space and create heaps
  initialize_heaps();

  // Initialize ICache flush mechanism
  // This service is needed for os::register_code_area
  icache_init();

  // Give OS a chance to register generated code area.
  // This is used on Windows 64 bit platforms to register
  // Structured Exception Handlers for our generated code.
  os::register_code_area((char*)low_bound(), (char*)high_bound());
}

void codeCache_init() {
  CodeCache::initialize();
}

//------------------------------------------------------------------------------------------------

int CodeCache::number_of_nmethods_with_dependencies() {
  return _number_of_nmethods_with_dependencies;
}

#ifndef PRODUCT
// used to keep track of how much time is spent in mark_for_deoptimization
static elapsedTimer dependentCheckTime;
static int dependentCheckCount = 0;
#endif // PRODUCT
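
// Note on the marking functions below (descriptive only): deoptimization is a
// two-phase protocol. The mark_for_deoptimization() variants merely flag the
// affected nmethods and return how many were flagged; the actual state change
// happens later, in make_marked_nmethods_not_entrant() or
// make_marked_nmethods_zombies(), once a safepoint is reached.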

int CodeCache::mark_for_deoptimization(DepChange& changes) {
  MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);

#ifndef PRODUCT
  dependentCheckTime.start();
  dependentCheckCount++;
#endif // PRODUCT

  int number_of_marked_CodeBlobs = 0;

  // search the hierarchy looking for nmethods which are affected by the loading of this class

  // then search the interfaces this class implements looking for nmethods
  // which might be dependent on the fact that an interface only had one
  // implementor.

  { No_Safepoint_Verifier nsv;
    for (DepChange::ContextStream str(changes, nsv); str.next(); ) {
      Klass* d = str.klass();
      number_of_marked_CodeBlobs += InstanceKlass::cast(d)->mark_dependent_nmethods(changes);
    }
  }

  if (VerifyDependencies) {
    // Turn off dependency tracing while actually testing deps.
    NOT_PRODUCT( FlagSetting fs(TraceDependencies, false) );
    FOR_ALL_METHOD_HEAPS(it) {
      FOR_ALL_ALIVE_BLOBS(cb, *it) {
        nmethod* nm = (nmethod*)cb;
        if (!nm->is_marked_for_deoptimization() &&
            nm->check_all_dependencies()) {
          ResourceMark rm;
          tty->print_cr("Should have been marked for deoptimization:");
          changes.print();
          nm->print();
          nm->print_dependencies();
        }
      }
    }
  }

#ifndef PRODUCT
  dependentCheckTime.stop();
#endif // PRODUCT

  return number_of_marked_CodeBlobs;
}


#ifdef HOTSWAP
int CodeCache::mark_for_evol_deoptimization(instanceKlassHandle dependee) {
  MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
  int number_of_marked_CodeBlobs = 0;

  // Deoptimize all methods of the evolving class itself
  Array<Method*>* old_methods = dependee->methods();
  for (int i = 0; i < old_methods->length(); i++) {
    ResourceMark rm;
    Method* old_method = old_methods->at(i);
    nmethod* nm = old_method->code();
    if (nm != NULL) {
      nm->mark_for_deoptimization();
      number_of_marked_CodeBlobs++;
    }
  }

  FOR_ALL_METHOD_HEAPS(it) {
    FOR_ALL_ALIVE_BLOBS(cb, *it) {
      nmethod* nm = (nmethod*)cb;
      if (nm->is_marked_for_deoptimization()) {
        // ...Already marked in the previous pass; don't count it again.
      } else if (nm->is_evol_dependent_on(dependee())) {
        ResourceMark rm;
        nm->mark_for_deoptimization();
        number_of_marked_CodeBlobs++;
      } else {
        // flush caches in case they refer to a redefined Method*
        nm->clear_inline_caches();
      }
    }
  }

  return number_of_marked_CodeBlobs;
}
#endif // HOTSWAP


// Deoptimize all methods
void CodeCache::mark_all_nmethods_for_deoptimization() {
  MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
  FOR_ALL_METHOD_HEAPS(it) {
    FOR_ALL_ALIVE_BLOBS(cb, *it) {
      nmethod* nm = (nmethod*)cb;
      nm->mark_for_deoptimization();
    }
  }
}

int CodeCache::mark_for_deoptimization(Method* dependee) {
  MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
  int number_of_marked_CodeBlobs = 0;

  FOR_ALL_METHOD_HEAPS(it) {
    FOR_ALL_ALIVE_BLOBS(cb, *it) {
      nmethod* nm = (nmethod*)cb;
      if (nm->is_dependent_on_method(dependee)) {
        ResourceMark rm;
        nm->mark_for_deoptimization();
        number_of_marked_CodeBlobs++;
      }
    }
  }

  return number_of_marked_CodeBlobs;
}

void CodeCache::make_marked_nmethods_zombies() {
  assert(SafepointSynchronize::is_at_safepoint(), "must be at a safepoint");
  FOR_ALL_METHOD_HEAPS(it) {
    FOR_ALL_ALIVE_BLOBS(cb, *it) {
      nmethod* nm = (nmethod*)cb;
      if (nm->is_marked_for_deoptimization()) {
        // If the nmethod has already been made non-entrant and it can be converted
        // then zombie it now. Otherwise make it non-entrant and it will eventually
        // be zombied when it is no longer seen on the stack. Note that the nmethod
        // might be "entrant" and not on the stack and so could be zombied immediately
        // but we can't tell because we don't track it on stack until it becomes
        // non-entrant.
        if (nm->is_not_entrant() && nm->can_not_entrant_be_converted()) {
          nm->make_zombie();
        } else {
          nm->make_not_entrant();
        }
      }
    }
  }
}

void CodeCache::make_marked_nmethods_not_entrant() {
  assert_locked_or_safepoint(CodeCache_lock);
  FOR_ALL_METHOD_HEAPS(it) {
    FOR_ALL_ALIVE_BLOBS(cb, *it) {
      nmethod* nm = (nmethod*)cb;
      if (nm->is_marked_for_deoptimization()) {
        nm->make_not_entrant();
      }
    }
  }
}

void CodeCache::verify() {
  assert_locked_or_safepoint(CodeCache_lock);
  FOR_ALL_HEAPS(it) {
    CodeHeap* heap = *it;
    heap->verify();
    FOR_ALL_BLOBS(cb, heap) {
      if (cb->is_alive()) {
        cb->verify();
      }
    }
  }
}

// A CodeHeap is full. Print out warning and report event.
void CodeCache::report_codemem_full(int code_blob_type) {
  // Get nmethod heap for the given CodeBlobType and build CodeCacheFull event
  CodeHeap* heap = get_code_heap(code_blob_type);

  if (!heap->was_full()) {
    // Not yet reported for this heap, report
    heap->report_full();
    warning("CodeHeap for %s is full. Compiler has been disabled.", CodeCache::get_heap_name(code_blob_type));
    warning("Try increasing the code heap size using -XX:%s=",
            (code_blob_type == CodeBlobType::MethodNonProfiled) ? "NonProfiledCodeHeapSize" : "ProfiledCodeHeapSize");

    ResourceMark rm;
    stringStream s;
    // Dump CodeCache summary into a buffer before locking the tty
    {
      MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
      print_summary(&s, true);
    }
    ttyLocker ttyl;
    // Use an explicit "%s" so the buffered summary is never treated as a format string
    tty->print("%s", s.as_string());
  }

  _codemem_full_count++;
  EventCodeCacheFull event;
  if (event.should_commit()) {
    event.set_codeBlobType(code_blob_type);
    event.set_startAddress((u8)heap->low_boundary());
    event.set_commitedTopAddress((u8)heap->high());
    event.set_reservedTopAddress((u8)heap->high_boundary());
    event.set_entryCount(nof_blobs());
    event.set_methodCount(nof_nmethods());
    event.set_adaptorCount(nof_adapters());
    event.set_unallocatedCapacity(heap->unallocated_capacity()/K);
    event.set_fullCount(_codemem_full_count);
    event.commit();
  }
}

//------------------------------------------------------------------------------------------------
// Non-product version

#ifndef PRODUCT

void CodeCache::verify_if_often() {
  if (VerifyCodeCacheOften) {
    FOR_ALL_HEAPS(it) {
      (*it)->verify();
    }
  }
}

void CodeCache::print_trace(const char* event, CodeBlob* cb, int size) {
  if (PrintCodeCache2) {  // Need to add a new flag
    ResourceMark rm;
    if (size == 0) size = cb->size();
    tty->print_cr("CodeCache %s: addr: " INTPTR_FORMAT ", size: 0x%x", event, (intptr_t)cb, size);
  }
}
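
// With -XX:+PrintCodeCache2, print_trace() above emits lines of the form
// (illustrative address/size values only):
//   CodeCache allocation: addr: 0x00007f3a4c012345, size: 0x440
//   CodeCache free: addr: 0x00007f3a4c012345, size: 0x440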

void CodeCache::print_internals() {
  int nmethodCount = 0;
  int runtimeStubCount = 0;
  int adapterCount = 0;
  int deoptimizationStubCount = 0;
  int uncommonTrapStubCount = 0;
  int bufferBlobCount = 0;
  int total = 0;
  int nmethodAlive = 0;
  int nmethodNotEntrant = 0;
  int nmethodZombie = 0;
  int nmethodUnloaded = 0;
  int nmethodJava = 0;
  int nmethodNative = 0;
  int maxCodeSize = 0;
  ResourceMark rm;

  FOR_ALL_HEAPS(it) {
    if (Verbose) {
      tty->print_cr("## Heap '%s' ##", (*it)->name());
    }
    FOR_ALL_BLOBS(cb, *it) {
      total++;
      if (cb->is_nmethod()) {
        nmethod* nm = (nmethod*)cb;

        if (Verbose && nm->method() != NULL) {
          ResourceMark rm;
          char* method_name = nm->method()->name_and_sig_as_C_string();
          tty->print("%s %d", method_name, nm->comp_level());
          if (nm->is_alive())       { tty->print_cr(" alive"); }
          if (nm->is_not_entrant()) { tty->print_cr(" not-entrant"); }
          if (nm->is_zombie())      { tty->print_cr(" zombie"); }
        }

        nmethodCount++;

        if (nm->is_alive())       { nmethodAlive++; }
        if (nm->is_not_entrant()) { nmethodNotEntrant++; }
        if (nm->is_zombie())      { nmethodZombie++; }
        if (nm->is_unloaded())    { nmethodUnloaded++; }
        if (nm->method() != NULL && nm->is_native_method()) { nmethodNative++; }

        if (nm->method() != NULL && nm->is_java_method()) {
          nmethodJava++;
          if (nm->insts_size() > maxCodeSize) {
            maxCodeSize = nm->insts_size();
          }
        }
      } else if (cb->is_runtime_stub()) {
        runtimeStubCount++;
      } else if (cb->is_deoptimization_stub()) {
        deoptimizationStubCount++;
      } else if (cb->is_uncommon_trap_stub()) {
        uncommonTrapStubCount++;
      } else if (cb->is_adapter_blob()) {
        adapterCount++;
      } else if (cb->is_buffer_blob()) {
        bufferBlobCount++;
      }
    }
  }

  int bucketSize = 512;
  int bucketLimit = maxCodeSize / bucketSize + 1;
  int* buckets = NEW_C_HEAP_ARRAY(int, bucketLimit, mtCode);
  memset(buckets, 0, sizeof(int) * bucketLimit);

  FOR_ALL_METHOD_HEAPS(it) {
    FOR_ALL_BLOBS(cb, *it) {
      nmethod* nm = (nmethod*)cb;
      if (nm->method() != NULL && nm->is_java_method()) {
        buckets[nm->insts_size() / bucketSize]++;
      }
    }
  }

  tty->print_cr("Code Cache Entries (total of %d)", total);
  tty->print_cr("-------------------------------------------------");
  tty->print_cr("nmethods: %d", nmethodCount);
  tty->print_cr("\talive: %d", nmethodAlive);
  tty->print_cr("\tnot_entrant: %d", nmethodNotEntrant);
  tty->print_cr("\tzombie: %d", nmethodZombie);
  tty->print_cr("\tunloaded: %d", nmethodUnloaded);
  tty->print_cr("\tjava: %d", nmethodJava);
  tty->print_cr("\tnative: %d", nmethodNative);
  tty->print_cr("runtime_stubs: %d", runtimeStubCount);
  tty->print_cr("adapters: %d", adapterCount);
  tty->print_cr("buffer blobs: %d", bufferBlobCount);
  tty->print_cr("deoptimization_stubs: %d", deoptimizationStubCount);
  tty->print_cr("uncommon_traps: %d", uncommonTrapStubCount);
  tty->print_cr("\nnmethod size distribution (non-zombie java)");
  tty->print_cr("-------------------------------------------------");

  for (int i = 0; i < bucketLimit; ++i) {
    if (buckets[i] != 0) {
      tty->print("%d - %d bytes", i * bucketSize, (i + 1) * bucketSize);
      tty->fill_to(40);
      tty->print_cr("%d", buckets[i]);
    }
  }

  FREE_C_HEAP_ARRAY(int, buckets, mtCode);
}
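
// A note on the histogram printed above (descriptive only, no new logic):
// non-zombie Java nmethods are bucketed by insts_size() in 512-byte steps;
// bucketLimit is maxCodeSize / bucketSize + 1 so that the largest method
// always falls into a valid bucket and the buckets array is never overrun.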

#endif // !PRODUCT

void CodeCache::print() {
  print_summary(tty);

#ifndef PRODUCT
  if (!Verbose) return;

  CodeBlob_sizes live;
  CodeBlob_sizes dead;

  FOR_ALL_HEAPS(it) {
    FOR_ALL_BLOBS(cb, *it) {
      if (!cb->is_alive()) {
        dead.add(cb);
      } else {
        live.add(cb);
      }
    }
  }

  tty->print_cr("CodeCache:");

  // Guard against division by zero and supply a conversion for each argument.
  if (dependentCheckCount > 0) {
    tty->print_cr("nmethod dependency checking time %fs, %fs per dependent",
                  dependentCheckTime.seconds(),
                  dependentCheckTime.seconds() / dependentCheckCount);
  }

  if (!live.is_empty()) {
    live.print("live");
  }
  if (!dead.is_empty()) {
    dead.print("dead");
  }

  if (WizardMode) {
    // print the oop_map usage
    int code_size = 0;
    int number_of_blobs = 0;
    int number_of_oop_maps = 0;
    int map_size = 0;
    FOR_ALL_HEAPS(it) {
      FOR_ALL_BLOBS(cb, *it) {
        if (cb->is_alive()) {
          number_of_blobs++;
          code_size += cb->code_size();
          OopMapSet* set = cb->oop_maps();
          if (set != NULL) {
            number_of_oop_maps += set->size();
            map_size           += set->heap_size();
          }
        }
      }
    }
    tty->print_cr("OopMaps");
    tty->print_cr("  #blobs    = %d", number_of_blobs);
    tty->print_cr("  code size = %d", code_size);
    tty->print_cr("  #oop_maps = %d", number_of_oop_maps);
    tty->print_cr("  map size  = %d", map_size);
  }

#endif // !PRODUCT
}

void CodeCache::print_summary(outputStream* st, bool detailed) {
  st->print_cr("CodeCache Summary:");
  FOR_ALL_HEAPS(it) {
    CodeHeap* heap = (*it);
    size_t total = (heap->high_boundary() - heap->low_boundary());
    st->print_cr("Heap '%s': size=" SIZE_FORMAT "Kb used=" SIZE_FORMAT
                 "Kb max_used=" SIZE_FORMAT "Kb free=" SIZE_FORMAT "Kb",
                 heap->name(), total/K, (total - heap->unallocated_capacity())/K,
                 heap->max_allocated_capacity()/K, heap->unallocated_capacity()/K);

    if (detailed) {
      st->print_cr(" bounds [" INTPTR_FORMAT ", " INTPTR_FORMAT ", " INTPTR_FORMAT "]",
                   heap->low_boundary(),
                   heap->high(),
                   heap->high_boundary());
    }
  }

  if (detailed) {
    log_state(st);
    st->print_cr(" compilation: %s", CompileBroker::should_compile_new_jobs() ?
                 "enabled" : Arguments::mode() == Arguments::_int ?
                 "disabled (interpreter mode)" :
                 "disabled (not enough contiguous free space left)");
  }
}

void CodeCache::log_state(outputStream* st) {
  st->print(" total_blobs='" UINT32_FORMAT "' nmethods='" UINT32_FORMAT "'"
            " adapters='" UINT32_FORMAT "'",
            nof_blobs(), nof_nmethods(), nof_adapters());
}
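
// Sample output shapes for the functions above (illustrative values only):
//   print_summary():
//     CodeCache Summary:
//     Heap 'Non-profiled nmethods': size=117760Kb used=2304Kb max_used=2432Kb free=115456Kb
//   log_state():
//     total_blobs='1234' nmethods='987' adapters='201'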