/*
 * Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "classfile/systemDictionary.hpp"
#include "code/codeCache.hpp"
#include "code/compiledIC.hpp"
#include "code/icBuffer.hpp"
#include "code/nmethod.hpp"
#include "code/vtableStubs.hpp"
#include "interpreter/interpreter.hpp"
#include "interpreter/linkResolver.hpp"
#include "memory/metadataFactory.hpp"
#include "memory/oopFactory.hpp"
#include "oops/method.hpp"
#include "oops/oop.inline.hpp"
#include "oops/symbol.hpp"
#include "runtime/icache.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubRoutines.hpp"
#include "utilities/events.hpp"


// Every time a compiled IC is changed or its type is being accessed,
// either the CompiledIC_lock must be held or we must be at a safepoint.

//-----------------------------------------------------------------------------
// Low-level access to an inline cache. Private, since these accessors might
// not be MT-safe to use.

void* CompiledIC::cached_value() const {
  assert(CompiledIC_lock->is_locked() || SafepointSynchronize::is_at_safepoint(), "");
  assert(!is_optimized(), "an optimized virtual call does not have a cached metadata");

  if (!is_in_transition_state()) {
    void* data = (void*)_value->data();
    // If we let the metadata value here be initialized to zero...
    assert(data != NULL || Universe::non_oop_word() == NULL,
           "no raw nulls in CompiledIC metadatas, because of patching races");
    return (data == (void*)Universe::non_oop_word()) ? NULL : data;
  } else {
    return InlineCacheBuffer::cached_value_for((CompiledIC *)this);
  }
}

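// For orientation, the patchable site manipulated in this file consists of
// two adjacent native instructions (a sketch, assuming x86-64 register
// conventions; other platforms use their own inline-cache register and
// encodings):
//
//   movabs rax, #cached_value    ; NativeMovConstReg, accessed through _value
//   call   entry_point           ; NativeCall, accessed through _ic_call
//
// cached_value() above reads the first instruction's immediate, mapping the
// Universe::non_oop_word() sentinel back to NULL.
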

void CompiledIC::internal_set_ic_destination(address entry_point, bool is_icstub, void* cache, bool is_icholder) {
  assert(entry_point != NULL, "must set legal entry point");
  assert(CompiledIC_lock->is_locked() || SafepointSynchronize::is_at_safepoint(), "");
  assert(!is_optimized() || cache == NULL, "an optimized virtual call does not have a cached metadata");
  assert(cache == NULL || cache != (Metadata*)badOopVal, "invalid metadata");

  assert(!is_icholder || is_icholder_entry(entry_point), "must be");

  // Don't use ic_destination for this test since that forwards
  // through ICBuffer instead of returning the actual current state of
  // the CompiledIC.
  if (is_icholder_entry(_ic_call->destination())) {
    // When patching for the ICStub case, the cached value isn't
    // overwritten until the ICStub is copied into the CompiledIC during
    // the next safepoint. Make sure that the CompiledICHolder* is
    // marked for release at this point, since it won't be identifiable
    // once the entry point is overwritten.
    InlineCacheBuffer::queue_for_release((CompiledICHolder*)_value->data());
  }

  if (TraceCompiledIC) {
    tty->print(" ");
    print_compiled_ic();
    tty->print(" changing destination to " INTPTR_FORMAT, p2i(entry_point));
    if (!is_optimized()) {
      tty->print(" changing cached %s to " INTPTR_FORMAT, is_icholder ? "icholder" : "metadata", p2i((address)cache));
    }
    if (is_icstub) {
      tty->print(" (icstub)");
    }
    tty->cr();
  }

  {
    MutexLockerEx pl(SafepointSynchronize::is_at_safepoint() ? NULL : Patching_lock, Mutex::_no_safepoint_check_flag);
#ifdef ASSERT
    CodeBlob* cb = CodeCache::find_blob_unsafe(_ic_call);
    assert(cb != NULL && cb->is_nmethod(), "must be nmethod");
#endif
    _ic_call->set_destination_mt_safe(entry_point);
  }

  if (is_optimized() || is_icstub) {
    // Optimized call sites don't have a cache value and ICStub call
    // sites only change the entry point. Changing the value in that
    // case could lead to MT safety issues.
    assert(cache == NULL, "must be null");
    return;
  }

  if (cache == NULL) cache = (void*)Universe::non_oop_word();

  _value->set_data((intptr_t)cache);
}


void CompiledIC::set_ic_destination(ICStub* stub) {
  internal_set_ic_destination(stub->code_begin(), true, NULL, false);
}



address CompiledIC::ic_destination() const {
  assert(CompiledIC_lock->is_locked() || SafepointSynchronize::is_at_safepoint(), "");
  if (!is_in_transition_state()) {
    return _ic_call->destination();
  } else {
    return InlineCacheBuffer::ic_destination_for((CompiledIC *)this);
  }
}


bool CompiledIC::is_in_transition_state() const {
  assert(CompiledIC_lock->is_locked() || SafepointSynchronize::is_at_safepoint(), "");
  return InlineCacheBuffer::contains(_ic_call->destination());
}


bool CompiledIC::is_icholder_call() const {
  assert(CompiledIC_lock->is_locked() || SafepointSynchronize::is_at_safepoint(), "");
  return !_is_optimized && is_icholder_entry(ic_destination());
}

// Returns the native address of the 'call' instruction in the inline-cache. Used by
// the InlineCacheBuffer when it needs to find the stub.
address CompiledIC::stub_address() const {
  assert(is_in_transition_state(), "should only be called when we are in a transition state");
  return _ic_call->destination();
}

// Clears the IC stub if the compiled IC is in transition state
void CompiledIC::clear_ic_stub() {
  if (is_in_transition_state()) {
    ICStub* stub = ICStub_from_destination_address(stub_address());
    stub->clear();
  }
}


//-----------------------------------------------------------------------------
// High-level access to an inline cache. Guaranteed to be MT-safe.
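//
// Summary of the stable states an inline cache moves between, as implemented
// by the set_to_* transitions in this file (intermediate ICStub states while
// is_in_transition_state() are omitted):
//
//   clean       - destination is a SharedRuntime resolve stub, no cached value
//   monomorphic - destination is a single callee (compiled or interpreted);
//                 the cached value identifies the expected receiver
//   megamorphic - destination is a vtable/itable stub; dispatch is dynamic
//
// Transitions that cannot be patched atomically in place are routed through
// an ICStub allocated in the InlineCacheBuffer and completed at a safepoint.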

void CompiledIC::initialize_from_iter(RelocIterator* iter) {
  assert(iter->addr() == _ic_call->instruction_address(), "must find ic_call");

  if (iter->type() == relocInfo::virtual_call_type) {
    virtual_call_Relocation* r = iter->virtual_call_reloc();
    _is_optimized = false;
    _value = nativeMovConstReg_at(r->cached_value());
  } else {
    assert(iter->type() == relocInfo::opt_virtual_call_type, "must be a virtual call");
    _is_optimized = true;
    _value = NULL;
  }
}

CompiledIC::CompiledIC(nmethod* nm, NativeCall* call)
  : _ic_call(call)
{
  address ic_call = _ic_call->instruction_address();

  assert(ic_call != NULL, "ic_call address must be set");
  assert(nm != NULL, "must pass nmethod");
  assert(nm->contains(ic_call), "must be in nmethod");

  // Search for the ic_call at the given address.
  RelocIterator iter(nm, ic_call, ic_call+1);
  bool ret = iter.next();
  assert(ret == true, "relocInfo must exist at this address");
  assert(iter.addr() == ic_call, "must find ic_call");

  initialize_from_iter(&iter);
}

CompiledIC::CompiledIC(RelocIterator* iter)
  : _ic_call(nativeCall_at(iter->addr()))
{
  address ic_call = _ic_call->instruction_address();

  nmethod* nm = iter->code();
  assert(ic_call != NULL, "ic_call address must be set");
  assert(nm != NULL, "must pass nmethod");
  assert(nm->contains(ic_call), "must be in nmethod");

  initialize_from_iter(iter);
}

bool CompiledIC::set_to_megamorphic(CallInfo* call_info, Bytecodes::Code bytecode, TRAPS) {
  assert(CompiledIC_lock->is_locked() || SafepointSynchronize::is_at_safepoint(), "");
  assert(!is_optimized(), "cannot set an optimized virtual call to megamorphic");
  assert(is_call_to_compiled() || is_call_to_interpreted(), "going directly to megamorphic?");

  address entry;
  if (call_info->call_kind() == CallInfo::itable_call) {
    assert(bytecode == Bytecodes::_invokeinterface, "");
    int itable_index = call_info->itable_index();
    entry = VtableStubs::find_itable_stub(itable_index);
    if (entry == NULL) {
      return false;
    }
#ifdef ASSERT
    int index = call_info->resolved_method()->itable_index();
    assert(index == itable_index, "CallInfo pre-computes this");
#endif //ASSERT
    InstanceKlass* k = call_info->resolved_method()->method_holder();
    assert(k->verify_itable_index(itable_index), "sanity check");
    InlineCacheBuffer::create_transition_stub(this, k, entry);
  } else {
    assert(call_info->call_kind() == CallInfo::vtable_call, "either itable or vtable");
    // Can be different from selected_method->vtable_index(), due to package-private etc.
    int vtable_index = call_info->vtable_index();
    assert(call_info->resolved_klass()->verify_vtable_index(vtable_index), "sanity check");
    entry = VtableStubs::find_vtable_stub(vtable_index);
    if (entry == NULL) {
      return false;
    }
    InlineCacheBuffer::create_transition_stub(this, NULL, entry);
  }

  if (TraceICs) {
    ResourceMark rm;
    tty->print_cr("IC@" INTPTR_FORMAT ": to megamorphic %s entry: " INTPTR_FORMAT,
                  p2i(instruction_address()), call_info->selected_method()->print_value_string(), p2i(entry));
  }

  // We can't check this anymore. With lazy deopt we could have already
  // cleaned this IC entry before we even return. This is possible if
  // we ran out of space in the inline cache buffer trying to do the
  // set_next and we safepointed to free up space. This is a benign
  // race because the IC entry was complete when we safepointed so
  // cleaning it immediately is harmless.
  // assert(is_megamorphic(), "sanity check");
  return true;
}

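// Hypothetical caller-side sketch (names assumed for illustration, not taken
// from this file): a false return from set_to_megamorphic() only means that
// no vtable/itable stub could be produced right now, so a miss handler would
// leave the IC unchanged and simply retry on the next miss:
//
//   CompiledIC* ic = CompiledIC_at(&iter);
//   if (!ic->set_to_megamorphic(&call_info, bytecode, THREAD)) {
//     // no stub available yet; the call stays in its current state for now
//   }
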

// true if destination is megamorphic stub
bool CompiledIC::is_megamorphic() const {
  assert(CompiledIC_lock->is_locked() || SafepointSynchronize::is_at_safepoint(), "");
  assert(!is_optimized(), "an optimized call cannot be megamorphic");

  // Cannot rely on cached_value. It is either an interface or a method.
  return VtableStubs::is_entry_point(ic_destination());
}

bool CompiledIC::is_call_to_compiled() const {
  assert(CompiledIC_lock->is_locked() || SafepointSynchronize::is_at_safepoint(), "");

  // Use unsafe, since an inline cache might point to a zombie method. However, the zombie
  // method is guaranteed to still exist, since we only remove methods after all inline caches
  // have been cleaned up
  CodeBlob* cb = CodeCache::find_blob_unsafe(ic_destination());
  bool is_monomorphic = (cb != NULL && cb->is_nmethod());
  // Check that the cached_value is a klass for non-optimized monomorphic calls
  // This assertion is invalid for compiler1: a call that does not look optimized (no static stub) can be used
  // for calling directly to vep without using the inline cache (i.e., cached_value == NULL).
  // For JVMCI this occurs because CHA is only used to improve inlining so call sites which could be optimized
  // virtuals because there are no currently loaded subclasses of a type are left as virtual call sites.
#ifdef ASSERT
  CodeBlob* caller = CodeCache::find_blob_unsafe(instruction_address());
  bool is_c1_or_jvmci_method = caller->is_compiled_by_c1() || caller->is_compiled_by_jvmci();
  assert( is_c1_or_jvmci_method ||
          !is_monomorphic ||
          is_optimized() ||
          !caller->is_alive() ||
          (cached_metadata() != NULL && cached_metadata()->is_klass()), "sanity check");
#endif // ASSERT
  return is_monomorphic;
}


bool CompiledIC::is_call_to_interpreted() const {
  assert(CompiledIC_lock->is_locked() || SafepointSynchronize::is_at_safepoint(), "");
  // It is a call to the interpreter if the destination is either calling to a
  // stub (if it is optimized), or calling to an I2C blob
  bool is_call_to_interpreted = false;
  if (!is_optimized()) {
    // must use unsafe because the destination can be a zombie (and we're cleaning)
    // and the print_compiled_ic code wants to know if the site (in the non-zombie)
    // is to the interpreter.
    CodeBlob* cb = CodeCache::find_blob_unsafe(ic_destination());
    is_call_to_interpreted = (cb != NULL && cb->is_adapter_blob());
    assert(!is_call_to_interpreted || (is_icholder_call() && cached_icholder() != NULL), "sanity check");
  } else {
    // Check if we are calling into our own codeblob (i.e., to a stub)
    CodeBlob* cb = CodeCache::find_blob(_ic_call->instruction_address());
    address dest = ic_destination();
#ifdef ASSERT
    {
      CodeBlob* db = CodeCache::find_blob_unsafe(dest);
      assert(!db->is_adapter_blob(), "must use stub!");
    }
#endif /* ASSERT */
    is_call_to_interpreted = cb->contains(dest);
  }
  return is_call_to_interpreted;
}

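// Sketch of how set_to_clean() below is typically driven (the caller shape is
// assumed for illustration; see nmethod::clear_inline_caches for the real
// loop): walk an nmethod's relocations and clean each inline-cache site.
//
//   RelocIterator iter(nm);
//   while (iter.next()) {
//     if (iter.type() == relocInfo::virtual_call_type ||
//         iter.type() == relocInfo::opt_virtual_call_type) {
//       CompiledIC_at(&iter)->set_to_clean(nm->is_alive());
//     }
//   }
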

void CompiledIC::set_to_clean(bool in_use) {
  assert(SafepointSynchronize::is_at_safepoint() || CompiledIC_lock->is_locked(), "MT-unsafe call");
  if (TraceInlineCacheClearing || TraceICs) {
    tty->print_cr("IC@" INTPTR_FORMAT ": set to clean", p2i(instruction_address()));
    print();
  }

  address entry;
  if (is_optimized()) {
    entry = SharedRuntime::get_resolve_opt_virtual_call_stub();
  } else {
    entry = SharedRuntime::get_resolve_virtual_call_stub();
  }

  // A zombie transition will always be safe, since the metadata has already been set to NULL, so
  // we only need to patch the destination
  bool safe_transition = !in_use || is_optimized() || SafepointSynchronize::is_at_safepoint();

  if (safe_transition) {
    // Kill any leftover stub we might have too
    clear_ic_stub();
    if (is_optimized()) {
      set_ic_destination(entry);
    } else {
      set_ic_destination_and_value(entry, (void*)NULL);
    }
  } else {
    // Unsafe transition - create stub.
    InlineCacheBuffer::create_transition_stub(this, NULL, entry);
  }
  // We can't check this anymore. With lazy deopt we could have already
  // cleaned this IC entry before we even return. This is possible if
  // we ran out of space in the inline cache buffer trying to do the
  // set_next and we safepointed to free up space. This is a benign
  // race because the IC entry was complete when we safepointed so
  // cleaning it immediately is harmless.
  // assert(is_clean(), "sanity check");
}


bool CompiledIC::is_clean() const {
  assert(CompiledIC_lock->is_locked() || SafepointSynchronize::is_at_safepoint(), "");
  bool is_clean = false;
  address dest = ic_destination();
  is_clean = dest == SharedRuntime::get_resolve_opt_virtual_call_stub() ||
             dest == SharedRuntime::get_resolve_virtual_call_stub();
  assert(!is_clean || is_optimized() || cached_value() == NULL, "sanity check");
  return is_clean;
}

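// set_to_monomorphic() below distinguishes four cases:
//   to interpreter, optimized call      - patch the static call and its stub
//                                         directly under the Patching_lock
//   to interpreter, non-optimized call  - go through an ICStub carrying a
//                                         CompiledICHolder
//   to compiled code, safe transition   - patch destination (and cached value)
//                                         in place
//   to compiled code, unsafe transition - go through an ICStub carrying the
//                                         receiver klass
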

void CompiledIC::set_to_monomorphic(CompiledICInfo& info) {
  assert(CompiledIC_lock->is_locked() || SafepointSynchronize::is_at_safepoint(), "");
  // Updating a cache to the wrong entry can cause bugs that are very hard
  // to track down - if the cache entry gets invalid - we just clean it. In
  // this way it is always the same code path that is responsible for
  // updating and resolving an inline cache
  //
  // The above is no longer true. SharedRuntime::fixup_callers_callsite will change optimized
  // callsites. In addition ic_miss code will update a site to monomorphic if it determines
  // that a monomorphic call to the interpreter can now be monomorphic to compiled code.
  //
  // In both of these cases the only thing being modified is the jump/call target and these
  // transitions are mt_safe

  Thread *thread = Thread::current();
  if (info.to_interpreter()) {
    // Call to interpreter
    if (info.is_optimized() && is_optimized()) {
      assert(is_clean(), "unsafe IC path");
      MutexLockerEx pl(Patching_lock, Mutex::_no_safepoint_check_flag);
      // the call analysis (callee structure) specifies that the call is optimized
      // (either because of CHA or the static target is final)
      // At code generation time, this call has been emitted as static call
      // Call via stub
      assert(info.cached_metadata() != NULL && info.cached_metadata()->is_method(), "sanity check");
      CompiledStaticCall* csc = compiledStaticCall_at(instruction_address());
      methodHandle method (thread, (Method*)info.cached_metadata());
      csc->set_to_interpreted(method, info.entry());
      if (TraceICs) {
        ResourceMark rm(thread);
        tty->print_cr("IC@" INTPTR_FORMAT ": monomorphic to interpreter: %s",
                      p2i(instruction_address()),
                      method->print_value_string());
      }
    } else {
      // Call via method-klass-holder
      InlineCacheBuffer::create_transition_stub(this, info.claim_cached_icholder(), info.entry());
      if (TraceICs) {
        ResourceMark rm(thread);
        tty->print_cr("IC@" INTPTR_FORMAT ": monomorphic to interpreter via icholder ", p2i(instruction_address()));
      }
    }
  } else {
    // Call to compiled code
    bool static_bound = info.is_optimized() || (info.cached_metadata() == NULL);
#ifdef ASSERT
    CodeBlob* cb = CodeCache::find_blob_unsafe(info.entry());
    assert(cb->is_nmethod(), "must be compiled!");
#endif /* ASSERT */

    // This is MT safe if we come from a clean-cache and go through a
    // non-verified entry point
    bool safe = SafepointSynchronize::is_at_safepoint() ||
                (!is_in_transition_state() && (info.is_optimized() || static_bound || is_clean()));

    if (!safe) {
      InlineCacheBuffer::create_transition_stub(this, info.cached_metadata(), info.entry());
    } else {
      if (is_optimized()) {
        set_ic_destination(info.entry());
      } else {
        set_ic_destination_and_value(info.entry(), info.cached_metadata());
      }
    }

    if (TraceICs) {
      ResourceMark rm(thread);
      assert(info.cached_metadata() == NULL || info.cached_metadata()->is_klass(), "must be");
      tty->print_cr("IC@" INTPTR_FORMAT ": monomorphic to compiled (rcvr klass) %s: %s",
                    p2i(instruction_address()),
                    ((Klass*)info.cached_metadata())->print_value_string(),
                    (safe) ? "" : "via stub");
    }
  }
  // We can't check this anymore. With lazy deopt we could have already
  // cleaned this IC entry before we even return. This is possible if
  // we ran out of space in the inline cache buffer trying to do the
  // set_next and we safepointed to free up space. This is a benign
  // race because the IC entry was complete when we safepointed so
  // cleaning it immediately is harmless.
  // assert(is_call_to_compiled() || is_call_to_interpreted(), "sanity check");
}

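// Typical pairing (a sketch of the resolver-side flow; the surrounding code
// is assumed here, compare SharedRuntime's call resolution): the runtime
// first computes the target with compute_monomorphic_entry(), then applies
// it to the site while holding the CompiledIC_lock.
//
//   CompiledICInfo info;
//   CompiledIC::compute_monomorphic_entry(callee_method, receiver_klass,
//                                         is_optimized, static_bound, info, CHECK);
//   ic->set_to_monomorphic(info);
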

// is_optimized: the compiler has generated an optimized call (i.e., no inline
// cache).
// static_bound: the call can be statically bound (i.e., no need to use an
// inline cache).
void CompiledIC::compute_monomorphic_entry(methodHandle method,
                                           KlassHandle receiver_klass,
                                           bool is_optimized,
                                           bool static_bound,
                                           CompiledICInfo& info,
                                           TRAPS) {
  nmethod* method_code = method->code();
  address entry = NULL;
  if (method_code != NULL && method_code->is_in_use()) {
    // Call to compiled code
    if (static_bound || is_optimized) {
      entry = method_code->verified_entry_point();
    } else {
      entry = method_code->entry_point();
    }
  }
  if (entry != NULL) {
    // Call to compiled code
    info.set_compiled_entry(entry, (static_bound || is_optimized) ? NULL : receiver_klass(), is_optimized);
  } else {
    // Note: the following problem exists with Compiler1:
    //  - at compile time we may or may not know if the destination is final
    //  - if we know that the destination is final, we will emit an optimized
    //    virtual call (no inline cache), and need a Method* to make a call
    //    to the interpreter
    //  - if we do not know if the destination is final, we emit a standard
    //    virtual call, and use CompiledICHolder to call interpreted code
    //    (no static call stub has been generated)
    //    However in that case we will now notice it is static_bound
    //    and convert the call into what looks to be an optimized
    //    virtual call. This causes problems in verifying the IC because
    //    it looks vanilla but is optimized. Code in is_call_to_interpreted
    //    is aware of this and weakens its asserts.

    // static_bound should imply is_optimized -- otherwise we have a
    // performance bug (a statically-bindable method is called via a
    // dynamically-dispatched call). Note: the reverse implication isn't
    // necessarily true -- the call may have been optimized based on compiler
    // analysis (static_bound is only based on "final" etc.)
#ifdef COMPILER2
#ifdef TIERED
#if defined(ASSERT)
    // can't check the assert because we don't have the CompiledIC with which to
    // find the address of the call instruction.
    //
    // CodeBlob* cb = find_blob_unsafe(instruction_address());
    // assert(cb->is_compiled_by_c1() || !static_bound || is_optimized, "static_bound should imply is_optimized");
#endif // ASSERT
#else
    assert(!static_bound || is_optimized, "static_bound should imply is_optimized");
#endif // TIERED
#endif // COMPILER2
    if (is_optimized) {
      // Use stub entry
      info.set_interpreter_entry(method()->get_c2i_entry(), method());
    } else {
      // Use icholder entry
      CompiledICHolder* holder = new CompiledICHolder(method(), receiver_klass());
      info.set_icholder_entry(method()->get_c2i_unverified_entry(), holder);
    }
  }
  assert(info.is_optimized() == is_optimized, "must agree");
}


bool CompiledIC::is_icholder_entry(address entry) {
  CodeBlob* cb = CodeCache::find_blob_unsafe(entry);
  return (cb != NULL && cb->is_adapter_blob());
}

// ----------------------------------------------------------------------------
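//
// CompiledStaticCall covers the simpler static and opt-virtual call sites:
// there is no cached value to keep consistent with a destination, so every
// transition reduces to patching the call target (plus, for interpreted
// callees, the out-of-line stub that set_to_interpreted() fills in).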

void CompiledStaticCall::set_to_clean() {
  assert(CompiledIC_lock->is_locked() || SafepointSynchronize::is_at_safepoint(), "mt unsafe call");
  // Reset call site
  MutexLockerEx pl(SafepointSynchronize::is_at_safepoint() ? NULL : Patching_lock, Mutex::_no_safepoint_check_flag);
#ifdef ASSERT
  CodeBlob* cb = CodeCache::find_blob_unsafe(this);
  assert(cb != NULL && cb->is_nmethod(), "must be nmethod");
#endif
  set_destination_mt_safe(SharedRuntime::get_resolve_static_call_stub());

  // Do not reset stub here: It is too expensive to call find_stub.
  // Instead, rely on caller (nmethod::clear_inline_caches) to clear
  // both the call and its stub.
}


bool CompiledStaticCall::is_clean() const {
  return destination() == SharedRuntime::get_resolve_static_call_stub();
}

bool CompiledStaticCall::is_call_to_compiled() const {
  return CodeCache::contains(destination());
}


bool CompiledStaticCall::is_call_to_interpreted() const {
  // It is a call to interpreted code if it calls to a stub. Hence, the destination
  // must be in the stub part of the nmethod that contains the call
  nmethod* nm = CodeCache::find_nmethod(instruction_address());
  return nm->stub_contains(destination());
}

void CompiledStaticCall::set(const StaticCallInfo& info) {
  assert(CompiledIC_lock->is_locked() || SafepointSynchronize::is_at_safepoint(), "mt unsafe call");
  MutexLockerEx pl(Patching_lock, Mutex::_no_safepoint_check_flag);
  // Updating a cache to the wrong entry can cause bugs that are very hard
  // to track down - if the cache entry gets invalid - we just clean it. In
  // this way it is always the same code path that is responsible for
  // updating and resolving an inline cache
  assert(is_clean(), "do not update a call entry - use clean");

  if (info._to_interpreter) {
    // Call to interpreted code
    set_to_interpreted(info.callee(), info.entry());
  } else {
    if (TraceICs) {
      ResourceMark rm;
      tty->print_cr("CompiledStaticCall@" INTPTR_FORMAT ": set_to_compiled " INTPTR_FORMAT,
                    p2i(instruction_address()),
                    p2i(info.entry()));
    }
    // Call to compiled code
    assert(CodeCache::contains(info.entry()), "wrong entry point");
    set_destination_mt_safe(info.entry());
  }
}


// Compute settings for a CompiledStaticCall. Since we might have to set
// the stub when calling to the interpreter, we need to return arguments.
void CompiledStaticCall::compute_entry(methodHandle m, StaticCallInfo& info) {
  nmethod* m_code = m->code();
  info._callee = m;
  if (m_code != NULL && m_code->is_in_use()) {
    info._to_interpreter = false;
    info._entry = m_code->verified_entry_point();
  } else {
    // Callee is interpreted code. In any case entering the interpreter
    // puts a converter-frame on the stack to save arguments.
    assert(!m->is_method_handle_intrinsic(), "Compiled code should never call interpreter MH intrinsics");
    info._to_interpreter = true;
    info._entry = m()->get_c2i_entry();
  }
}

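// The out-of-line stub that find_stub() below locates is the to-interpreter
// trampoline emitted alongside the call; roughly, on x86-64 (a sketch, the
// exact layout and registers are platform-specific):
//
//   movabs rbx, #Method*     ; patched by set_to_interpreted()
//   jmp    c2i_entry         ; target of the statically bound call
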
address CompiledStaticCall::find_stub() {
  // Find reloc. information containing this call-site
  RelocIterator iter((nmethod*)NULL, instruction_address());
  while (iter.next()) {
    if (iter.addr() == instruction_address()) {
      switch(iter.type()) {
        case relocInfo::static_call_type:
          return iter.static_call_reloc()->static_stub();
        // We check here for opt_virtual_call_type, since we reuse the code
        // from the CompiledIC implementation
        case relocInfo::opt_virtual_call_type:
          return iter.opt_virtual_call_reloc()->static_stub();
        case relocInfo::poll_type:
        case relocInfo::poll_return_type: // A safepoint can't overlap a call.
        default:
          ShouldNotReachHere();
      }
    }
  }
  return NULL;
}


//-----------------------------------------------------------------------------
// Non-product mode code
#ifndef PRODUCT

void CompiledIC::verify() {
  // make sure code pattern is actually a call imm32 instruction
  _ic_call->verify();
  if (os::is_MP()) {
    _ic_call->verify_alignment();
  }
  assert(is_clean() || is_call_to_compiled() || is_call_to_interpreted()
         || is_optimized() || is_megamorphic(), "sanity check");
}

void CompiledIC::print() {
  print_compiled_ic();
  tty->cr();
}

void CompiledIC::print_compiled_ic() {
  tty->print("Inline cache at " INTPTR_FORMAT ", calling %s " INTPTR_FORMAT " cached_value " INTPTR_FORMAT,
             p2i(instruction_address()), is_call_to_interpreted() ? "interpreted " : "", p2i(ic_destination()), p2i(is_optimized() ? NULL : cached_value()));
}

void CompiledStaticCall::print() {
  tty->print("static call at " INTPTR_FORMAT " -> ", p2i(instruction_address()));
  if (is_clean()) {
    tty->print("clean");
  } else if (is_call_to_compiled()) {
    tty->print("compiled");
  } else if (is_call_to_interpreted()) {
    tty->print("interpreted");
  }
  tty->cr();
}

#endif // !PRODUCT