/*
 * Copyright (c) 2015, 2018, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "code/compiledIC.hpp"
#include "code/compiledMethod.inline.hpp"
#include "code/scopeDesc.hpp"
#include "code/codeCache.hpp"
#include "code/icBuffer.hpp"
#include "gc/shared/barrierSet.hpp"
#include "gc/shared/gcBehaviours.hpp"
#include "interpreter/bytecode.inline.hpp"
#include "logging/log.hpp"
#include "logging/logTag.hpp"
#include "memory/resourceArea.hpp"
#include "oops/methodData.hpp"
#include "oops/method.inline.hpp"
#include "prims/methodHandles.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/mutexLocker.hpp"
#include "runtime/sharedRuntime.hpp"

CompiledMethod::CompiledMethod(Method* method, const char* name, CompilerType type, const CodeBlobLayout& layout,
                               int frame_complete_offset, int frame_size, ImmutableOopMapSet* oop_maps,
                               bool caller_must_gc_arguments)
  : CodeBlob(name, type, layout, frame_complete_offset, frame_size, oop_maps, caller_must_gc_arguments),
    _mark_for_deoptimization_status(not_marked),
    _method(method),
    _gc_data(NULL)
{
  init_defaults();
}

CompiledMethod::CompiledMethod(Method* method, const char* name, CompilerType type, int size,
                               int header_size, CodeBuffer* cb, int frame_complete_offset, int frame_size,
                               OopMapSet* oop_maps, bool caller_must_gc_arguments)
  : CodeBlob(name, type, CodeBlobLayout((address) this, size, header_size, cb), cb,
             frame_complete_offset, frame_size, oop_maps, caller_must_gc_arguments),
    _mark_for_deoptimization_status(not_marked),
    _method(method),
    _gc_data(NULL)
{
  init_defaults();
}

void CompiledMethod::init_defaults() {
  _has_unsafe_access         = 0;
  _has_method_handle_invokes = 0;
  _lazy_critical_native      = 0;
  _has_wide_vectors          = 0;
}

bool CompiledMethod::is_method_handle_return(address return_pc) {
  if (!has_method_handle_invokes())  return false;
  PcDesc* pd = pc_desc_at(return_pc);
  if (pd == NULL)
    return false;
  return pd->is_method_handle_invoke();
}

// Returns a string version of the method state.
const char* CompiledMethod::state() const {
  int state = get_state();
  switch (state) {
  case not_installed:
    return "not installed";
  case in_use:
    return "in use";
  case not_used:
    return "not_used";
  case not_entrant:
    return "not_entrant";
  case zombie:
    return "zombie";
  case unloaded:
    return "unloaded";
  default:
    fatal("unexpected method state: %d", state);
    return NULL;
  }
}

//-----------------------------------------------------------------------------

ExceptionCache* CompiledMethod::exception_cache_acquire() const {
  return OrderAccess::load_acquire(&_exception_cache);
}

void CompiledMethod::add_exception_cache_entry(ExceptionCache* new_entry) {
  assert(ExceptionCache_lock->owned_by_self(), "Must hold the ExceptionCache_lock");
  assert(new_entry != NULL, "Must be non null");
  assert(new_entry->next() == NULL, "Must be null");

  for (;;) {
    ExceptionCache *ec = exception_cache();
    if (ec != NULL) {
      Klass* ex_klass = ec->exception_type();
      if (!ex_klass->is_loader_alive()) {
        // We must guarantee that entries are not inserted with new next pointer
        // edges to ExceptionCache entries with dead klasses, due to bad interactions
        // with concurrent ExceptionCache cleanup. Therefore, the inserts roll
        // the head pointer forward to the first live ExceptionCache, so that the new
        // next pointers always point at live ExceptionCaches, that are not removed due
        // to concurrent ExceptionCache cleanup.
        ExceptionCache* next = ec->next();
        if (Atomic::cmpxchg(next, &_exception_cache, ec) == ec) {
          CodeCache::release_exception_cache(ec);
        }
        continue;
      }
      ec = exception_cache();
      if (ec != NULL) {
        new_entry->set_next(ec);
      }
    }
    if (Atomic::cmpxchg(new_entry, &_exception_cache, ec) == ec) {
      return;
    }
  }
}
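// The head-rolling protocol used by add_exception_cache_entry() above (and by
// clean_exception_cache() below) can be illustrated with a minimal, standalone
// sketch. This is not part of the VM sources; it uses std::atomic instead of
// HotSpot's Atomic/OrderAccess, and the names Node, list_head and
// prune_dead_head are invented purely for illustration:
//
//   #include <atomic>
//
//   struct Node { Node* next; bool alive; };
//   std::atomic<Node*> list_head;
//
//   // Only the head is ever unlinked; interior next pointers are left alone,
//   // mirroring how inserts and the cleaner contend on the head alone.
//   void prune_dead_head() {
//     Node* h = list_head.load(std::memory_order_acquire);
//     while (h != nullptr && !h->alive) {
//       Node* next = h->next;
//       if (list_head.compare_exchange_strong(h, next)) {
//         // h is now unlinked; reclamation would be deferred, analogous to the
//         // purge list and global handshake described below.
//       }
//       h = list_head.load(std::memory_order_acquire);
//     }
//   }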
void CompiledMethod::clean_exception_cache() {
  // For each nmethod, only a single thread may call this cleanup function
  // at the same time, whether called in STW cleanup or concurrent cleanup.
  // Note that if the GC is processing exception cache cleaning in a concurrent phase,
  // then a single writer may contend with cleaning up the head pointer to the
  // first ExceptionCache node that has a Klass* that is alive. That is fine,
  // as long as there is no concurrent cleanup of next pointers from concurrent writers.
  // And the concurrent writers do not clean up next pointers, only the head.
  // Also note that concurrent readers will walk through Klass* pointers that are not
  // alive. That does not cause ABA problems, because Klass* is deleted after
  // a handshake with all threads, after all stale ExceptionCaches have been
  // unlinked. That is also when the CodeCache::exception_cache_purge_list()
  // is deleted, with all ExceptionCache entries that were cleaned concurrently.
  // That similarly implies that CAS operations on ExceptionCache entries do not
  // suffer from ABA problems as unlinking and deletion is separated by a global
  // handshake operation.
  ExceptionCache* prev = NULL;
  ExceptionCache* curr = exception_cache_acquire();

  while (curr != NULL) {
    ExceptionCache* next = curr->next();

    if (!curr->exception_type()->is_loader_alive()) {
      if (prev == NULL) {
        // Try to clean head; this is contended by concurrent inserts, that
        // both lazily clean the head, and insert entries at the head. If
        // the CAS fails, the operation is restarted.
        if (Atomic::cmpxchg(next, &_exception_cache, curr) != curr) {
          prev = NULL;
          curr = exception_cache_acquire();
          continue;
        }
      } else {
        // It is impossible, during cleanup, to connect the next pointer to
        // an ExceptionCache that has not been published before a safepoint
        // prior to the cleanup. Therefore, release is not required.
        prev->set_next(next);
      }
      // prev stays the same.

      CodeCache::release_exception_cache(curr);
    } else {
      prev = curr;
    }

    curr = next;
  }
}

// These are the public access methods for the exception cache.
address CompiledMethod::handler_for_exception_and_pc(Handle exception, address pc) {
  // We never grab a lock to read the exception cache, so we may
  // have false negatives. This is okay, as it can only happen during
  // the first few exception lookups for a given nmethod.
  ExceptionCache* ec = exception_cache_acquire();
  while (ec != NULL) {
    address ret_val;
    if ((ret_val = ec->match(exception, pc)) != NULL) {
      return ret_val;
    }
    ec = ec->next();
  }
  return NULL;
}

void CompiledMethod::add_handler_for_exception_and_pc(Handle exception, address pc, address handler) {
  // There are potential race conditions during exception cache updates, so we
  // must own the ExceptionCache_lock before doing ANY modifications. Because
  // we don't lock during reads, it is possible to have several threads attempt
  // to update the cache with the same data. We need to check for already inserted
  // copies of the current data before adding it.

  MutexLocker ml(ExceptionCache_lock);
  ExceptionCache* target_entry = exception_cache_entry_for_exception(exception);

  if (target_entry == NULL || !target_entry->add_address_and_handler(pc, handler)) {
    target_entry = new ExceptionCache(exception, pc, handler);
    add_exception_cache_entry(target_entry);
  }
}
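// Typical caller pattern for the two methods above (an illustrative sketch only;
// in practice the exception dispatch code in SharedRuntime performs this dance).
// 'cm', 'exception', 'ret_pc' and compute_exception_handler() are assumed names:
//
//   address handler = cm->handler_for_exception_and_pc(exception, ret_pc);  // lock-free lookup
//   if (handler == NULL) {
//     handler = compute_exception_handler(cm, exception, ret_pc);           // hypothetical helper
//     cm->add_handler_for_exception_and_pc(exception, ret_pc, handler);     // takes ExceptionCache_lock
//   }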
// These methods are private, and used to manipulate the exception cache
// directly.
ExceptionCache* CompiledMethod::exception_cache_entry_for_exception(Handle exception) {
  ExceptionCache* ec = exception_cache_acquire();
  while (ec != NULL) {
    if (ec->match_exception_with_space(exception)) {
      return ec;
    }
    ec = ec->next();
  }
  return NULL;
}

//-------------end of code for ExceptionCache--------------

bool CompiledMethod::is_at_poll_return(address pc) {
  RelocIterator iter(this, pc, pc+1);
  while (iter.next()) {
    if (iter.type() == relocInfo::poll_return_type)
      return true;
  }
  return false;
}


bool CompiledMethod::is_at_poll_or_poll_return(address pc) {
  RelocIterator iter(this, pc, pc+1);
  while (iter.next()) {
    relocInfo::relocType t = iter.type();
    if (t == relocInfo::poll_return_type || t == relocInfo::poll_type)
      return true;
  }
  return false;
}

void CompiledMethod::verify_oop_relocations() {
  // Ensure that the code matches the current oop values
  RelocIterator iter(this, NULL, NULL);
  while (iter.next()) {
    if (iter.type() == relocInfo::oop_type) {
      oop_Relocation* reloc = iter.oop_reloc();
      if (!reloc->oop_is_immediate()) {
        reloc->verify_oop_relocation();
      }
    }
  }
}


ScopeDesc* CompiledMethod::scope_desc_at(address pc) {
  PcDesc* pd = pc_desc_at(pc);
  guarantee(pd != NULL, "scope must be present");
  return new ScopeDesc(this, pd->scope_decode_offset(),
                       pd->obj_decode_offset(), pd->should_reexecute(), pd->rethrow_exception(),
                       pd->return_oop(), pd->return_vt());
}

ScopeDesc* CompiledMethod::scope_desc_near(address pc) {
  PcDesc* pd = pc_desc_near(pc);
  guarantee(pd != NULL, "scope must be present");
  return new ScopeDesc(this, pd->scope_decode_offset(),
                       pd->obj_decode_offset(), pd->should_reexecute(), pd->rethrow_exception(),
                       pd->return_oop(), pd->return_vt());
}

address CompiledMethod::oops_reloc_begin() const {
  // If the method is not entrant or zombie then a JMP is plastered over the
  // first few bytes.  If an oop in the old code was there, that oop
  // should not get GC'd.  Skip the first few bytes of oops on
  // not-entrant methods.
  if (frame_complete_offset() != CodeOffsets::frame_never_safe &&
      code_begin() + frame_complete_offset() >
      verified_entry_point() + NativeJump::instruction_size)
  {
    // If we have a frame_complete_offset after the native jump, then there
    // is no point trying to look for oops before that. This is a requirement
    // for being allowed to scan oops concurrently.
    return code_begin() + frame_complete_offset();
  }

  // It is not safe to read oops concurrently using entry barriers, if their
  // location depends on whether the nmethod is entrant or not.
  assert(BarrierSet::barrier_set()->barrier_set_nmethod() == NULL, "Not safe oop scan");

  address low_boundary = verified_entry_point();
  if (!is_in_use() && is_nmethod()) {
    low_boundary += NativeJump::instruction_size;
    // %%% Note: On SPARC we patch only a 4-byte trap, not a full NativeJump.
    // This means that the low_boundary is going to be a little too high.
    // This shouldn't matter, since oops of non-entrant methods are never used.
    // In fact, why are we bothering to look at oops in a non-entrant method??
  }
  return low_boundary;
}

int CompiledMethod::verify_icholder_relocations() {
  ResourceMark rm;
  int count = 0;

  RelocIterator iter(this);
  while(iter.next()) {
    if (iter.type() == relocInfo::virtual_call_type) {
      if (CompiledIC::is_icholder_call_site(iter.virtual_call_reloc(), this)) {
        CompiledIC *ic = CompiledIC_at(&iter);
        if (TraceCompiledIC) {
          tty->print("noticed icholder " INTPTR_FORMAT " ", p2i(ic->cached_icholder()));
          ic->print();
        }
        assert(ic->cached_icholder() != NULL, "must be non-NULL");
        count++;
      }
    }
  }

  return count;
}

// Method that knows how to preserve outgoing arguments at call. This method must be
// called with a frame corresponding to a Java invoke
void CompiledMethod::preserve_callee_argument_oops(frame fr, const RegisterMap *reg_map, OopClosure* f) {
  if (method() != NULL && !method()->is_native()) {
    address pc = fr.pc();
    SimpleScopeDesc ssd(this, pc);
    Bytecode_invoke call(ssd.method(), ssd.bci());
    bool has_receiver = call.has_receiver();
    bool has_appendix = call.has_appendix();
    Symbol* signature = call.signature();

    // The method attached by JIT-compilers should be used, if present.
    // Bytecode can be inaccurate in such case.
    Method* callee = attached_method_before_pc(pc);
    if (callee != NULL) {
      has_receiver = !(callee->access_flags().is_static());
      has_appendix = false;
      signature = callee->signature();

      // If value types are passed as fields, use the extended signature
      // which contains the types of all (oop) fields of the value type.
      if (callee->has_scalarized_args()) {
        const GrowableArray<SigEntry>* sig = callee->adapter()->get_sig_cc();
        signature = SigEntry::create_symbol(sig);
        has_receiver = false; // The extended signature contains the receiver type
      }
    }

    fr.oops_compiled_arguments_do(signature, has_receiver, has_appendix, reg_map, f);
  }
}

Method* CompiledMethod::attached_method(address call_instr) {
  assert(code_contains(call_instr), "not part of the nmethod");
  RelocIterator iter(this, call_instr, call_instr + 1);
  while (iter.next()) {
    if (iter.addr() == call_instr) {
      switch(iter.type()) {
        case relocInfo::static_call_type:      return iter.static_call_reloc()->method_value();
        case relocInfo::opt_virtual_call_type: return iter.opt_virtual_call_reloc()->method_value();
        case relocInfo::virtual_call_type:     return iter.virtual_call_reloc()->method_value();
        default:                               break;
      }
    }
  }
  return NULL; // not found
}

Method* CompiledMethod::attached_method_before_pc(address pc) {
  if (NativeCall::is_call_before(pc)) {
    NativeCall* ncall = nativeCall_before(pc);
    return attached_method(ncall->instruction_address());
  }
  return NULL; // not a call
}

void CompiledMethod::clear_inline_caches() {
  assert(SafepointSynchronize::is_at_safepoint(), "cleaning of IC's only allowed at safepoint");
  if (is_zombie()) {
    return;
  }

  RelocIterator iter(this);
  while (iter.next()) {
    iter.reloc()->clear_inline_cache();
  }
}

// Clear ICStubs of all compiled ICs
void CompiledMethod::clear_ic_stubs() {
  assert(CompiledICLocker::is_safe(this), "mt unsafe call");
  ResourceMark rm;
  RelocIterator iter(this);
  while(iter.next()) {
    if (iter.type() == relocInfo::virtual_call_type) {
      CompiledIC* ic = CompiledIC_at(&iter);
      ic->clear_ic_stub();
    }
  }
}

#ifdef ASSERT
// Check class_loader is alive for this bit of metadata.
static void check_class(Metadata* md) {
  Klass* klass = NULL;
  if (md->is_klass()) {
    klass = ((Klass*)md);
  } else if (md->is_method()) {
    klass = ((Method*)md)->method_holder();
  } else if (md->is_methodData()) {
    klass = ((MethodData*)md)->method()->method_holder();
  } else {
    md->print();
    ShouldNotReachHere();
  }
  assert(klass->is_loader_alive(), "must be alive");
}
#endif // ASSERT


bool CompiledMethod::clean_ic_if_metadata_is_dead(CompiledIC *ic) {
  if (ic->is_clean()) {
    return true;
  }
  if (ic->is_icholder_call()) {
    // The only exception is compiledICHolder metadata which may
    // yet be marked below. (We check this further below).
    CompiledICHolder* cichk_metadata = ic->cached_icholder();

    if (cichk_metadata->is_loader_alive()) {
      return true;
    }
  } else {
    Metadata* ic_metadata = ic->cached_metadata();
    if (ic_metadata != NULL) {
      if (ic_metadata->is_klass()) {
        if (((Klass*)ic_metadata)->is_loader_alive()) {
          return true;
        }
      } else if (ic_metadata->is_method()) {
        Method* method = (Method*)ic_metadata;
        assert(!method->is_old(), "old method should have been cleaned");
        if (method->method_holder()->is_loader_alive()) {
          return true;
        }
      } else {
        ShouldNotReachHere();
      }
    }
  }

  return ic->set_to_clean();
}

// static_stub_Relocations may have dangling references to
// nmethods so trim them out here.  Otherwise it looks like
// compiled code is maintaining a link to dead metadata.
void CompiledMethod::clean_ic_stubs() {
#ifdef ASSERT
  address low_boundary = oops_reloc_begin();
  RelocIterator iter(this, low_boundary);
  while (iter.next()) {
    address static_call_addr = NULL;
    if (iter.type() == relocInfo::opt_virtual_call_type) {
      CompiledIC* cic = CompiledIC_at(&iter);
      if (!cic->is_call_to_interpreted()) {
        static_call_addr = iter.addr();
      }
    } else if (iter.type() == relocInfo::static_call_type) {
      CompiledStaticCall* csc = compiledStaticCall_at(iter.reloc());
      if (!csc->is_call_to_interpreted()) {
        static_call_addr = iter.addr();
      }
    }
    if (static_call_addr != NULL) {
      RelocIterator sciter(this, low_boundary);
      while (sciter.next()) {
        if (sciter.type() == relocInfo::static_stub_type &&
            sciter.static_stub_reloc()->static_call() == static_call_addr) {
          sciter.static_stub_reloc()->clear_inline_cache();
        }
      }
    }
  }
#endif
}

// Clean references to unloaded nmethods at addr from this one, which is not unloaded.
template <class CompiledICorStaticCall>
static bool clean_if_nmethod_is_unloaded(CompiledICorStaticCall *ic, address addr, CompiledMethod* from,
                                         bool clean_all) {
  // Ok, to lookup references to zombies here
  CodeBlob *cb = CodeCache::find_blob_unsafe(addr);
  CompiledMethod* nm = (cb != NULL) ? cb->as_compiled_method_or_null() : NULL;
  if (nm != NULL) {
    // Clean inline caches pointing to both zombie and not_entrant methods
    if (clean_all || !nm->is_in_use() || nm->is_unloading() || (nm->method()->code() != nm)) {
      if (!ic->set_to_clean(from->is_alive())) {
        return false;
      }
      assert(ic->is_clean(), "nmethod " PTR_FORMAT " not clean %s", p2i(from), from->method()->name_and_sig_as_C_string());
    }
  }
  return true;
}

static bool clean_if_nmethod_is_unloaded(CompiledIC *ic, CompiledMethod* from,
                                         bool clean_all) {
  return clean_if_nmethod_is_unloaded(ic, ic->ic_destination(), from, clean_all);
}

static bool clean_if_nmethod_is_unloaded(CompiledStaticCall *csc, CompiledMethod* from,
                                         bool clean_all) {
  return clean_if_nmethod_is_unloaded(csc, csc->destination(), from, clean_all);
}

// Cleans caches in nmethods that point to either classes that are unloaded
// or nmethods that are unloaded.
//
// Can be called either in parallel by G1 currently or after all
// nmethods are unloaded. Return postponed=true in the parallel case for
// inline caches found that point to nmethods that are not yet visited during
// the do_unloading walk.
bool CompiledMethod::unload_nmethod_caches(bool unloading_occurred) {
  ResourceMark rm;

  // The exception cache only needs to be cleaned if class unloading occurred
  if (unloading_occurred) {
    clean_exception_cache();
  }

  if (!cleanup_inline_caches_impl(unloading_occurred, false)) {
    return false;
  }

  // All static stubs need to be cleaned.
  clean_ic_stubs();

  // Check that the metadata embedded in the nmethod is alive
  DEBUG_ONLY(metadata_do(check_class));
  return true;
}

void CompiledMethod::cleanup_inline_caches(bool clean_all) {
  for (;;) {
    ICRefillVerifier ic_refill_verifier;
    { CompiledICLocker ic_locker(this);
      if (cleanup_inline_caches_impl(false, clean_all)) {
        return;
      }
    }
    // Cleaning failed because we ran out of ICStubs for the IC transitions;
    // refill the InlineCacheBuffer outside the CompiledICLocker and retry.
    InlineCacheBuffer::refill_ic_stubs();
  }
}

// Called to clean up after class unloading for live nmethods and from the sweeper
// for all methods.
bool CompiledMethod::cleanup_inline_caches_impl(bool unloading_occurred, bool clean_all) {
  assert(CompiledICLocker::is_safe(this), "mt unsafe call");
  ResourceMark rm;

  // Find all calls in an nmethod and clear the ones that point to non-entrant,
  // zombie and unloaded nmethods.
  RelocIterator iter(this, oops_reloc_begin());
  while(iter.next()) {

    switch (iter.type()) {

    case relocInfo::virtual_call_type:
      if (unloading_occurred) {
        // If class unloading occurred we first clear ICs where the cached metadata
        // is referring to an unloaded klass or method.
        if (!clean_ic_if_metadata_is_dead(CompiledIC_at(&iter))) {
          return false;
        }
      }

      if (!clean_if_nmethod_is_unloaded(CompiledIC_at(&iter), this, clean_all)) {
        return false;
      }
      break;

    case relocInfo::opt_virtual_call_type:
      if (!clean_if_nmethod_is_unloaded(CompiledIC_at(&iter), this, clean_all)) {
        return false;
      }
      break;

    case relocInfo::static_call_type:
      if (!clean_if_nmethod_is_unloaded(compiledStaticCall_at(iter.reloc()), this, clean_all)) {
        return false;
      }
      break;

    default:
      break;
    }
  }

  return true;
}
// Iterating over all nmethods, e.g. with the help of CodeCache::nmethods_do(fun), was found
// to not be inherently safe. There is a chance that fields are seen which are not properly
// initialized. This happens despite the fact that nmethods_do() asserts the CodeCache_lock
// to be held.
// To bundle knowledge about necessary checks in one place, this function was introduced.
// It is not claimed that these checks are sufficient, but they were found to be necessary.
bool CompiledMethod::nmethod_access_is_safe(nmethod* nm) {
  Method* method = (nm == NULL) ? NULL : nm->method();  // nm->method() may be uninitialized, i.e. != NULL, but invalid
  return (nm != NULL) && (method != NULL) && (method->signature() != NULL) &&
         !nm->is_zombie() && !nm->is_not_installed() &&
         os::is_readable_pointer(method) &&
         os::is_readable_pointer(method->constants()) &&
         os::is_readable_pointer(method->signature());
}
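// Illustrative use of the check above (a sketch, not part of this file): a
// closure handed to CodeCache::nmethods_do() could guard its accesses like
// this. 'print_method_name' is an assumed example name:
//
//   void print_method_name(nmethod* nm) {
//     if (!CompiledMethod::nmethod_access_is_safe(nm)) {
//       return; // fields of nm may not be fully initialized yet
//     }
//     ResourceMark rm;
//     tty->print_cr("%s", nm->method()->name_and_sig_as_C_string());
//   }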