src/share/vm/code/nmethod.cpp (Sdiff for 8015774)

Old:

 469 }
 470 
 471 nmethod* nmethod::new_native_nmethod(methodHandle method,
 472   int compile_id,
 473   CodeBuffer *code_buffer,
 474   int vep_offset,
 475   int frame_complete,
 476   int frame_size,
 477   ByteSize basic_lock_owner_sp_offset,
 478   ByteSize basic_lock_sp_offset,
 479   OopMapSet* oop_maps) {
 480   code_buffer->finalize_oop_references(method);
 481   // create nmethod
 482   nmethod* nm = NULL;
 483   {
 484     MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
 485     int native_nmethod_size = allocation_size(code_buffer, sizeof(nmethod));
 486     CodeOffsets offsets;
 487     offsets.set_value(CodeOffsets::Verified_Entry, vep_offset);
 488     offsets.set_value(CodeOffsets::Frame_Complete, frame_complete);
 489     nm = new (native_nmethod_size) nmethod(method(), native_nmethod_size,
 490                                             compile_id, &offsets,
 491                                             code_buffer, frame_size,
 492                                             basic_lock_owner_sp_offset,
 493                                             basic_lock_sp_offset, oop_maps);
 494     NOT_PRODUCT(if (nm != NULL)  nmethod_stats.note_native_nmethod(nm));
 495     if (PrintAssembly && nm != NULL) {
 496       Disassembler::decode(nm);
 497     }
 498   }
 499   // verify nmethod
 500   debug_only(if (nm) nm->verify();) // might block
 501 
 502   if (nm != NULL) {
 503     nm->log_new_nmethod();
 504   }
 505 
 506   return nm;
 507 }
 508 
 509 #ifdef HAVE_DTRACE_H
 510 nmethod* nmethod::new_dtrace_nmethod(methodHandle method,
 511                                      CodeBuffer *code_buffer,
 512                                      int vep_offset,
 513                                      int trap_offset,
 514                                      int frame_complete,
 515                                      int frame_size) {
 516   code_buffer->finalize_oop_references(method);
 517   // create nmethod
 518   nmethod* nm = NULL;
 519   {
 520     MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
 521     int nmethod_size = allocation_size(code_buffer, sizeof(nmethod));
 522     CodeOffsets offsets;
 523     offsets.set_value(CodeOffsets::Verified_Entry, vep_offset);
 524     offsets.set_value(CodeOffsets::Dtrace_trap, trap_offset);
 525     offsets.set_value(CodeOffsets::Frame_Complete, frame_complete);
 526 
 527     nm = new (nmethod_size) nmethod(method(), nmethod_size,
 528                                     &offsets, code_buffer, frame_size);
 529 
 530     NOT_PRODUCT(if (nm != NULL)  nmethod_stats.note_nmethod(nm));
 531     if (PrintAssembly && nm != NULL) {
 532       Disassembler::decode(nm);
 533     }
 534   }
 535   // verify nmethod
 536   debug_only(if (nm) nm->verify();) // might block
 537 
 538   if (nm != NULL) {
 539     nm->log_new_nmethod();
 540   }
 541 
 542   return nm;
 543 }
 544 
 545 #endif // def HAVE_DTRACE_H
 546 
 547 nmethod* nmethod::new_nmethod(methodHandle method,


 555   OopMapSet* oop_maps,
 556   ExceptionHandlerTable* handler_table,
 557   ImplicitExceptionTable* nul_chk_table,
 558   AbstractCompiler* compiler,
 559   int comp_level
 560 )
 561 {
 562   assert(debug_info->oop_recorder() == code_buffer->oop_recorder(), "shared OR");
 563   code_buffer->finalize_oop_references(method);
 564   // create nmethod
 565   nmethod* nm = NULL;
 566   { MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
 567     int nmethod_size =
 568       allocation_size(code_buffer, sizeof(nmethod))
 569       + adjust_pcs_size(debug_info->pcs_size())
 570       + round_to(dependencies->size_in_bytes() , oopSize)
 571       + round_to(handler_table->size_in_bytes(), oopSize)
 572       + round_to(nul_chk_table->size_in_bytes(), oopSize)
 573       + round_to(debug_info->data_size()       , oopSize);
 574 
 575     nm = new (nmethod_size)
 576     nmethod(method(), nmethod_size, compile_id, entry_bci, offsets,
 577             orig_pc_offset, debug_info, dependencies, code_buffer, frame_size,
 578             oop_maps,
 579             handler_table,
 580             nul_chk_table,
 581             compiler,
 582             comp_level);
 583 
 584     if (nm != NULL) {
 585       // To make dependency checking during class loading fast, record
 586       // the nmethod dependencies in the classes it is dependent on.
 587       // This allows the dependency checking code to simply walk the
 588       // class hierarchy above the loaded class, checking only nmethods
 589       // which are dependent on those classes.  The slow way is to
 590       // check every nmethod for dependencies which makes it linear in
 591       // the number of methods compiled.  For applications with a lot of
 592       // classes the slow way is too slow.
 593       for (Dependencies::DepStream deps(nm); deps.next(); ) {
 594         Klass* klass = deps.context_type();
 595         if (klass == NULL) {


 764       xtty->method(_method);
 765       xtty->stamp();
 766       xtty->end_head(" address='" INTPTR_FORMAT "'", (intptr_t) this);
 767     }
 768     // print the header part first
 769     print();
 770     // then print the requested information
 771     if (PrintNMethods) {
 772       print_code();
 773     }
 774     if (PrintRelocations) {
 775       print_relocations();
 776     }
 777     if (xtty != NULL) {
 778       xtty->tail("print_dtrace_nmethod");
 779     }
 780   }
 781 }
 782 #endif // def HAVE_DTRACE_H
 783 
 784 void* nmethod::operator new(size_t size, int nmethod_size) throw() {
 785   // Not critical, may return NULL if there is too little contiguous memory
 786   return CodeCache::allocate(nmethod_size);

 787 }
 788 
 789 nmethod::nmethod(
 790   Method* method,
 791   int nmethod_size,
 792   int compile_id,
 793   int entry_bci,
 794   CodeOffsets* offsets,
 795   int orig_pc_offset,
 796   DebugInformationRecorder* debug_info,
 797   Dependencies* dependencies,
 798   CodeBuffer *code_buffer,
 799   int frame_size,
 800   OopMapSet* oop_maps,
 801   ExceptionHandlerTable* handler_table,
 802   ImplicitExceptionTable* nul_chk_table,
 803   AbstractCompiler* compiler,
 804   int comp_level
 805   )
 806   : CodeBlob("nmethod", code_buffer, sizeof(nmethod),


1399   if (TraceCreateZombies) {
1400     tty->print_cr("nmethod <" INTPTR_FORMAT "> code made %s", this, (state == not_entrant) ? "not entrant" : "zombie");
1401   }
1402 
1403   NMethodSweeper::report_state_change(this);
1404   return true;
1405 }
1406 
1407 void nmethod::flush() {
1408   // Note that there are no valid oops in the nmethod anymore.
1409   assert(is_zombie() || (is_osr_method() && is_unloaded()), "must be a zombie method");
1410   assert(is_marked_for_reclamation() || (is_osr_method() && is_unloaded()), "must be marked for reclamation");
1411 
1412   assert (!is_locked_by_vm(), "locked methods shouldn't be flushed");
1413   assert_locked_or_safepoint(CodeCache_lock);
1414 
1415   // completely deallocate this method
1416   Events::log(JavaThread::current(), "flushing nmethod " INTPTR_FORMAT, this);
1417   if (PrintMethodFlushing) {
1418     tty->print_cr("*flushing nmethod %3d/" INTPTR_FORMAT ". Live blobs:" UINT32_FORMAT "/Free CodeCache:" SIZE_FORMAT "Kb",
1419         _compile_id, this, CodeCache::nof_blobs(), CodeCache::unallocated_capacity()/1024);
1420   }
1421 
1422   // We need to deallocate any ExceptionCache data.
 1423   // Note that we do not need to grab the nmethod lock for this; it
 1424   // had better be thread-safe if we're disposing of it!
1425   ExceptionCache* ec = exception_cache();
1426   set_exception_cache(NULL);
 1427   while (ec != NULL) {
1428     ExceptionCache* next = ec->next();
1429     delete ec;
1430     ec = next;
1431   }
1432 
1433   if (on_scavenge_root_list()) {
1434     CodeCache::drop_scavenge_root_nmethod(this);
1435   }
1436 
1437 #ifdef SHARK
1438   ((SharkCompiler *) compiler())->free_compiled_method(insts_begin());
1439 #endif // SHARK
1440 
1441   ((CodeBlob*)(this))->flush();
1442 
1443   CodeCache::free(this);
1444 }
1445 
1446 
1447 //
1448 // Notify all classes this nmethod is dependent on that it is no
1449 // longer dependent. This should only be called in two situations.
 1450 // First, when an nmethod transitions to a zombie all dependents need
 1451 // to be cleared.  Since zombification happens at a safepoint there are
 1452 // no synchronization issues.  The second place is a little more tricky.
 1453 // During phase 1 of mark sweep class unloading may happen and as a
 1454 // result some nmethods may get unloaded.  In this case the flushing
 1455 // of dependencies must happen during phase 1 since after GC any
 1456 // dependencies in the unloaded nmethod won't be updated, so
 1457 // traversing the dependency information is unsafe.  In that case this
 1458 // function is called with a non-NULL argument and this function only
 1459 // notifies instanceKlasses that are reachable.
1460 
1461 void nmethod::flush_dependencies(BoolObjectClosure* is_alive) {
1462   assert_locked_or_safepoint(CodeCache_lock);
1463   assert(Universe::heap()->is_gc_active() == (is_alive != NULL),
 1464          "is_alive is non-NULL if and only if we are called during GC");
1465   if (!has_flushed_dependencies()) {
1466     set_has_flushed_dependencies();


2128     assert(NULL == linear_search(this, pc_offset, approximate), "search ok");
2129     return NULL;
2130   }
2131 }
2132 
2133 
2134 void nmethod::check_all_dependencies(DepChange& changes) {
2135   // Checked dependencies are allocated into this ResourceMark
2136   ResourceMark rm;
2137 
2138   // Turn off dependency tracing while actually testing dependencies.
2139   NOT_PRODUCT( FlagSetting fs(TraceDependencies, false) );
2140 
 2141   typedef ResourceHashtable<DependencySignature, int, &DependencySignature::hash,
 2142                             &DependencySignature::equals, 11027> DepTable;
 2143 
 2144   DepTable* table = new DepTable();
2145 
2146   // Iterate over live nmethods and check dependencies of all nmethods that are not
2147   // marked for deoptimization. A particular dependency is only checked once.
2148   for(nmethod* nm = CodeCache::alive_nmethod(CodeCache::first()); nm != NULL; nm = CodeCache::alive_nmethod(CodeCache::next(nm))) {



2149     if (!nm->is_marked_for_deoptimization()) {
2150       for (Dependencies::DepStream deps(nm); deps.next(); ) {
2151         // Construct abstraction of a dependency.
2152         DependencySignature* current_sig = new DependencySignature(deps);
2153 
2154         // Determine if dependency is already checked. table->put(...) returns
2155         // 'true' if the dependency is added (i.e., was not in the hashtable).
2156         if (table->put(*current_sig, 1)) {
2157           if (deps.check_dependency() != NULL) {
2158             // Dependency checking failed. Print out information about the failed
2159             // dependency and finally fail with an assert. We can fail here, since
2160             // dependency checking is never done in a product build.
2161             changes.print();
2162             nm->print();
2163             nm->print_dependencies();
2164             assert(false, "Should have been marked for deoptimization");
2165           }
2166         }
2167       }
2168     }


2169   }
2170 }
2171 
2172 bool nmethod::check_dependency_on(DepChange& changes) {
2173   // What has happened:
2174   // 1) a new class dependee has been added
2175   // 2) dependee and all its super classes have been marked
2176   bool found_check = false;  // set true if we are upset
2177   for (Dependencies::DepStream deps(this); deps.next(); ) {
2178     // Evaluate only relevant dependencies.
2179     if (deps.spot_check_dependency_at(changes) != NULL) {
2180       found_check = true;
2181       NOT_DEBUG(break);
2182     }
2183   }
2184   return found_check;
2185 }
2186 
2187 bool nmethod::is_evol_dependent_on(Klass* dependee) {
2188   InstanceKlass *dependee_ik = InstanceKlass::cast(dependee);


2342                   (void *)(*p), (intptr_t)p, (int)((intptr_t)p - (intptr_t)_nm));
2343   }
2344   virtual void do_oop(narrowOop* p) { ShouldNotReachHere(); }
2345 };
2346 
2347 void nmethod::verify() {
2348 
 2349   // Hmm. OSR methods can be deopted but not marked as zombie or not_entrant,
 2350   // which seems odd.
2351 
 2352   if (is_zombie() || is_not_entrant())
2353     return;
2354 
2355   // Make sure all the entry points are correctly aligned for patching.
2356   NativeJump::check_verified_entry_alignment(entry_point(), verified_entry_point());
2357 
2358   // assert(method()->is_oop(), "must be valid");
2359 
2360   ResourceMark rm;
2361 
2362   if (!CodeCache::contains(this)) {
2363     fatal(err_msg("nmethod at " INTPTR_FORMAT " not in zone", this));
2364   }
2365 
 2366   if (is_native_method())
2367     return;
2368 
2369   nmethod* nm = CodeCache::find_nmethod(verified_entry_point());
2370   if (nm != this) {
2371     fatal(err_msg("findNMethod did not find this nmethod (" INTPTR_FORMAT ")",
2372                   this));
2373   }
2374 
2375   for (PcDesc* p = scopes_pcs_begin(); p < scopes_pcs_end(); p++) {
 2376     if (!p->verify(this)) {
2377       tty->print_cr("\t\tin nmethod at " INTPTR_FORMAT " (pcs)", this);
2378     }
2379   }
2380 
2381   VerifyOopsClosure voc(this);
2382   oops_do(&voc);




New:

 469 }
 470 
 471 nmethod* nmethod::new_native_nmethod(methodHandle method,
 472   int compile_id,
 473   CodeBuffer *code_buffer,
 474   int vep_offset,
 475   int frame_complete,
 476   int frame_size,
 477   ByteSize basic_lock_owner_sp_offset,
 478   ByteSize basic_lock_sp_offset,
 479   OopMapSet* oop_maps) {
 480   code_buffer->finalize_oop_references(method);
 481   // create nmethod
 482   nmethod* nm = NULL;
 483   {
 484     MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
 485     int native_nmethod_size = allocation_size(code_buffer, sizeof(nmethod));
 486     CodeOffsets offsets;
 487     offsets.set_value(CodeOffsets::Verified_Entry, vep_offset);
 488     offsets.set_value(CodeOffsets::Frame_Complete, frame_complete);
 489     nm = new (native_nmethod_size, CompLevel_none) nmethod(method(), native_nmethod_size,
 490                                             compile_id, &offsets,
 491                                             code_buffer, frame_size,
 492                                             basic_lock_owner_sp_offset,
 493                                             basic_lock_sp_offset, oop_maps);
 494     NOT_PRODUCT(if (nm != NULL)  nmethod_stats.note_native_nmethod(nm));
 495     if (PrintAssembly && nm != NULL) {
 496       Disassembler::decode(nm);
 497     }
 498   }
 499   // verify nmethod
 500   debug_only(if (nm) nm->verify();) // might block
 501 
 502   if (nm != NULL) {
 503     nm->log_new_nmethod();
 504   }
 505 
 506   return nm;
 507 }
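
The factory above leans on HotSpot's build-variant macros: NOT_PRODUCT(...) compiles its
argument only into non-product builds, and debug_only(...) only into debug (ASSERT) builds,
which is why the stats bookkeeping and the potentially blocking verify() vanish from product
binaries. Roughly, simplifying the definitions in globalDefinitions.hpp:

    #ifdef ASSERT
      #define debug_only(code)  code   // debug builds execute the code
    #else
      #define debug_only(code)         // other builds compile it away
    #endif

    #ifndef PRODUCT
      #define NOT_PRODUCT(code) code   // present in all non-product builds
    #else
      #define NOT_PRODUCT(code)
    #endif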
 508 
 509 #ifdef HAVE_DTRACE_H
 510 nmethod* nmethod::new_dtrace_nmethod(methodHandle method,
 511                                      CodeBuffer *code_buffer,
 512                                      int vep_offset,
 513                                      int trap_offset,
 514                                      int frame_complete,
 515                                      int frame_size) {
 516   code_buffer->finalize_oop_references(method);
 517   // create nmethod
 518   nmethod* nm = NULL;
 519   {
 520     MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
 521     int nmethod_size = allocation_size(code_buffer, sizeof(nmethod));
 522     CodeOffsets offsets;
 523     offsets.set_value(CodeOffsets::Verified_Entry, vep_offset);
 524     offsets.set_value(CodeOffsets::Dtrace_trap, trap_offset);
 525     offsets.set_value(CodeOffsets::Frame_Complete, frame_complete);
 526 
 527     nm = new (nmethod_size, CompLevel_none) nmethod(method(), nmethod_size,
 528                                     &offsets, code_buffer, frame_size);
 529 
 530     NOT_PRODUCT(if (nm != NULL)  nmethod_stats.note_nmethod(nm));
 531     if (PrintAssembly && nm != NULL) {
 532       Disassembler::decode(nm);
 533     }
 534   }
 535   // verify nmethod
 536   debug_only(if (nm) nm->verify();) // might block
 537 
 538   if (nm != NULL) {
 539     nm->log_new_nmethod();
 540   }
 541 
 542   return nm;
 543 }
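
Both factories record the well-known positions of the generated code (verified entry point,
frame-complete point, and here the DTrace trap) in a CodeOffsets table before constructing
the nmethod. A minimal sketch of that table's shape; the real class lives in the code-buffer
layer and carries more entries than shown here:

    // Illustrative sketch only, not the real CodeOffsets declaration.
    class CodeOffsetsSketch {
     public:
      enum Entry { Verified_Entry, Frame_Complete, Dtrace_trap, max_Entries };
      CodeOffsetsSketch() {
        for (int i = 0; i < max_Entries; i++) _values[i] = -1;  // -1 == unset
      }
      void set_value(Entry e, int offset) { _values[e] = offset; }
      int  value(Entry e) const           { return _values[e]; }
     private:
      int _values[max_Entries];
    };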
 544 
 545 #endif // def HAVE_DTRACE_H
 546 
 547 nmethod* nmethod::new_nmethod(methodHandle method,


 555   OopMapSet* oop_maps,
 556   ExceptionHandlerTable* handler_table,
 557   ImplicitExceptionTable* nul_chk_table,
 558   AbstractCompiler* compiler,
 559   int comp_level
 560 )
 561 {
 562   assert(debug_info->oop_recorder() == code_buffer->oop_recorder(), "shared OR");
 563   code_buffer->finalize_oop_references(method);
 564   // create nmethod
 565   nmethod* nm = NULL;
 566   { MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
 567     int nmethod_size =
 568       allocation_size(code_buffer, sizeof(nmethod))
 569       + adjust_pcs_size(debug_info->pcs_size())
 570       + round_to(dependencies->size_in_bytes() , oopSize)
 571       + round_to(handler_table->size_in_bytes(), oopSize)
 572       + round_to(nul_chk_table->size_in_bytes(), oopSize)
 573       + round_to(debug_info->data_size()       , oopSize);
 574 
 575     nm = new (nmethod_size, comp_level)
 576     nmethod(method(), nmethod_size, compile_id, entry_bci, offsets,
 577             orig_pc_offset, debug_info, dependencies, code_buffer, frame_size,
 578             oop_maps,
 579             handler_table,
 580             nul_chk_table,
 581             compiler,
 582             comp_level);
 583 
 584     if (nm != NULL) {
 585       // To make dependency checking during class loading fast, record
 586       // the nmethod dependencies in the classes it is dependent on.
 587       // This allows the dependency checking code to simply walk the
 588       // class hierarchy above the loaded class, checking only nmethods
 589       // which are dependent on those classes.  The slow way is to
 590       // check every nmethod for dependencies which makes it linear in
 591       // the number of methods compiled.  For applications with a lot of
 592       // classes the slow way is too slow.
 593       for (Dependencies::DepStream deps(nm); deps.next(); ) {
 594         Klass* klass = deps.context_type();
 595         if (klass == NULL) {
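
The hunk is cut off here. In this vintage of HotSpot the recording loop typically continues
by skipping context-free dependencies and registering the nmethod with each context class,
along these lines (a sketch of the usual shape, not the verbatim patch):

      for (Dependencies::DepStream deps(nm); deps.next(); ) {
        Klass* klass = deps.context_type();
        if (klass == NULL)  continue;   // e.g. evol_method deps have no context
        // Record this nmethod as a dependent of klass, so that a change to
        // the class can deoptimize exactly the nmethods that relied on it.
        InstanceKlass::cast(klass)->add_dependent_nmethod(nm);
      }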


 764       xtty->method(_method);
 765       xtty->stamp();
 766       xtty->end_head(" address='" INTPTR_FORMAT "'", (intptr_t) this);
 767     }
 768     // print the header part first
 769     print();
 770     // then print the requested information
 771     if (PrintNMethods) {
 772       print_code();
 773     }
 774     if (PrintRelocations) {
 775       print_relocations();
 776     }
 777     if (xtty != NULL) {
 778       xtty->tail("print_dtrace_nmethod");
 779     }
 780   }
 781 }
 782 #endif // def HAVE_DTRACE_H
 783 
 784 void* nmethod::operator new(size_t size, int nmethod_size, int comp_level) throw () {
 785   // nmethods are allocated on separate heaps and therefore do not share memory with critical CodeBlobs.
 786   // We nevertheless mark the allocation as critical to make sure all available heap memory can be used.
 787   return CodeCache::allocate(nmethod_size, CodeCache::get_code_blob_type(comp_level), true);
 788 }
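
comp_level now selects the target code heap through CodeCache::get_code_blob_type(). Its body
lives outside this file; one plausible mapping, consistent with the CodeBlobType constants
used elsewhere in this patch (an assumption for illustration, not the patch's implementation):

    // Assumed mapping: profiled C1 code goes to the profiled method heap,
    // everything else (native wrappers, unprofiled C1, C2) to the other one.
    static int get_code_blob_type(int comp_level) {
      if (comp_level == CompLevel_limited_profile ||
          comp_level == CompLevel_full_profile) {
        return CodeBlobType::MethodProfiled;
      }
      return CodeBlobType::MethodNonProfiled;
    }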
 789 
 790 nmethod::nmethod(
 791   Method* method,
 792   int nmethod_size,
 793   int compile_id,
 794   int entry_bci,
 795   CodeOffsets* offsets,
 796   int orig_pc_offset,
 797   DebugInformationRecorder* debug_info,
 798   Dependencies* dependencies,
 799   CodeBuffer *code_buffer,
 800   int frame_size,
 801   OopMapSet* oop_maps,
 802   ExceptionHandlerTable* handler_table,
 803   ImplicitExceptionTable* nul_chk_table,
 804   AbstractCompiler* compiler,
 805   int comp_level
 806   )
 807   : CodeBlob("nmethod", code_buffer, sizeof(nmethod),


1400   if (TraceCreateZombies) {
1401     tty->print_cr("nmethod <" INTPTR_FORMAT "> code made %s", this, (state == not_entrant) ? "not entrant" : "zombie");
1402   }
1403 
1404   NMethodSweeper::report_state_change(this);
1405   return true;
1406 }
1407 
1408 void nmethod::flush() {
1409   // Note that there are no valid oops in the nmethod anymore.
1410   assert(is_zombie() || (is_osr_method() && is_unloaded()), "must be a zombie method");
1411   assert(is_marked_for_reclamation() || (is_osr_method() && is_unloaded()), "must be marked for reclamation");
1412 
1413   assert (!is_locked_by_vm(), "locked methods shouldn't be flushed");
1414   assert_locked_or_safepoint(CodeCache_lock);
1415 
1416   // completely deallocate this method
1417   Events::log(JavaThread::current(), "flushing nmethod " INTPTR_FORMAT, this);
1418   if (PrintMethodFlushing) {
1419     tty->print_cr("*flushing nmethod %3d/" INTPTR_FORMAT ". Live blobs:" UINT32_FORMAT "/Free CodeCache:" SIZE_FORMAT "Kb",
1420         _compile_id, this, CodeCache::nof_blobs(), CodeCache::unallocated_capacity(CodeCache::get_code_blob_type(_comp_level))/1024);
1421   }
1422 
1423   // We need to deallocate any ExceptionCache data.
 1424   // Note that we do not need to grab the nmethod lock for this; it
 1425   // had better be thread-safe if we're disposing of it!
1426   ExceptionCache* ec = exception_cache();
1427   set_exception_cache(NULL);
 1428   while (ec != NULL) {
1429     ExceptionCache* next = ec->next();
1430     delete ec;
1431     ec = next;
1432   }
1433 
1434   if (on_scavenge_root_list()) {
1435     CodeCache::drop_scavenge_root_nmethod(this);
1436   }
1437 
1438 #ifdef SHARK
1439   ((SharkCompiler *) compiler())->free_compiled_method(insts_begin());
1440 #endif // SHARK
1441 
1442   ((CodeBlob*)(this))->flush();
1443 
1444   CodeCache::free(this, CodeCache::get_code_blob_type(_comp_level));
1445 }
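
The odd-looking ((CodeBlob*)(this))->flush() reaches CodeBlob's own (non-virtual) flush()
from inside nmethod::flush(), so the nmethod version is not re-entered; the conventional
spelling is CodeBlob::flush(). A self-contained illustration of that name-hiding idiom:

    struct Base {
      void flush() { /* release Base-owned resources */ }
    };
    struct Derived : Base {
      void flush() {       // hides, does not override, Base::flush
        /* release Derived-owned resources first ... */
        Base::flush();     // equivalent to ((Base*)this)->flush()
      }
    };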
1446 

1447 //
1448 // Notify all classes this nmethod is dependent on that it is no
1449 // longer dependent. This should only be called in two situations.
 1450 // First, when an nmethod transitions to a zombie all dependents need
 1451 // to be cleared.  Since zombification happens at a safepoint there are
 1452 // no synchronization issues.  The second place is a little more tricky.
 1453 // During phase 1 of mark sweep class unloading may happen and as a
 1454 // result some nmethods may get unloaded.  In this case the flushing
 1455 // of dependencies must happen during phase 1 since after GC any
 1456 // dependencies in the unloaded nmethod won't be updated, so
 1457 // traversing the dependency information is unsafe.  In that case this
 1458 // function is called with a non-NULL argument and this function only
 1459 // notifies instanceKlasses that are reachable.
1460 
1461 void nmethod::flush_dependencies(BoolObjectClosure* is_alive) {
1462   assert_locked_or_safepoint(CodeCache_lock);
1463   assert(Universe::heap()->is_gc_active() == (is_alive != NULL),
 1464          "is_alive is non-NULL if and only if we are called during GC");
1465   if (!has_flushed_dependencies()) {
1466     set_has_flushed_dependencies();
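
The visible hunk stops after setting the flag. Based on the comment above, the body that
follows conventionally walks this nmethod's DepStream and unregisters it from each context
class, consulting is_alive during GC so that classes about to be unloaded are never touched.
A sketch of that shape (assumed, not the verbatim patch):

    for (Dependencies::DepStream deps(this); deps.next(); ) {
      Klass* klass = deps.context_type();
      if (klass == NULL)  continue;
      // During GC, is_alive filters out context classes that are about to be
      // unloaded; outside GC (is_alive == NULL) every context is notified.
      if (is_alive == NULL || klass->is_loader_alive(is_alive)) {
        InstanceKlass::cast(klass)->remove_dependent_nmethod(this);
      }
    }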


2128     assert(NULL == linear_search(this, pc_offset, approximate), "search ok");
2129     return NULL;
2130   }
2131 }
2132 
2133 
2134 void nmethod::check_all_dependencies(DepChange& changes) {
 2135   // Checked dependencies are allocated into this ResourceMark
 2136   ResourceMark rm;
 2137 
 2138   // Turn off dependency tracing while actually testing dependencies.
 2139   NOT_PRODUCT( FlagSetting fs(TraceDependencies, false) );
 2140 
 2141   typedef ResourceHashtable<DependencySignature, int, &DependencySignature::hash,
 2142                             &DependencySignature::equals, 11027> DepTable;
 2143 
 2144   DepTable* table = new DepTable();
 2145 
 2146   // Iterate over live nmethods and check dependencies of all nmethods that are not
 2147   // marked for deoptimization. A particular dependency is only checked once.
 2148   for (int code_blob_type = CodeBlobType::MethodNonProfiled; code_blob_type <= CodeBlobType::MethodProfiled; ++code_blob_type) {
 2149     // Only check dependencies of live nmethods
 2150     nmethod* nm = (nmethod*) CodeCache::first_alive_blob(code_blob_type);
 2151     while (nm != NULL) {
 2152       if (!nm->is_marked_for_deoptimization()) {
 2153         for (Dependencies::DepStream deps(nm); deps.next(); ) {
 2154           // Construct abstraction of a dependency.
 2155           DependencySignature* current_sig = new DependencySignature(deps);
 2156 
 2157           // Determine if dependency is already checked. table->put(...) returns
 2158           // 'true' if the dependency is added (i.e., was not in the hashtable).
 2159           if (table->put(*current_sig, 1)) {
 2160             if (deps.check_dependency() != NULL) {
 2161               // Dependency checking failed. Print out information about the failed
 2162               // dependency and finally fail with an assert. We can fail here, since
 2163               // dependency checking is never done in a product build.
 2164               changes.print();
 2165               nm->print();
 2166               nm->print_dependencies();
 2167               assert(false, "Should have been marked for deoptimization");
 2168             }
 2169           }
 2170         }
 2171       }
 2172       nm = (nmethod*) CodeCache::next_alive_blob(nm, code_blob_type);
 2173     }
 2174   }
 2175 }
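
The dedupe above hinges on ResourceHashtable::put() returning 'true' only when the key was
absent. The same check-once idiom as a standalone, runnable analogue that uses the standard
library in place of HotSpot's resource-area table:

    #include <unordered_set>
    #include <cstdio>

    int main() {
      std::unordered_set<int> seen;          // plays the role of DepTable
      const int deps[] = { 7, 3, 7, 9, 3 };  // duplicate "signatures"
      for (int sig : deps) {
        if (seen.insert(sig).second) {       // true => first occurrence
          std::printf("checking dependency %d\n", sig);
        }
      }
      return 0;                              // 7, 3 and 9 each checked once
    }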
2176 
2177 bool nmethod::check_dependency_on(DepChange& changes) {
2178   // What has happened:
2179   // 1) a new class dependee has been added
2180   // 2) dependee and all its super classes have been marked
2181   bool found_check = false;  // set true if we are upset
2182   for (Dependencies::DepStream deps(this); deps.next(); ) {
2183     // Evaluate only relevant dependencies.
2184     if (deps.spot_check_dependency_at(changes) != NULL) {
2185       found_check = true;
2186       NOT_DEBUG(break);
2187     }
2188   }
2189   return found_check;
2190 }
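
NOT_DEBUG is the mirror image of debug_only: its argument is compiled only into non-debug
builds, so a product VM breaks out after the first failing dependency while a debug VM keeps
scanning and reports every failure. Simplified:

    #ifdef ASSERT
      #define NOT_DEBUG(code)        // debug builds: keep scanning
    #else
      #define NOT_DEBUG(code) code   // product builds: break early
    #endif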
2191 
2192 bool nmethod::is_evol_dependent_on(Klass* dependee) {
2193   InstanceKlass *dependee_ik = InstanceKlass::cast(dependee);


2347                   (void *)(*p), (intptr_t)p, (int)((intptr_t)p - (intptr_t)_nm));
2348   }
2349   virtual void do_oop(narrowOop* p) { ShouldNotReachHere(); }
2350 };
2351 
2352 void nmethod::verify() {
2353 
 2354   // Hmm. OSR methods can be deopted but not marked as zombie or not_entrant,
 2355   // which seems odd.
2356 
 2357   if (is_zombie() || is_not_entrant())
2358     return;
2359 
2360   // Make sure all the entry points are correctly aligned for patching.
2361   NativeJump::check_verified_entry_alignment(entry_point(), verified_entry_point());
2362 
2363   // assert(method()->is_oop(), "must be valid");
2364 
2365   ResourceMark rm;
2366 
2367   if (!CodeCache::contains_nmethod(this)) {
2368     fatal(err_msg("nmethod at " INTPTR_FORMAT " not in zone", this));
2369   }
2370 
2371   if(is_native_method() )
2372     return;
2373 
2374   nmethod* nm = CodeCache::find_nmethod(verified_entry_point());
2375   if (nm != this) {
2376     fatal(err_msg("findNMethod did not find this nmethod (" INTPTR_FORMAT ")",
2377                   this));
2378   }
2379 
2380   for (PcDesc* p = scopes_pcs_begin(); p < scopes_pcs_end(); p++) {
 2381     if (!p->verify(this)) {
2382       tty->print_cr("\t\tin nmethod at " INTPTR_FORMAT " (pcs)", this);
2383     }
2384   }
2385 
2386   VerifyOopsClosure voc(this);
2387   oops_do(&voc);

