
src/cpu/x86/vm/nativeInst_x86.cpp

  24 
  25 #include "precompiled.hpp"
  26 #include "asm/macroAssembler.hpp"
  27 #include "memory/resourceArea.hpp"
  28 #include "nativeInst_x86.hpp"
  29 #include "oops/oop.inline.hpp"
  30 #include "runtime/handles.hpp"
  31 #include "runtime/sharedRuntime.hpp"
  32 #include "runtime/stubRoutines.hpp"
  33 #include "utilities/ostream.hpp"
  34 #ifdef COMPILER1
  35 #include "c1/c1_Runtime1.hpp"
  36 #endif
  37 
  38 PRAGMA_FORMAT_MUTE_WARNINGS_FOR_GCC
  39 
  40 void NativeInstruction::wrote(int offset) {
  41   ICache::invalidate_word(addr_at(offset));
  42 }
  43 
  44 
  45 void NativeCall::verify() {
  46   // Make sure code pattern is actually a call imm32 instruction.
  47   int inst = ubyte_at(0);
  48   if (inst != instruction_code) {
  49     tty->print_cr("Addr: " INTPTR_FORMAT " Code: 0x%x", instruction_address(),
  50                                                         inst);
  51     fatal("not a call disp32");
  52   }
  53 }
  54 
  55 address NativeCall::destination() const {
  56   // Getting the destination of a call isn't safe because that call can
  57   // be getting patched while you're calling this.  There are only special
  58   // places where this can be called, but they are not automatically
  59   // verifiable by checking which locks are held.  The solution is true
  60   // atomic patching on x86, not yet implemented.
  61   return return_address() + displacement();
  62 }
  63 
  64 void NativeCall::print() {


 457   guarantee(((uintptr_t) verified_entry & (wordSize -1)) == 0,
 458             "illegal address for code patching 2");
 459   // First 5 bytes must be within the same cache line - 4827828
 460   guarantee((uintptr_t) verified_entry / linesize ==
 461             ((uintptr_t) verified_entry + 4) / linesize,
 462             "illegal address for code patching 3");
 463 }
 464 
 465 
 466 // MT-safe insertion of a jump over an unknown instruction sequence (used by nmethod::makeZombie).
 467 // The problem: jmp <dest> is a 5-byte instruction, but writes are atomic only for 4 bytes.
 468 // First patch the first word atomically to be a jump to itself.
 469 // Then patch the last byte, and then atomically patch the first word (4 bytes),
 470 // thus inserting the desired jump.
 471 // This code is MT-safe under the following conditions: the entry point is 4-byte aligned,
 472 // the entry point is in the same cache line as the unverified entry point, and the instruction
 473 // being patched is >= 5 bytes (the size of the patch).
 474 //
 475 // In C2 the 5+ byte sized instruction is enforced by code in MachPrologNode::emit.
 476 // In C1 the restriction is enforced by CodeEmitter::method_entry.

 477 //
 478 void NativeJump::patch_verified_entry(address entry, address verified_entry, address dest) {
 479   // The complete jump instruction (to be inserted) is built in code_buffer.
 480   unsigned char code_buffer[5];
 481   code_buffer[0] = instruction_code;
 482   intptr_t disp = (intptr_t)dest - ((intptr_t)verified_entry + 1 + 4);
 483 #ifdef AMD64
 484   guarantee(disp == (intptr_t)(int32_t)disp, "must be 32-bit offset");
 485 #endif // AMD64
 486   *(int32_t*)(code_buffer + 1) = (int32_t)disp;
 487 
 488   check_verified_entry_alignment(entry, verified_entry);
 489 
 490   // Can't call nativeJump_at() because it asserts that the jump exists.
 491   NativeJump* n_jump = (NativeJump*) verified_entry;
 492 
 493   // First patch a dummy jmp (a jump to self) in place.
 494 
 495   unsigned char patch[4];
 496   assert(sizeof(patch)==sizeof(int32_t), "sanity check");




  24 
  25 #include "precompiled.hpp"
  26 #include "asm/macroAssembler.hpp"
  27 #include "memory/resourceArea.hpp"
  28 #include "nativeInst_x86.hpp"
  29 #include "oops/oop.inline.hpp"
  30 #include "runtime/handles.hpp"
  31 #include "runtime/sharedRuntime.hpp"
  32 #include "runtime/stubRoutines.hpp"
  33 #include "utilities/ostream.hpp"
  34 #ifdef COMPILER1
  35 #include "c1/c1_Runtime1.hpp"
  36 #endif
  37 
  38 PRAGMA_FORMAT_MUTE_WARNINGS_FOR_GCC
  39 
  40 void NativeInstruction::wrote(int offset) {
  41   ICache::invalidate_word(addr_at(offset));
  42 }
  43 

  44 void NativeCall::verify() {
  45   // Make sure code pattern is actually a call imm32 instruction.
  46   int inst = ubyte_at(0);
  47   if (inst != instruction_code) {
  48     tty->print_cr("Addr: " INTPTR_FORMAT " Code: 0x%x", instruction_address(),
  49                                                         inst);
  50     fatal("not a call disp32");
  51   }
  52 }
  53 
  54 address NativeCall::destination() const {
  55   // Getting the destination of a call isn't safe because that call can
  56   // be getting patched while you're calling this.  There are only special
  57   // places where this can be called, but they are not automatically
  58   // verifiable by checking which locks are held.  The solution is true
  59   // atomic patching on x86, not yet implemented.
  60   return return_address() + displacement();
  61 }
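
[Editor's note] For reference, the computation above follows directly from the x86 call rel32 encoding: opcode 0xE8 followed by a signed 32-bit displacement that is relative to the address of the next instruction, so return_address() corresponds to the call site plus 5 and displacement() to the 32-bit value at offset 1. The sketch below is a standalone illustration, not HotSpot code; the helper name and raw-pointer decoding are assumptions made for the example.

    #include <cstdint>
    #include <cstring>

    // Illustrative decoder for a 5-byte call rel32 (0xE8 imm32); not HotSpot code.
    static const unsigned char* call_destination(const unsigned char* call_site) {
      int32_t disp;
      std::memcpy(&disp, call_site + 1, sizeof(disp));       // displacement at offset 1
      const unsigned char* return_address = call_site + 5;   // call rel32 is 5 bytes long
      return return_address + disp;                          // destination = return address + displacement
    }
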
  62 
  63 void NativeCall::print() {


 456   guarantee(((uintptr_t) verified_entry & (wordSize -1)) == 0,
 457             "illegal address for code patching 2");
 458   // First 5 bytes must be within the same cache line - 4827828
 459   guarantee((uintptr_t) verified_entry / linesize ==
 460             ((uintptr_t) verified_entry + 4) / linesize,
 461             "illegal address for code patching 3");
 462 }
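
[Editor's note] A quick worked example of the containment test above, assuming a 64-byte cache line (a typical x86 line size; the real linesize value comes from the VM): the division by linesize only differs between the first and fifth byte when the 5-byte jump would straddle a line boundary. The snippet is a standalone illustration, not part of the file.

    #include <cassert>
    #include <cstdint>

    // Standalone illustration (assumes a 64-byte cache line) of the test
    // "verified_entry / linesize == (verified_entry + 4) / linesize".
    static bool fits_in_one_cache_line(uintptr_t verified_entry, uintptr_t linesize = 64) {
      return verified_entry / linesize == (verified_entry + 4) / linesize;
    }

    int main() {
      assert(fits_in_one_cache_line(0x1000));    // bytes 0x1000..0x1004 share one line
      assert(!fits_in_one_cache_line(0x103e));   // bytes 0x103e..0x1042 straddle a boundary
      return 0;
    }
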
 463 
 464 
 465 // MT-safe insertion of a jump over an unknown instruction sequence (used by nmethod::makeZombie).
 466 // The problem: jmp <dest> is a 5-byte instruction, but writes are atomic only for 4 bytes.
 467 // First patch the first word atomically to be a jump to itself.
 468 // Then patch the last byte, and then atomically patch the first word (4 bytes),
 469 // thus inserting the desired jump.
 470 // This code is MT-safe under the following conditions: the entry point is 4-byte aligned,
 471 // the entry point is in the same cache line as the unverified entry point, and the instruction
 472 // being patched is >= 5 bytes (the size of the patch).
 473 //
 474 // In C2 the 5+ byte sized instruction is enforced by code in MachPrologNode::emit.
 475 // In C1 the restriction is enforced by CodeEmitter::method_entry.
 476 // In JVMCI, the restriction is enforced by HotSpotFrameContext.enter(...).
 477 //
 478 void NativeJump::patch_verified_entry(address entry, address verified_entry, address dest) {
 479   // The complete jump instruction (to be inserted) is built in code_buffer.
 480   unsigned char code_buffer[5];
 481   code_buffer[0] = instruction_code;
 482   intptr_t disp = (intptr_t)dest - ((intptr_t)verified_entry + 1 + 4);
 483 #ifdef AMD64
 484   guarantee(disp == (intptr_t)(int32_t)disp, "must be 32-bit offset");
 485 #endif // AMD64
 486   *(int32_t*)(code_buffer + 1) = (int32_t)disp;
 487 
 488   check_verified_entry_alignment(entry, verified_entry);
 489 
 490   // Can't call nativeJump_at() because it asserts that the jump exists.
 491   NativeJump* n_jump = (NativeJump*) verified_entry;
 492 
 493   // First patch a dummy jmp (a jump to self) in place.
 494 
 495   unsigned char patch[4];
 496   assert(sizeof(patch)==sizeof(int32_t), "sanity check");
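
[Editor's note] The remainder of patch_verified_entry is not shown in this excerpt, but the three-step protocol it implements is spelled out in the comment block above. The following is a minimal sketch of that protocol under stated assumptions: the function name and byte patterns are illustrative, and a GCC/Clang __atomic builtin stands in for HotSpot's own atomic-store and ICache-invalidation primitives. verified_entry is assumed to be word aligned, as check_verified_entry_alignment guarantees.

    #include <cstdint>
    #include <cstring>

    // Minimal sketch (not the HotSpot implementation) of the protocol described
    // above for installing a 5-byte jmp when only 4-byte stores are atomic.
    static void patch_jmp_sketch(unsigned char* verified_entry, const unsigned char code_buffer[5]) {
      // 1. Atomically make the first word a jump-to-self: 0xEB 0xFE is a 2-byte
      //    short jmp with displacement -2, so a thread arriving here spins
      //    instead of executing a half-written instruction.
      const unsigned char self_jmp[4] = { 0xEB, 0xFE, 0xEB, 0xFE };
      int32_t word;
      std::memcpy(&word, self_jmp, sizeof(word));
      __atomic_store_n(reinterpret_cast<int32_t*>(verified_entry), word, __ATOMIC_SEQ_CST);

      // 2. Write the fifth byte (the last displacement byte of the real jmp);
      //    it is never executed yet, since threads are parked on the jump-to-self.
      verified_entry[4] = code_buffer[4];

      // 3. Atomically replace the first word with the first four bytes of the
      //    real jmp (opcode + low displacement bytes), completing the instruction.
      std::memcpy(&word, code_buffer, sizeof(word));
      __atomic_store_n(reinterpret_cast<int32_t*>(verified_entry), word, __ATOMIC_SEQ_CST);
    }

The sketch only captures the sequencing; the real code additionally invalidates the instruction cache after each write and uses HotSpot's own dummy-jump byte pattern and memory-ordering primitives.
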

