/*
 * Copyright (c) 2002, 2015, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

// no precompiled headers
#include "classfile/vmSymbols.hpp"
#include "gc/shared/collectedHeap.hpp"
#include "interpreter/bytecodeHistogram.hpp"
#include "interpreter/bytecodeInterpreter.hpp"
#include "interpreter/bytecodeInterpreter.inline.hpp"
#include "interpreter/bytecodeInterpreterProfiling.hpp"
#include "interpreter/interpreter.hpp"
#include "interpreter/interpreterRuntime.hpp"
#include "memory/resourceArea.hpp"
#include "oops/methodCounters.hpp"
#include "oops/objArrayKlass.hpp"
#include "oops/objArrayOop.inline.hpp"
#include "oops/oop.inline.hpp"
#include "prims/jvmtiExport.hpp"
#include "prims/jvmtiThreadState.hpp"
#include "runtime/atomic.inline.hpp"
#include "runtime/biasedLocking.hpp"
#include "runtime/frame.inline.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/interfaceSupport.hpp"
#include "runtime/orderAccess.inline.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/threadCritical.hpp"
#include "utilities/exceptions.hpp"

#ifdef CC_INTERP

/*
 * USELABELS - If using GCC, then use labels for the opcode dispatching
 * rather than a switch statement. This improves performance because it
 * gives us the opportunity to have the instructions that calculate the
 * next opcode to jump to be intermixed with the rest of the instructions
 * that implement the opcode (see UPDATE_PC_AND_TOS_AND_CONTINUE macro).
 */
#undef USELABELS
#ifdef __GNUC__
/*
   ASSERT signifies debugging. It is much easier to step through bytecodes if we
   don't use the computed goto approach.
*/
#ifndef ASSERT
#define USELABELS
#endif
#endif

#undef CASE
#ifdef USELABELS
#define CASE(opcode) opc ## opcode
#define DEFAULT opc_default
#else
#define CASE(opcode) case Bytecodes:: opcode
#define DEFAULT default
#endif
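
// For illustration: with USELABELS, CASE(_iadd) token-pastes to the label
// "opc_iadd:" and dispatch is a computed goto through the opclabels table
// set up in run() below; without USELABELS it expands to
// "case Bytecodes::_iadd:" inside an ordinary switch (opcode) statement.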

/*
 * PREFETCH_OPCCODE - Some compilers do better if you prefetch the next
 * opcode before going back to the top of the while loop, rather than having
 * the top of the while loop handle it. This provides a better opportunity
 * for instruction scheduling. Some compilers just do this prefetch
 * automatically. Some actually end up with worse performance if you
 * force the prefetch. Solaris gcc seems to do better, but cc does worse.
 */
#undef PREFETCH_OPCCODE
#define PREFETCH_OPCCODE

/*
  Interpreter safepoint: it is expected that the interpreter will have no
  handles of its own creation live at an interpreter safepoint. Therefore we
  run a HandleMarkCleaner and trash all handles allocated in the call chain
  since the JavaCalls::call_helper invocation that initiated the chain.
  There really shouldn't be any handles remaining to trash but this is cheap
  in relation to a safepoint.
*/
#define SAFEPOINT                                                                 \
    if ( SafepointSynchronize::is_synchronizing()) {                              \
        {                                                                         \
          /* zap freed handles rather than GC'ing them */                         \
          HandleMarkCleaner __hmc(THREAD);                                        \
        }                                                                         \
        CALL_VM(SafepointSynchronize::block(THREAD), handle_exception);           \
    }

/*
 * VM_JAVA_ERROR - Macro for throwing a java exception from
 * the interpreter loop. Should really be a CALL_VM but there
 * is no entry point to do the transition to vm so we just
 * do it by hand here.
 */
#define VM_JAVA_ERROR_NO_JUMP(name, msg, note_a_trap)                             \
    DECACHE_STATE();                                                              \
    SET_LAST_JAVA_FRAME();                                                        \
    {                                                                             \
       InterpreterRuntime::note_a_trap(THREAD, istate->method(), BCI());          \
       ThreadInVMfromJava trans(THREAD);                                          \
       Exceptions::_throw_msg(THREAD, __FILE__, __LINE__, name, msg);             \
    }                                                                             \
    RESET_LAST_JAVA_FRAME();                                                      \
    CACHE_STATE();

// Normal throw of a java error.
#define VM_JAVA_ERROR(name, msg, note_a_trap)                                     \
    VM_JAVA_ERROR_NO_JUMP(name, msg, note_a_trap)                                 \
    goto handle_exception;

#ifdef PRODUCT
#define DO_UPDATE_INSTRUCTION_COUNT(opcode)
#else
#define DO_UPDATE_INSTRUCTION_COUNT(opcode)                                                          \
{                                                                                                    \
    BytecodeCounter::_counter_value++;                                                               \
    BytecodeHistogram::_counters[(Bytecodes::Code)opcode]++;                                         \
    if (StopInterpreterAt && StopInterpreterAt == BytecodeCounter::_counter_value) os::breakpoint(); \
    if (TraceBytecodes) {                                                                            \
      CALL_VM((void)SharedRuntime::trace_bytecode(THREAD, 0,                                         \
                                                  topOfStack[Interpreter::expr_index_at(1)],         \
                                                  topOfStack[Interpreter::expr_index_at(2)]),        \
              handle_exception);                                                                     \
    }                                                                                                \
}
#endif

#undef DEBUGGER_SINGLE_STEP_NOTIFY
#ifdef VM_JVMTI
/* NOTE: (kbr) This macro must be called AFTER the PC has been
   incremented. JvmtiExport::at_single_stepping_point() may cause a
   breakpoint opcode to get inserted at the current PC to allow the
   debugger to coalesce single-step events.

   As a result, if we call at_single_stepping_point() we refetch the opcode
   to get the current opcode. This will override any other prefetching
   that might have occurred.
*/
#define DEBUGGER_SINGLE_STEP_NOTIFY()                                            \
{                                                                                \
      if (_jvmti_interp_events) {                                                \
        if (JvmtiExport::should_post_single_step()) {                            \
          DECACHE_STATE();                                                       \
          SET_LAST_JAVA_FRAME();                                                 \
          ThreadInVMfromJava trans(THREAD);                                      \
          JvmtiExport::at_single_stepping_point(THREAD,                          \
                                                istate->method(),                \
                                                pc);                             \
          RESET_LAST_JAVA_FRAME();                                               \
          CACHE_STATE();                                                         \
          if (THREAD->pop_frame_pending() &&                                     \
              !THREAD->pop_frame_in_process()) {                                 \
            goto handle_Pop_Frame;                                               \
          }                                                                      \
          if (THREAD->jvmti_thread_state() &&                                    \
              THREAD->jvmti_thread_state()->is_earlyret_pending()) {             \
            goto handle_Early_Return;                                            \
          }                                                                      \
          opcode = *pc;                                                          \
        }                                                                        \
      }                                                                          \
}
#else
#define DEBUGGER_SINGLE_STEP_NOTIFY()
#endif

/*
 * CONTINUE - Macro for executing the next opcode.
 */
#undef CONTINUE
#ifdef USELABELS
// Have to do the dispatch this way in C++ because otherwise gcc complains about crossing an
// initialization (which is the initialization of the table pointer...)
#define DISPATCH(opcode) goto *(void*)dispatch_table[opcode]
#define CONTINUE {                              \
        opcode = *pc;                           \
        DO_UPDATE_INSTRUCTION_COUNT(opcode);    \
        DEBUGGER_SINGLE_STEP_NOTIFY();          \
        DISPATCH(opcode);                       \
    }
#else
#ifdef PREFETCH_OPCCODE
#define CONTINUE {                              \
        opcode = *pc;                           \
        DO_UPDATE_INSTRUCTION_COUNT(opcode);    \
        DEBUGGER_SINGLE_STEP_NOTIFY();          \
        continue;                               \
    }
#else
#define CONTINUE {                              \
        DO_UPDATE_INSTRUCTION_COUNT(opcode);    \
        DEBUGGER_SINGLE_STEP_NOTIFY();          \
        continue;                               \
    }
#endif
#endif


#define UPDATE_PC(opsize) {pc += opsize; }
/*
 * UPDATE_PC_AND_TOS - Macro for updating the pc and topOfStack.
 */
#undef UPDATE_PC_AND_TOS
#define UPDATE_PC_AND_TOS(opsize, stack) \
    {pc += opsize; MORE_STACK(stack); }

/*
 * UPDATE_PC_AND_TOS_AND_CONTINUE - Macro for updating the pc and topOfStack,
 * and executing the next opcode. It's somewhat similar to the combination
 * of UPDATE_PC_AND_TOS and CONTINUE, but with some minor optimizations.
 */
#undef UPDATE_PC_AND_TOS_AND_CONTINUE
#ifdef USELABELS
#define UPDATE_PC_AND_TOS_AND_CONTINUE(opsize, stack) {         \
        pc += opsize; opcode = *pc; MORE_STACK(stack);          \
        DO_UPDATE_INSTRUCTION_COUNT(opcode);                    \
        DEBUGGER_SINGLE_STEP_NOTIFY();                          \
        DISPATCH(opcode);                                       \
    }

#define UPDATE_PC_AND_CONTINUE(opsize) {                        \
        pc += opsize; opcode = *pc;                             \
        DO_UPDATE_INSTRUCTION_COUNT(opcode);                    \
        DEBUGGER_SINGLE_STEP_NOTIFY();                          \
        DISPATCH(opcode);                                       \
    }
#else
#ifdef PREFETCH_OPCCODE
#define UPDATE_PC_AND_TOS_AND_CONTINUE(opsize, stack) {         \
        pc += opsize; opcode = *pc; MORE_STACK(stack);          \
        DO_UPDATE_INSTRUCTION_COUNT(opcode);                    \
        DEBUGGER_SINGLE_STEP_NOTIFY();                          \
        goto do_continue;                                       \
    }

#define UPDATE_PC_AND_CONTINUE(opsize) {                        \
        pc += opsize; opcode = *pc;                             \
        DO_UPDATE_INSTRUCTION_COUNT(opcode);                    \
        DEBUGGER_SINGLE_STEP_NOTIFY();                          \
        goto do_continue;                                       \
    }
#else
#define UPDATE_PC_AND_TOS_AND_CONTINUE(opsize, stack) {         \
        pc += opsize; MORE_STACK(stack);                        \
        DO_UPDATE_INSTRUCTION_COUNT(opcode);                    \
        DEBUGGER_SINGLE_STEP_NOTIFY();                          \
        goto do_continue;                                       \
    }

#define UPDATE_PC_AND_CONTINUE(opsize) {                        \
        pc += opsize;                                           \
        DO_UPDATE_INSTRUCTION_COUNT(opcode);                    \
        DEBUGGER_SINGLE_STEP_NOTIFY();                          \
        goto do_continue;                                       \
    }
#endif /* PREFETCH_OPCCODE */
#endif /* USELABELS */

// About to call a new method: save the adjusted pc and return to the frame manager.
#define UPDATE_PC_AND_RETURN(opsize)  \
   DECACHE_TOS();                     \
   istate->set_bcp(pc+opsize);        \
   return;
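
// For illustration: a handler for a one-byte opcode that pushes a single
// stack slot (e.g. iconst_0 below) ends with UPDATE_PC_AND_TOS_AND_CONTINUE(1, 1):
// advance pc past the opcode, grow the expression stack by one slot, then
// fetch and dispatch the next opcode.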

#define METHOD istate->method()
#define GET_METHOD_COUNTERS(res)                                                       \
  res = METHOD->method_counters();                                                     \
  if (res == NULL) {                                                                   \
    CALL_VM(res = InterpreterRuntime::build_method_counters(THREAD, METHOD), handle_exception); \
  }

#define OSR_REQUEST(res, branch_pc) \
            CALL_VM(res=InterpreterRuntime::frequency_counter_overflow(THREAD, branch_pc), handle_exception);
/*
 * For those opcodes that need to have a GC point on a backwards branch
 */

// Backedge counting is kind of strange. The asm interpreter will increment
// the backedge counter as a separate counter but it does its comparisons
// to the sum (scaled) of invocation counter and backedge count to make
// a decision. Seems kind of odd to sum them together like that.

// skip is delta from current bcp/bci for target, branch_pc is pre-branch bcp


#define DO_BACKEDGE_CHECKS(skip, branch_pc)                                         \
    if ((skip) <= 0) {                                                              \
      MethodCounters* mcs;                                                          \
      GET_METHOD_COUNTERS(mcs);                                                     \
      if (UseLoopCounter) {                                                         \
        bool do_OSR = UseOnStackReplacement;                                        \
        mcs->backedge_counter()->increment();                                       \
        if (ProfileInterpreter) {                                                   \
          BI_PROFILE_GET_OR_CREATE_METHOD_DATA(handle_exception);                   \
          /* Check for overflow against MDO count. */                               \
          do_OSR = do_OSR                                                           \
            && (mdo_last_branch_taken_count >= (uint)InvocationCounter::InterpreterBackwardBranchLimit)\
            /* When ProfileInterpreter is on, the backedge_count comes     */       \
            /* from the methodDataOop, which value does not get reset on   */       \
            /* the call to frequency_counter_overflow(). To avoid          */       \
            /* excessive calls to the overflow routine while the method is */       \
            /* being compiled, add a second test to make sure the overflow */       \
            /* function is called only once every overflow_frequency.      */       \
            && (!(mdo_last_branch_taken_count & 1023));                             \
        } else {                                                                    \
          /* check for overflow of backedge counter */                              \
          do_OSR = do_OSR                                                           \
            && mcs->invocation_counter()->reached_InvocationLimit(mcs->backedge_counter()); \
        }                                                                           \
        if (do_OSR) {                                                               \
          nmethod* osr_nmethod;                                                     \
          OSR_REQUEST(osr_nmethod, branch_pc);                                      \
          if (osr_nmethod != NULL && osr_nmethod->is_in_use()) {                    \
            intptr_t* buf;                                                          \
            /* Call OSR migration with last java frame only, no checks. */          \
            CALL_VM_NAKED_LJF(buf=SharedRuntime::OSR_migration_begin(THREAD));      \
            istate->set_msg(do_osr);                                                \
            istate->set_osr_buf((address)buf);                                      \
            istate->set_osr_entry(osr_nmethod->osr_entry());                        \
            return;                                                                 \
          }                                                                         \
        }                                                                           \
      }  /* UseCompiler ... */                                                      \
      SAFEPOINT;                                                                    \
    }
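
// For illustration: branch handlers compute "skip" (the signed branch offset
// when taken, or the fall-through width when not), remember the pre-branch
// pc, apply UPDATE_PC_AND_TOS, and then run DO_BACKEDGE_CHECKS(skip, branch_pc).
// Only a backwards branch (skip <= 0) bumps the backedge counter, considers
// OSR, and polls for a safepoint.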

/*
 * For those opcodes that need to have a GC point on a backwards branch
 */

/*
 * Macros for caching and flushing the interpreter state. Some local
 * variables need to be flushed out to the frame before we do certain
 * things (like pushing frames or becoming gc safe) and some need to
 * be recached later (like after popping a frame). We could use one
 * macro to cache or decache everything, but this would be less than
 * optimal because we don't always need to cache or decache everything
 * because some things we know are already cached or decached.
 */
#undef DECACHE_TOS
#undef CACHE_TOS
#undef CACHE_PREV_TOS
#define DECACHE_TOS()    istate->set_stack(topOfStack);

#define CACHE_TOS()      topOfStack = (intptr_t *)istate->stack();

#undef DECACHE_PC
#undef CACHE_PC
#define DECACHE_PC()    istate->set_bcp(pc);
#define CACHE_PC()      pc = istate->bcp();
#define CACHE_CP()      cp = istate->constants();
#define CACHE_LOCALS()  locals = istate->locals();
#undef CACHE_FRAME
#define CACHE_FRAME()

// BCI() returns the current bytecode-index.
#undef  BCI
#define BCI()           ((int)(intptr_t)(pc - (intptr_t)istate->method()->code_base()))

/*
 * CHECK_NULL - Macro for throwing a NullPointerException if the object
 * passed is a null ref.
 * On some architectures/platforms it should be possible to do this implicitly.
 */
#undef CHECK_NULL
#define CHECK_NULL(obj_)                                                                         \
        if ((obj_) == NULL) {                                                                    \
          VM_JAVA_ERROR(vmSymbols::java_lang_NullPointerException(), NULL, note_nullCheck_trap); \
        }                                                                                        \
        VERIFY_OOP(obj_)

#define VMdoubleConstZero() 0.0
#define VMdoubleConstOne() 1.0
#define VMlongConstZero() (max_jlong-max_jlong)
#define VMlongConstOne() ((max_jlong-max_jlong)+1)

/*
 * Alignment
 */
#define VMalignWordUp(val)          (((uintptr_t)(val) + 3) & ~3)

// Decache the interpreter state that interpreter modifies directly (i.e. GC is indirect mod)
#define DECACHE_STATE() DECACHE_PC(); DECACHE_TOS();

// Reload interpreter state after calling the VM or a possible GC
#define CACHE_STATE()   \
        CACHE_TOS();    \
        CACHE_PC();     \
        CACHE_CP();     \
        CACHE_LOCALS();

// Call the VM with last java frame only.
#define CALL_VM_NAKED_LJF(func)                                    \
        DECACHE_STATE();                                           \
        SET_LAST_JAVA_FRAME();                                     \
        func;                                                      \
        RESET_LAST_JAVA_FRAME();                                   \
        CACHE_STATE();

// Call the VM. Don't check for pending exceptions.
#define CALL_VM_NOCHECK(func)                                      \
        CALL_VM_NAKED_LJF(func)                                    \
        if (THREAD->pop_frame_pending() &&                         \
            !THREAD->pop_frame_in_process()) {                     \
          goto handle_Pop_Frame;                                   \
        }                                                          \
        if (THREAD->jvmti_thread_state() &&                        \
            THREAD->jvmti_thread_state()->is_earlyret_pending()) { \
          goto handle_Early_Return;                                \
        }

// Call the VM and check for pending exceptions
#define CALL_VM(func, label) {                                     \
          CALL_VM_NOCHECK(func);                                   \
          if (THREAD->has_pending_exception()) goto label;         \
        }
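
// For illustration: every transition into the VM follows the same discipline:
// DECACHE_STATE() publishes the cached pc and topOfStack back into istate,
// SET_LAST_JAVA_FRAME() makes the frame walkable, the call runs, then the
// frame anchor is reset and CACHE_STATE() reloads pc/tos/cp/locals, since a
// GC during the call may have moved things.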

/*
 * BytecodeInterpreter::run(interpreterState istate)
 * BytecodeInterpreter::runWithChecks(interpreterState istate)
 *
 * The real deal. This is where byte codes actually get interpreted.
 * Basically it's a big while loop that iterates until we return from
 * the method passed in.
 *
 * The runWithChecks is used if JVMTI is enabled.
 *
 */
#if defined(VM_JVMTI)
void
BytecodeInterpreter::runWithChecks(interpreterState istate) {
#else
void
BytecodeInterpreter::run(interpreterState istate) {
#endif

  // In order to simplify some tests based on switches set at runtime
  // we invoke the interpreter a single time after switches are enabled
  // and set simpler-to-test variables rather than method calls or complex
  // boolean expressions.

  static int initialized = 0;
  static int checkit = 0;
  static intptr_t* c_addr = NULL;
  static intptr_t  c_value;

  if (checkit && *c_addr != c_value) {
    os::breakpoint();
  }
#ifdef VM_JVMTI
  static bool _jvmti_interp_events = 0;
#endif

  static int _compiling;  // (UseCompiler || CountCompiledCalls)

#ifdef ASSERT
  if (istate->_msg != initialize) {
    assert(labs(istate->_stack_base - istate->_stack_limit) == (istate->_method->max_stack() + 1), "bad stack limit");
#ifndef SHARK
    IA32_ONLY(assert(istate->_stack_limit == istate->_thread->last_Java_sp() + 1, "wrong"));
#endif // !SHARK
  }
  // Verify linkages.
  interpreterState l = istate;
  do {
    assert(l == l->_self_link, "bad link");
    l = l->_prev_link;
  } while (l != NULL);
  // Screwups with stack management usually cause us to overwrite istate,
  // so save a copy so we can verify it.
  interpreterState orig = istate;
#endif

  register intptr_t*        topOfStack = (intptr_t *)istate->stack(); /* access with STACK macros */
  register address          pc = istate->bcp();
  register jubyte opcode;
  register intptr_t*        locals = istate->locals();
  register ConstantPoolCache*    cp = istate->constants(); // method()->constants()->cache()
#ifdef LOTS_OF_REGS
  register JavaThread*      THREAD = istate->thread();
#else
#undef THREAD
#define THREAD istate->thread()
#endif

#ifdef USELABELS
  const static void* const opclabels_data[256] = {
/* 0x00 */ &&opc_nop,     &&opc_aconst_null,&&opc_iconst_m1,&&opc_iconst_0,
/* 0x04 */ &&opc_iconst_1,&&opc_iconst_2,   &&opc_iconst_3, &&opc_iconst_4,
/* 0x08 */ &&opc_iconst_5,&&opc_lconst_0,   &&opc_lconst_1, &&opc_fconst_0,
/* 0x0C */ &&opc_fconst_1,&&opc_fconst_2,   &&opc_dconst_0, &&opc_dconst_1,

/* 0x10 */ &&opc_bipush, &&opc_sipush, &&opc_ldc,    &&opc_ldc_w,
/* 0x14 */ &&opc_ldc2_w, &&opc_iload,  &&opc_lload,  &&opc_fload,
/* 0x18 */ &&opc_dload,  &&opc_aload,  &&opc_iload_0,&&opc_iload_1,
/* 0x1C */ &&opc_iload_2,&&opc_iload_3,&&opc_lload_0,&&opc_lload_1,

/* 0x20 */ &&opc_lload_2,&&opc_lload_3,&&opc_fload_0,&&opc_fload_1,
/* 0x24 */ &&opc_fload_2,&&opc_fload_3,&&opc_dload_0,&&opc_dload_1,
/* 0x28 */ &&opc_dload_2,&&opc_dload_3,&&opc_aload_0,&&opc_aload_1,
/* 0x2C */ &&opc_aload_2,&&opc_aload_3,&&opc_iaload, &&opc_laload,

/* 0x30 */ &&opc_faload,  &&opc_daload,  &&opc_aaload,  &&opc_baload,
/* 0x34 */ &&opc_caload,  &&opc_saload,  &&opc_istore,  &&opc_lstore,
/* 0x38 */ &&opc_fstore,  &&opc_dstore,  &&opc_astore,  &&opc_istore_0,
/* 0x3C */ &&opc_istore_1,&&opc_istore_2,&&opc_istore_3,&&opc_lstore_0,

/* 0x40 */ &&opc_lstore_1,&&opc_lstore_2,&&opc_lstore_3,&&opc_fstore_0,
/* 0x44 */ &&opc_fstore_1,&&opc_fstore_2,&&opc_fstore_3,&&opc_dstore_0,
/* 0x48 */ &&opc_dstore_1,&&opc_dstore_2,&&opc_dstore_3,&&opc_astore_0,
/* 0x4C */ &&opc_astore_1,&&opc_astore_2,&&opc_astore_3,&&opc_iastore,

/* 0x50 */ &&opc_lastore,&&opc_fastore,&&opc_dastore,&&opc_aastore,
/* 0x54 */ &&opc_bastore,&&opc_castore,&&opc_sastore,&&opc_pop,
/* 0x58 */ &&opc_pop2,   &&opc_dup,    &&opc_dup_x1, &&opc_dup_x2,
/* 0x5C */ &&opc_dup2,   &&opc_dup2_x1,&&opc_dup2_x2,&&opc_swap,

/* 0x60 */ &&opc_iadd,&&opc_ladd,&&opc_fadd,&&opc_dadd,
/* 0x64 */ &&opc_isub,&&opc_lsub,&&opc_fsub,&&opc_dsub,
/* 0x68 */ &&opc_imul,&&opc_lmul,&&opc_fmul,&&opc_dmul,
/* 0x6C */ &&opc_idiv,&&opc_ldiv,&&opc_fdiv,&&opc_ddiv,

/* 0x70 */ &&opc_irem, &&opc_lrem, &&opc_frem,&&opc_drem,
/* 0x74 */ &&opc_ineg, &&opc_lneg, &&opc_fneg,&&opc_dneg,
/* 0x78 */ &&opc_ishl, &&opc_lshl, &&opc_ishr,&&opc_lshr,
/* 0x7C */ &&opc_iushr,&&opc_lushr,&&opc_iand,&&opc_land,

/* 0x80 */ &&opc_ior, &&opc_lor,&&opc_ixor,&&opc_lxor,
/* 0x84 */ &&opc_iinc,&&opc_i2l,&&opc_i2f, &&opc_i2d,
/* 0x88 */ &&opc_l2i, &&opc_l2f,&&opc_l2d, &&opc_f2i,
/* 0x8C */ &&opc_f2l, &&opc_f2d,&&opc_d2i, &&opc_d2l,

/* 0x90 */ &&opc_d2f,  &&opc_i2b,  &&opc_i2c,  &&opc_i2s,
/* 0x94 */ &&opc_lcmp, &&opc_fcmpl,&&opc_fcmpg,&&opc_dcmpl,
/* 0x98 */ &&opc_dcmpg,&&opc_ifeq, &&opc_ifne, &&opc_iflt,
/* 0x9C */ &&opc_ifge, &&opc_ifgt, &&opc_ifle, &&opc_if_icmpeq,

/* 0xA0 */ &&opc_if_icmpne,&&opc_if_icmplt,&&opc_if_icmpge, &&opc_if_icmpgt,
/* 0xA4 */ &&opc_if_icmple,&&opc_if_acmpeq,&&opc_if_acmpne, &&opc_goto,
/* 0xA8 */ &&opc_jsr,      &&opc_ret,      &&opc_tableswitch,&&opc_lookupswitch,
/* 0xAC */ &&opc_ireturn,  &&opc_lreturn,  &&opc_freturn,    &&opc_dreturn,

/* 0xB0 */ &&opc_areturn,     &&opc_return,         &&opc_getstatic,    &&opc_putstatic,
/* 0xB4 */ &&opc_getfield,    &&opc_putfield,       &&opc_invokevirtual,&&opc_invokespecial,
/* 0xB8 */ &&opc_invokestatic,&&opc_invokeinterface,&&opc_invokedynamic,&&opc_new,
/* 0xBC */ &&opc_newarray,    &&opc_anewarray,      &&opc_arraylength,  &&opc_athrow,

/* 0xC0 */ &&opc_checkcast,   &&opc_instanceof,     &&opc_monitorenter, &&opc_monitorexit,
/* 0xC4 */ &&opc_wide,        &&opc_multianewarray, &&opc_ifnull,       &&opc_ifnonnull,
/* 0xC8 */ &&opc_goto_w,      &&opc_jsr_w,          &&opc_breakpoint,   &&opc_default,
/* 0xCC */ &&opc_default,     &&opc_default,        &&opc_default,      &&opc_default,

/* 0xD0 */ &&opc_default, &&opc_default, &&opc_default, &&opc_default,
/* 0xD4 */ &&opc_default, &&opc_default, &&opc_default, &&opc_default,
/* 0xD8 */ &&opc_default, &&opc_default, &&opc_default, &&opc_default,
/* 0xDC */ &&opc_default, &&opc_default, &&opc_default, &&opc_default,

/* 0xE0 */ &&opc_default, &&opc_default,   &&opc_default,     &&opc_default,
/* 0xE4 */ &&opc_default, &&opc_fast_aldc, &&opc_fast_aldc_w, &&opc_return_register_finalizer,
/* 0xE8 */ &&opc_invokehandle, &&opc_default, &&opc_default,  &&opc_default,
/* 0xEC */ &&opc_default, &&opc_default, &&opc_default, &&opc_default,

/* 0xF0 */ &&opc_default, &&opc_default, &&opc_default, &&opc_default,
/* 0xF4 */ &&opc_default, &&opc_default, &&opc_default, &&opc_default,
/* 0xF8 */ &&opc_default, &&opc_default, &&opc_default, &&opc_default,
/* 0xFC */ &&opc_default, &&opc_default, &&opc_default, &&opc_default
  };
  register uintptr_t *dispatch_table = (uintptr_t*)&opclabels_data[0];
#endif /* USELABELS */

#ifdef ASSERT
  // this will trigger a VERIFY_OOP on entry
  if (istate->msg() != initialize && ! METHOD->is_static()) {
    oop rcvr = LOCALS_OBJECT(0);
    VERIFY_OOP(rcvr);
  }
#endif
// #define HACK
#ifdef HACK
  bool interesting = false;
#endif // HACK

  /* QQQ this should be a stack method so we don't know actual direction */
  guarantee(istate->msg() == initialize ||
            topOfStack >= istate->stack_limit() &&
            topOfStack < istate->stack_base(),
            "Stack top out of range");

#ifdef CC_INTERP_PROFILE
  // MethodData's last branch taken count.
  uint mdo_last_branch_taken_count = 0;
#else
  const uint mdo_last_branch_taken_count = 0;
#endif

  switch (istate->msg()) {
    case initialize: {
      if (initialized++) ShouldNotReachHere(); // Only one initialize call.
      _compiling = (UseCompiler || CountCompiledCalls);
#ifdef VM_JVMTI
      _jvmti_interp_events = JvmtiExport::can_post_interpreter_events();
#endif
      return;
    }
    break;
    case method_entry: {
      THREAD->set_do_not_unlock();
      // count invocations
      assert(initialized, "Interpreter not initialized");
      if (_compiling) {
        MethodCounters* mcs;
        GET_METHOD_COUNTERS(mcs);
        if (ProfileInterpreter) {
          METHOD->increment_interpreter_invocation_count(THREAD);
        }
        mcs->invocation_counter()->increment();
        if (mcs->invocation_counter()->reached_InvocationLimit(mcs->backedge_counter())) {
          CALL_VM((void)InterpreterRuntime::frequency_counter_overflow(THREAD, NULL), handle_exception);
          // We no longer retry on a counter overflow.
        }
        // Get or create profile data. Check for pending (async) exceptions.
        BI_PROFILE_GET_OR_CREATE_METHOD_DATA(handle_exception);
        SAFEPOINT;
      }

      if ((istate->_stack_base - istate->_stack_limit) != istate->method()->max_stack() + 1) {
        // initialize
        os::breakpoint();
      }

#ifdef HACK
      {
        ResourceMark rm;
        char *method_name = istate->method()->name_and_sig_as_C_string();
        if (strstr(method_name, "runThese$TestRunner.run()V") != NULL) {
          tty->print_cr("entering: depth %d bci: %d",
                         (istate->_stack_base - istate->_stack),
                         istate->_bcp - istate->_method->code_base());
          interesting = true;
        }
      }
#endif // HACK

      // Lock method if synchronized.
      if (METHOD->is_synchronized()) {
        // oop rcvr = locals[0].j.r;
        oop rcvr;
        if (METHOD->is_static()) {
          rcvr = METHOD->constants()->pool_holder()->java_mirror();
        } else {
          rcvr = LOCALS_OBJECT(0);
          VERIFY_OOP(rcvr);
        }
        // The initial monitor is ours for the taking.
        // Monitor not filled in frame manager any longer as this caused a race condition with biased locking.
        BasicObjectLock* mon = &istate->monitor_base()[-1];
        mon->set_obj(rcvr);
        bool success = false;
        uintptr_t epoch_mask_in_place = (uintptr_t)markOopDesc::epoch_mask_in_place;
        markOop mark = rcvr->mark();
        intptr_t hash = (intptr_t) markOopDesc::no_hash;
        // Implies UseBiasedLocking.
        if (mark->has_bias_pattern()) {
          uintptr_t thread_ident;
          uintptr_t anticipated_bias_locking_value;
          thread_ident = (uintptr_t)istate->thread();
          anticipated_bias_locking_value =
            (((uintptr_t)rcvr->klass()->prototype_header() | thread_ident) ^ (uintptr_t)mark) &
            ~((uintptr_t) markOopDesc::age_mask_in_place);

          if (anticipated_bias_locking_value == 0) {
            // Already biased towards this thread, nothing to do.
            if (PrintBiasedLockingStatistics) {
              (* BiasedLocking::biased_lock_entry_count_addr())++;
            }
            success = true;
          } else if ((anticipated_bias_locking_value & markOopDesc::biased_lock_mask_in_place) != 0) {
            // Try to revoke bias.
            markOop header = rcvr->klass()->prototype_header();
            if (hash != markOopDesc::no_hash) {
              header = header->copy_set_hash(hash);
            }
            if (Atomic::cmpxchg_ptr(header, rcvr->mark_addr(), mark) == mark) {
              if (PrintBiasedLockingStatistics)
                (*BiasedLocking::revoked_lock_entry_count_addr())++;
            }
          } else if ((anticipated_bias_locking_value & epoch_mask_in_place) != 0) {
            // Try to rebias.
            markOop new_header = (markOop) ( (intptr_t) rcvr->klass()->prototype_header() | thread_ident);
            if (hash != markOopDesc::no_hash) {
              new_header = new_header->copy_set_hash(hash);
            }
            if (Atomic::cmpxchg_ptr((void*)new_header, rcvr->mark_addr(), mark) == mark) {
              if (PrintBiasedLockingStatistics) {
                (* BiasedLocking::rebiased_lock_entry_count_addr())++;
              }
            } else {
              CALL_VM(InterpreterRuntime::monitorenter(THREAD, mon), handle_exception);
            }
            success = true;
          } else {
            // Try to bias towards thread in case object is anonymously biased.
            markOop header = (markOop) ((uintptr_t) mark &
                                        ((uintptr_t)markOopDesc::biased_lock_mask_in_place |
                                         (uintptr_t)markOopDesc::age_mask_in_place | epoch_mask_in_place));
            if (hash != markOopDesc::no_hash) {
              header = header->copy_set_hash(hash);
            }
            markOop new_header = (markOop) ((uintptr_t) header | thread_ident);
            // Debugging hint.
            DEBUG_ONLY(mon->lock()->set_displaced_header((markOop) (uintptr_t) 0xdeaddead);)
            if (Atomic::cmpxchg_ptr((void*)new_header, rcvr->mark_addr(), header) == header) {
              if (PrintBiasedLockingStatistics) {
                (* BiasedLocking::anonymously_biased_lock_entry_count_addr())++;
              }
            } else {
              CALL_VM(InterpreterRuntime::monitorenter(THREAD, mon), handle_exception);
            }
            success = true;
          }
        }

        // Traditional lightweight locking.
        if (!success) {
          markOop displaced = rcvr->mark()->set_unlocked();
          mon->lock()->set_displaced_header(displaced);
          bool call_vm = UseHeavyMonitors;
          if (call_vm || Atomic::cmpxchg_ptr(mon, rcvr->mark_addr(), displaced) != displaced) {
            // Is it simple recursive case?
            if (!call_vm && THREAD->is_lock_owned((address) displaced->clear_lock_bits())) {
              mon->lock()->set_displaced_header(NULL);
            } else {
              CALL_VM(InterpreterRuntime::monitorenter(THREAD, mon), handle_exception);
            }
          }
        }
      }
      THREAD->clr_do_not_unlock();

      // Notify jvmti
#ifdef VM_JVMTI
      if (_jvmti_interp_events) {
        // Whenever JVMTI puts a thread in interp_only_mode, method
        // entry/exit events are sent for that thread to track stack depth.
        if (THREAD->is_interp_only_mode()) {
          CALL_VM(InterpreterRuntime::post_method_entry(THREAD),
                  handle_exception);
        }
      }
#endif /* VM_JVMTI */

      goto run;
    }

    case popping_frame: {
      // returned from a java call to pop the frame, restart the call
      // clear the message so we don't confuse ourselves later
      assert(THREAD->pop_frame_in_process(), "wrong frame pop state");
      istate->set_msg(no_request);
      if (_compiling) {
        // Set MDX back to the ProfileData of the invoke bytecode that will be
        // restarted.
        SET_MDX(NULL);
        BI_PROFILE_GET_OR_CREATE_METHOD_DATA(handle_exception);
      }
      THREAD->clr_pop_frame_in_process();
      goto run;
    }

    case method_resume: {
      if ((istate->_stack_base - istate->_stack_limit) != istate->method()->max_stack() + 1) {
        // resume
        os::breakpoint();
      }
#ifdef HACK
      {
        ResourceMark rm;
        char *method_name = istate->method()->name_and_sig_as_C_string();
        if (strstr(method_name, "runThese$TestRunner.run()V") != NULL) {
          tty->print_cr("resume: depth %d bci: %d",
                         (istate->_stack_base - istate->_stack) ,
                         istate->_bcp - istate->_method->code_base());
          interesting = true;
        }
      }
#endif // HACK
      // returned from a java call, continue executing.
      if (THREAD->pop_frame_pending() && !THREAD->pop_frame_in_process()) {
        goto handle_Pop_Frame;
      }
      if (THREAD->jvmti_thread_state() &&
          THREAD->jvmti_thread_state()->is_earlyret_pending()) {
        goto handle_Early_Return;
      }

      if (THREAD->has_pending_exception()) goto handle_exception;
      // Update the pc by the saved amount of the invoke bytecode size
      UPDATE_PC(istate->bcp_advance());

      if (_compiling) {
        // Get or create profile data. Check for pending (async) exceptions.
        BI_PROFILE_GET_OR_CREATE_METHOD_DATA(handle_exception);
      }
      goto run;
    }

    case deopt_resume2: {
      // Returned from an opcode that will reexecute. Deopt was
      // a result of a PopFrame request.
      //

      if (_compiling) {
        // Get or create profile data. Check for pending (async) exceptions.
        BI_PROFILE_GET_OR_CREATE_METHOD_DATA(handle_exception);
      }
      goto run;
    }

    case deopt_resume: {
      // Returned from an opcode that has completed. The stack has
      // the result; all we need to do is skip across the bytecode
      // and continue (assuming there is no exception pending)
      //
      // compute continuation length
      //
      // Note: it is possible to deopt at a return_register_finalizer opcode
      // because this requires entering the vm to do the registering. While the
      // opcode is complete we can't advance because there are no more opcodes,
      // much like trying to deopt at a poll return. In that case we simply
      // get out of here.
      //
      if ( Bytecodes::code_at(METHOD, pc) == Bytecodes::_return_register_finalizer) {
        // this will do the right thing even if an exception is pending.
        goto handle_return;
      }
      UPDATE_PC(Bytecodes::length_at(METHOD, pc));
      if (THREAD->has_pending_exception()) goto handle_exception;

      if (_compiling) {
        // Get or create profile data. Check for pending (async) exceptions.
        BI_PROFILE_GET_OR_CREATE_METHOD_DATA(handle_exception);
      }
      goto run;
    }
    case got_monitors: {
      // continue locking now that we have a monitor to use
      // we expect to find a newly allocated monitor at the "top" of the monitor stack.
      oop lockee = STACK_OBJECT(-1);
      VERIFY_OOP(lockee);
      // dereferencing lockee ought to provoke an implicit null check
      // find a free monitor
      BasicObjectLock* entry = (BasicObjectLock*) istate->stack_base();
      assert(entry->obj() == NULL, "Frame manager didn't allocate the monitor");
      entry->set_obj(lockee);
      bool success = false;
      uintptr_t epoch_mask_in_place = (uintptr_t)markOopDesc::epoch_mask_in_place;

      markOop mark = lockee->mark();
      intptr_t hash = (intptr_t) markOopDesc::no_hash;
      // implies UseBiasedLocking
      if (mark->has_bias_pattern()) {
        uintptr_t thread_ident;
        uintptr_t anticipated_bias_locking_value;
        thread_ident = (uintptr_t)istate->thread();
        anticipated_bias_locking_value =
          (((uintptr_t)lockee->klass()->prototype_header() | thread_ident) ^ (uintptr_t)mark) &
          ~((uintptr_t) markOopDesc::age_mask_in_place);

        if (anticipated_bias_locking_value == 0) {
          // already biased towards this thread, nothing to do
          if (PrintBiasedLockingStatistics) {
            (* BiasedLocking::biased_lock_entry_count_addr())++;
          }
          success = true;
        } else if ((anticipated_bias_locking_value & markOopDesc::biased_lock_mask_in_place) != 0) {
          // try revoke bias
          markOop header = lockee->klass()->prototype_header();
          if (hash != markOopDesc::no_hash) {
            header = header->copy_set_hash(hash);
          }
          if (Atomic::cmpxchg_ptr(header, lockee->mark_addr(), mark) == mark) {
            if (PrintBiasedLockingStatistics) {
              (*BiasedLocking::revoked_lock_entry_count_addr())++;
            }
          }
        } else if ((anticipated_bias_locking_value & epoch_mask_in_place) != 0) {
          // try rebias
          markOop new_header = (markOop) ( (intptr_t) lockee->klass()->prototype_header() | thread_ident);
          if (hash != markOopDesc::no_hash) {
            new_header = new_header->copy_set_hash(hash);
          }
          if (Atomic::cmpxchg_ptr((void*)new_header, lockee->mark_addr(), mark) == mark) {
            if (PrintBiasedLockingStatistics) {
              (* BiasedLocking::rebiased_lock_entry_count_addr())++;
            }
          } else {
            CALL_VM(InterpreterRuntime::monitorenter(THREAD, entry), handle_exception);
          }
          success = true;
        } else {
          // try to bias towards thread in case object is anonymously biased
          markOop header = (markOop) ((uintptr_t) mark & ((uintptr_t)markOopDesc::biased_lock_mask_in_place |
                                                          (uintptr_t)markOopDesc::age_mask_in_place | epoch_mask_in_place));
          if (hash != markOopDesc::no_hash) {
            header = header->copy_set_hash(hash);
          }
          markOop new_header = (markOop) ((uintptr_t) header | thread_ident);
          // debugging hint
          DEBUG_ONLY(entry->lock()->set_displaced_header((markOop) (uintptr_t) 0xdeaddead);)
          if (Atomic::cmpxchg_ptr((void*)new_header, lockee->mark_addr(), header) == header) {
            if (PrintBiasedLockingStatistics) {
              (* BiasedLocking::anonymously_biased_lock_entry_count_addr())++;
            }
          } else {
            CALL_VM(InterpreterRuntime::monitorenter(THREAD, entry), handle_exception);
          }
          success = true;
        }
      }

      // traditional lightweight locking
      if (!success) {
        markOop displaced = lockee->mark()->set_unlocked();
        entry->lock()->set_displaced_header(displaced);
        bool call_vm = UseHeavyMonitors;
        if (call_vm || Atomic::cmpxchg_ptr(entry, lockee->mark_addr(), displaced) != displaced) {
          // Is it simple recursive case?
          if (!call_vm && THREAD->is_lock_owned((address) displaced->clear_lock_bits())) {
            entry->lock()->set_displaced_header(NULL);
          } else {
            CALL_VM(InterpreterRuntime::monitorenter(THREAD, entry), handle_exception);
          }
        }
      }
      UPDATE_PC_AND_TOS(1, -1);
      goto run;
    }
    default: {
      fatal("Unexpected message from frame manager");
    }
  }

run:

  DO_UPDATE_INSTRUCTION_COUNT(*pc)
  DEBUGGER_SINGLE_STEP_NOTIFY();
#ifdef PREFETCH_OPCCODE
  opcode = *pc;  /* prefetch first opcode */
#endif

#ifndef USELABELS
  while (1)
#endif
  {
#ifndef PREFETCH_OPCCODE
      opcode = *pc;
#endif
      // Seems like this happens twice per opcode. At worst this is only
      // needed at entry to the loop.
      // DEBUGGER_SINGLE_STEP_NOTIFY();
      /* Using these labels avoids double breakpoints when quickening and
       * when returning from transition frames.
       */
  opcode_switch:
      assert(istate == orig, "Corrupted istate");
      /* QQQ Hmm this has knowledge of direction, ought to be a stack method */
      assert(topOfStack >= istate->stack_limit(), "Stack overrun");
      assert(topOfStack < istate->stack_base(), "Stack underrun");

#ifdef USELABELS
      DISPATCH(opcode);
#else
      switch (opcode)
#endif
      {
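      // For illustration: the handlers below address the expression stack
      // relative to topOfStack: STACK_INT(-1) reads the top slot and
      // SET_STACK_INT(v, -1) overwrites it in place, while the second
      // argument of UPDATE_PC_AND_TOS_AND_CONTINUE adjusts topOfStack
      // (negative pops, positive pushes). Longs and doubles occupy two
      // slots, which is why STACK_LONG(-1) pairs with a tos delta of -2.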
      CASE(_nop):
          UPDATE_PC_AND_CONTINUE(1);

      /* Push miscellaneous constants onto the stack. */

      CASE(_aconst_null):
          SET_STACK_OBJECT(NULL, 0);
          UPDATE_PC_AND_TOS_AND_CONTINUE(1, 1);

#undef  OPC_CONST_n
#define OPC_CONST_n(opcode, const_type, value)                          \
      CASE(opcode):                                                     \
          SET_STACK_ ## const_type(value, 0);                           \
          UPDATE_PC_AND_TOS_AND_CONTINUE(1, 1);

          OPC_CONST_n(_iconst_m1,   INT,       -1);
          OPC_CONST_n(_iconst_0,    INT,        0);
          OPC_CONST_n(_iconst_1,    INT,        1);
          OPC_CONST_n(_iconst_2,    INT,        2);
          OPC_CONST_n(_iconst_3,    INT,        3);
          OPC_CONST_n(_iconst_4,    INT,        4);
          OPC_CONST_n(_iconst_5,    INT,        5);
          OPC_CONST_n(_fconst_0,    FLOAT,      0.0);
          OPC_CONST_n(_fconst_1,    FLOAT,      1.0);
          OPC_CONST_n(_fconst_2,    FLOAT,      2.0);

#undef  OPC_CONST2_n
#define OPC_CONST2_n(opcname, value, key, kind)                         \
      CASE(_##opcname):                                                 \
      {                                                                 \
          SET_STACK_ ## kind(VM##key##Const##value(), 1);               \
          UPDATE_PC_AND_TOS_AND_CONTINUE(1, 2);                         \
      }
          OPC_CONST2_n(dconst_0, Zero, double, DOUBLE);
          OPC_CONST2_n(dconst_1, One,  double, DOUBLE);
          OPC_CONST2_n(lconst_0, Zero, long, LONG);
          OPC_CONST2_n(lconst_1, One,  long, LONG);

          /* Load constant from constant pool: */

          /* Push a 1-byte signed integer value onto the stack. */
      CASE(_bipush):
          SET_STACK_INT((jbyte)(pc[1]), 0);
          UPDATE_PC_AND_TOS_AND_CONTINUE(2, 1);

          /* Push a 2-byte signed integer constant onto the stack. */
      CASE(_sipush):
          SET_STACK_INT((int16_t)Bytes::get_Java_u2(pc + 1), 0);
          UPDATE_PC_AND_TOS_AND_CONTINUE(3, 1);

          /* load from local variable */

      CASE(_aload):
          VERIFY_OOP(LOCALS_OBJECT(pc[1]));
          SET_STACK_OBJECT(LOCALS_OBJECT(pc[1]), 0);
          UPDATE_PC_AND_TOS_AND_CONTINUE(2, 1);

      CASE(_iload):
      CASE(_fload):
          SET_STACK_SLOT(LOCALS_SLOT(pc[1]), 0);
          UPDATE_PC_AND_TOS_AND_CONTINUE(2, 1);

      CASE(_lload):
          SET_STACK_LONG_FROM_ADDR(LOCALS_LONG_AT(pc[1]), 1);
          UPDATE_PC_AND_TOS_AND_CONTINUE(2, 2);

      CASE(_dload):
          SET_STACK_DOUBLE_FROM_ADDR(LOCALS_DOUBLE_AT(pc[1]), 1);
          UPDATE_PC_AND_TOS_AND_CONTINUE(2, 2);

#undef  OPC_LOAD_n
#define OPC_LOAD_n(num)                                                 \
      CASE(_aload_##num):                                               \
          VERIFY_OOP(LOCALS_OBJECT(num));                               \
          SET_STACK_OBJECT(LOCALS_OBJECT(num), 0);                      \
          UPDATE_PC_AND_TOS_AND_CONTINUE(1, 1);                         \
                                                                        \
      CASE(_iload_##num):                                               \
      CASE(_fload_##num):                                               \
          SET_STACK_SLOT(LOCALS_SLOT(num), 0);                          \
          UPDATE_PC_AND_TOS_AND_CONTINUE(1, 1);                         \
                                                                        \
      CASE(_lload_##num):                                               \
          SET_STACK_LONG_FROM_ADDR(LOCALS_LONG_AT(num), 1);             \
          UPDATE_PC_AND_TOS_AND_CONTINUE(1, 2);                         \
      CASE(_dload_##num):                                               \
          SET_STACK_DOUBLE_FROM_ADDR(LOCALS_DOUBLE_AT(num), 1);         \
          UPDATE_PC_AND_TOS_AND_CONTINUE(1, 2);

          OPC_LOAD_n(0);
          OPC_LOAD_n(1);
          OPC_LOAD_n(2);
          OPC_LOAD_n(3);

          /* store to a local variable */

      CASE(_astore):
          astore(topOfStack, -1, locals, pc[1]);
          UPDATE_PC_AND_TOS_AND_CONTINUE(2, -1);

      CASE(_istore):
      CASE(_fstore):
          SET_LOCALS_SLOT(STACK_SLOT(-1), pc[1]);
          UPDATE_PC_AND_TOS_AND_CONTINUE(2, -1);

      CASE(_lstore):
          SET_LOCALS_LONG(STACK_LONG(-1), pc[1]);
          UPDATE_PC_AND_TOS_AND_CONTINUE(2, -2);

      CASE(_dstore):
          SET_LOCALS_DOUBLE(STACK_DOUBLE(-1), pc[1]);
          UPDATE_PC_AND_TOS_AND_CONTINUE(2, -2);

      CASE(_wide): {
          uint16_t reg = Bytes::get_Java_u2(pc + 2);

          opcode = pc[1];

          // Wide and its sub-bytecode are counted as separate instructions. If we
          // don't account for this here, the bytecode trace skips the next bytecode.
          DO_UPDATE_INSTRUCTION_COUNT(opcode);

          switch(opcode) {
              case Bytecodes::_aload:
                  VERIFY_OOP(LOCALS_OBJECT(reg));
                  SET_STACK_OBJECT(LOCALS_OBJECT(reg), 0);
                  UPDATE_PC_AND_TOS_AND_CONTINUE(4, 1);

              case Bytecodes::_iload:
              case Bytecodes::_fload:
                  SET_STACK_SLOT(LOCALS_SLOT(reg), 0);
                  UPDATE_PC_AND_TOS_AND_CONTINUE(4, 1);

              case Bytecodes::_lload:
                  SET_STACK_LONG_FROM_ADDR(LOCALS_LONG_AT(reg), 1);
                  UPDATE_PC_AND_TOS_AND_CONTINUE(4, 2);

              case Bytecodes::_dload:
                  SET_STACK_DOUBLE_FROM_ADDR(LOCALS_LONG_AT(reg), 1);
                  UPDATE_PC_AND_TOS_AND_CONTINUE(4, 2);

              case Bytecodes::_astore:
                  astore(topOfStack, -1, locals, reg);
                  UPDATE_PC_AND_TOS_AND_CONTINUE(4, -1);

              case Bytecodes::_istore:
              case Bytecodes::_fstore:
                  SET_LOCALS_SLOT(STACK_SLOT(-1), reg);
                  UPDATE_PC_AND_TOS_AND_CONTINUE(4, -1);

              case Bytecodes::_lstore:
                  SET_LOCALS_LONG(STACK_LONG(-1), reg);
                  UPDATE_PC_AND_TOS_AND_CONTINUE(4, -2);

              case Bytecodes::_dstore:
                  SET_LOCALS_DOUBLE(STACK_DOUBLE(-1), reg);
                  UPDATE_PC_AND_TOS_AND_CONTINUE(4, -2);

              case Bytecodes::_iinc: {
                  int16_t offset = (int16_t)Bytes::get_Java_u2(pc+4);
                  // Be nice to see what this generates.... QQQ
                  SET_LOCALS_INT(LOCALS_INT(reg) + offset, reg);
                  UPDATE_PC_AND_CONTINUE(6);
              }
              case Bytecodes::_ret:
                  // Profile ret.
                  BI_PROFILE_UPDATE_RET(/*bci=*/((int)(intptr_t)(LOCALS_ADDR(reg))));
                  // Now, update the pc.
                  pc = istate->method()->code_base() + (intptr_t)(LOCALS_ADDR(reg));
                  UPDATE_PC_AND_CONTINUE(0);
              default:
                  VM_JAVA_ERROR(vmSymbols::java_lang_InternalError(), "undefined opcode", note_no_trap);
          }
      }


#undef  OPC_STORE_n
#define OPC_STORE_n(num)                                                \
      CASE(_astore_##num):                                              \
          astore(topOfStack, -1, locals, num);                          \
          UPDATE_PC_AND_TOS_AND_CONTINUE(1, -1);                        \
      CASE(_istore_##num):                                              \
      CASE(_fstore_##num):                                              \
          SET_LOCALS_SLOT(STACK_SLOT(-1), num);                         \
          UPDATE_PC_AND_TOS_AND_CONTINUE(1, -1);

          OPC_STORE_n(0);
          OPC_STORE_n(1);
          OPC_STORE_n(2);
          OPC_STORE_n(3);

#undef  OPC_DSTORE_n
#define OPC_DSTORE_n(num)                                               \
      CASE(_dstore_##num):                                              \
          SET_LOCALS_DOUBLE(STACK_DOUBLE(-1), num);                     \
          UPDATE_PC_AND_TOS_AND_CONTINUE(1, -2);                        \
      CASE(_lstore_##num):                                              \
          SET_LOCALS_LONG(STACK_LONG(-1), num);                         \
          UPDATE_PC_AND_TOS_AND_CONTINUE(1, -2);

          OPC_DSTORE_n(0);
          OPC_DSTORE_n(1);
          OPC_DSTORE_n(2);
          OPC_DSTORE_n(3);

          /* stack pop, dup, and insert opcodes */


      CASE(_pop):                /* Discard the top item on the stack */
          UPDATE_PC_AND_TOS_AND_CONTINUE(1, -1);


      CASE(_pop2):               /* Discard the top 2 items on the stack */
          UPDATE_PC_AND_TOS_AND_CONTINUE(1, -2);


      CASE(_dup):               /* Duplicate the top item on the stack */
          dup(topOfStack);
          UPDATE_PC_AND_TOS_AND_CONTINUE(1, 1);

      CASE(_dup2):              /* Duplicate the top 2 items on the stack */
          dup2(topOfStack);
          UPDATE_PC_AND_TOS_AND_CONTINUE(1, 2);

      CASE(_dup_x1):    /* insert top word two down */
          dup_x1(topOfStack);
          UPDATE_PC_AND_TOS_AND_CONTINUE(1, 1);

      CASE(_dup_x2):    /* insert top word three down */
          dup_x2(topOfStack);
          UPDATE_PC_AND_TOS_AND_CONTINUE(1, 1);

      CASE(_dup2_x1):   /* insert top 2 slots three down */
          dup2_x1(topOfStack);
          UPDATE_PC_AND_TOS_AND_CONTINUE(1, 2);

      CASE(_dup2_x2):   /* insert top 2 slots four down */
          dup2_x2(topOfStack);
          UPDATE_PC_AND_TOS_AND_CONTINUE(1, 2);

      CASE(_swap): {        /* swap top two elements on the stack */
          swap(topOfStack);
          UPDATE_PC_AND_CONTINUE(1);
      }

          /* Perform various binary integer operations */

#undef  OPC_INT_BINARY
#define OPC_INT_BINARY(opcname, opname, test)                           \
      CASE(_i##opcname):                                                \
          if (test && (STACK_INT(-1) == 0)) {                           \
              VM_JAVA_ERROR(vmSymbols::java_lang_ArithmeticException(), \
                            "/ by zero", note_div0Check_trap);          \
          }                                                             \
          SET_STACK_INT(VMint##opname(STACK_INT(-2),                    \
                                      STACK_INT(-1)),                   \
                                      -2);                              \
          UPDATE_PC_AND_TOS_AND_CONTINUE(1, -1);                        \
      CASE(_l##opcname):                                                \
      {                                                                 \
          if (test) {                                                   \
            jlong l1 = STACK_LONG(-1);                                  \
            if (VMlongEqz(l1)) {                                        \
              VM_JAVA_ERROR(vmSymbols::java_lang_ArithmeticException(), \
                            "/ by long zero", note_div0Check_trap);     \
            }                                                           \
          }                                                             \
          /* First long at (-1,-2) next long at (-3,-4) */              \
          SET_STACK_LONG(VMlong##opname(STACK_LONG(-3),                 \
                                        STACK_LONG(-1)),                \
                                        -3);                            \
          UPDATE_PC_AND_TOS_AND_CONTINUE(1, -2);                        \
      }

      OPC_INT_BINARY(add, Add, 0);
      OPC_INT_BINARY(sub, Sub, 0);
      OPC_INT_BINARY(mul, Mul, 0);
      OPC_INT_BINARY(and, And, 0);
      OPC_INT_BINARY(or,  Or,  0);
      OPC_INT_BINARY(xor, Xor, 0);
      OPC_INT_BINARY(div, Div, 1);
      OPC_INT_BINARY(rem, Rem, 1);


      /* Perform various binary floating number operations */
      /* On some machine/platforms/compilers div zero check can be implicit */

#undef  OPC_FLOAT_BINARY
#define OPC_FLOAT_BINARY(opcname, opname)                                  \
      CASE(_d##opcname): {                                                 \
          SET_STACK_DOUBLE(VMdouble##opname(STACK_DOUBLE(-3),              \
                                            STACK_DOUBLE(-1)),             \
                                            -3);                           \
          UPDATE_PC_AND_TOS_AND_CONTINUE(1, -2);                           \
      }                                                                    \
      CASE(_f##opcname):                                                   \
          SET_STACK_FLOAT(VMfloat##opname(STACK_FLOAT(-2),                 \
                                          STACK_FLOAT(-1)),                \
                                          -2);                             \
          UPDATE_PC_AND_TOS_AND_CONTINUE(1, -1);


      OPC_FLOAT_BINARY(add, Add);
      OPC_FLOAT_BINARY(sub, Sub);
      OPC_FLOAT_BINARY(mul, Mul);
      OPC_FLOAT_BINARY(div, Div);
      OPC_FLOAT_BINARY(rem, Rem);

      /* Shift operations
       * Shift left int and long: ishl, lshl
       * Logical shift right int and long w/zero extension: iushr, lushr
       * Arithmetic shift right int and long w/sign extension: ishr, lshr
       */

#undef  OPC_SHIFT_BINARY
#define OPC_SHIFT_BINARY(opcname, opname)                               \
      CASE(_i##opcname):                                                \
          SET_STACK_INT(VMint##opname(STACK_INT(-2),                    \
                                      STACK_INT(-1)),                   \
                                      -2);                              \
          UPDATE_PC_AND_TOS_AND_CONTINUE(1, -1);                        \
      CASE(_l##opcname):                                                \
      {                                                                 \
          SET_STACK_LONG(VMlong##opname(STACK_LONG(-2),                 \
                                        STACK_INT(-1)),                 \
                                        -2);                            \
          UPDATE_PC_AND_TOS_AND_CONTINUE(1, -1);                        \
      }

      OPC_SHIFT_BINARY(shl, Shl);
      OPC_SHIFT_BINARY(shr, Shr);
      OPC_SHIFT_BINARY(ushr, Ushr);

      /* Increment local variable by constant */
      CASE(_iinc):
      {
          // locals[pc[1]].j.i += (jbyte)(pc[2]);
          SET_LOCALS_INT(LOCALS_INT(pc[1]) + (jbyte)(pc[2]), pc[1]);
          UPDATE_PC_AND_CONTINUE(3);
      }

      /* negate the value on the top of the stack */

      CASE(_ineg):
          SET_STACK_INT(VMintNeg(STACK_INT(-1)), -1);
          UPDATE_PC_AND_CONTINUE(1);

      CASE(_fneg):
          SET_STACK_FLOAT(VMfloatNeg(STACK_FLOAT(-1)), -1);
          UPDATE_PC_AND_CONTINUE(1);

      CASE(_lneg):
      {
          SET_STACK_LONG(VMlongNeg(STACK_LONG(-1)), -1);
          UPDATE_PC_AND_CONTINUE(1);
      }

      CASE(_dneg):
      {
          SET_STACK_DOUBLE(VMdoubleNeg(STACK_DOUBLE(-1)), -1);
          UPDATE_PC_AND_CONTINUE(1);
      }

      /* Conversion operations */

      CASE(_i2f):       /* convert top of stack int to float */
          SET_STACK_FLOAT(VMint2Float(STACK_INT(-1)), -1);
          UPDATE_PC_AND_CONTINUE(1);

      CASE(_i2l):       /* convert top of stack int to long */
      {
          // this is ugly QQQ
          jlong r = VMint2Long(STACK_INT(-1));
          MORE_STACK(-1); // Pop
          SET_STACK_LONG(r, 1);

          UPDATE_PC_AND_TOS_AND_CONTINUE(1, 2);
      }

      CASE(_i2d):       /* convert top of stack int to double */
      {
          // this is ugly QQQ (why cast to jlong?? )
          jdouble r = (jlong)STACK_INT(-1);
          MORE_STACK(-1); // Pop
          SET_STACK_DOUBLE(r, 1);

          UPDATE_PC_AND_TOS_AND_CONTINUE(1, 2);
      }

      CASE(_l2i):       /* convert top of stack long to int */
      {
          jint r = VMlong2Int(STACK_LONG(-1));
          MORE_STACK(-2); // Pop
          SET_STACK_INT(r, 0);
          UPDATE_PC_AND_TOS_AND_CONTINUE(1, 1);
      }

      CASE(_l2f):       /* convert top of stack long to float */
      {
          jlong r = STACK_LONG(-1);
          MORE_STACK(-2); // Pop
          SET_STACK_FLOAT(VMlong2Float(r), 0);
          UPDATE_PC_AND_TOS_AND_CONTINUE(1, 1);
      }

      CASE(_l2d):       /* convert top of stack long to double */
      {
          jlong r = STACK_LONG(-1);
          MORE_STACK(-2); // Pop
          SET_STACK_DOUBLE(VMlong2Double(r), 1);
          UPDATE_PC_AND_TOS_AND_CONTINUE(1, 2);
      }

      CASE(_f2i):       /* Convert top of stack float to int */
          SET_STACK_INT(SharedRuntime::f2i(STACK_FLOAT(-1)), -1);
          UPDATE_PC_AND_CONTINUE(1);

      CASE(_f2l):       /* convert top of stack float to long */
      {
          jlong r = SharedRuntime::f2l(STACK_FLOAT(-1));
          MORE_STACK(-1); // POP
          SET_STACK_LONG(r, 1);
          UPDATE_PC_AND_TOS_AND_CONTINUE(1, 2);
      }

      CASE(_f2d):       /* convert top of stack float to double */
      {
          jfloat f;
          jdouble r;
          f = STACK_FLOAT(-1);
          r = (jdouble) f;
          MORE_STACK(-1); // POP
          SET_STACK_DOUBLE(r, 1);
          UPDATE_PC_AND_TOS_AND_CONTINUE(1, 2);
      }

      CASE(_d2i):       /* convert top of stack double to int */
      {
          jint r1 = SharedRuntime::d2i(STACK_DOUBLE(-1));
          MORE_STACK(-2);
          SET_STACK_INT(r1, 0);
          UPDATE_PC_AND_TOS_AND_CONTINUE(1, 1);
      }

      CASE(_d2f):       /* convert top of stack double to float */
      {
          jfloat r1 = VMdouble2Float(STACK_DOUBLE(-1));
          MORE_STACK(-2);
          SET_STACK_FLOAT(r1, 0);
          UPDATE_PC_AND_TOS_AND_CONTINUE(1, 1);
      }

      CASE(_d2l):       /* convert top of stack double to long */
      {
          jlong r1 = SharedRuntime::d2l(STACK_DOUBLE(-1));
          MORE_STACK(-2);
          SET_STACK_LONG(r1, 1);
          UPDATE_PC_AND_TOS_AND_CONTINUE(1, 2);
      }

      CASE(_i2b):
          SET_STACK_INT(VMint2Byte(STACK_INT(-1)), -1);
          UPDATE_PC_AND_CONTINUE(1);

      CASE(_i2c):
          SET_STACK_INT(VMint2Char(STACK_INT(-1)), -1);
          UPDATE_PC_AND_CONTINUE(1);

      CASE(_i2s):
          SET_STACK_INT(VMint2Short(STACK_INT(-1)), -1);
          UPDATE_PC_AND_CONTINUE(1);

      /* comparison operators */


#define COMPARISON_OP(name, comparison)                                      \
      CASE(_if_icmp##name): {                                                \
          const bool cmp = (STACK_INT(-2) comparison STACK_INT(-1));         \
          int skip = cmp                                                     \
                      ? (int16_t)Bytes::get_Java_u2(pc + 1) : 3;             \
          address branch_pc = pc;                                            \
          /* Profile branch. */                                              \
          BI_PROFILE_UPDATE_BRANCH(/*is_taken=*/cmp);                        \
          UPDATE_PC_AND_TOS(skip, -2);                                       \
          DO_BACKEDGE_CHECKS(skip, branch_pc);                               \
          CONTINUE;                                                          \
      }                                                                      \
      CASE(_if##name): {                                                     \
          const bool cmp = (STACK_INT(-1) comparison 0);                     \
          int skip = cmp                                                     \
                      ? (int16_t)Bytes::get_Java_u2(pc + 1) : 3;             \
          address branch_pc = pc;                                            \
          /* Profile branch. */                                              \
          BI_PROFILE_UPDATE_BRANCH(/*is_taken=*/cmp);                        \
          UPDATE_PC_AND_TOS(skip, -1);                                       \
          DO_BACKEDGE_CHECKS(skip, branch_pc);                               \
          CONTINUE;                                                          \
      }

#define COMPARISON_OP2(name, comparison)                                     \
      COMPARISON_OP(name, comparison)                                        \
      CASE(_if_acmp##name): {                                                \
          const bool cmp = (STACK_OBJECT(-2) comparison STACK_OBJECT(-1));   \
          int skip = cmp                                                     \
                      ? (int16_t)Bytes::get_Java_u2(pc + 1) : 3;             \
          address branch_pc = pc;                                            \
          /* Profile branch. */                                              \
          BI_PROFILE_UPDATE_BRANCH(/*is_taken=*/cmp);                        \
          UPDATE_PC_AND_TOS(skip, -2);                                       \
          DO_BACKEDGE_CHECKS(skip, branch_pc);                               \
          CONTINUE;                                                          \
      }

#define NULL_COMPARISON_NOT_OP(name)                                         \
      CASE(_if##name): {                                                     \
          const bool cmp = (!(STACK_OBJECT(-1) == NULL));                    \
          int skip = cmp                                                     \
                      ? (int16_t)Bytes::get_Java_u2(pc + 1) : 3;             \
          address branch_pc = pc;                                            \
          /* Profile branch. */                                              \
          BI_PROFILE_UPDATE_BRANCH(/*is_taken=*/cmp);                        \
          UPDATE_PC_AND_TOS(skip, -1);                                       \
          DO_BACKEDGE_CHECKS(skip, branch_pc);                               \
          CONTINUE;                                                          \
      }

#define NULL_COMPARISON_OP(name)                                             \
      CASE(_if##name): {                                                     \
          const bool cmp = ((STACK_OBJECT(-1) == NULL));                     \
          int skip = cmp                                                     \
                      ? (int16_t)Bytes::get_Java_u2(pc + 1) : 3;             \
          address branch_pc = pc;                                            \
          /* Profile branch. */                                              \
          BI_PROFILE_UPDATE_BRANCH(/*is_taken=*/cmp);                        \
          UPDATE_PC_AND_TOS(skip, -1);                                       \
          DO_BACKEDGE_CHECKS(skip, branch_pc);                               \
          CONTINUE;                                                          \
      }
      COMPARISON_OP(lt, <);
      COMPARISON_OP(gt, >);
      COMPARISON_OP(le, <=);
      COMPARISON_OP(ge, >=);
      COMPARISON_OP2(eq, ==);  /* include ref comparison */
      COMPARISON_OP2(ne, !=);  /* include ref comparison */
      NULL_COMPARISON_OP(null);
      NULL_COMPARISON_NOT_OP(nonnull);

      /* Goto pc at specified offset in switch table. */

      CASE(_tableswitch): {
          jint* lpc  = (jint*)VMalignWordUp(pc+1);
          int32_t  key  = STACK_INT(-1);
          int32_t  low  = Bytes::get_Java_u4((address)&lpc[1]);
          int32_t  high = Bytes::get_Java_u4((address)&lpc[2]);
          int32_t  skip;
          key -= low;
          if (((uint32_t) key > (uint32_t)(high - low))) {
            key = -1;
            skip = Bytes::get_Java_u4((address)&lpc[0]);
          } else {
            skip = Bytes::get_Java_u4((address)&lpc[key + 3]);
          }
          // Profile switch.
          BI_PROFILE_UPDATE_SWITCH(/*switch_index=*/key);
          // Does this really need a full backedge check (osr)?
          address branch_pc = pc;
          UPDATE_PC_AND_TOS(skip, -1);
          DO_BACKEDGE_CHECKS(skip, branch_pc);
          CONTINUE;
      }
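
      // For illustration: both switch opcodes read a 4-byte-aligned table
      // following the opcode (hence VMalignWordUp(pc+1)). tableswitch above
      // lays out: default offset, low, high, then (high - low + 1) jump
      // offsets indexed by (key - low); lookupswitch below lays out: default
      // offset, npairs, then npairs (match, offset) pairs searched linearly.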

      /* Goto pc whose table entry matches specified key. */

      CASE(_lookupswitch): {
          jint* lpc  = (jint*)VMalignWordUp(pc+1);
          int32_t  key  = STACK_INT(-1);
          int32_t  skip = Bytes::get_Java_u4((address) lpc); /* default amount */
          // Remember index.
          int      index = -1;
          int      newindex = 0;
          int32_t  npairs = Bytes::get_Java_u4((address) &lpc[1]);
          while (--npairs >= 0) {
            lpc += 2;
            if (key == (int32_t)Bytes::get_Java_u4((address)lpc)) {
              skip = Bytes::get_Java_u4((address)&lpc[1]);
              index = newindex;
              break;
            }
            newindex += 1;
          }
          // Profile switch.
          BI_PROFILE_UPDATE_SWITCH(/*switch_index=*/index);
          address branch_pc = pc;
          UPDATE_PC_AND_TOS(skip, -1);
          DO_BACKEDGE_CHECKS(skip, branch_pc);
          CONTINUE;
      }

      CASE(_fcmpl):
      CASE(_fcmpg):
      {
          SET_STACK_INT(VMfloatCompare(STACK_FLOAT(-2),
                                       STACK_FLOAT(-1),
                                       (opcode == Bytecodes::_fcmpl ? -1 : 1)),
                        -2);
          UPDATE_PC_AND_TOS_AND_CONTINUE(1, -1);
      }

      CASE(_dcmpl):
      CASE(_dcmpg):
      {
          int r = VMdoubleCompare(STACK_DOUBLE(-3),
                                  STACK_DOUBLE(-1),
                                  (opcode == Bytecodes::_dcmpl ? -1 : 1));
          MORE_STACK(-4); // Pop
          SET_STACK_INT(r, 0);
          UPDATE_PC_AND_TOS_AND_CONTINUE(1, 1);
      }

      CASE(_lcmp):
      {
          int r = VMlongCompare(STACK_LONG(-3), STACK_LONG(-1));
          MORE_STACK(-4);
          SET_STACK_INT(r, 0);
          UPDATE_PC_AND_TOS_AND_CONTINUE(1, 1);
      }


      /* Return from a method */

      CASE(_areturn):
      CASE(_ireturn):
      CASE(_freturn):
      {
          // Allow a safepoint before returning to frame manager.
          SAFEPOINT;

          goto handle_return;
      }

      CASE(_lreturn):
      CASE(_dreturn):
      {
          // Allow a safepoint before returning to frame manager.
          SAFEPOINT;
          goto handle_return;
      }

      CASE(_return_register_finalizer): {

          oop rcvr = LOCALS_OBJECT(0);
          VERIFY_OOP(rcvr);
          if (rcvr->klass()->has_finalizer()) {
            CALL_VM(InterpreterRuntime::register_finalizer(THREAD, rcvr), handle_exception);
          }
          goto handle_return;
      }
      CASE(_return): {

          // Allow a safepoint before returning to frame manager.
          SAFEPOINT;
          goto handle_return;
      }

      /* Array access byte-codes */

      /* Every array access byte-code starts out like this */
//        arrayOopDesc* arrObj = (arrayOopDesc*)STACK_OBJECT(arrayOff);
#define ARRAY_INTRO(arrayOff)                                                  \
      arrayOop arrObj = (arrayOop)STACK_OBJECT(arrayOff);                      \
      jint     index  = STACK_INT(arrayOff + 1);                               \
      char     message[jintAsStringSize];                                      \
      CHECK_NULL(arrObj);                                                      \
      if ((uint32_t)index >= (uint32_t)arrObj->length()) {                     \
          sprintf(message, "%d", index);                                       \
          VM_JAVA_ERROR(vmSymbols::java_lang_ArrayIndexOutOfBoundsException(), \
                        message, note_rangeCheck_trap);                        \
      }
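
      // For illustration: for loads the array ref sits at stack offset -2 and
      // the index at -1, so the load macros use ARRAY_INTRO(-2); stores have
      // the value on top and use ARRAY_INTRO(-3) (or -4 for two-slot values).
      // ARRAY_INTRO null-checks the array and, when the unsigned bounds
      // compare fails, throws ArrayIndexOutOfBoundsException with the
      // offending index as the message.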
These handle conversion from < 32-bit types */ 1677 #define ARRAY_LOADTO32(T, T2, format, stackRes, extra) \ 1678 { \ 1679 ARRAY_INTRO(-2); \ 1680 (void)extra; \ 1681 SET_ ## stackRes(*(T2 *)(((address) arrObj->base(T)) + index * sizeof(T2)), \ 1682 -2); \ 1683 UPDATE_PC_AND_TOS_AND_CONTINUE(1, -1); \ 1684 } 1685 1686 /* 64-bit loads */ 1687 #define ARRAY_LOADTO64(T,T2, stackRes, extra) \ 1688 { \ 1689 ARRAY_INTRO(-2); \ 1690 SET_ ## stackRes(*(T2 *)(((address) arrObj->base(T)) + index * sizeof(T2)), -1); \ 1691 (void)extra; \ 1692 UPDATE_PC_AND_CONTINUE(1); \ 1693 } 1694 1695 CASE(_iaload): 1696 ARRAY_LOADTO32(T_INT, jint, "%d", STACK_INT, 0); 1697 CASE(_faload): 1698 ARRAY_LOADTO32(T_FLOAT, jfloat, "%f", STACK_FLOAT, 0); 1699 CASE(_aaload): { 1700 ARRAY_INTRO(-2); 1701 SET_STACK_OBJECT(((objArrayOop) arrObj)->obj_at(index), -2); 1702 UPDATE_PC_AND_TOS_AND_CONTINUE(1, -1); 1703 } 1704 CASE(_baload): 1705 ARRAY_LOADTO32(T_BYTE, jbyte, "%d", STACK_INT, 0); 1706 CASE(_caload): 1707 ARRAY_LOADTO32(T_CHAR, jchar, "%d", STACK_INT, 0); 1708 CASE(_saload): 1709 ARRAY_LOADTO32(T_SHORT, jshort, "%d", STACK_INT, 0); 1710 CASE(_laload): 1711 ARRAY_LOADTO64(T_LONG, jlong, STACK_LONG, 0); 1712 CASE(_daload): 1713 ARRAY_LOADTO64(T_DOUBLE, jdouble, STACK_DOUBLE, 0); 1714 1715 /* 32-bit stores. These handle conversion to < 32-bit types */ 1716 #define ARRAY_STOREFROM32(T, T2, format, stackSrc, extra) \ 1717 { \ 1718 ARRAY_INTRO(-3); \ 1719 (void)extra; \ 1720 *(T2 *)(((address) arrObj->base(T)) + index * sizeof(T2)) = stackSrc( -1); \ 1721 UPDATE_PC_AND_TOS_AND_CONTINUE(1, -3); \ 1722 } 1723 1724 /* 64-bit stores */ 1725 #define ARRAY_STOREFROM64(T, T2, stackSrc, extra) \ 1726 { \ 1727 ARRAY_INTRO(-4); \ 1728 (void)extra; \ 1729 *(T2 *)(((address) arrObj->base(T)) + index * sizeof(T2)) = stackSrc( -1); \ 1730 UPDATE_PC_AND_TOS_AND_CONTINUE(1, -4); \ 1731 } 1732 1733 CASE(_iastore): 1734 ARRAY_STOREFROM32(T_INT, jint, "%d", STACK_INT, 0); 1735 CASE(_fastore): 1736 ARRAY_STOREFROM32(T_FLOAT, jfloat, "%f", STACK_FLOAT, 0); 1737 /* 1738 * This one looks different because of the assignability check 1739 */ 1740 CASE(_aastore): { 1741 oop rhsObject = STACK_OBJECT(-1); 1742 VERIFY_OOP(rhsObject); 1743 ARRAY_INTRO( -3); 1744 // arrObj, index are set 1745 if (rhsObject != NULL) { 1746 /* Check assignability of rhsObject into arrObj */ 1747 Klass* rhsKlass = rhsObject->klass(); // EBX (subclass) 1748 Klass* elemKlass = ObjArrayKlass::cast(arrObj->klass())->element_klass(); // superklass EAX 1749 // 1750 // Check for compatibility. This check must not GC!! 1751 // Seems way more expensive now that we must dispatch 1752 // 1753 if (rhsKlass != elemKlass && !rhsKlass->is_subtype_of(elemKlass)) { // ebx->is... 1754 // Decrement counter if subtype check failed. 1755 BI_PROFILE_SUBTYPECHECK_FAILED(rhsKlass); 1756 VM_JAVA_ERROR(vmSymbols::java_lang_ArrayStoreException(), "", note_arrayCheck_trap); 1757 } 1758 // Profile checkcast with null_seen and receiver. 1759 BI_PROFILE_UPDATE_CHECKCAST(/*null_seen=*/false, rhsKlass); 1760 } else { 1761 // Profile checkcast with null_seen and receiver.
1762 BI_PROFILE_UPDATE_CHECKCAST(/*null_seen=*/true, NULL); 1763 } 1764 ((objArrayOop) arrObj)->obj_at_put(index, rhsObject); 1765 UPDATE_PC_AND_TOS_AND_CONTINUE(1, -3); 1766 } 1767 CASE(_bastore): 1768 ARRAY_STOREFROM32(T_BYTE, jbyte, "%d", STACK_INT, 0); 1769 CASE(_castore): 1770 ARRAY_STOREFROM32(T_CHAR, jchar, "%d", STACK_INT, 0); 1771 CASE(_sastore): 1772 ARRAY_STOREFROM32(T_SHORT, jshort, "%d", STACK_INT, 0); 1773 CASE(_lastore): 1774 ARRAY_STOREFROM64(T_LONG, jlong, STACK_LONG, 0); 1775 CASE(_dastore): 1776 ARRAY_STOREFROM64(T_DOUBLE, jdouble, STACK_DOUBLE, 0); 1777 1778 CASE(_arraylength): 1779 { 1780 arrayOop ary = (arrayOop) STACK_OBJECT(-1); 1781 CHECK_NULL(ary); 1782 SET_STACK_INT(ary->length(), -1); 1783 UPDATE_PC_AND_CONTINUE(1); 1784 } 1785 1786 /* monitorenter and monitorexit for locking/unlocking an object */ 1787 1788 CASE(_monitorenter): { 1789 oop lockee = STACK_OBJECT(-1); 1790 // dereferencing lockee ought to provoke an implicit null check 1791 CHECK_NULL(lockee); 1792 // find a free monitor or one already allocated for this object 1793 // if we find a matching object then we need a new monitor 1794 // since this is recursive enter 1795 BasicObjectLock* limit = istate->monitor_base(); 1796 BasicObjectLock* most_recent = (BasicObjectLock*) istate->stack_base(); 1797 BasicObjectLock* entry = NULL; 1798 while (most_recent != limit ) { 1799 if (most_recent->obj() == NULL) entry = most_recent; 1800 else if (most_recent->obj() == lockee) break; 1801 most_recent++; 1802 } 1803 if (entry != NULL) { 1804 entry->set_obj(lockee); 1805 int success = false; 1806 uintptr_t epoch_mask_in_place = (uintptr_t)markOopDesc::epoch_mask_in_place; 1807 1808 markOop mark = lockee->mark(); 1809 intptr_t hash = (intptr_t) markOopDesc::no_hash; 1810 // implies UseBiasedLocking 1811 if (mark->has_bias_pattern()) { 1812 uintptr_t thread_ident; 1813 uintptr_t anticipated_bias_locking_value; 1814 thread_ident = (uintptr_t)istate->thread(); 1815 anticipated_bias_locking_value = 1816 (((uintptr_t)lockee->klass()->prototype_header() | thread_ident) ^ (uintptr_t)mark) & 1817 ~((uintptr_t) markOopDesc::age_mask_in_place); 1818 1819 if (anticipated_bias_locking_value == 0) { 1820 // already biased towards this thread, nothing to do 1821 if (PrintBiasedLockingStatistics) { 1822 (* BiasedLocking::biased_lock_entry_count_addr())++; 1823 } 1824 success = true; 1825 } 1826 else if ((anticipated_bias_locking_value & markOopDesc::biased_lock_mask_in_place) != 0) { 1827 // try revoke bias 1828 markOop header = lockee->klass()->prototype_header(); 1829 if (hash != markOopDesc::no_hash) { 1830 header = header->copy_set_hash(hash); 1831 } 1832 if (Atomic::cmpxchg_ptr(header, lockee->mark_addr(), mark) == mark) { 1833 if (PrintBiasedLockingStatistics) 1834 (*BiasedLocking::revoked_lock_entry_count_addr())++; 1835 } 1836 } 1837 else if ((anticipated_bias_locking_value & epoch_mask_in_place) !=0) { 1838 // try rebias 1839 markOop new_header = (markOop) ( (intptr_t) lockee->klass()->prototype_header() | thread_ident); 1840 if (hash != markOopDesc::no_hash) { 1841 new_header = new_header->copy_set_hash(hash); 1842 } 1843 if (Atomic::cmpxchg_ptr((void*)new_header, lockee->mark_addr(), mark) == mark) { 1844 if (PrintBiasedLockingStatistics) 1845 (* BiasedLocking::rebiased_lock_entry_count_addr())++; 1846 } 1847 else { 1848 CALL_VM(InterpreterRuntime::monitorenter(THREAD, entry), handle_exception); 1849 } 1850 success = true; 1851 } 1852 else { 1853 // try to bias towards thread in case object is anonymously biased 1854 markOop
header = (markOop) ((uintptr_t) mark & ((uintptr_t)markOopDesc::biased_lock_mask_in_place | 1855 (uintptr_t)markOopDesc::age_mask_in_place | 1856 epoch_mask_in_place)); 1857 if (hash != markOopDesc::no_hash) { 1858 header = header->copy_set_hash(hash); 1859 } 1860 markOop new_header = (markOop) ((uintptr_t) header | thread_ident); 1861 // debugging hint 1862 DEBUG_ONLY(entry->lock()->set_displaced_header((markOop) (uintptr_t) 0xdeaddead);) 1863 if (Atomic::cmpxchg_ptr((void*)new_header, lockee->mark_addr(), header) == header) { 1864 if (PrintBiasedLockingStatistics) 1865 (* BiasedLocking::anonymously_biased_lock_entry_count_addr())++; 1866 } 1867 else { 1868 CALL_VM(InterpreterRuntime::monitorenter(THREAD, entry), handle_exception); 1869 } 1870 success = true; 1871 } 1872 } 1873 1874 // traditional lightweight locking 1875 if (!success) { 1876 markOop displaced = lockee->mark()->set_unlocked(); 1877 entry->lock()->set_displaced_header(displaced); 1878 bool call_vm = UseHeavyMonitors; 1879 if (call_vm || Atomic::cmpxchg_ptr(entry, lockee->mark_addr(), displaced) != displaced) { 1880 // Is it simple recursive case? 1881 if (!call_vm && THREAD->is_lock_owned((address) displaced->clear_lock_bits())) { 1882 entry->lock()->set_displaced_header(NULL); 1883 } else { 1884 CALL_VM(InterpreterRuntime::monitorenter(THREAD, entry), handle_exception); 1885 } 1886 } 1887 } 1888 UPDATE_PC_AND_TOS_AND_CONTINUE(1, -1); 1889 } else { 1890 istate->set_msg(more_monitors); 1891 UPDATE_PC_AND_RETURN(0); // Re-execute 1892 } 1893 } 1894 1895 CASE(_monitorexit): { 1896 oop lockee = STACK_OBJECT(-1); 1897 CHECK_NULL(lockee); 1898 // dereferencing lockee ought to provoke an implicit null check 1899 // find our monitor slot 1900 BasicObjectLock* limit = istate->monitor_base(); 1901 BasicObjectLock* most_recent = (BasicObjectLock*) istate->stack_base(); 1902 while (most_recent != limit ) { 1903 if ((most_recent)->obj() == lockee) { 1904 BasicLock* lock = most_recent->lock(); 1905 markOop header = lock->displaced_header(); 1906 most_recent->set_obj(NULL); 1907 if (!lockee->mark()->has_bias_pattern()) { 1908 bool call_vm = UseHeavyMonitors; 1909 // If it isn't recursive we either must swap old header or call the runtime 1910 if (header != NULL || call_vm) { 1911 if (call_vm || Atomic::cmpxchg_ptr(header, lockee->mark_addr(), lock) != lock) { 1912 // restore object for the slow case 1913 most_recent->set_obj(lockee); 1914 CALL_VM(InterpreterRuntime::monitorexit(THREAD, most_recent), handle_exception); 1915 } 1916 } 1917 } 1918 UPDATE_PC_AND_TOS_AND_CONTINUE(1, -1); 1919 } 1920 most_recent++; 1921 } 1922 // Need to throw illegal monitor state exception 1923 CALL_VM(InterpreterRuntime::throw_illegal_monitor_state_exception(THREAD), handle_exception); 1924 ShouldNotReachHere(); 1925 } 1926 1927 /* All of the non-quick opcodes. */ 1928 1929 /* Set clobbersCpIndex true if the quickened opcode clobbers the 1930 * constant pool index in the instruction. 1931 */ 1932 CASE(_getfield): 1933 CASE(_getstatic): 1934 { 1935 u2 index; 1936 ConstantPoolCacheEntry* cache; 1937 index = Bytes::get_native_u2(pc+1); 1938 1939 // QQQ Need to make this as inlined as possible. Probably need to 1940 // split all the bytecode cases out so c++ compiler has a chance 1941 // for constant prop to fold everything possible away.
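/*
 * For orientation, a minimal Java example (illustrative only, not part of the
 * original source) of code that reaches this case. The instance read compiles
 * to getfield and the static read to getstatic; the first execution of each
 * resolves the constant pool cache entry below, and later executions find the
 * entry already resolved.
 *
 *   class C {
 *     static int s;
 *     int f;
 *     int sum() { return f + s; }   // getfield C.f, then getstatic C.s
 *   }
 */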
1942 1943 cache = cp->entry_at(index); 1944 if (!cache->is_resolved((Bytecodes::Code)opcode)) { 1945 CALL_VM(InterpreterRuntime::resolve_from_cache(THREAD, (Bytecodes::Code)opcode), 1946 handle_exception); 1947 cache = cp->entry_at(index); 1948 } 1949 1950 #ifdef VM_JVMTI 1951 if (_jvmti_interp_events) { 1952 int *count_addr; 1953 oop obj; 1954 // Check to see if a field access watch has been set 1955 // before we take the time to call into the VM. 1956 count_addr = (int *)JvmtiExport::get_field_access_count_addr(); 1957 if ( *count_addr > 0 ) { 1958 if ((Bytecodes::Code)opcode == Bytecodes::_getstatic) { 1959 obj = (oop)NULL; 1960 } else { 1961 obj = (oop) STACK_OBJECT(-1); 1962 VERIFY_OOP(obj); 1963 } 1964 CALL_VM(InterpreterRuntime::post_field_access(THREAD, 1965 obj, 1966 cache), 1967 handle_exception); 1968 } 1969 } 1970 #endif /* VM_JVMTI */ 1971 1972 oop obj; 1973 if ((Bytecodes::Code)opcode == Bytecodes::_getstatic) { 1974 Klass* k = cache->f1_as_klass(); 1975 obj = k->java_mirror(); 1976 MORE_STACK(1); // Assume single slot push 1977 } else { 1978 obj = (oop) STACK_OBJECT(-1); 1979 CHECK_NULL(obj); 1980 } 1981 1982 // 1983 // Now store the result on the stack 1984 // 1985 TosState tos_type = cache->flag_state(); 1986 int field_offset = cache->f2_as_index(); 1987 if (cache->is_volatile()) { 1988 if (support_IRIW_for_not_multiple_copy_atomic_cpu) { 1989 OrderAccess::fence(); 1990 } 1991 if (tos_type == atos) { 1992 VERIFY_OOP(obj->obj_field_acquire(field_offset)); 1993 SET_STACK_OBJECT(obj->obj_field_acquire(field_offset), -1); 1994 } else if (tos_type == itos) { 1995 SET_STACK_INT(obj->int_field_acquire(field_offset), -1); 1996 } else if (tos_type == ltos) { 1997 SET_STACK_LONG(obj->long_field_acquire(field_offset), 0); 1998 MORE_STACK(1); 1999 } else if (tos_type == btos) { 2000 SET_STACK_INT(obj->byte_field_acquire(field_offset), -1); 2001 } else if (tos_type == ctos) { 2002 SET_STACK_INT(obj->char_field_acquire(field_offset), -1); 2003 } else if (tos_type == stos) { 2004 SET_STACK_INT(obj->short_field_acquire(field_offset), -1); 2005 } else if (tos_type == ftos) { 2006 SET_STACK_FLOAT(obj->float_field_acquire(field_offset), -1); 2007 } else { 2008 SET_STACK_DOUBLE(obj->double_field_acquire(field_offset), 0); 2009 MORE_STACK(1); 2010 } 2011 } else { 2012 if (tos_type == atos) { 2013 VERIFY_OOP(obj->obj_field(field_offset)); 2014 SET_STACK_OBJECT(obj->obj_field(field_offset), -1); 2015 } else if (tos_type == itos) { 2016 SET_STACK_INT(obj->int_field(field_offset), -1); 2017 } else if (tos_type == ltos) { 2018 SET_STACK_LONG(obj->long_field(field_offset), 0); 2019 MORE_STACK(1); 2020 } else if (tos_type == btos) { 2021 SET_STACK_INT(obj->byte_field(field_offset), -1); 2022 } else if (tos_type == ctos) { 2023 SET_STACK_INT(obj->char_field(field_offset), -1); 2024 } else if (tos_type == stos) { 2025 SET_STACK_INT(obj->short_field(field_offset), -1); 2026 } else if (tos_type == ftos) { 2027 SET_STACK_FLOAT(obj->float_field(field_offset), -1); 2028 } else { 2029 SET_STACK_DOUBLE(obj->double_field(field_offset), 0); 2030 MORE_STACK(1); 2031 } 2032 } 2033 2034 UPDATE_PC_AND_CONTINUE(3); 2035 } 2036 2037 CASE(_putfield): 2038 CASE(_putstatic): 2039 { 2040 u2 index = Bytes::get_native_u2(pc+1); 2041 ConstantPoolCacheEntry* cache = cp->entry_at(index); 2042 if (!cache->is_resolved((Bytecodes::Code)opcode)) { 2043 CALL_VM(InterpreterRuntime::resolve_from_cache(THREAD, (Bytecodes::Code)opcode), 2044 handle_exception); 2045 cache = cp->entry_at(index); 2046 } 2047 2048 #ifdef VM_JVMTI
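/*
 * Sketch of the JVMTI side of the watchpoint check below (assumed agent
 * usage, not part of the original source): an agent registers a watch with
 *
 *   jvmti->SetFieldModificationWatch(klass, fieldID);
 *
 * which bumps the modification-watch count. The *count_addr > 0 test below
 * is just a cheap filter so the common no-watch case never calls into the VM.
 */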
2049 if (_jvmti_interp_events) { 2050 int *count_addr; 2051 oop obj; 2052 // Check to see if a field modification watch has been set 2053 // before we take the time to call into the VM. 2054 count_addr = (int *)JvmtiExport::get_field_modification_count_addr(); 2055 if ( *count_addr > 0 ) { 2056 if ((Bytecodes::Code)opcode == Bytecodes::_putstatic) { 2057 obj = (oop)NULL; 2058 } 2059 else { 2060 if (cache->is_long() || cache->is_double()) { 2061 obj = (oop) STACK_OBJECT(-3); 2062 } else { 2063 obj = (oop) STACK_OBJECT(-2); 2064 } 2065 VERIFY_OOP(obj); 2066 } 2067 2068 CALL_VM(InterpreterRuntime::post_field_modification(THREAD, 2069 obj, 2070 cache, 2071 (jvalue *)STACK_SLOT(-1)), 2072 handle_exception); 2073 } 2074 } 2075 #endif /* VM_JVMTI */ 2076 2077 // QQQ Need to make this as inlined as possible. Probably need to split all the bytecode cases 2078 // out so c++ compiler has a chance for constant prop to fold everything possible away. 2079 2080 oop obj; 2081 int count; 2082 TosState tos_type = cache->flag_state(); 2083 2084 count = -1; 2085 if (tos_type == ltos || tos_type == dtos) { 2086 --count; 2087 } 2088 if ((Bytecodes::Code)opcode == Bytecodes::_putstatic) { 2089 Klass* k = cache->f1_as_klass(); 2090 obj = k->java_mirror(); 2091 } else { 2092 --count; 2093 obj = (oop) STACK_OBJECT(count); 2094 CHECK_NULL(obj); 2095 } 2096 2097 // 2098 // Now store the result 2099 // 2100 int field_offset = cache->f2_as_index(); 2101 if (cache->is_volatile()) { 2102 if (tos_type == itos) { 2103 obj->release_int_field_put(field_offset, STACK_INT(-1)); 2104 } else if (tos_type == atos) { 2105 VERIFY_OOP(STACK_OBJECT(-1)); 2106 obj->release_obj_field_put(field_offset, STACK_OBJECT(-1)); 2107 } else if (tos_type == btos) { 2108 obj->release_byte_field_put(field_offset, STACK_INT(-1)); 2109 } else if (tos_type == ltos) { 2110 obj->release_long_field_put(field_offset, STACK_LONG(-1)); 2111 } else if (tos_type == ctos) { 2112 obj->release_char_field_put(field_offset, STACK_INT(-1)); 2113 } else if (tos_type == stos) { 2114 obj->release_short_field_put(field_offset, STACK_INT(-1)); 2115 } else if (tos_type == ftos) { 2116 obj->release_float_field_put(field_offset, STACK_FLOAT(-1)); 2117 } else { 2118 obj->release_double_field_put(field_offset, STACK_DOUBLE(-1)); 2119 } 2120 OrderAccess::storeload(); 2121 } else { 2122 if (tos_type == itos) { 2123 obj->int_field_put(field_offset, STACK_INT(-1)); 2124 } else if (tos_type == atos) { 2125 VERIFY_OOP(STACK_OBJECT(-1)); 2126 obj->obj_field_put(field_offset, STACK_OBJECT(-1)); 2127 } else if (tos_type == btos) { 2128 obj->byte_field_put(field_offset, STACK_INT(-1)); 2129 } else if (tos_type == ltos) { 2130 obj->long_field_put(field_offset, STACK_LONG(-1)); 2131 } else if (tos_type == ctos) { 2132 obj->char_field_put(field_offset, STACK_INT(-1)); 2133 } else if (tos_type == stos) { 2134 obj->short_field_put(field_offset, STACK_INT(-1)); 2135 } else if (tos_type == ftos) { 2136 obj->float_field_put(field_offset, STACK_FLOAT(-1)); 2137 } else { 2138 obj->double_field_put(field_offset, STACK_DOUBLE(-1)); 2139 } 2140 } 2141 2142 UPDATE_PC_AND_TOS_AND_CONTINUE(3, count); 2143 } 2144 2145 CASE(_new): { 2146 u2 index = Bytes::get_Java_u2(pc+1); 2147 ConstantPool* constants = istate->method()->constants(); 2148 if (!constants->tag_at(index).is_unresolved_klass()) { 2149 // Make sure klass is initialized and doesn't have a finalizer 2150 Klass* entry = constants->slot_at(index).get_klass(); 2151 InstanceKlass* ik = InstanceKlass::cast(entry); 2152 if (ik->is_initialized() 
&& ik->can_be_fastpath_allocated() ) { 2153 size_t obj_size = ik->size_helper(); 2154 oop result = NULL; 2155 // If the TLAB isn't pre-zeroed then we'll have to do it 2156 bool need_zero = !ZeroTLAB; 2157 if (UseTLAB) { 2158 result = (oop) THREAD->tlab().allocate(obj_size); 2159 } 2160 // Disable non-TLAB-based fast-path, because profiling requires that all 2161 // allocations go through InterpreterRuntime::_new() if THREAD->tlab().allocate 2162 // returns NULL. 2163 #ifndef CC_INTERP_PROFILE 2164 if (result == NULL) { 2165 need_zero = true; 2166 // Try to allocate in shared eden 2167 retry: 2168 HeapWord* compare_to = *Universe::heap()->top_addr(); 2169 HeapWord* new_top = compare_to + obj_size; 2170 if (new_top <= *Universe::heap()->end_addr()) { 2171 if (Atomic::cmpxchg_ptr(new_top, Universe::heap()->top_addr(), compare_to) != compare_to) { 2172 goto retry; 2173 } 2174 result = (oop) compare_to; 2175 } 2176 } 2177 #endif 2178 if (result != NULL) { 2179 // Initialize the object (if it has nonzero size and needs zeroing) and then the header 2180 if (need_zero ) { 2181 HeapWord* to_zero = (HeapWord*) result + sizeof(oopDesc) / oopSize; 2182 obj_size -= sizeof(oopDesc) / oopSize; 2183 if (obj_size > 0 ) { 2184 memset(to_zero, 0, obj_size * HeapWordSize); 2185 } 2186 } 2187 if (UseBiasedLocking) { 2188 result->set_mark(ik->prototype_header()); 2189 } else { 2190 result->set_mark(markOopDesc::prototype()); 2191 } 2192 result->set_klass_gap(0); 2193 result->set_klass(ik); 2194 // Must prevent reordering of stores for object initialization 2195 // with stores that publish the new object. 2196 OrderAccess::storestore(); 2197 SET_STACK_OBJECT(result, 0); 2198 UPDATE_PC_AND_TOS_AND_CONTINUE(3, 1); 2199 } 2200 } 2201 } 2202 // Slow case allocation 2203 CALL_VM(InterpreterRuntime::_new(THREAD, METHOD->constants(), index), 2204 handle_exception); 2205 // Must prevent reordering of stores for object initialization 2206 // with stores that publish the new object. 2207 OrderAccess::storestore(); 2208 SET_STACK_OBJECT(THREAD->vm_result(), 0); 2209 THREAD->set_vm_result(NULL); 2210 UPDATE_PC_AND_TOS_AND_CONTINUE(3, 1); 2211 } 2212 CASE(_anewarray): { 2213 u2 index = Bytes::get_Java_u2(pc+1); 2214 jint size = STACK_INT(-1); 2215 CALL_VM(InterpreterRuntime::anewarray(THREAD, METHOD->constants(), index, size), 2216 handle_exception); 2217 // Must prevent reordering of stores for object initialization 2218 // with stores that publish the new object. 2219 OrderAccess::storestore(); 2220 SET_STACK_OBJECT(THREAD->vm_result(), -1); 2221 THREAD->set_vm_result(NULL); 2222 UPDATE_PC_AND_CONTINUE(3); 2223 } 2224 CASE(_multianewarray): { 2225 jint dims = *(pc+3); 2226 jint size = STACK_INT(-1); 2227 // stack grows down, dimensions are up! 2228 jint *dimarray = 2229 (jint*)&topOfStack[dims * Interpreter::stackElementWords+ 2230 Interpreter::stackElementWords-1]; 2231 // Adjust pointer to start of stack element 2232 CALL_VM(InterpreterRuntime::multianewarray(THREAD, dimarray), 2233 handle_exception); 2234 // Must prevent reordering of stores for object initialization 2235 // with stores that publish the new object. 2236 OrderAccess::storestore(); 2237 SET_STACK_OBJECT(THREAD->vm_result(), -dims); 2238 THREAD->set_vm_result(NULL); 2239 UPDATE_PC_AND_TOS_AND_CONTINUE(4, -(dims-1)); 2240 } 2241 CASE(_checkcast): 2242 if (STACK_OBJECT(-1) != NULL) { 2243 VERIFY_OOP(STACK_OBJECT(-1)); 2244 u2 index = Bytes::get_Java_u2(pc+1); 2245 // Constant pool may have actual klass or unresolved klass. If it is 2246 // unresolved we must resolve it.
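/*
 * Illustrative only (not part of the original source): the kind of Java code
 * that reaches this case. The cast compiles to a checkcast bytecode, and its
 * first execution may find the constant pool entry still unresolved, which is
 * what the quickening call below handles.
 *
 *   Object o = list.get(0);      // 'list' is a hypothetical variable
 *   String s = (String) o;       // checkcast java/lang/String
 */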
2247 if (METHOD->constants()->tag_at(index).is_unresolved_klass()) { 2248 CALL_VM(InterpreterRuntime::quicken_io_cc(THREAD), handle_exception); 2249 } 2250 Klass* klassOf = (Klass*) METHOD->constants()->slot_at(index).get_klass(); 2251 Klass* objKlass = STACK_OBJECT(-1)->klass(); // ebx 2252 // 2253 // Check for compatibility. This check must not GC!! 2254 // Seems way more expensive now that we must dispatch. 2255 // 2256 if (objKlass != klassOf && !objKlass->is_subtype_of(klassOf)) { 2257 // Decrement counter at checkcast. 2258 BI_PROFILE_SUBTYPECHECK_FAILED(objKlass); 2259 ResourceMark rm(THREAD); 2260 const char* objName = objKlass->external_name(); 2261 const char* klassName = klassOf->external_name(); 2262 char* message = SharedRuntime::generate_class_cast_message( 2263 objName, klassName); 2264 VM_JAVA_ERROR(vmSymbols::java_lang_ClassCastException(), message, note_classCheck_trap); 2265 } 2266 // Profile checkcast with null_seen and receiver. 2267 BI_PROFILE_UPDATE_CHECKCAST(/*null_seen=*/false, objKlass); 2268 } else { 2269 // Profile checkcast with null_seen and receiver. 2270 BI_PROFILE_UPDATE_CHECKCAST(/*null_seen=*/true, NULL); 2271 } 2272 UPDATE_PC_AND_CONTINUE(3); 2273 2274 CASE(_instanceof): 2275 if (STACK_OBJECT(-1) == NULL) { 2276 SET_STACK_INT(0, -1); 2277 // Profile instanceof with null_seen and receiver. 2278 BI_PROFILE_UPDATE_INSTANCEOF(/*null_seen=*/true, NULL); 2279 } else { 2280 VERIFY_OOP(STACK_OBJECT(-1)); 2281 u2 index = Bytes::get_Java_u2(pc+1); 2282 // Constant pool may have actual klass or unresolved klass. If it is 2283 // unresolved we must resolve it. 2284 if (METHOD->constants()->tag_at(index).is_unresolved_klass()) { 2285 CALL_VM(InterpreterRuntime::quicken_io_cc(THREAD), handle_exception); 2286 } 2287 Klass* klassOf = (Klass*) METHOD->constants()->slot_at(index).get_klass(); 2288 Klass* objKlass = STACK_OBJECT(-1)->klass(); 2289 // 2290 // Check for compatibility. This check must not GC!! 2291 // Seems way more expensive now that we must dispatch. 2292 // 2293 if ( objKlass == klassOf || objKlass->is_subtype_of(klassOf)) { 2294 SET_STACK_INT(1, -1); 2295 } else { 2296 SET_STACK_INT(0, -1); 2297 // Decrement counter if subtype check failed. 2298 BI_PROFILE_SUBTYPECHECK_FAILED(objKlass); 2299 } 2300 // Profile instanceof with null_seen and receiver.
2301 BI_PROFILE_UPDATE_INSTANCEOF(/*null_seen=*/false, objKlass); 2302 } 2303 UPDATE_PC_AND_CONTINUE(3); 2304 2305 CASE(_ldc_w): 2306 CASE(_ldc): 2307 { 2308 u2 index; 2309 bool wide = false; 2310 int incr = 2; // frequent case 2311 if (opcode == Bytecodes::_ldc) { 2312 index = pc[1]; 2313 } else { 2314 index = Bytes::get_Java_u2(pc+1); 2315 incr = 3; 2316 wide = true; 2317 } 2318 2319 ConstantPool* constants = METHOD->constants(); 2320 switch (constants->tag_at(index).value()) { 2321 case JVM_CONSTANT_Integer: 2322 SET_STACK_INT(constants->int_at(index), 0); 2323 break; 2324 2325 case JVM_CONSTANT_Float: 2326 SET_STACK_FLOAT(constants->float_at(index), 0); 2327 break; 2328 2329 case JVM_CONSTANT_String: 2330 { 2331 oop result = constants->resolved_references()->obj_at(index); 2332 if (result == NULL) { 2333 CALL_VM(InterpreterRuntime::resolve_ldc(THREAD, (Bytecodes::Code) opcode), handle_exception); 2334 SET_STACK_OBJECT(THREAD->vm_result(), 0); 2335 THREAD->set_vm_result(NULL); 2336 } else { 2337 VERIFY_OOP(result); 2338 SET_STACK_OBJECT(result, 0); 2339 } 2340 break; 2341 } 2342 2343 case JVM_CONSTANT_Class: 2344 VERIFY_OOP(constants->resolved_klass_at(index)->java_mirror()); 2345 SET_STACK_OBJECT(constants->resolved_klass_at(index)->java_mirror(), 0); 2346 break; 2347 2348 case JVM_CONSTANT_UnresolvedClass: 2349 case JVM_CONSTANT_UnresolvedClassInError: 2350 CALL_VM(InterpreterRuntime::ldc(THREAD, wide), handle_exception); 2351 SET_STACK_OBJECT(THREAD->vm_result(), 0); 2352 THREAD->set_vm_result(NULL); 2353 break; 2354 2355 default: ShouldNotReachHere(); 2356 } 2357 UPDATE_PC_AND_TOS_AND_CONTINUE(incr, 1); 2358 } 2359 2360 CASE(_ldc2_w): 2361 { 2362 u2 index = Bytes::get_Java_u2(pc+1); 2363 2364 ConstantPool* constants = METHOD->constants(); 2365 switch (constants->tag_at(index).value()) { 2366 2367 case JVM_CONSTANT_Long: 2368 SET_STACK_LONG(constants->long_at(index), 1); 2369 break; 2370 2371 case JVM_CONSTANT_Double: 2372 SET_STACK_DOUBLE(constants->double_at(index), 1); 2373 break; 2374 default: ShouldNotReachHere(); 2375 } 2376 UPDATE_PC_AND_TOS_AND_CONTINUE(3, 2); 2377 } 2378 2379 CASE(_fast_aldc_w): 2380 CASE(_fast_aldc): { 2381 u2 index; 2382 int incr; 2383 if (opcode == Bytecodes::_fast_aldc) { 2384 index = pc[1]; 2385 incr = 2; 2386 } else { 2387 index = Bytes::get_native_u2(pc+1); 2388 incr = 3; 2389 } 2390 2391 // We are resolved if the f1 field contains a non-null object (CallSite, etc.) 2392 // This kind of CP cache entry does not need to match the flags byte, because 2393 // there is a 1-1 relation between bytecode type and CP entry type. 2394 ConstantPool* constants = METHOD->constants(); 2395 oop result = constants->resolved_references()->obj_at(index); 2396 if (result == NULL) { 2397 CALL_VM(InterpreterRuntime::resolve_ldc(THREAD, (Bytecodes::Code) opcode), 2398 handle_exception); 2399 result = THREAD->vm_result(); 2400 } 2401 2402 VERIFY_OOP(result); 2403 SET_STACK_OBJECT(result, 0); 2404 UPDATE_PC_AND_TOS_AND_CONTINUE(incr, 1); 2405 } 2406 2407 CASE(_invokedynamic): { 2408 2409 u4 index = Bytes::get_native_u4(pc+1); 2410 ConstantPoolCacheEntry* cache = cp->constant_pool()->invokedynamic_cp_cache_entry_at(index); 2411 2412 // We are resolved if the resolved_references field contains a non-null object (CallSite, etc.) 2413 // This kind of CP cache entry does not need to match the flags byte, because 2414 // there is a 1-1 relation between bytecode type and CP entry type. 2415 if (! 
cache->is_resolved((Bytecodes::Code) opcode)) { 2416 CALL_VM(InterpreterRuntime::resolve_from_cache(THREAD, (Bytecodes::Code)opcode), 2417 handle_exception); 2418 cache = cp->constant_pool()->invokedynamic_cp_cache_entry_at(index); 2419 } 2420 2421 Method* method = cache->f1_as_method(); 2422 if (VerifyOops) method->verify(); 2423 2424 if (cache->has_appendix()) { 2425 ConstantPool* constants = METHOD->constants(); 2426 SET_STACK_OBJECT(cache->appendix_if_resolved(constants), 0); 2427 MORE_STACK(1); 2428 } 2429 2430 istate->set_msg(call_method); 2431 istate->set_callee(method); 2432 istate->set_callee_entry_point(method->from_interpreted_entry()); 2433 istate->set_bcp_advance(5); 2434 2435 // Invokedynamic has got a call counter, just like an invokestatic -> increment! 2436 BI_PROFILE_UPDATE_CALL(); 2437 2438 UPDATE_PC_AND_RETURN(0); // I'll be back... 2439 } 2440 2441 CASE(_invokehandle): { 2442 2443 u2 index = Bytes::get_native_u2(pc+1); 2444 ConstantPoolCacheEntry* cache = cp->entry_at(index); 2445 2446 if (! cache->is_resolved((Bytecodes::Code) opcode)) { 2447 CALL_VM(InterpreterRuntime::resolve_from_cache(THREAD, (Bytecodes::Code)opcode), 2448 handle_exception); 2449 cache = cp->entry_at(index); 2450 } 2451 2452 Method* method = cache->f1_as_method(); 2453 if (VerifyOops) method->verify(); 2454 2455 if (cache->has_appendix()) { 2456 ConstantPool* constants = METHOD->constants(); 2457 SET_STACK_OBJECT(cache->appendix_if_resolved(constants), 0); 2458 MORE_STACK(1); 2459 } 2460 2461 istate->set_msg(call_method); 2462 istate->set_callee(method); 2463 istate->set_callee_entry_point(method->from_interpreted_entry()); 2464 istate->set_bcp_advance(3); 2465 2466 // Invokehandle has got a call counter, just like a final call -> increment! 2467 BI_PROFILE_UPDATE_FINALCALL(); 2468 2469 UPDATE_PC_AND_RETURN(0); // I'll be back... 2470 } 2471 2472 CASE(_invokeinterface): { 2473 u2 index = Bytes::get_native_u2(pc+1); 2474 2475 // QQQ Need to make this as inlined as possible. Probably need to split all the bytecode cases 2476 // out so c++ compiler has a chance for constant prop to fold everything possible away. 2477 2478 ConstantPoolCacheEntry* cache = cp->entry_at(index); 2479 if (!cache->is_resolved((Bytecodes::Code)opcode)) { 2480 CALL_VM(InterpreterRuntime::resolve_from_cache(THREAD, (Bytecodes::Code)opcode), 2481 handle_exception); 2482 cache = cp->entry_at(index); 2483 } 2484 2485 istate->set_msg(call_method); 2486 2487 // Special case of invokeinterface called for virtual method of 2488 // java.lang.Object. See cpCacheOop.cpp for details. 2489 // This code isn't produced by javac, but could be produced by 2490 // another compliant java compiler. 2491 if (cache->is_forced_virtual()) { 2492 Method* callee; 2493 CHECK_NULL(STACK_OBJECT(-(cache->parameter_size()))); 2494 if (cache->is_vfinal()) { 2495 callee = cache->f2_as_vfinal_method(); 2496 // Profile 'special case of invokeinterface' final call. 2497 BI_PROFILE_UPDATE_FINALCALL(); 2498 } else { 2499 // Get receiver. 2500 int parms = cache->parameter_size(); 2501 // Same comments as invokevirtual apply here. 2502 oop rcvr = STACK_OBJECT(-parms); 2503 VERIFY_OOP(rcvr); 2504 InstanceKlass* rcvrKlass = (InstanceKlass*)rcvr->klass(); 2505 callee = (Method*) rcvrKlass->start_of_vtable()[ cache->f2_as_index()]; 2506 // Profile 'special case of invokeinterface' virtual call. 
2507 BI_PROFILE_UPDATE_VIRTUALCALL(rcvr->klass()); 2508 } 2509 istate->set_callee(callee); 2510 istate->set_callee_entry_point(callee->from_interpreted_entry()); 2511 #ifdef VM_JVMTI 2512 if (JvmtiExport::can_post_interpreter_events() && THREAD->is_interp_only_mode()) { 2513 istate->set_callee_entry_point(callee->interpreter_entry()); 2514 } 2515 #endif /* VM_JVMTI */ 2516 istate->set_bcp_advance(5); 2517 UPDATE_PC_AND_RETURN(0); // I'll be back... 2518 } 2519 2520 // this could definitely be cleaned up QQQ 2521 Method* callee; 2522 Klass* iclass = cache->f1_as_klass(); 2523 // InstanceKlass* interface = (InstanceKlass*) iclass; 2524 // get receiver 2525 int parms = cache->parameter_size(); 2526 oop rcvr = STACK_OBJECT(-parms); 2527 CHECK_NULL(rcvr); 2528 InstanceKlass* int2 = (InstanceKlass*) rcvr->klass(); 2529 itableOffsetEntry* ki = (itableOffsetEntry*) int2->start_of_itable(); 2530 int i; 2531 for ( i = 0 ; i < int2->itable_length() ; i++, ki++ ) { 2532 if (ki->interface_klass() == iclass) break; 2533 } 2534 // If the interface isn't found, this class doesn't implement this 2535 // interface. The link resolver checks this but only for the first 2536 // time this interface is called. 2537 if (i == int2->itable_length()) { 2538 VM_JAVA_ERROR(vmSymbols::java_lang_IncompatibleClassChangeError(), "", note_no_trap); 2539 } 2540 int mindex = cache->f2_as_index(); 2541 itableMethodEntry* im = ki->first_method_entry(rcvr->klass()); 2542 callee = im[mindex].method(); 2543 if (callee == NULL) { 2544 VM_JAVA_ERROR(vmSymbols::java_lang_AbstractMethodError(), "", note_no_trap); 2545 } 2546 2547 // Profile virtual call. 2548 BI_PROFILE_UPDATE_VIRTUALCALL(rcvr->klass()); 2549 2550 istate->set_callee(callee); 2551 istate->set_callee_entry_point(callee->from_interpreted_entry()); 2552 #ifdef VM_JVMTI 2553 if (JvmtiExport::can_post_interpreter_events() && THREAD->is_interp_only_mode()) { 2554 istate->set_callee_entry_point(callee->interpreter_entry()); 2555 } 2556 #endif /* VM_JVMTI */ 2557 istate->set_bcp_advance(5); 2558 UPDATE_PC_AND_RETURN(0); // I'll be back... 2559 } 2560 2561 CASE(_invokevirtual): 2562 CASE(_invokespecial): 2563 CASE(_invokestatic): { 2564 u2 index = Bytes::get_native_u2(pc+1); 2565 2566 ConstantPoolCacheEntry* cache = cp->entry_at(index); 2567 // QQQ Need to make this as inlined as possible. Probably need to split all the bytecode cases 2568 // out so c++ compiler has a chance for constant prop to fold everything possible away. 2569 2570 if (!cache->is_resolved((Bytecodes::Code)opcode)) { 2571 CALL_VM(InterpreterRuntime::resolve_from_cache(THREAD, (Bytecodes::Code)opcode), 2572 handle_exception); 2573 cache = cp->entry_at(index); 2574 } 2575 2576 istate->set_msg(call_method); 2577 { 2578 Method* callee; 2579 if ((Bytecodes::Code)opcode == Bytecodes::_invokevirtual) { 2580 CHECK_NULL(STACK_OBJECT(-(cache->parameter_size()))); 2581 if (cache->is_vfinal()) { 2582 callee = cache->f2_as_vfinal_method(); 2583 // Profile final call. 
2584 BI_PROFILE_UPDATE_FINALCALL(); 2585 } else { 2586 // get receiver 2587 int parms = cache->parameter_size(); 2588 // this works but needs a resourcemark and seems to create a vtable on every call: 2589 // Method* callee = rcvr->klass()->vtable()->method_at(cache->f2_as_index()); 2590 // 2591 // this fails with an assert 2592 // InstanceKlass* rcvrKlass = InstanceKlass::cast(STACK_OBJECT(-parms)->klass()); 2593 // but this works 2594 oop rcvr = STACK_OBJECT(-parms); 2595 VERIFY_OOP(rcvr); 2596 InstanceKlass* rcvrKlass = (InstanceKlass*)rcvr->klass(); 2597 /* 2598 Executing this code in java.lang.String: 2599 public String(char value[]) { 2600 this.count = value.length; 2601 this.value = (char[])value.clone(); 2602 } 2603 2604 a find on rcvr->klass() reports: 2605 {type array char}{type array class} 2606 - klass: {other class} 2607 2608 but using InstanceKlass::cast(STACK_OBJECT(-parms)->klass()) causes an assertion failure 2609 because rcvr->klass()->is_instance_klass() == 0 2610 However it seems to have a vtable in the right location. Huh? 2611 Because vtables have the same offset for ArrayKlass and InstanceKlass. 2612 */ 2613 callee = (Method*) rcvrKlass->start_of_vtable()[ cache->f2_as_index()]; 2614 // Profile virtual call. 2615 BI_PROFILE_UPDATE_VIRTUALCALL(rcvr->klass()); 2616 } 2617 } else { 2618 if ((Bytecodes::Code)opcode == Bytecodes::_invokespecial) { 2619 CHECK_NULL(STACK_OBJECT(-(cache->parameter_size()))); 2620 } 2621 callee = cache->f1_as_method(); 2622 2623 // Profile call. 2624 BI_PROFILE_UPDATE_CALL(); 2625 } 2626 2627 istate->set_callee(callee); 2628 istate->set_callee_entry_point(callee->from_interpreted_entry()); 2629 #ifdef VM_JVMTI 2630 if (JvmtiExport::can_post_interpreter_events() && THREAD->is_interp_only_mode()) { 2631 istate->set_callee_entry_point(callee->interpreter_entry()); 2632 } 2633 #endif /* VM_JVMTI */ 2634 istate->set_bcp_advance(3); 2635 UPDATE_PC_AND_RETURN(0); // I'll be back... 2636 } 2637 } 2638 2639 /* Allocate memory for a new array. */ 2640 2641 CASE(_newarray): { 2642 BasicType atype = (BasicType) *(pc+1); 2643 jint size = STACK_INT(-1); 2644 CALL_VM(InterpreterRuntime::newarray(THREAD, atype, size), 2645 handle_exception); 2646 // Must prevent reordering of stores for object initialization 2647 // with stores that publish the new object. 2648 OrderAccess::storestore(); 2649 SET_STACK_OBJECT(THREAD->vm_result(), -1); 2650 THREAD->set_vm_result(NULL); 2651 2652 UPDATE_PC_AND_CONTINUE(2); 2653 } 2654 2655 /* Throw an exception. */ 2656 2657 CASE(_athrow): { 2658 oop except_oop = STACK_OBJECT(-1); 2659 CHECK_NULL(except_oop); 2660 // set pending_exception so we use common code 2661 THREAD->set_pending_exception(except_oop, NULL, 0); 2662 goto handle_exception; 2663 } 2664 2665 /* goto and jsr. They are exactly the same except jsr pushes 2666 * the address of the next instruction first. 2667 */ 2668 2669 CASE(_jsr): { 2670 /* push bytecode index on stack */ 2671 SET_STACK_ADDR(((address)pc - (intptr_t)(istate->method()->code_base()) + 3), 0); 2672 MORE_STACK(1); 2673 /* FALL THROUGH */ 2674 } 2675 2676 CASE(_goto): 2677 { 2678 int16_t offset = (int16_t)Bytes::get_Java_u2(pc + 1); 2679 // Profile jump.
2680 BI_PROFILE_UPDATE_JUMP(); 2681 address branch_pc = pc; 2682 UPDATE_PC(offset); 2683 DO_BACKEDGE_CHECKS(offset, branch_pc); 2684 CONTINUE; 2685 } 2686 2687 CASE(_jsr_w): { 2688 /* push return address on the stack */ 2689 SET_STACK_ADDR(((address)pc - (intptr_t)(istate->method()->code_base()) + 5), 0); 2690 MORE_STACK(1); 2691 /* FALL THROUGH */ 2692 } 2693 2694 CASE(_goto_w): 2695 { 2696 int32_t offset = Bytes::get_Java_u4(pc + 1); 2697 // Profile jump. 2698 BI_PROFILE_UPDATE_JUMP(); 2699 address branch_pc = pc; 2700 UPDATE_PC(offset); 2701 DO_BACKEDGE_CHECKS(offset, branch_pc); 2702 CONTINUE; 2703 } 2704 2705 /* return from a jsr or jsr_w */ 2706 2707 CASE(_ret): { 2708 // Profile ret. 2709 BI_PROFILE_UPDATE_RET(/*bci=*/((int)(intptr_t)(LOCALS_ADDR(pc[1])))); 2710 // Now, update the pc. 2711 pc = istate->method()->code_base() + (intptr_t)(LOCALS_ADDR(pc[1])); 2712 UPDATE_PC_AND_CONTINUE(0); 2713 } 2714 2715 /* debugger breakpoint */ 2716 2717 CASE(_breakpoint): { 2718 Bytecodes::Code original_bytecode; 2719 DECACHE_STATE(); 2720 SET_LAST_JAVA_FRAME(); 2721 original_bytecode = InterpreterRuntime::get_original_bytecode_at(THREAD, 2722 METHOD, pc); 2723 RESET_LAST_JAVA_FRAME(); 2724 CACHE_STATE(); 2725 if (THREAD->has_pending_exception()) goto handle_exception; 2726 CALL_VM(InterpreterRuntime::_breakpoint(THREAD, METHOD, pc), 2727 handle_exception); 2728 2729 opcode = (jubyte)original_bytecode; 2730 goto opcode_switch; 2731 } 2732 2733 DEFAULT: 2734 fatal("Unimplemented opcode %d = %s", opcode, 2735 Bytecodes::name((Bytecodes::Code)opcode)); 2736 goto finish; 2737 2738 } /* switch(opc) */ 2739 2740 2741 #ifdef USELABELS 2742 check_for_exception: 2743 #endif 2744 { 2745 if (!THREAD->has_pending_exception()) { 2746 CONTINUE; 2747 } 2748 /* We will be gcsafe soon, so flush our state. */ 2749 DECACHE_PC(); 2750 goto handle_exception; 2751 } 2752 do_continue: ; 2753 2754 } /* while (1) interpreter loop */ 2755 2756 2757 // An exception exists in the thread state; see whether this activation can handle it 2758 handle_exception: { 2759 2760 HandleMarkCleaner __hmc(THREAD); 2761 Handle except_oop(THREAD, THREAD->pending_exception()); 2762 // Prevent any subsequent HandleMarkCleaner in the VM 2763 // from freeing the except_oop handle. 2764 HandleMark __hm(THREAD); 2765 2766 THREAD->clear_pending_exception(); 2767 assert(except_oop(), "No exception to process"); 2768 intptr_t continuation_bci; 2769 // expression stack is emptied 2770 topOfStack = istate->stack_base() - Interpreter::stackElementWords; 2771 CALL_VM(continuation_bci = (intptr_t)InterpreterRuntime::exception_handler_for_exception(THREAD, except_oop()), 2772 handle_exception); 2773 2774 except_oop = THREAD->vm_result(); 2775 THREAD->set_vm_result(NULL); 2776 if (continuation_bci >= 0) { 2777 // Place exception on top of stack 2778 SET_STACK_OBJECT(except_oop(), 0); 2779 MORE_STACK(1); 2780 pc = METHOD->code_base() + continuation_bci; 2781 if (TraceExceptions) { 2782 ttyLocker ttyl; 2783 ResourceMark rm; 2784 tty->print_cr("Exception <%s> (" INTPTR_FORMAT ")", except_oop->print_value_string(), p2i(except_oop())); 2785 tty->print_cr(" thrown in interpreter method <%s>", METHOD->print_value_string()); 2786 tty->print_cr(" at bci %d, continuing at %d for thread " INTPTR_FORMAT, 2787 (int)(istate->bcp() - METHOD->code_base()), 2788 (int)continuation_bci, p2i(THREAD)); 2789 } 2790 // for AbortVMOnException flag 2791 Exceptions::debug_check_abort(except_oop); 2792 2793 // Update profiling data.
2794 BI_PROFILE_ALIGN_TO_CURRENT_BCI(); 2795 goto run; 2796 } 2797 if (TraceExceptions) { 2798 ttyLocker ttyl; 2799 ResourceMark rm; 2800 tty->print_cr("Exception <%s> (" INTPTR_FORMAT ")", except_oop->print_value_string(), p2i(except_oop())); 2801 tty->print_cr(" thrown in interpreter method <%s>", METHOD->print_value_string()); 2802 tty->print_cr(" at bci %d, unwinding for thread " INTPTR_FORMAT, 2803 (int)(istate->bcp() - METHOD->code_base()), 2804 p2i(THREAD)); 2805 } 2806 // for AbortVMOnException flag 2807 Exceptions::debug_check_abort(except_oop); 2808 2809 // No handler in this activation, unwind and try again 2810 THREAD->set_pending_exception(except_oop(), NULL, 0); 2811 goto handle_return; 2812 } // handle_exception: 2813 2814 // Return from an interpreter invocation with the result of the interpretation 2815 // on the top of the Java Stack (or a pending exception) 2816 2817 handle_Pop_Frame: { 2818 2819 // We don't really do anything special here except we must be aware 2820 // that we can get here without ever locking the method (if sync). 2821 // Also we skip the notification of the exit. 2822 2823 istate->set_msg(popping_frame); 2824 // Clear pending so that while the pop is in process 2825 // we don't start another one if a call_vm is done. 2826 THREAD->clr_pop_frame_pending(); 2827 // Let the interpreter (only) see that we're in the process of popping a frame 2828 THREAD->set_pop_frame_in_process(); 2829 2830 goto handle_return; 2831 2832 } // handle_Pop_Frame 2833 2834 // ForceEarlyReturn ends a method, and returns to the caller with a return value 2835 // given by the invoker of the early return. 2836 handle_Early_Return: { 2837 2838 istate->set_msg(early_return); 2839 2840 // Clear expression stack. 2841 topOfStack = istate->stack_base() - Interpreter::stackElementWords; 2842 2843 JvmtiThreadState *ts = THREAD->jvmti_thread_state(); 2844 2845 // Push the value to be returned. 2846 switch (istate->method()->result_type()) { 2847 case T_BOOLEAN: 2848 case T_SHORT: 2849 case T_BYTE: 2850 case T_CHAR: 2851 case T_INT: 2852 SET_STACK_INT(ts->earlyret_value().i, 0); 2853 MORE_STACK(1); 2854 break; 2855 case T_LONG: 2856 SET_STACK_LONG(ts->earlyret_value().j, 1); 2857 MORE_STACK(2); 2858 break; 2859 case T_FLOAT: 2860 SET_STACK_FLOAT(ts->earlyret_value().f, 0); 2861 MORE_STACK(1); 2862 break; 2863 case T_DOUBLE: 2864 SET_STACK_DOUBLE(ts->earlyret_value().d, 1); 2865 MORE_STACK(2); 2866 break; 2867 case T_ARRAY: 2868 case T_OBJECT: 2869 SET_STACK_OBJECT(ts->earlyret_oop(), 0); 2870 MORE_STACK(1); 2871 break; 2872 } 2873 2874 ts->clr_earlyret_value(); 2875 ts->set_earlyret_oop(NULL); 2876 ts->clr_earlyret_pending(); 2877 2878 // Fall through to handle_return. 2879 2880 } // handle_Early_Return 2881 2882 handle_return: { 2883 // A storestore barrier is required to order initialization of 2884 // final fields with publishing the reference to the object that 2885 // holds the field. Without the barrier the value of final fields 2886 // can be observed to change.
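/*
 * Hypothetical illustration (not part of the original source) of the failure
 * mode the barrier below prevents: without it, a thread that reads the
 * published reference could still observe the default value of a final field.
 *
 *   class Box { final int v; Box() { v = 42; } }
 *   // writer thread:  shared = new Box();
 *   // reader thread:  Box b = shared; if (b != null) assert b.v == 42;
 */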
2887 OrderAccess::storestore(); 2888 2889 DECACHE_STATE(); 2890 2891 bool suppress_error = istate->msg() == popping_frame || istate->msg() == early_return; 2892 bool suppress_exit_event = THREAD->has_pending_exception() || istate->msg() == popping_frame; 2893 Handle original_exception(THREAD, THREAD->pending_exception()); 2894 Handle illegal_state_oop(THREAD, NULL); 2895 2896 // We'd like a HandleMark here to prevent any subsequent HandleMarkCleaner 2897 // in any following VM entries from freeing our live handles, but illegal_state_oop 2898 // isn't really allocated yet and so doesn't become live until later and 2899 // in unpredictable places. Instead we must protect the places where we enter the 2900 // VM. It would be much simpler (and safer) if we could allocate a real handle with 2901 // a NULL oop in it and then overwrite the oop later as needed. This, 2902 // unfortunately, isn't possible. 2903 2904 THREAD->clear_pending_exception(); 2905 2906 // 2907 // As far as we are concerned we have returned. If we have a pending exception 2908 // that will be returned as this invocation's result. However if we get any 2909 // exception(s) while checking monitor state one of those IllegalMonitorStateExceptions 2910 // will be our final result (i.e. monitor exception trumps a pending exception). 2911 // 2912 2913 // If we never locked the method (or really passed the point where we would have), 2914 // there is no need to unlock it (or look for other monitors), since that 2915 // could not have happened. 2916 2917 if (THREAD->do_not_unlock()) { 2918 2919 // Never locked, reset the flag now because obviously any caller must 2920 // have passed their point of locking for us to have gotten here. 2921 2922 THREAD->clr_do_not_unlock(); 2923 } else { 2924 // At this point we consider that we have returned. We now check that the 2925 // locks were properly block structured. If we find that they were not 2926 // used properly we will return with an illegal monitor exception. 2927 // The exception is checked by the caller not the callee since this 2928 // checking is considered to be part of the invocation and therefore 2929 // in the caller's scope (JVM spec 8.13). 2930 // 2931 // Another weird thing to watch for is if the method was locked 2932 // recursively and then not exited properly. This means we must 2933 // examine all the entries in reverse time (and stack) order and 2934 // unlock as we find them. If we find the method monitor before 2935 // we are at the initial entry then we should throw an exception. 2936 // It is not clear the template-based interpreter does this 2937 // correctly. 2938 2939 BasicObjectLock* base = istate->monitor_base(); 2940 BasicObjectLock* end = (BasicObjectLock*) istate->stack_base(); 2941 bool method_unlock_needed = METHOD->is_synchronized(); 2942 // We know the initial monitor was used for the method; don't check that 2943 // slot in the loop 2944 if (method_unlock_needed) base--; 2945 2946 // Check all the monitors to see that they are unlocked. Install exception if found to be locked.
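/*
 * Illustrative only (not part of the original source): javac never emits
 * unbalanced locking, but a handwritten class file can leave a monitor held
 * at return, e.g.
 *
 *   aload_0
 *   monitorenter
 *   return           // monitor never exited
 *
 * The loop below finds such a leftover entry, force-unlocks it, and installs
 * an IllegalMonitorStateException as the activation's result.
 */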
2947 while (end < base) { 2948 oop lockee = end->obj(); 2949 if (lockee != NULL) { 2950 BasicLock* lock = end->lock(); 2951 markOop header = lock->displaced_header(); 2952 end->set_obj(NULL); 2953 2954 if (!lockee->mark()->has_bias_pattern()) { 2955 // If it isn't recursive we either must swap old header or call the runtime 2956 if (header != NULL) { 2957 if (Atomic::cmpxchg_ptr(header, lockee->mark_addr(), lock) != lock) { 2958 // restore object for the slow case 2959 end->set_obj(lockee); 2960 { 2961 // Prevent any HandleMarkCleaner from freeing our live handles 2962 HandleMark __hm(THREAD); 2963 CALL_VM_NOCHECK(InterpreterRuntime::monitorexit(THREAD, end)); 2964 } 2965 } 2966 } 2967 } 2968 // One error is plenty 2969 if (illegal_state_oop() == NULL && !suppress_error) { 2970 { 2971 // Prevent any HandleMarkCleaner from freeing our live handles 2972 HandleMark __hm(THREAD); 2973 CALL_VM_NOCHECK(InterpreterRuntime::throw_illegal_monitor_state_exception(THREAD)); 2974 } 2975 assert(THREAD->has_pending_exception(), "Lost our exception!"); 2976 illegal_state_oop = THREAD->pending_exception(); 2977 THREAD->clear_pending_exception(); 2978 } 2979 } 2980 end++; 2981 } 2982 // Unlock the method if needed 2983 if (method_unlock_needed) { 2984 if (base->obj() == NULL) { 2985 // The method is already unlocked; this is not good. 2986 if (illegal_state_oop() == NULL && !suppress_error) { 2987 { 2988 // Prevent any HandleMarkCleaner from freeing our live handles 2989 HandleMark __hm(THREAD); 2990 CALL_VM_NOCHECK(InterpreterRuntime::throw_illegal_monitor_state_exception(THREAD)); 2991 } 2992 assert(THREAD->has_pending_exception(), "Lost our exception!"); 2993 illegal_state_oop = THREAD->pending_exception(); 2994 THREAD->clear_pending_exception(); 2995 } 2996 } else { 2997 // 2998 // The initial monitor is always used for the method 2999 // However if that slot is no longer the oop for the method, it was unlocked 3000 // and reused by something that wasn't unlocked! 3001 // 3002 // deopt can come in with rcvr dead because c2 knows 3003 // its value is preserved in the monitor. So we can't use locals[0] at all 3004 // and must use first monitor slot. 3005 // 3006 oop rcvr = base->obj(); 3007 if (rcvr == NULL) { 3008 if (!suppress_error) { 3009 VM_JAVA_ERROR_NO_JUMP(vmSymbols::java_lang_NullPointerException(), "", note_nullCheck_trap); 3010 illegal_state_oop = THREAD->pending_exception(); 3011 THREAD->clear_pending_exception(); 3012 } 3013 } else if (UseHeavyMonitors) { 3014 { 3015 // Prevent any HandleMarkCleaner from freeing our live handles.
3016 HandleMark __hm(THREAD); 3017 CALL_VM_NOCHECK(InterpreterRuntime::monitorexit(THREAD, base)); 3018 } 3019 if (THREAD->has_pending_exception()) { 3020 if (!suppress_error) illegal_state_oop = THREAD->pending_exception(); 3021 THREAD->clear_pending_exception(); 3022 } 3023 } else { 3024 BasicLock* lock = base->lock(); 3025 markOop header = lock->displaced_header(); 3026 base->set_obj(NULL); 3027 3028 if (!rcvr->mark()->has_bias_pattern()) { 3029 base->set_obj(NULL); 3030 // If it isn't recursive we either must swap old header or call the runtime 3031 if (header != NULL) { 3032 if (Atomic::cmpxchg_ptr(header, rcvr->mark_addr(), lock) != lock) { 3033 // restore object for the slow case 3034 base->set_obj(rcvr); 3035 { 3036 // Prevent any HandleMarkCleaner from freeing our live handles 3037 HandleMark __hm(THREAD); 3038 CALL_VM_NOCHECK(InterpreterRuntime::monitorexit(THREAD, base)); 3039 } 3040 if (THREAD->has_pending_exception()) { 3041 if (!suppress_error) illegal_state_oop = THREAD->pending_exception(); 3042 THREAD->clear_pending_exception(); 3043 } 3044 } 3045 } 3046 } 3047 } 3048 } 3049 } 3050 } 3051 // Clear the do_not_unlock flag now. 3052 THREAD->clr_do_not_unlock(); 3053 3054 // 3055 // Notify jvmti/jvmdi 3056 // 3057 // NOTE: we do not notify a method_exit if we have a pending exception, 3058 // including an exception we generate for unlocking checks. In the former 3059 // case, JVMDI has already been notified by our call for the exception handler 3060 // and in both cases as far as JVMDI is concerned we have already returned. 3061 // If we notify it again JVMDI will be all confused about how many frames 3062 // are still on the stack (4340444). 3063 // 3064 // NOTE Further! It turns out the JVMTI spec in fact expects to see 3065 // method_exit events whenever we leave an activation unless it was done 3066 // for popframe. This is nothing like jvmdi. However we are passing the 3067 // tests at the moment (apparently because they are jvmdi based) so rather 3068 // than change this code and possibly fail tests we will leave it alone 3069 // (with this note) in anticipation of changing the vm and the tests 3070 // simultaneously. 3071 3072 3073 // 3074 suppress_exit_event = suppress_exit_event || illegal_state_oop() != NULL; 3075 3076 3077 3078 #ifdef VM_JVMTI 3079 if (_jvmti_interp_events) { 3080 // Whenever JVMTI puts a thread in interp_only_mode, method 3081 // entry/exit events are sent for that thread to track stack depth. 3082 if ( !suppress_exit_event && THREAD->is_interp_only_mode() ) { 3083 { 3084 // Prevent any HandleMarkCleaner from freeing our live handles 3085 HandleMark __hm(THREAD); 3086 CALL_VM_NOCHECK(InterpreterRuntime::post_method_exit(THREAD)); 3087 } 3088 } 3089 } 3090 #endif /* VM_JVMTI */ 3091 3092 // 3093 // See if we are returning any exception 3094 // A pending exception that was pending prior to a possible popping frame 3095 // overrides the popping frame. 3096 // 3097 assert(!suppress_error || (suppress_error && illegal_state_oop() == NULL), "Error was not suppressed"); 3098 if (illegal_state_oop() != NULL || original_exception() != NULL) { 3099 // Inform the frame manager we have no result.
3100 istate->set_msg(throwing_exception); 3101 if (illegal_state_oop() != NULL) 3102 THREAD->set_pending_exception(illegal_state_oop(), NULL, 0); 3103 else 3104 THREAD->set_pending_exception(original_exception(), NULL, 0); 3105 UPDATE_PC_AND_RETURN(0); 3106 } 3107 3108 if (istate->msg() == popping_frame) { 3109 // Make it simpler on the assembly code and set the message for the frame pop. 3110 // returns 3111 if (istate->prev() == NULL) { 3112 // We must be returning to a deoptimized frame (because popframe only happens between 3113 // two interpreted frames). We need to save the current arguments in C heap so that 3114 // the deoptimized frame when it restarts can copy the arguments to its expression 3115 // stack and re-execute the call. We also have to notify deoptimization that this 3116 // has occurred and to pick up the preserved args and copy them to the deoptimized frame's 3117 // java expression stack. Yuck. 3118 // 3119 THREAD->popframe_preserve_args(in_ByteSize(METHOD->size_of_parameters() * wordSize), 3120 LOCALS_SLOT(METHOD->size_of_parameters() - 1)); 3121 THREAD->set_popframe_condition_bit(JavaThread::popframe_force_deopt_reexecution_bit); 3122 } 3123 } else { 3124 istate->set_msg(return_from_method); 3125 } 3126 3127 // Normal return 3128 // Advance the pc and return to frame manager 3129 UPDATE_PC_AND_RETURN(1); 3130 } /* handle_return: */ 3131 3132 // This is really a fatal error return 3133 3134 finish: 3135 DECACHE_TOS(); 3136 DECACHE_PC(); 3137 3138 return; 3139 } 3140 3141 /* 3142 * All the code following this point is only produced once and is not present 3143 * in the JVMTI version of the interpreter 3144 */ 3145 3146 #ifndef VM_JVMTI 3147 3148 // This constructor should only be used to construct the object to signal 3149 // interpreter initialization. All other instances should be created by 3150 // the frame manager. 3151 BytecodeInterpreter::BytecodeInterpreter(messages msg) { 3152 if (msg != initialize) ShouldNotReachHere(); 3153 _msg = msg; 3154 _self_link = this; 3155 _prev_link = NULL; 3156 } 3157 3158 // Inline static functions for Java Stack and Local manipulation 3159 3160 // The implementations are platform dependent. We have to worry about alignment 3161 // issues on some machines which can change on the same platform depending on 3162 // whether it is an LP64 machine also.
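/*
 * Informal sketch of the slot convention these accessors assume (orientation
 * only, not part of the original source): offsets are relative to the current
 * top of stack, so the STACK_* macros used in the loop above pass -1 for the
 * top slot. Category-2 values (jlong/jdouble) occupy two adjacent slots and
 * go through VMJavaVal64. For example, after Java code such as
 *
 *   long l = 42L;   // one category-2 value = two expression stack slots
 *
 * the value is read with stack_long(tos, -1), while an int on top would be
 * read with stack_int(tos, -1).
 */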
3163 address BytecodeInterpreter::stack_slot(intptr_t *tos, int offset) { 3164 return (address) tos[Interpreter::expr_index_at(-offset)]; 3165 } 3166 3167 jint BytecodeInterpreter::stack_int(intptr_t *tos, int offset) { 3168 return *((jint*) &tos[Interpreter::expr_index_at(-offset)]); 3169 } 3170 3171 jfloat BytecodeInterpreter::stack_float(intptr_t *tos, int offset) { 3172 return *((jfloat *) &tos[Interpreter::expr_index_at(-offset)]); 3173 } 3174 3175 oop BytecodeInterpreter::stack_object(intptr_t *tos, int offset) { 3176 return cast_to_oop(tos [Interpreter::expr_index_at(-offset)]); 3177 } 3178 3179 jdouble BytecodeInterpreter::stack_double(intptr_t *tos, int offset) { 3180 return ((VMJavaVal64*) &tos[Interpreter::expr_index_at(-offset)])->d; 3181 } 3182 3183 jlong BytecodeInterpreter::stack_long(intptr_t *tos, int offset) { 3184 return ((VMJavaVal64 *) &tos[Interpreter::expr_index_at(-offset)])->l; 3185 } 3186 3187 // only used for value types 3188 void BytecodeInterpreter::set_stack_slot(intptr_t *tos, address value, 3189 int offset) { 3190 *((address *)&tos[Interpreter::expr_index_at(-offset)]) = value; 3191 } 3192 3193 void BytecodeInterpreter::set_stack_int(intptr_t *tos, int value, 3194 int offset) { 3195 *((jint *)&tos[Interpreter::expr_index_at(-offset)]) = value; 3196 } 3197 3198 void BytecodeInterpreter::set_stack_float(intptr_t *tos, jfloat value, 3199 int offset) { 3200 *((jfloat *)&tos[Interpreter::expr_index_at(-offset)]) = value; 3201 } 3202 3203 void BytecodeInterpreter::set_stack_object(intptr_t *tos, oop value, 3204 int offset) { 3205 *((oop *)&tos[Interpreter::expr_index_at(-offset)]) = value; 3206 } 3207 3208 // needs to be platform dep for the 32 bit platforms. 3209 void BytecodeInterpreter::set_stack_double(intptr_t *tos, jdouble value, 3210 int offset) { 3211 ((VMJavaVal64*)&tos[Interpreter::expr_index_at(-offset)])->d = value; 3212 } 3213 3214 void BytecodeInterpreter::set_stack_double_from_addr(intptr_t *tos, 3215 address addr, int offset) { 3216 (((VMJavaVal64*)&tos[Interpreter::expr_index_at(-offset)])->d = 3217 ((VMJavaVal64*)addr)->d); 3218 } 3219 3220 void BytecodeInterpreter::set_stack_long(intptr_t *tos, jlong value, 3221 int offset) { 3222 ((VMJavaVal64*)&tos[Interpreter::expr_index_at(-offset+1)])->l = 0xdeedbeeb; 3223 ((VMJavaVal64*)&tos[Interpreter::expr_index_at(-offset)])->l = value; 3224 } 3225 3226 void BytecodeInterpreter::set_stack_long_from_addr(intptr_t *tos, 3227 address addr, int offset) { 3228 ((VMJavaVal64*)&tos[Interpreter::expr_index_at(-offset+1)])->l = 0xdeedbeeb; 3229 ((VMJavaVal64*)&tos[Interpreter::expr_index_at(-offset)])->l = 3230 ((VMJavaVal64*)addr)->l; 3231 } 3232 3233 // Locals 3234 3235 address BytecodeInterpreter::locals_slot(intptr_t* locals, int offset) { 3236 return (address)locals[Interpreter::local_index_at(-offset)]; 3237 } 3238 jint BytecodeInterpreter::locals_int(intptr_t* locals, int offset) { 3239 return (jint)locals[Interpreter::local_index_at(-offset)]; 3240 } 3241 jfloat BytecodeInterpreter::locals_float(intptr_t* locals, int offset) { 3242 return (jfloat)locals[Interpreter::local_index_at(-offset)]; 3243 } 3244 oop BytecodeInterpreter::locals_object(intptr_t* locals, int offset) { 3245 return cast_to_oop(locals[Interpreter::local_index_at(-offset)]); 3246 } 3247 jdouble BytecodeInterpreter::locals_double(intptr_t* locals, int offset) { 3248 return ((VMJavaVal64*)&locals[Interpreter::local_index_at(-(offset+1))])->d; 3249 } 3250 jlong BytecodeInterpreter::locals_long(intptr_t* locals, int offset) { 3251 return 
// Locals

address BytecodeInterpreter::locals_slot(intptr_t* locals, int offset) {
  return (address)locals[Interpreter::local_index_at(-offset)];
}
jint BytecodeInterpreter::locals_int(intptr_t* locals, int offset) {
  return (jint)locals[Interpreter::local_index_at(-offset)];
}
jfloat BytecodeInterpreter::locals_float(intptr_t* locals, int offset) {
  // Reinterpret the slot's bits as a float (as stack_float does above); a
  // value cast would numerically convert the raw slot contents instead of
  // preserving the stored bit pattern.
  return *((jfloat*)&locals[Interpreter::local_index_at(-offset)]);
}
oop BytecodeInterpreter::locals_object(intptr_t* locals, int offset) {
  return cast_to_oop(locals[Interpreter::local_index_at(-offset)]);
}
jdouble BytecodeInterpreter::locals_double(intptr_t* locals, int offset) {
  return ((VMJavaVal64*)&locals[Interpreter::local_index_at(-(offset+1))])->d;
}
jlong BytecodeInterpreter::locals_long(intptr_t* locals, int offset) {
  return ((VMJavaVal64*)&locals[Interpreter::local_index_at(-(offset+1))])->l;
}

// Returns the address of the locals value.
address BytecodeInterpreter::locals_long_at(intptr_t* locals, int offset) {
  return ((address)&locals[Interpreter::local_index_at(-(offset+1))]);
}
address BytecodeInterpreter::locals_double_at(intptr_t* locals, int offset) {
  return ((address)&locals[Interpreter::local_index_at(-(offset+1))]);
}

// Used for local value or returnAddress
void BytecodeInterpreter::set_locals_slot(intptr_t *locals,
                                          address value, int offset) {
  *((address*)&locals[Interpreter::local_index_at(-offset)]) = value;
}
void BytecodeInterpreter::set_locals_int(intptr_t *locals,
                                         jint value, int offset) {
  *((jint *)&locals[Interpreter::local_index_at(-offset)]) = value;
}
void BytecodeInterpreter::set_locals_float(intptr_t *locals,
                                           jfloat value, int offset) {
  *((jfloat *)&locals[Interpreter::local_index_at(-offset)]) = value;
}
void BytecodeInterpreter::set_locals_object(intptr_t *locals,
                                            oop value, int offset) {
  *((oop *)&locals[Interpreter::local_index_at(-offset)]) = value;
}
void BytecodeInterpreter::set_locals_double(intptr_t *locals,
                                            jdouble value, int offset) {
  ((VMJavaVal64*)&locals[Interpreter::local_index_at(-(offset+1))])->d = value;
}
void BytecodeInterpreter::set_locals_long(intptr_t *locals,
                                          jlong value, int offset) {
  ((VMJavaVal64*)&locals[Interpreter::local_index_at(-(offset+1))])->l = value;
}
void BytecodeInterpreter::set_locals_double_from_addr(intptr_t *locals,
                                                      address addr, int offset) {
  ((VMJavaVal64*)&locals[Interpreter::local_index_at(-(offset+1))])->d = ((VMJavaVal64*)addr)->d;
}
void BytecodeInterpreter::set_locals_long_from_addr(intptr_t *locals,
                                                    address addr, int offset) {
  ((VMJavaVal64*)&locals[Interpreter::local_index_at(-(offset+1))])->l = ((VMJavaVal64*)addr)->l;
}

void BytecodeInterpreter::astore(intptr_t* tos,    int stack_offset,
                                 intptr_t* locals, int locals_offset) {
  intptr_t value = tos[Interpreter::expr_index_at(-stack_offset)];
  locals[Interpreter::local_index_at(-locals_offset)] = value;
}
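// Illustrative sketch (not compiled): longs and doubles occupy two local
// slots, which is why the 64-bit accessors above index at -(offset+1).
// Storing a long at local index 2 therefore claims slots 2 and 3. The
// function name is hypothetical.
#if 0
static void long_local_round_trip_sketch(intptr_t* locals) {
  BytecodeInterpreter::set_locals_long(locals, (jlong)42, 2); // writes slots 2 and 3
  jlong v = BytecodeInterpreter::locals_long(locals, 2);      // reads both slots back
  assert(v == 42, "round trip through a two-slot local");
}
#endif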
void BytecodeInterpreter::copy_stack_slot(intptr_t *tos, int from_offset,
                                          int to_offset) {
  tos[Interpreter::expr_index_at(-to_offset)] =
                      (intptr_t)tos[Interpreter::expr_index_at(-from_offset)];
}

void BytecodeInterpreter::dup(intptr_t *tos) {
  copy_stack_slot(tos, -1, 0);
}

void BytecodeInterpreter::dup2(intptr_t *tos) {
  copy_stack_slot(tos, -2, 0);
  copy_stack_slot(tos, -1, 1);
}

void BytecodeInterpreter::dup_x1(intptr_t *tos) {
  /* insert top word two down */
  copy_stack_slot(tos, -1, 0);
  copy_stack_slot(tos, -2, -1);
  copy_stack_slot(tos, 0, -2);
}

void BytecodeInterpreter::dup_x2(intptr_t *tos) {
  /* insert top word three down */
  copy_stack_slot(tos, -1, 0);
  copy_stack_slot(tos, -2, -1);
  copy_stack_slot(tos, -3, -2);
  copy_stack_slot(tos, 0, -3);
}

void BytecodeInterpreter::dup2_x1(intptr_t *tos) {
  /* insert top 2 slots three down */
  copy_stack_slot(tos, -1, 1);
  copy_stack_slot(tos, -2, 0);
  copy_stack_slot(tos, -3, -1);
  copy_stack_slot(tos, 1, -2);
  copy_stack_slot(tos, 0, -3);
}

void BytecodeInterpreter::dup2_x2(intptr_t *tos) {
  /* insert top 2 slots four down */
  copy_stack_slot(tos, -1, 1);
  copy_stack_slot(tos, -2, 0);
  copy_stack_slot(tos, -3, -1);
  copy_stack_slot(tos, -4, -2);
  copy_stack_slot(tos, 1, -3);
  copy_stack_slot(tos, 0, -4);
}

void BytecodeInterpreter::swap(intptr_t *tos) {
  // swap top two elements
  intptr_t val = tos[Interpreter::expr_index_at(1)];
  // Copy -2 entry to -1
  copy_stack_slot(tos, -2, -1);
  // Store saved -1 entry into -2
  tos[Interpreter::expr_index_at(2)] = val;
}
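// Illustrative sketch (not compiled): the dup/swap helpers address slots
// relative to a tos that has not yet been bumped for the value being pushed,
// so offset 0 names the next free slot and -1 the current top. dup copies
// the current top (-1) into the free slot (0), and the dispatch loop then
// advances tos by one. This walkthrough is an assumption for exposition;
// the function name is hypothetical.
#if 0
static void dup_sketch(intptr_t* tos) {
  // stack before: ..., v          (v at offset -1)
  BytecodeInterpreter::dup(tos);
  // stack after the caller bumps tos by one slot: ..., v, v
}
#endif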
// --------------------------------------------------------------------------------
// Non-product code
#ifndef PRODUCT

const char* BytecodeInterpreter::C_msg(BytecodeInterpreter::messages msg) {
  switch (msg) {
     case BytecodeInterpreter::no_request:  return("no_request");
     case BytecodeInterpreter::initialize:  return("initialize");
     // status message to C++ interpreter
     case BytecodeInterpreter::method_entry:  return("method_entry");
     case BytecodeInterpreter::method_resume:  return("method_resume");
     case BytecodeInterpreter::got_monitors:  return("got_monitors");
     case BytecodeInterpreter::rethrow_exception:  return("rethrow_exception");
     // requests to frame manager from C++ interpreter
     case BytecodeInterpreter::call_method:  return("call_method");
     case BytecodeInterpreter::return_from_method:  return("return_from_method");
     case BytecodeInterpreter::more_monitors:  return("more_monitors");
     case BytecodeInterpreter::throwing_exception:  return("throwing_exception");
     case BytecodeInterpreter::popping_frame:  return("popping_frame");
     case BytecodeInterpreter::do_osr:  return("do_osr");
     // deopt
     case BytecodeInterpreter::deopt_resume:  return("deopt_resume");
     case BytecodeInterpreter::deopt_resume2:  return("deopt_resume2");
     default: return("BAD MSG");
  }
}

void BytecodeInterpreter::print() {
  tty->print_cr("thread: " INTPTR_FORMAT, (uintptr_t) this->_thread);
  tty->print_cr("bcp: " INTPTR_FORMAT, (uintptr_t) this->_bcp);
  tty->print_cr("locals: " INTPTR_FORMAT, (uintptr_t) this->_locals);
  tty->print_cr("constants: " INTPTR_FORMAT, (uintptr_t) this->_constants);
  {
    ResourceMark rm;
    char *method_name = _method->name_and_sig_as_C_string();
    tty->print_cr("method: " INTPTR_FORMAT "[ %s ]", (uintptr_t) this->_method, method_name);
  }
  tty->print_cr("mdx: " INTPTR_FORMAT, (uintptr_t) this->_mdx);
  tty->print_cr("stack: " INTPTR_FORMAT, (uintptr_t) this->_stack);
  tty->print_cr("msg: %s", C_msg(this->_msg));
  tty->print_cr("result_to_call._callee: " INTPTR_FORMAT, (uintptr_t) this->_result._to_call._callee);
  tty->print_cr("result_to_call._callee_entry_point: " INTPTR_FORMAT, (uintptr_t) this->_result._to_call._callee_entry_point);
  tty->print_cr("result_to_call._bcp_advance: %d ", this->_result._to_call._bcp_advance);
  tty->print_cr("osr._osr_buf: " INTPTR_FORMAT, (uintptr_t) this->_result._osr._osr_buf);
  tty->print_cr("osr._osr_entry: " INTPTR_FORMAT, (uintptr_t) this->_result._osr._osr_entry);
  tty->print_cr("prev_link: " INTPTR_FORMAT, (uintptr_t) this->_prev_link);
  tty->print_cr("native_mirror: " INTPTR_FORMAT, (uintptr_t) p2i(this->_oop_temp));
  tty->print_cr("stack_base: " INTPTR_FORMAT, (uintptr_t) this->_stack_base);
  tty->print_cr("stack_limit: " INTPTR_FORMAT, (uintptr_t) this->_stack_limit);
  tty->print_cr("monitor_base: " INTPTR_FORMAT, (uintptr_t) this->_monitor_base);
#ifdef SPARC
  tty->print_cr("last_Java_pc: " INTPTR_FORMAT, (uintptr_t) this->_last_Java_pc);
  tty->print_cr("frame_bottom: " INTPTR_FORMAT, (uintptr_t) this->_frame_bottom);
  tty->print_cr("&native_fresult: " INTPTR_FORMAT, (uintptr_t) &this->_native_fresult);
  tty->print_cr("native_lresult: " INTPTR_FORMAT, (uintptr_t) this->_native_lresult);
#endif // SPARC
#if !defined(ZERO) && defined(PPC)
  tty->print_cr("last_Java_fp: " INTPTR_FORMAT, (uintptr_t) this->_last_Java_fp);
#endif // !ZERO && PPC
  tty->print_cr("self_link: " INTPTR_FORMAT, (uintptr_t) this->_self_link);
}

extern "C" {
  // Debugger convenience hook: from a native debugger, dump the interpreter
  // state at arg, e.g. "call PI((uintptr_t)istate)" in gdb.
  void PI(uintptr_t arg) {
    ((BytecodeInterpreter*)arg)->print();
  }
}
#endif // PRODUCT

#endif // VM_JVMTI
#endif // CC_INTERP