1 /*
2 * Copyright (c) 2002, 2015, Oracle and/or its affiliates. All rights reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 // no precompiled headers
26 #include "classfile/vmSymbols.hpp"
27 #include "gc/shared/collectedHeap.hpp"
28 #include "interpreter/bytecodeHistogram.hpp"
29 #include "interpreter/bytecodeInterpreter.hpp"
30 #include "interpreter/bytecodeInterpreter.inline.hpp"
31 #include "interpreter/bytecodeInterpreterProfiling.hpp"
32 #include "interpreter/interpreter.hpp"
33 #include "interpreter/interpreterRuntime.hpp"
34 #include "memory/resourceArea.hpp"
35 #include "oops/methodCounters.hpp"
36 #include "oops/objArrayKlass.hpp"
37 #include "oops/objArrayOop.inline.hpp"
38 #include "oops/oop.inline.hpp"
39 #include "prims/jvmtiExport.hpp"
40 #include "prims/jvmtiThreadState.hpp"
41 #include "runtime/atomic.inline.hpp"
42 #include "runtime/biasedLocking.hpp"
43 #include "runtime/frame.inline.hpp"
44 #include "runtime/handles.inline.hpp"
45 #include "runtime/interfaceSupport.hpp"
46 #include "runtime/orderAccess.inline.hpp"
47 #include "runtime/sharedRuntime.hpp"
48 #include "runtime/threadCritical.hpp"
49 #include "utilities/exceptions.hpp"
50
52 #ifdef CC_INTERP
53
54 /*
55 * USELABELS - If using GCC, then use labels for the opcode dispatching
* rather than a switch statement. This improves performance because it
57 * gives us the opportunity to have the instructions that calculate the
58 * next opcode to jump to be intermixed with the rest of the instructions
59 * that implement the opcode (see UPDATE_PC_AND_TOS_AND_CONTINUE macro).
60 */
61 #undef USELABELS
62 #ifdef __GNUC__
63 /*
ASSERT signifies debugging. It is much easier to step through bytecodes if we
don't use the computed goto approach.
66 */
67 #ifndef ASSERT
68 #define USELABELS
69 #endif
70 #endif
71
72 #undef CASE
73 #ifdef USELABELS
74 #define CASE(opcode) opc ## opcode
75 #define DEFAULT opc_default
76 #else
77 #define CASE(opcode) case Bytecodes:: opcode
78 #define DEFAULT default
79 #endif
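
// For orientation, a minimal sketch (illustrative only, not compiled) of how
// an opcode body expands under the two dispatch styles:
//
//   #ifdef USELABELS                       // computed goto (GCC &&label)
//     opc_nop: ...body...; goto *dispatch_table[*pc];
//   #else                                  // portable switch
//     case Bytecodes::_nop: ...body...; continue;
//   #endif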
80
81 /*
82 * PREFETCH_OPCCODE - Some compilers do better if you prefetch the next
* opcode before going back to the top of the while loop, rather than having
84 * the top of the while loop handle it. This provides a better opportunity
85 * for instruction scheduling. Some compilers just do this prefetch
86 * automatically. Some actually end up with worse performance if you
87 * force the prefetch. Solaris gcc seems to do better, but cc does worse.
88 */
89 #undef PREFETCH_OPCCODE
90 #define PREFETCH_OPCCODE
91
92 /*
Interpreter safepoint: it is expected that the interpreter will have no
handles of its own creation live at an interpreter safepoint. Therefore we
95 run a HandleMarkCleaner and trash all handles allocated in the call chain
96 since the JavaCalls::call_helper invocation that initiated the chain.
97 There really shouldn't be any handles remaining to trash but this is cheap
98 in relation to a safepoint.
99 */
100 #define SAFEPOINT \
101 if ( SafepointSynchronize::is_synchronizing()) { \
102 { \
103 /* zap freed handles rather than GC'ing them */ \
104 HandleMarkCleaner __hmc(THREAD); \
105 } \
106 CALL_VM(SafepointSynchronize::block(THREAD), handle_exception); \
107 }
108
109 /*
* VM_JAVA_ERROR - Macro for throwing a java exception from
* the interpreter loop. Should really be a CALL_VM, but there
* is no entry point to do the transition to the VM, so we just
* do it by hand here.
114 */
115 #define VM_JAVA_ERROR_NO_JUMP(name, msg, note_a_trap) \
116 DECACHE_STATE(); \
117 SET_LAST_JAVA_FRAME(); \
118 { \
119 InterpreterRuntime::note_a_trap(THREAD, istate->method(), BCI()); \
120 ThreadInVMfromJava trans(THREAD); \
121 Exceptions::_throw_msg(THREAD, __FILE__, __LINE__, name, msg); \
122 } \
123 RESET_LAST_JAVA_FRAME(); \
124 CACHE_STATE();
125
126 // Normal throw of a java error.
127 #define VM_JAVA_ERROR(name, msg, note_a_trap) \
128 VM_JAVA_ERROR_NO_JUMP(name, msg, note_a_trap) \
129 goto handle_exception;
130
131 #ifdef PRODUCT
132 #define DO_UPDATE_INSTRUCTION_COUNT(opcode)
133 #else
134 #define DO_UPDATE_INSTRUCTION_COUNT(opcode) \
135 { \
136 BytecodeCounter::_counter_value++; \
137 BytecodeHistogram::_counters[(Bytecodes::Code)opcode]++; \
138 if (StopInterpreterAt && StopInterpreterAt == BytecodeCounter::_counter_value) os::breakpoint(); \
139 if (TraceBytecodes) { \
140 CALL_VM((void)SharedRuntime::trace_bytecode(THREAD, 0, \
141 topOfStack[Interpreter::expr_index_at(1)], \
142 topOfStack[Interpreter::expr_index_at(2)]), \
143 handle_exception); \
144 } \
145 }
146 #endif
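
// In debug (non-PRODUCT) builds these counters back develop flags that help
// pin down a misbehaving bytecode, e.g. (illustrative invocation):
//
//   java -XX:+TraceBytecodes -XX:StopInterpreterAt=100000 ...
//
// which traces every bytecode and calls os::breakpoint() when the 100000th
// bytecode is executed.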
147
148 #undef DEBUGGER_SINGLE_STEP_NOTIFY
149 #ifdef VM_JVMTI
150 /* NOTE: (kbr) This macro must be called AFTER the PC has been
151 incremented. JvmtiExport::at_single_stepping_point() may cause a
152 breakpoint opcode to get inserted at the current PC to allow the
153 debugger to coalesce single-step events.
154
As a result, if we call at_single_stepping_point() we refetch the opcode
to get the current one. This will override any other prefetching
that might have occurred.
158 */
159 #define DEBUGGER_SINGLE_STEP_NOTIFY() \
160 { \
161 if (_jvmti_interp_events) { \
162 if (JvmtiExport::should_post_single_step()) { \
163 DECACHE_STATE(); \
164 SET_LAST_JAVA_FRAME(); \
165 ThreadInVMfromJava trans(THREAD); \
166 JvmtiExport::at_single_stepping_point(THREAD, \
167 istate->method(), \
168 pc); \
169 RESET_LAST_JAVA_FRAME(); \
170 CACHE_STATE(); \
171 if (THREAD->pop_frame_pending() && \
172 !THREAD->pop_frame_in_process()) { \
173 goto handle_Pop_Frame; \
174 } \
175 if (THREAD->jvmti_thread_state() && \
176 THREAD->jvmti_thread_state()->is_earlyret_pending()) { \
177 goto handle_Early_Return; \
178 } \
179 opcode = *pc; \
180 } \
181 } \
182 }
183 #else
184 #define DEBUGGER_SINGLE_STEP_NOTIFY()
185 #endif
186
187 /*
188 * CONTINUE - Macro for executing the next opcode.
189 */
190 #undef CONTINUE
191 #ifdef USELABELS
// Have to do the dispatch this way in C++ because otherwise gcc complains
// about crossing an initialization (namely, the initialization of the table
// pointer...).
194 #define DISPATCH(opcode) goto *(void*)dispatch_table[opcode]
195 #define CONTINUE { \
196 opcode = *pc; \
197 DO_UPDATE_INSTRUCTION_COUNT(opcode); \
198 DEBUGGER_SINGLE_STEP_NOTIFY(); \
199 DISPATCH(opcode); \
200 }
201 #else
202 #ifdef PREFETCH_OPCCODE
203 #define CONTINUE { \
204 opcode = *pc; \
205 DO_UPDATE_INSTRUCTION_COUNT(opcode); \
206 DEBUGGER_SINGLE_STEP_NOTIFY(); \
207 continue; \
208 }
209 #else
210 #define CONTINUE { \
211 DO_UPDATE_INSTRUCTION_COUNT(opcode); \
212 DEBUGGER_SINGLE_STEP_NOTIFY(); \
213 continue; \
214 }
215 #endif
216 #endif
217
218
219 #define UPDATE_PC(opsize) {pc += opsize; }
220 /*
221 * UPDATE_PC_AND_TOS - Macro for updating the pc and topOfStack.
222 */
223 #undef UPDATE_PC_AND_TOS
224 #define UPDATE_PC_AND_TOS(opsize, stack) \
225 {pc += opsize; MORE_STACK(stack); }
226
227 /*
228 * UPDATE_PC_AND_TOS_AND_CONTINUE - Macro for updating the pc and topOfStack,
229 * and executing the next opcode. It's somewhat similar to the combination
230 * of UPDATE_PC_AND_TOS and CONTINUE, but with some minor optimizations.
231 */
232 #undef UPDATE_PC_AND_TOS_AND_CONTINUE
233 #ifdef USELABELS
234 #define UPDATE_PC_AND_TOS_AND_CONTINUE(opsize, stack) { \
235 pc += opsize; opcode = *pc; MORE_STACK(stack); \
236 DO_UPDATE_INSTRUCTION_COUNT(opcode); \
237 DEBUGGER_SINGLE_STEP_NOTIFY(); \
238 DISPATCH(opcode); \
239 }
240
241 #define UPDATE_PC_AND_CONTINUE(opsize) { \
242 pc += opsize; opcode = *pc; \
243 DO_UPDATE_INSTRUCTION_COUNT(opcode); \
244 DEBUGGER_SINGLE_STEP_NOTIFY(); \
245 DISPATCH(opcode); \
246 }
247 #else
248 #ifdef PREFETCH_OPCCODE
249 #define UPDATE_PC_AND_TOS_AND_CONTINUE(opsize, stack) { \
250 pc += opsize; opcode = *pc; MORE_STACK(stack); \
251 DO_UPDATE_INSTRUCTION_COUNT(opcode); \
252 DEBUGGER_SINGLE_STEP_NOTIFY(); \
253 goto do_continue; \
254 }
255
256 #define UPDATE_PC_AND_CONTINUE(opsize) { \
257 pc += opsize; opcode = *pc; \
258 DO_UPDATE_INSTRUCTION_COUNT(opcode); \
259 DEBUGGER_SINGLE_STEP_NOTIFY(); \
260 goto do_continue; \
261 }
262 #else
263 #define UPDATE_PC_AND_TOS_AND_CONTINUE(opsize, stack) { \
264 pc += opsize; MORE_STACK(stack); \
265 DO_UPDATE_INSTRUCTION_COUNT(opcode); \
266 DEBUGGER_SINGLE_STEP_NOTIFY(); \
267 goto do_continue; \
268 }
269
270 #define UPDATE_PC_AND_CONTINUE(opsize) { \
271 pc += opsize; \
272 DO_UPDATE_INSTRUCTION_COUNT(opcode); \
273 DEBUGGER_SINGLE_STEP_NOTIFY(); \
274 goto do_continue; \
275 }
276 #endif /* PREFETCH_OPCCODE */
277 #endif /* USELABELS */
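
// Under USELABELS, for example, UPDATE_PC_AND_TOS_AND_CONTINUE(1, 1) in an
// opcode body becomes straight-line code that never returns to a loop head
// (illustrative expansion):
//
//   pc += 1; opcode = *pc; MORE_STACK(1);
//   /* instruction counting / single-step hooks */
//   goto *(void*)dispatch_table[opcode];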
278
// About to call a new method: save the adjusted pc and return to the frame manager.
280 #define UPDATE_PC_AND_RETURN(opsize) \
281 DECACHE_TOS(); \
282 istate->set_bcp(pc+opsize); \
283 return;
284
285
286 #define METHOD istate->method()
287 #define GET_METHOD_COUNTERS(res) \
288 res = METHOD->method_counters(); \
289 if (res == NULL) { \
290 CALL_VM(res = InterpreterRuntime::build_method_counters(THREAD, METHOD), handle_exception); \
291 }
292
293 #define OSR_REQUEST(res, branch_pc) \
294 CALL_VM(res=InterpreterRuntime::frequency_counter_overflow(THREAD, branch_pc), handle_exception);
295 /*
296 * For those opcodes that need to have a GC point on a backwards branch
297 */
298
// Backedge counting is kind of strange. The asm interpreter will increment
// the backedge counter as a separate counter but it does its comparisons
// to the sum (scaled) of invocation counter and backedge count to make
// a decision. Seems kind of odd to sum them together like that.
303
// skip is the delta from the current bcp/bci to the target; branch_pc is the pre-branch bcp.
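//
// Roughly, with ProfileInterpreter off, the OSR decision below compares the
// (scaled) sum of the two counters against a limit (illustrative sketch,
// scaling elided):
//
//   if (invocation_count + backedge_count >= limit) request OSR here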
305
306
307 #define DO_BACKEDGE_CHECKS(skip, branch_pc) \
308 if ((skip) <= 0) { \
309 MethodCounters* mcs; \
310 GET_METHOD_COUNTERS(mcs); \
311 if (UseLoopCounter) { \
312 bool do_OSR = UseOnStackReplacement; \
313 mcs->backedge_counter()->increment(); \
314 if (ProfileInterpreter) { \
315 BI_PROFILE_GET_OR_CREATE_METHOD_DATA(handle_exception); \
316 /* Check for overflow against MDO count. */ \
317 do_OSR = do_OSR \
318 && (mdo_last_branch_taken_count >= (uint)InvocationCounter::InterpreterBackwardBranchLimit)\
319 /* When ProfileInterpreter is on, the backedge_count comes */ \
320 /* from the methodDataOop, which value does not get reset on */ \
321 /* the call to frequency_counter_overflow(). To avoid */ \
322 /* excessive calls to the overflow routine while the method is */ \
323 /* being compiled, add a second test to make sure the overflow */ \
324 /* function is called only once every overflow_frequency. */ \
325 && (!(mdo_last_branch_taken_count & 1023)); \
326 } else { \
327 /* check for overflow of backedge counter */ \
328 do_OSR = do_OSR \
329 && mcs->invocation_counter()->reached_InvocationLimit(mcs->backedge_counter()); \
330 } \
331 if (do_OSR) { \
332 nmethod* osr_nmethod; \
333 OSR_REQUEST(osr_nmethod, branch_pc); \
334 if (osr_nmethod != NULL && osr_nmethod->is_in_use()) { \
335 intptr_t* buf; \
336 /* Call OSR migration with last java frame only, no checks. */ \
337 CALL_VM_NAKED_LJF(buf=SharedRuntime::OSR_migration_begin(THREAD)); \
338 istate->set_msg(do_osr); \
339 istate->set_osr_buf((address)buf); \
340 istate->set_osr_entry(osr_nmethod->osr_entry()); \
341 return; \
342 } \
343 } \
344 } /* UseCompiler ... */ \
345 SAFEPOINT; \
346 }
347
348 /*
349 * For those opcodes that need to have a GC point on a backwards branch
350 */
351
352 /*
353 * Macros for caching and flushing the interpreter state. Some local
354 * variables need to be flushed out to the frame before we do certain
355 * things (like pushing frames or becomming gc safe) and some need to
356 * be recached later (like after popping a frame). We could use one
357 * macro to cache or decache everything, but this would be less then
358 * optimal because we don't always need to cache or decache everything
359 * because some things we know are already cached or decached.
360 */
361 #undef DECACHE_TOS
362 #undef CACHE_TOS
363 #undef CACHE_PREV_TOS
364 #define DECACHE_TOS() istate->set_stack(topOfStack);
365
366 #define CACHE_TOS() topOfStack = (intptr_t *)istate->stack();
367
368 #undef DECACHE_PC
369 #undef CACHE_PC
370 #define DECACHE_PC() istate->set_bcp(pc);
371 #define CACHE_PC() pc = istate->bcp();
372 #define CACHE_CP() cp = istate->constants();
373 #define CACHE_LOCALS() locals = istate->locals();
374 #undef CACHE_FRAME
375 #define CACHE_FRAME()
376
377 // BCI() returns the current bytecode-index.
378 #undef BCI
379 #define BCI() ((int)(intptr_t)(pc - (intptr_t)istate->method()->code_base()))
380
381 /*
* CHECK_NULL - Macro for throwing a NullPointerException if the object
* passed in is a null reference.
* On some architectures/platforms it should be possible to do this implicitly.
385 */
386 #undef CHECK_NULL
387 #define CHECK_NULL(obj_) \
388 if ((obj_) == NULL) { \
389 VM_JAVA_ERROR(vmSymbols::java_lang_NullPointerException(), NULL, note_nullCheck_trap); \
390 } \
391 VERIFY_OOP(obj_)
392
393 #define VMdoubleConstZero() 0.0
394 #define VMdoubleConstOne() 1.0
395 #define VMlongConstZero() (max_jlong-max_jlong)
396 #define VMlongConstOne() ((max_jlong-max_jlong)+1)
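// (Presumably written as jlong arithmetic rather than literal 0/1 so the
// constants carry jlong type/width on every platform.)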
397
398 /*
399 * Alignment
400 */
401 #define VMalignWordUp(val) (((uintptr_t)(val) + 3) & ~3)
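// Rounds up to a 4-byte boundary, e.g. VMalignWordUp(5) == 8 and
// VMalignWordUp(8) == 8; used below to locate the padded data of
// tableswitch/lookupswitch.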
402
// Decache the interpreter state that the interpreter modifies directly (GC modifies it only indirectly).
404 #define DECACHE_STATE() DECACHE_PC(); DECACHE_TOS();
405
406 // Reload interpreter state after calling the VM or a possible GC
407 #define CACHE_STATE() \
408 CACHE_TOS(); \
409 CACHE_PC(); \
410 CACHE_CP(); \
411 CACHE_LOCALS();
412
413 // Call the VM with last java frame only.
414 #define CALL_VM_NAKED_LJF(func) \
415 DECACHE_STATE(); \
416 SET_LAST_JAVA_FRAME(); \
417 func; \
418 RESET_LAST_JAVA_FRAME(); \
419 CACHE_STATE();
420
421 // Call the VM. Don't check for pending exceptions.
422 #define CALL_VM_NOCHECK(func) \
423 CALL_VM_NAKED_LJF(func) \
424 if (THREAD->pop_frame_pending() && \
425 !THREAD->pop_frame_in_process()) { \
426 goto handle_Pop_Frame; \
427 } \
428 if (THREAD->jvmti_thread_state() && \
429 THREAD->jvmti_thread_state()->is_earlyret_pending()) { \
430 goto handle_Early_Return; \
431 }
432
433 // Call the VM and check for pending exceptions
434 #define CALL_VM(func, label) { \
435 CALL_VM_NOCHECK(func); \
436 if (THREAD->has_pending_exception()) goto label; \
437 }
438
439 /*
440 * BytecodeInterpreter::run(interpreterState istate)
441 * BytecodeInterpreter::runWithChecks(interpreterState istate)
442 *
* The real deal. This is where bytecodes actually get interpreted.
* Basically it's a big while loop that iterates until we return from
* the method passed in.
*
* runWithChecks is used if JVMTI is enabled.
448 *
449 */
450 #if defined(VM_JVMTI)
451 void
452 BytecodeInterpreter::runWithChecks(interpreterState istate) {
453 #else
454 void
455 BytecodeInterpreter::run(interpreterState istate) {
456 #endif
457
// In order to simplify some tests based on switches set at runtime,
// we invoke the interpreter a single time after the switches are enabled
// and set simpler-to-test variables rather than relying on method calls or
// complex boolean expressions.
462
463 static int initialized = 0;
464 static int checkit = 0;
465 static intptr_t* c_addr = NULL;
466 static intptr_t c_value;
467
468 if (checkit && *c_addr != c_value) {
469 os::breakpoint();
470 }
471 #ifdef VM_JVMTI
472 static bool _jvmti_interp_events = 0;
473 #endif
474
475 static int _compiling; // (UseCompiler || CountCompiledCalls)
476
477 #ifdef ASSERT
478 if (istate->_msg != initialize) {
479 assert(labs(istate->_stack_base - istate->_stack_limit) == (istate->_method->max_stack() + 1), "bad stack limit");
480 #ifndef SHARK
481 IA32_ONLY(assert(istate->_stack_limit == istate->_thread->last_Java_sp() + 1, "wrong"));
482 #endif // !SHARK
483 }
484 // Verify linkages.
485 interpreterState l = istate;
486 do {
487 assert(l == l->_self_link, "bad link");
488 l = l->_prev_link;
489 } while (l != NULL);
// Screwups with stack management usually cause us to overwrite istate,
// so save a copy to let us verify it.
492 interpreterState orig = istate;
493 #endif
494
495 register intptr_t* topOfStack = (intptr_t *)istate->stack(); /* access with STACK macros */
496 register address pc = istate->bcp();
497 register jubyte opcode;
498 register intptr_t* locals = istate->locals();
499 register ConstantPoolCache* cp = istate->constants(); // method()->constants()->cache()
500 #ifdef LOTS_OF_REGS
501 register JavaThread* THREAD = istate->thread();
502 #else
503 #undef THREAD
504 #define THREAD istate->thread()
505 #endif
506
507 #ifdef USELABELS
508 const static void* const opclabels_data[256] = {
509 /* 0x00 */ &&opc_nop, &&opc_aconst_null,&&opc_iconst_m1,&&opc_iconst_0,
510 /* 0x04 */ &&opc_iconst_1,&&opc_iconst_2, &&opc_iconst_3, &&opc_iconst_4,
511 /* 0x08 */ &&opc_iconst_5,&&opc_lconst_0, &&opc_lconst_1, &&opc_fconst_0,
512 /* 0x0C */ &&opc_fconst_1,&&opc_fconst_2, &&opc_dconst_0, &&opc_dconst_1,
513
514 /* 0x10 */ &&opc_bipush, &&opc_sipush, &&opc_ldc, &&opc_ldc_w,
515 /* 0x14 */ &&opc_ldc2_w, &&opc_iload, &&opc_lload, &&opc_fload,
516 /* 0x18 */ &&opc_dload, &&opc_aload, &&opc_iload_0,&&opc_iload_1,
517 /* 0x1C */ &&opc_iload_2,&&opc_iload_3,&&opc_lload_0,&&opc_lload_1,
518
519 /* 0x20 */ &&opc_lload_2,&&opc_lload_3,&&opc_fload_0,&&opc_fload_1,
520 /* 0x24 */ &&opc_fload_2,&&opc_fload_3,&&opc_dload_0,&&opc_dload_1,
521 /* 0x28 */ &&opc_dload_2,&&opc_dload_3,&&opc_aload_0,&&opc_aload_1,
522 /* 0x2C */ &&opc_aload_2,&&opc_aload_3,&&opc_iaload, &&opc_laload,
523
524 /* 0x30 */ &&opc_faload, &&opc_daload, &&opc_aaload, &&opc_baload,
525 /* 0x34 */ &&opc_caload, &&opc_saload, &&opc_istore, &&opc_lstore,
526 /* 0x38 */ &&opc_fstore, &&opc_dstore, &&opc_astore, &&opc_istore_0,
527 /* 0x3C */ &&opc_istore_1,&&opc_istore_2,&&opc_istore_3,&&opc_lstore_0,
528
529 /* 0x40 */ &&opc_lstore_1,&&opc_lstore_2,&&opc_lstore_3,&&opc_fstore_0,
530 /* 0x44 */ &&opc_fstore_1,&&opc_fstore_2,&&opc_fstore_3,&&opc_dstore_0,
531 /* 0x48 */ &&opc_dstore_1,&&opc_dstore_2,&&opc_dstore_3,&&opc_astore_0,
532 /* 0x4C */ &&opc_astore_1,&&opc_astore_2,&&opc_astore_3,&&opc_iastore,
533
534 /* 0x50 */ &&opc_lastore,&&opc_fastore,&&opc_dastore,&&opc_aastore,
535 /* 0x54 */ &&opc_bastore,&&opc_castore,&&opc_sastore,&&opc_pop,
536 /* 0x58 */ &&opc_pop2, &&opc_dup, &&opc_dup_x1, &&opc_dup_x2,
537 /* 0x5C */ &&opc_dup2, &&opc_dup2_x1,&&opc_dup2_x2,&&opc_swap,
538
539 /* 0x60 */ &&opc_iadd,&&opc_ladd,&&opc_fadd,&&opc_dadd,
540 /* 0x64 */ &&opc_isub,&&opc_lsub,&&opc_fsub,&&opc_dsub,
541 /* 0x68 */ &&opc_imul,&&opc_lmul,&&opc_fmul,&&opc_dmul,
542 /* 0x6C */ &&opc_idiv,&&opc_ldiv,&&opc_fdiv,&&opc_ddiv,
543
544 /* 0x70 */ &&opc_irem, &&opc_lrem, &&opc_frem,&&opc_drem,
545 /* 0x74 */ &&opc_ineg, &&opc_lneg, &&opc_fneg,&&opc_dneg,
546 /* 0x78 */ &&opc_ishl, &&opc_lshl, &&opc_ishr,&&opc_lshr,
547 /* 0x7C */ &&opc_iushr,&&opc_lushr,&&opc_iand,&&opc_land,
548
549 /* 0x80 */ &&opc_ior, &&opc_lor,&&opc_ixor,&&opc_lxor,
550 /* 0x84 */ &&opc_iinc,&&opc_i2l,&&opc_i2f, &&opc_i2d,
551 /* 0x88 */ &&opc_l2i, &&opc_l2f,&&opc_l2d, &&opc_f2i,
552 /* 0x8C */ &&opc_f2l, &&opc_f2d,&&opc_d2i, &&opc_d2l,
553
554 /* 0x90 */ &&opc_d2f, &&opc_i2b, &&opc_i2c, &&opc_i2s,
555 /* 0x94 */ &&opc_lcmp, &&opc_fcmpl,&&opc_fcmpg,&&opc_dcmpl,
556 /* 0x98 */ &&opc_dcmpg,&&opc_ifeq, &&opc_ifne, &&opc_iflt,
557 /* 0x9C */ &&opc_ifge, &&opc_ifgt, &&opc_ifle, &&opc_if_icmpeq,
558
559 /* 0xA0 */ &&opc_if_icmpne,&&opc_if_icmplt,&&opc_if_icmpge, &&opc_if_icmpgt,
560 /* 0xA4 */ &&opc_if_icmple,&&opc_if_acmpeq,&&opc_if_acmpne, &&opc_goto,
561 /* 0xA8 */ &&opc_jsr, &&opc_ret, &&opc_tableswitch,&&opc_lookupswitch,
562 /* 0xAC */ &&opc_ireturn, &&opc_lreturn, &&opc_freturn, &&opc_dreturn,
563
564 /* 0xB0 */ &&opc_areturn, &&opc_return, &&opc_getstatic, &&opc_putstatic,
565 /* 0xB4 */ &&opc_getfield, &&opc_putfield, &&opc_invokevirtual,&&opc_invokespecial,
566 /* 0xB8 */ &&opc_invokestatic,&&opc_invokeinterface,&&opc_invokedynamic,&&opc_new,
567 /* 0xBC */ &&opc_newarray, &&opc_anewarray, &&opc_arraylength, &&opc_athrow,
568
569 /* 0xC0 */ &&opc_checkcast, &&opc_instanceof, &&opc_monitorenter, &&opc_monitorexit,
570 /* 0xC4 */ &&opc_wide, &&opc_multianewarray, &&opc_ifnull, &&opc_ifnonnull,
571 /* 0xC8 */ &&opc_goto_w, &&opc_jsr_w, &&opc_breakpoint, &&opc_default,
572 /* 0xCC */ &&opc_default, &&opc_default, &&opc_default, &&opc_default,
573
574 /* 0xD0 */ &&opc_default, &&opc_default, &&opc_default, &&opc_default,
575 /* 0xD4 */ &&opc_default, &&opc_default, &&opc_default, &&opc_default,
576 /* 0xD8 */ &&opc_default, &&opc_default, &&opc_default, &&opc_default,
577 /* 0xDC */ &&opc_default, &&opc_default, &&opc_default, &&opc_default,
578
579 /* 0xE0 */ &&opc_default, &&opc_default, &&opc_default, &&opc_default,
580 /* 0xE4 */ &&opc_default, &&opc_fast_aldc, &&opc_fast_aldc_w, &&opc_return_register_finalizer,
581 /* 0xE8 */ &&opc_invokehandle,&&opc_default, &&opc_default, &&opc_default,
582 /* 0xEC */ &&opc_default, &&opc_default, &&opc_default, &&opc_default,
583
584 /* 0xF0 */ &&opc_default, &&opc_default, &&opc_default, &&opc_default,
585 /* 0xF4 */ &&opc_default, &&opc_default, &&opc_default, &&opc_default,
586 /* 0xF8 */ &&opc_default, &&opc_default, &&opc_default, &&opc_default,
587 /* 0xFC */ &&opc_default, &&opc_default, &&opc_default, &&opc_default
588 };
589 register uintptr_t *dispatch_table = (uintptr_t*)&opclabels_data[0];
590 #endif /* USELABELS */
591
592 #ifdef ASSERT
593 // this will trigger a VERIFY_OOP on entry
594 if (istate->msg() != initialize && ! METHOD->is_static()) {
595 oop rcvr = LOCALS_OBJECT(0);
596 VERIFY_OOP(rcvr);
597 }
598 #endif
599 // #define HACK
600 #ifdef HACK
601 bool interesting = false;
602 #endif // HACK
603
/* QQQ this should be a stack method so we don't need to know the actual direction */
605 guarantee(istate->msg() == initialize ||
606 topOfStack >= istate->stack_limit() &&
607 topOfStack < istate->stack_base(),
608 "Stack top out of range");
609
610 #ifdef CC_INTERP_PROFILE
611 // MethodData's last branch taken count.
612 uint mdo_last_branch_taken_count = 0;
613 #else
614 const uint mdo_last_branch_taken_count = 0;
615 #endif
616
617 switch (istate->msg()) {
618 case initialize: {
619 if (initialized++) ShouldNotReachHere(); // Only one initialize call.
620 _compiling = (UseCompiler || CountCompiledCalls);
621 #ifdef VM_JVMTI
622 _jvmti_interp_events = JvmtiExport::can_post_interpreter_events();
623 #endif
624 return;
625 }
626 break;
627 case method_entry: {
628 THREAD->set_do_not_unlock();
629 // count invocations
630 assert(initialized, "Interpreter not initialized");
631 if (_compiling) {
632 MethodCounters* mcs;
633 GET_METHOD_COUNTERS(mcs);
634 if (ProfileInterpreter) {
635 METHOD->increment_interpreter_invocation_count(THREAD);
636 }
637 mcs->invocation_counter()->increment();
638 if (mcs->invocation_counter()->reached_InvocationLimit(mcs->backedge_counter())) {
639 CALL_VM((void)InterpreterRuntime::frequency_counter_overflow(THREAD, NULL), handle_exception);
640 // We no longer retry on a counter overflow.
641 }
642 // Get or create profile data. Check for pending (async) exceptions.
643 BI_PROFILE_GET_OR_CREATE_METHOD_DATA(handle_exception);
644 SAFEPOINT;
645 }
646
647 if ((istate->_stack_base - istate->_stack_limit) != istate->method()->max_stack() + 1) {
648 // initialize
649 os::breakpoint();
650 }
651
652 #ifdef HACK
653 {
654 ResourceMark rm;
655 char *method_name = istate->method()->name_and_sig_as_C_string();
656 if (strstr(method_name, "runThese$TestRunner.run()V") != NULL) {
657 tty->print_cr("entering: depth %d bci: %d",
658 (istate->_stack_base - istate->_stack),
659 istate->_bcp - istate->_method->code_base());
660 interesting = true;
661 }
662 }
663 #endif // HACK
664
665 // Lock method if synchronized.
666 if (METHOD->is_synchronized()) {
667 // oop rcvr = locals[0].j.r;
668 oop rcvr;
669 if (METHOD->is_static()) {
670 rcvr = METHOD->constants()->pool_holder()->java_mirror();
671 } else {
672 rcvr = LOCALS_OBJECT(0);
673 VERIFY_OOP(rcvr);
674 }
675 // The initial monitor is ours for the taking.
// The monitor is no longer filled in by the frame manager, as that caused a race condition with biased locking.
677 BasicObjectLock* mon = &istate->monitor_base()[-1];
678 mon->set_obj(rcvr);
679 bool success = false;
680 uintptr_t epoch_mask_in_place = (uintptr_t)markOopDesc::epoch_mask_in_place;
681 markOop mark = rcvr->mark();
682 intptr_t hash = (intptr_t) markOopDesc::no_hash;
683 // Implies UseBiasedLocking.
684 if (mark->has_bias_pattern()) {
685 uintptr_t thread_ident;
686 uintptr_t anticipated_bias_locking_value;
687 thread_ident = (uintptr_t)istate->thread();
688 anticipated_bias_locking_value =
689 (((uintptr_t)rcvr->klass()->prototype_header() | thread_ident) ^ (uintptr_t)mark) &
690 ~((uintptr_t) markOopDesc::age_mask_in_place);
691
692 if (anticipated_bias_locking_value == 0) {
693 // Already biased towards this thread, nothing to do.
694 if (PrintBiasedLockingStatistics) {
695 (* BiasedLocking::biased_lock_entry_count_addr())++;
696 }
697 success = true;
698 } else if ((anticipated_bias_locking_value & markOopDesc::biased_lock_mask_in_place) != 0) {
699 // Try to revoke bias.
700 markOop header = rcvr->klass()->prototype_header();
701 if (hash != markOopDesc::no_hash) {
702 header = header->copy_set_hash(hash);
703 }
704 if (Atomic::cmpxchg_ptr(header, rcvr->mark_addr(), mark) == mark) {
705 if (PrintBiasedLockingStatistics)
706 (*BiasedLocking::revoked_lock_entry_count_addr())++;
707 }
708 } else if ((anticipated_bias_locking_value & epoch_mask_in_place) != 0) {
709 // Try to rebias.
710 markOop new_header = (markOop) ( (intptr_t) rcvr->klass()->prototype_header() | thread_ident);
711 if (hash != markOopDesc::no_hash) {
712 new_header = new_header->copy_set_hash(hash);
713 }
714 if (Atomic::cmpxchg_ptr((void*)new_header, rcvr->mark_addr(), mark) == mark) {
715 if (PrintBiasedLockingStatistics) {
716 (* BiasedLocking::rebiased_lock_entry_count_addr())++;
717 }
718 } else {
719 CALL_VM(InterpreterRuntime::monitorenter(THREAD, mon), handle_exception);
720 }
721 success = true;
722 } else {
723 // Try to bias towards thread in case object is anonymously biased.
724 markOop header = (markOop) ((uintptr_t) mark &
725 ((uintptr_t)markOopDesc::biased_lock_mask_in_place |
726 (uintptr_t)markOopDesc::age_mask_in_place | epoch_mask_in_place));
727 if (hash != markOopDesc::no_hash) {
728 header = header->copy_set_hash(hash);
729 }
730 markOop new_header = (markOop) ((uintptr_t) header | thread_ident);
731 // Debugging hint.
732 DEBUG_ONLY(mon->lock()->set_displaced_header((markOop) (uintptr_t) 0xdeaddead);)
733 if (Atomic::cmpxchg_ptr((void*)new_header, rcvr->mark_addr(), header) == header) {
734 if (PrintBiasedLockingStatistics) {
735 (* BiasedLocking::anonymously_biased_lock_entry_count_addr())++;
736 }
737 } else {
738 CALL_VM(InterpreterRuntime::monitorenter(THREAD, mon), handle_exception);
739 }
740 success = true;
741 }
742 }
743
744 // Traditional lightweight locking.
745 if (!success) {
746 markOop displaced = rcvr->mark()->set_unlocked();
747 mon->lock()->set_displaced_header(displaced);
748 bool call_vm = UseHeavyMonitors;
749 if (call_vm || Atomic::cmpxchg_ptr(mon, rcvr->mark_addr(), displaced) != displaced) {
// Is it the simple recursive case?
751 if (!call_vm && THREAD->is_lock_owned((address) displaced->clear_lock_bits())) {
752 mon->lock()->set_displaced_header(NULL);
753 } else {
754 CALL_VM(InterpreterRuntime::monitorenter(THREAD, mon), handle_exception);
755 }
756 }
757 }
758 }
759 THREAD->clr_do_not_unlock();
760
761 // Notify jvmti
762 #ifdef VM_JVMTI
763 if (_jvmti_interp_events) {
764 // Whenever JVMTI puts a thread in interp_only_mode, method
765 // entry/exit events are sent for that thread to track stack depth.
766 if (THREAD->is_interp_only_mode()) {
767 CALL_VM(InterpreterRuntime::post_method_entry(THREAD),
768 handle_exception);
769 }
770 }
771 #endif /* VM_JVMTI */
772
773 goto run;
774 }
775
776 case popping_frame: {
777 // returned from a java call to pop the frame, restart the call
778 // clear the message so we don't confuse ourselves later
779 assert(THREAD->pop_frame_in_process(), "wrong frame pop state");
780 istate->set_msg(no_request);
781 if (_compiling) {
782 // Set MDX back to the ProfileData of the invoke bytecode that will be
783 // restarted.
784 SET_MDX(NULL);
785 BI_PROFILE_GET_OR_CREATE_METHOD_DATA(handle_exception);
786 }
787 THREAD->clr_pop_frame_in_process();
788 goto run;
789 }
790
791 case method_resume: {
792 if ((istate->_stack_base - istate->_stack_limit) != istate->method()->max_stack() + 1) {
793 // resume
794 os::breakpoint();
795 }
796 #ifdef HACK
797 {
798 ResourceMark rm;
799 char *method_name = istate->method()->name_and_sig_as_C_string();
800 if (strstr(method_name, "runThese$TestRunner.run()V") != NULL) {
801 tty->print_cr("resume: depth %d bci: %d",
802 (istate->_stack_base - istate->_stack) ,
803 istate->_bcp - istate->_method->code_base());
804 interesting = true;
805 }
806 }
807 #endif // HACK
808 // returned from a java call, continue executing.
809 if (THREAD->pop_frame_pending() && !THREAD->pop_frame_in_process()) {
810 goto handle_Pop_Frame;
811 }
812 if (THREAD->jvmti_thread_state() &&
813 THREAD->jvmti_thread_state()->is_earlyret_pending()) {
814 goto handle_Early_Return;
815 }
816
817 if (THREAD->has_pending_exception()) goto handle_exception;
// Advance the pc by the saved size of the invoke bytecode.
819 UPDATE_PC(istate->bcp_advance());
820
821 if (_compiling) {
822 // Get or create profile data. Check for pending (async) exceptions.
823 BI_PROFILE_GET_OR_CREATE_METHOD_DATA(handle_exception);
824 }
825 goto run;
826 }
827
828 case deopt_resume2: {
829 // Returned from an opcode that will reexecute. Deopt was
830 // a result of a PopFrame request.
831 //
832
833 if (_compiling) {
834 // Get or create profile data. Check for pending (async) exceptions.
835 BI_PROFILE_GET_OR_CREATE_METHOD_DATA(handle_exception);
836 }
837 goto run;
838 }
839
840 case deopt_resume: {
// Returned from an opcode that has completed. The stack has
// the result; all we need to do is skip across the bytecode
// and continue (assuming there is no exception pending).
//
// Compute continuation length.
//
// Note: it is possible to deopt at a return_register_finalizer opcode
// because this requires entering the VM to do the registering. While the
// opcode is complete we can't advance because there are no more opcodes,
// much like trying to deopt at a poll return. In that case we simply
// get out of here.
852 //
853 if ( Bytecodes::code_at(METHOD, pc) == Bytecodes::_return_register_finalizer) {
854 // this will do the right thing even if an exception is pending.
855 goto handle_return;
856 }
857 UPDATE_PC(Bytecodes::length_at(METHOD, pc));
858 if (THREAD->has_pending_exception()) goto handle_exception;
859
860 if (_compiling) {
861 // Get or create profile data. Check for pending (async) exceptions.
862 BI_PROFILE_GET_OR_CREATE_METHOD_DATA(handle_exception);
863 }
864 goto run;
865 }
866 case got_monitors: {
// Continue locking now that we have a monitor to use;
// we expect to find the newly allocated monitor at the "top" of the monitor stack.
869 oop lockee = STACK_OBJECT(-1);
870 VERIFY_OOP(lockee);
// dereferencing lockee ought to provoke an implicit null check
872 // find a free monitor
873 BasicObjectLock* entry = (BasicObjectLock*) istate->stack_base();
874 assert(entry->obj() == NULL, "Frame manager didn't allocate the monitor");
875 entry->set_obj(lockee);
876 bool success = false;
877 uintptr_t epoch_mask_in_place = (uintptr_t)markOopDesc::epoch_mask_in_place;
878
879 markOop mark = lockee->mark();
880 intptr_t hash = (intptr_t) markOopDesc::no_hash;
881 // implies UseBiasedLocking
882 if (mark->has_bias_pattern()) {
883 uintptr_t thread_ident;
884 uintptr_t anticipated_bias_locking_value;
885 thread_ident = (uintptr_t)istate->thread();
886 anticipated_bias_locking_value =
887 (((uintptr_t)lockee->klass()->prototype_header() | thread_ident) ^ (uintptr_t)mark) &
888 ~((uintptr_t) markOopDesc::age_mask_in_place);
889
890 if (anticipated_bias_locking_value == 0) {
891 // already biased towards this thread, nothing to do
892 if (PrintBiasedLockingStatistics) {
893 (* BiasedLocking::biased_lock_entry_count_addr())++;
894 }
895 success = true;
896 } else if ((anticipated_bias_locking_value & markOopDesc::biased_lock_mask_in_place) != 0) {
897 // try revoke bias
898 markOop header = lockee->klass()->prototype_header();
899 if (hash != markOopDesc::no_hash) {
900 header = header->copy_set_hash(hash);
901 }
902 if (Atomic::cmpxchg_ptr(header, lockee->mark_addr(), mark) == mark) {
903 if (PrintBiasedLockingStatistics) {
904 (*BiasedLocking::revoked_lock_entry_count_addr())++;
905 }
906 }
907 } else if ((anticipated_bias_locking_value & epoch_mask_in_place) !=0) {
908 // try rebias
909 markOop new_header = (markOop) ( (intptr_t) lockee->klass()->prototype_header() | thread_ident);
910 if (hash != markOopDesc::no_hash) {
911 new_header = new_header->copy_set_hash(hash);
912 }
913 if (Atomic::cmpxchg_ptr((void*)new_header, lockee->mark_addr(), mark) == mark) {
914 if (PrintBiasedLockingStatistics) {
915 (* BiasedLocking::rebiased_lock_entry_count_addr())++;
916 }
917 } else {
918 CALL_VM(InterpreterRuntime::monitorenter(THREAD, entry), handle_exception);
919 }
920 success = true;
921 } else {
922 // try to bias towards thread in case object is anonymously biased
923 markOop header = (markOop) ((uintptr_t) mark & ((uintptr_t)markOopDesc::biased_lock_mask_in_place |
924 (uintptr_t)markOopDesc::age_mask_in_place | epoch_mask_in_place));
925 if (hash != markOopDesc::no_hash) {
926 header = header->copy_set_hash(hash);
927 }
928 markOop new_header = (markOop) ((uintptr_t) header | thread_ident);
929 // debugging hint
930 DEBUG_ONLY(entry->lock()->set_displaced_header((markOop) (uintptr_t) 0xdeaddead);)
931 if (Atomic::cmpxchg_ptr((void*)new_header, lockee->mark_addr(), header) == header) {
932 if (PrintBiasedLockingStatistics) {
933 (* BiasedLocking::anonymously_biased_lock_entry_count_addr())++;
934 }
935 } else {
936 CALL_VM(InterpreterRuntime::monitorenter(THREAD, entry), handle_exception);
937 }
938 success = true;
939 }
940 }
941
942 // traditional lightweight locking
943 if (!success) {
944 markOop displaced = lockee->mark()->set_unlocked();
945 entry->lock()->set_displaced_header(displaced);
946 bool call_vm = UseHeavyMonitors;
947 if (call_vm || Atomic::cmpxchg_ptr(entry, lockee->mark_addr(), displaced) != displaced) {
// Is it the simple recursive case?
949 if (!call_vm && THREAD->is_lock_owned((address) displaced->clear_lock_bits())) {
950 entry->lock()->set_displaced_header(NULL);
951 } else {
952 CALL_VM(InterpreterRuntime::monitorenter(THREAD, entry), handle_exception);
953 }
954 }
955 }
956 UPDATE_PC_AND_TOS(1, -1);
957 goto run;
958 }
959 default: {
960 fatal("Unexpected message from frame manager");
961 }
962 }
963
964 run:
965
966 DO_UPDATE_INSTRUCTION_COUNT(*pc)
967 DEBUGGER_SINGLE_STEP_NOTIFY();
968 #ifdef PREFETCH_OPCCODE
969 opcode = *pc; /* prefetch first opcode */
970 #endif
971
972 #ifndef USELABELS
973 while (1)
974 #endif
975 {
976 #ifndef PREFETCH_OPCCODE
977 opcode = *pc;
978 #endif
// Seems like this happens twice per opcode. At worst this is only
// needed at entry to the loop.
// DEBUGGER_SINGLE_STEP_NOTIFY();
/* Using this label avoids double breakpoints when quickening and
* when returning from transition frames.
*/
985 opcode_switch:
986 assert(istate == orig, "Corrupted istate");
987 /* QQQ Hmm this has knowledge of direction, ought to be a stack method */
988 assert(topOfStack >= istate->stack_limit(), "Stack overrun");
989 assert(topOfStack < istate->stack_base(), "Stack underrun");
990
991 #ifdef USELABELS
992 DISPATCH(opcode);
993 #else
994 switch (opcode)
995 #endif
996 {
997 CASE(_nop):
998 UPDATE_PC_AND_CONTINUE(1);
999
1000 /* Push miscellaneous constants onto the stack. */
1001
1002 CASE(_aconst_null):
1003 SET_STACK_OBJECT(NULL, 0);
1004 UPDATE_PC_AND_TOS_AND_CONTINUE(1, 1);
1005
1006 #undef OPC_CONST_n
1007 #define OPC_CONST_n(opcode, const_type, value) \
1008 CASE(opcode): \
1009 SET_STACK_ ## const_type(value, 0); \
1010 UPDATE_PC_AND_TOS_AND_CONTINUE(1, 1);
1011
1012 OPC_CONST_n(_iconst_m1, INT, -1);
1013 OPC_CONST_n(_iconst_0, INT, 0);
1014 OPC_CONST_n(_iconst_1, INT, 1);
1015 OPC_CONST_n(_iconst_2, INT, 2);
1016 OPC_CONST_n(_iconst_3, INT, 3);
1017 OPC_CONST_n(_iconst_4, INT, 4);
1018 OPC_CONST_n(_iconst_5, INT, 5);
1019 OPC_CONST_n(_fconst_0, FLOAT, 0.0);
1020 OPC_CONST_n(_fconst_1, FLOAT, 1.0);
1021 OPC_CONST_n(_fconst_2, FLOAT, 2.0);
1022
1023 #undef OPC_CONST2_n
1024 #define OPC_CONST2_n(opcname, value, key, kind) \
1025 CASE(_##opcname): \
1026 { \
1027 SET_STACK_ ## kind(VM##key##Const##value(), 1); \
1028 UPDATE_PC_AND_TOS_AND_CONTINUE(1, 2); \
1029 }
1030 OPC_CONST2_n(dconst_0, Zero, double, DOUBLE);
1031 OPC_CONST2_n(dconst_1, One, double, DOUBLE);
1032 OPC_CONST2_n(lconst_0, Zero, long, LONG);
1033 OPC_CONST2_n(lconst_1, One, long, LONG);
1034
1035 /* Load constant from constant pool: */
1036
1037 /* Push a 1-byte signed integer value onto the stack. */
1038 CASE(_bipush):
1039 SET_STACK_INT((jbyte)(pc[1]), 0);
1040 UPDATE_PC_AND_TOS_AND_CONTINUE(2, 1);
1041
1042 /* Push a 2-byte signed integer constant onto the stack. */
1043 CASE(_sipush):
1044 SET_STACK_INT((int16_t)Bytes::get_Java_u2(pc + 1), 0);
1045 UPDATE_PC_AND_TOS_AND_CONTINUE(3, 1);
1046
1047 /* load from local variable */
1048
1049 CASE(_aload):
1050 VERIFY_OOP(LOCALS_OBJECT(pc[1]));
1051 SET_STACK_OBJECT(LOCALS_OBJECT(pc[1]), 0);
1052 UPDATE_PC_AND_TOS_AND_CONTINUE(2, 1);
1053
1054 CASE(_iload):
1055 CASE(_fload):
1056 SET_STACK_SLOT(LOCALS_SLOT(pc[1]), 0);
1057 UPDATE_PC_AND_TOS_AND_CONTINUE(2, 1);
1058
1059 CASE(_lload):
1060 SET_STACK_LONG_FROM_ADDR(LOCALS_LONG_AT(pc[1]), 1);
1061 UPDATE_PC_AND_TOS_AND_CONTINUE(2, 2);
1062
1063 CASE(_dload):
1064 SET_STACK_DOUBLE_FROM_ADDR(LOCALS_DOUBLE_AT(pc[1]), 1);
1065 UPDATE_PC_AND_TOS_AND_CONTINUE(2, 2);
1066
1067 #undef OPC_LOAD_n
1068 #define OPC_LOAD_n(num) \
1069 CASE(_aload_##num): \
1070 VERIFY_OOP(LOCALS_OBJECT(num)); \
1071 SET_STACK_OBJECT(LOCALS_OBJECT(num), 0); \
1072 UPDATE_PC_AND_TOS_AND_CONTINUE(1, 1); \
1073 \
1074 CASE(_iload_##num): \
1075 CASE(_fload_##num): \
1076 SET_STACK_SLOT(LOCALS_SLOT(num), 0); \
1077 UPDATE_PC_AND_TOS_AND_CONTINUE(1, 1); \
1078 \
1079 CASE(_lload_##num): \
1080 SET_STACK_LONG_FROM_ADDR(LOCALS_LONG_AT(num), 1); \
1081 UPDATE_PC_AND_TOS_AND_CONTINUE(1, 2); \
1082 CASE(_dload_##num): \
1083 SET_STACK_DOUBLE_FROM_ADDR(LOCALS_DOUBLE_AT(num), 1); \
1084 UPDATE_PC_AND_TOS_AND_CONTINUE(1, 2);
1085
1086 OPC_LOAD_n(0);
1087 OPC_LOAD_n(1);
1088 OPC_LOAD_n(2);
1089 OPC_LOAD_n(3);
1090
1091 /* store to a local variable */
1092
1093 CASE(_astore):
1094 astore(topOfStack, -1, locals, pc[1]);
1095 UPDATE_PC_AND_TOS_AND_CONTINUE(2, -1);
1096
1097 CASE(_istore):
1098 CASE(_fstore):
1099 SET_LOCALS_SLOT(STACK_SLOT(-1), pc[1]);
1100 UPDATE_PC_AND_TOS_AND_CONTINUE(2, -1);
1101
1102 CASE(_lstore):
1103 SET_LOCALS_LONG(STACK_LONG(-1), pc[1]);
1104 UPDATE_PC_AND_TOS_AND_CONTINUE(2, -2);
1105
1106 CASE(_dstore):
1107 SET_LOCALS_DOUBLE(STACK_DOUBLE(-1), pc[1]);
1108 UPDATE_PC_AND_TOS_AND_CONTINUE(2, -2);
1109
1110 CASE(_wide): {
1111 uint16_t reg = Bytes::get_Java_u2(pc + 2);
1112
1113 opcode = pc[1];
1114
// Wide and its sub-bytecode are counted as separate instructions. If we
// don't account for this here, the bytecode trace skips the next bytecode.
1117 DO_UPDATE_INSTRUCTION_COUNT(opcode);
1118
1119 switch(opcode) {
1120 case Bytecodes::_aload:
1121 VERIFY_OOP(LOCALS_OBJECT(reg));
1122 SET_STACK_OBJECT(LOCALS_OBJECT(reg), 0);
1123 UPDATE_PC_AND_TOS_AND_CONTINUE(4, 1);
1124
1125 case Bytecodes::_iload:
1126 case Bytecodes::_fload:
1127 SET_STACK_SLOT(LOCALS_SLOT(reg), 0);
1128 UPDATE_PC_AND_TOS_AND_CONTINUE(4, 1);
1129
1130 case Bytecodes::_lload:
1131 SET_STACK_LONG_FROM_ADDR(LOCALS_LONG_AT(reg), 1);
1132 UPDATE_PC_AND_TOS_AND_CONTINUE(4, 2);
1133
1134 case Bytecodes::_dload:
1135 SET_STACK_DOUBLE_FROM_ADDR(LOCALS_LONG_AT(reg), 1);
1136 UPDATE_PC_AND_TOS_AND_CONTINUE(4, 2);
1137
1138 case Bytecodes::_astore:
1139 astore(topOfStack, -1, locals, reg);
1140 UPDATE_PC_AND_TOS_AND_CONTINUE(4, -1);
1141
1142 case Bytecodes::_istore:
1143 case Bytecodes::_fstore:
1144 SET_LOCALS_SLOT(STACK_SLOT(-1), reg);
1145 UPDATE_PC_AND_TOS_AND_CONTINUE(4, -1);
1146
1147 case Bytecodes::_lstore:
1148 SET_LOCALS_LONG(STACK_LONG(-1), reg);
1149 UPDATE_PC_AND_TOS_AND_CONTINUE(4, -2);
1150
1151 case Bytecodes::_dstore:
1152 SET_LOCALS_DOUBLE(STACK_DOUBLE(-1), reg);
1153 UPDATE_PC_AND_TOS_AND_CONTINUE(4, -2);
1154
1155 case Bytecodes::_iinc: {
1156 int16_t offset = (int16_t)Bytes::get_Java_u2(pc+4);
1157 // Be nice to see what this generates.... QQQ
1158 SET_LOCALS_INT(LOCALS_INT(reg) + offset, reg);
1159 UPDATE_PC_AND_CONTINUE(6);
1160 }
1161 case Bytecodes::_ret:
1162 // Profile ret.
1163 BI_PROFILE_UPDATE_RET(/*bci=*/((int)(intptr_t)(LOCALS_ADDR(reg))));
1164 // Now, update the pc.
1165 pc = istate->method()->code_base() + (intptr_t)(LOCALS_ADDR(reg));
1166 UPDATE_PC_AND_CONTINUE(0);
1167 default:
1168 VM_JAVA_ERROR(vmSymbols::java_lang_InternalError(), "undefined opcode", note_no_trap);
1169 }
1170 }
1171
1172
1173 #undef OPC_STORE_n
1174 #define OPC_STORE_n(num) \
1175 CASE(_astore_##num): \
1176 astore(topOfStack, -1, locals, num); \
1177 UPDATE_PC_AND_TOS_AND_CONTINUE(1, -1); \
1178 CASE(_istore_##num): \
1179 CASE(_fstore_##num): \
1180 SET_LOCALS_SLOT(STACK_SLOT(-1), num); \
1181 UPDATE_PC_AND_TOS_AND_CONTINUE(1, -1);
1182
1183 OPC_STORE_n(0);
1184 OPC_STORE_n(1);
1185 OPC_STORE_n(2);
1186 OPC_STORE_n(3);
1187
1188 #undef OPC_DSTORE_n
1189 #define OPC_DSTORE_n(num) \
1190 CASE(_dstore_##num): \
1191 SET_LOCALS_DOUBLE(STACK_DOUBLE(-1), num); \
1192 UPDATE_PC_AND_TOS_AND_CONTINUE(1, -2); \
1193 CASE(_lstore_##num): \
1194 SET_LOCALS_LONG(STACK_LONG(-1), num); \
1195 UPDATE_PC_AND_TOS_AND_CONTINUE(1, -2);
1196
1197 OPC_DSTORE_n(0);
1198 OPC_DSTORE_n(1);
1199 OPC_DSTORE_n(2);
1200 OPC_DSTORE_n(3);
1201
1202 /* stack pop, dup, and insert opcodes */
1203
1204
1205 CASE(_pop): /* Discard the top item on the stack */
1206 UPDATE_PC_AND_TOS_AND_CONTINUE(1, -1);
1207
1208
1209 CASE(_pop2): /* Discard the top 2 items on the stack */
1210 UPDATE_PC_AND_TOS_AND_CONTINUE(1, -2);
1211
1212
1213 CASE(_dup): /* Duplicate the top item on the stack */
1214 dup(topOfStack);
1215 UPDATE_PC_AND_TOS_AND_CONTINUE(1, 1);
1216
1217 CASE(_dup2): /* Duplicate the top 2 items on the stack */
1218 dup2(topOfStack);
1219 UPDATE_PC_AND_TOS_AND_CONTINUE(1, 2);
1220
1221 CASE(_dup_x1): /* insert top word two down */
1222 dup_x1(topOfStack);
1223 UPDATE_PC_AND_TOS_AND_CONTINUE(1, 1);
1224
1225 CASE(_dup_x2): /* insert top word three down */
1226 dup_x2(topOfStack);
1227 UPDATE_PC_AND_TOS_AND_CONTINUE(1, 1);
1228
1229 CASE(_dup2_x1): /* insert top 2 slots three down */
1230 dup2_x1(topOfStack);
1231 UPDATE_PC_AND_TOS_AND_CONTINUE(1, 2);
1232
1233 CASE(_dup2_x2): /* insert top 2 slots four down */
1234 dup2_x2(topOfStack);
1235 UPDATE_PC_AND_TOS_AND_CONTINUE(1, 2);
1236
1237 CASE(_swap): { /* swap top two elements on the stack */
1238 swap(topOfStack);
1239 UPDATE_PC_AND_CONTINUE(1);
1240 }
1241
1242 /* Perform various binary integer operations */
1243
1244 #undef OPC_INT_BINARY
1245 #define OPC_INT_BINARY(opcname, opname, test) \
1246 CASE(_i##opcname): \
1247 if (test && (STACK_INT(-1) == 0)) { \
1248 VM_JAVA_ERROR(vmSymbols::java_lang_ArithmeticException(), \
1249 "/ by zero", note_div0Check_trap); \
1250 } \
1251 SET_STACK_INT(VMint##opname(STACK_INT(-2), \
1252 STACK_INT(-1)), \
1253 -2); \
1254 UPDATE_PC_AND_TOS_AND_CONTINUE(1, -1); \
1255 CASE(_l##opcname): \
1256 { \
1257 if (test) { \
1258 jlong l1 = STACK_LONG(-1); \
1259 if (VMlongEqz(l1)) { \
1260 VM_JAVA_ERROR(vmSymbols::java_lang_ArithmeticException(), \
1261 "/ by long zero", note_div0Check_trap); \
1262 } \
1263 } \
1264 /* First long at (-1,-2) next long at (-3,-4) */ \
1265 SET_STACK_LONG(VMlong##opname(STACK_LONG(-3), \
1266 STACK_LONG(-1)), \
1267 -3); \
1268 UPDATE_PC_AND_TOS_AND_CONTINUE(1, -2); \
1269 }
1270
1271 OPC_INT_BINARY(add, Add, 0);
1272 OPC_INT_BINARY(sub, Sub, 0);
1273 OPC_INT_BINARY(mul, Mul, 0);
1274 OPC_INT_BINARY(and, And, 0);
1275 OPC_INT_BINARY(or, Or, 0);
1276 OPC_INT_BINARY(xor, Xor, 0);
1277 OPC_INT_BINARY(div, Div, 1);
1278 OPC_INT_BINARY(rem, Rem, 1);
1279
1280
1281 /* Perform various binary floating number operations */
/* On some machines/platforms/compilers the div-zero check can be implicit */
1283
1284 #undef OPC_FLOAT_BINARY
1285 #define OPC_FLOAT_BINARY(opcname, opname) \
1286 CASE(_d##opcname): { \
1287 SET_STACK_DOUBLE(VMdouble##opname(STACK_DOUBLE(-3), \
1288 STACK_DOUBLE(-1)), \
1289 -3); \
1290 UPDATE_PC_AND_TOS_AND_CONTINUE(1, -2); \
1291 } \
1292 CASE(_f##opcname): \
1293 SET_STACK_FLOAT(VMfloat##opname(STACK_FLOAT(-2), \
1294 STACK_FLOAT(-1)), \
1295 -2); \
1296 UPDATE_PC_AND_TOS_AND_CONTINUE(1, -1);
1297
1298
1299 OPC_FLOAT_BINARY(add, Add);
1300 OPC_FLOAT_BINARY(sub, Sub);
1301 OPC_FLOAT_BINARY(mul, Mul);
1302 OPC_FLOAT_BINARY(div, Div);
1303 OPC_FLOAT_BINARY(rem, Rem);
1304
1305 /* Shift operations
1306 * Shift left int and long: ishl, lshl
1307 * Logical shift right int and long w/zero extension: iushr, lushr
1308 * Arithmetic shift right int and long w/sign extension: ishr, lshr
1309 */
1310
1311 #undef OPC_SHIFT_BINARY
1312 #define OPC_SHIFT_BINARY(opcname, opname) \
1313 CASE(_i##opcname): \
1314 SET_STACK_INT(VMint##opname(STACK_INT(-2), \
1315 STACK_INT(-1)), \
1316 -2); \
1317 UPDATE_PC_AND_TOS_AND_CONTINUE(1, -1); \
1318 CASE(_l##opcname): \
1319 { \
1320 SET_STACK_LONG(VMlong##opname(STACK_LONG(-2), \
1321 STACK_INT(-1)), \
1322 -2); \
1323 UPDATE_PC_AND_TOS_AND_CONTINUE(1, -1); \
1324 }
1325
1326 OPC_SHIFT_BINARY(shl, Shl);
1327 OPC_SHIFT_BINARY(shr, Shr);
1328 OPC_SHIFT_BINARY(ushr, Ushr);
1329
1330 /* Increment local variable by constant */
1331 CASE(_iinc):
1332 {
1333 // locals[pc[1]].j.i += (jbyte)(pc[2]);
1334 SET_LOCALS_INT(LOCALS_INT(pc[1]) + (jbyte)(pc[2]), pc[1]);
1335 UPDATE_PC_AND_CONTINUE(3);
1336 }
1337
1338 /* negate the value on the top of the stack */
1339
1340 CASE(_ineg):
1341 SET_STACK_INT(VMintNeg(STACK_INT(-1)), -1);
1342 UPDATE_PC_AND_CONTINUE(1);
1343
1344 CASE(_fneg):
1345 SET_STACK_FLOAT(VMfloatNeg(STACK_FLOAT(-1)), -1);
1346 UPDATE_PC_AND_CONTINUE(1);
1347
1348 CASE(_lneg):
1349 {
1350 SET_STACK_LONG(VMlongNeg(STACK_LONG(-1)), -1);
1351 UPDATE_PC_AND_CONTINUE(1);
1352 }
1353
1354 CASE(_dneg):
1355 {
1356 SET_STACK_DOUBLE(VMdoubleNeg(STACK_DOUBLE(-1)), -1);
1357 UPDATE_PC_AND_CONTINUE(1);
1358 }
1359
1360 /* Conversion operations */
1361
1362 CASE(_i2f): /* convert top of stack int to float */
1363 SET_STACK_FLOAT(VMint2Float(STACK_INT(-1)), -1);
1364 UPDATE_PC_AND_CONTINUE(1);
1365
1366 CASE(_i2l): /* convert top of stack int to long */
1367 {
1368 // this is ugly QQQ
1369 jlong r = VMint2Long(STACK_INT(-1));
1370 MORE_STACK(-1); // Pop
1371 SET_STACK_LONG(r, 1);
1372
1373 UPDATE_PC_AND_TOS_AND_CONTINUE(1, 2);
1374 }
1375
1376 CASE(_i2d): /* convert top of stack int to double */
1377 {
1378 // this is ugly QQQ (why cast to jlong?? )
1379 jdouble r = (jlong)STACK_INT(-1);
1380 MORE_STACK(-1); // Pop
1381 SET_STACK_DOUBLE(r, 1);
1382
1383 UPDATE_PC_AND_TOS_AND_CONTINUE(1, 2);
1384 }
1385
1386 CASE(_l2i): /* convert top of stack long to int */
1387 {
1388 jint r = VMlong2Int(STACK_LONG(-1));
1389 MORE_STACK(-2); // Pop
1390 SET_STACK_INT(r, 0);
1391 UPDATE_PC_AND_TOS_AND_CONTINUE(1, 1);
1392 }
1393
1394 CASE(_l2f): /* convert top of stack long to float */
1395 {
1396 jlong r = STACK_LONG(-1);
1397 MORE_STACK(-2); // Pop
1398 SET_STACK_FLOAT(VMlong2Float(r), 0);
1399 UPDATE_PC_AND_TOS_AND_CONTINUE(1, 1);
1400 }
1401
1402 CASE(_l2d): /* convert top of stack long to double */
1403 {
1404 jlong r = STACK_LONG(-1);
1405 MORE_STACK(-2); // Pop
1406 SET_STACK_DOUBLE(VMlong2Double(r), 1);
1407 UPDATE_PC_AND_TOS_AND_CONTINUE(1, 2);
1408 }
1409
1410 CASE(_f2i): /* Convert top of stack float to int */
1411 SET_STACK_INT(SharedRuntime::f2i(STACK_FLOAT(-1)), -1);
1412 UPDATE_PC_AND_CONTINUE(1);
1413
1414 CASE(_f2l): /* convert top of stack float to long */
1415 {
1416 jlong r = SharedRuntime::f2l(STACK_FLOAT(-1));
1417 MORE_STACK(-1); // POP
1418 SET_STACK_LONG(r, 1);
1419 UPDATE_PC_AND_TOS_AND_CONTINUE(1, 2);
1420 }
1421
1422 CASE(_f2d): /* convert top of stack float to double */
1423 {
1424 jfloat f;
1425 jdouble r;
1426 f = STACK_FLOAT(-1);
1427 r = (jdouble) f;
1428 MORE_STACK(-1); // POP
1429 SET_STACK_DOUBLE(r, 1);
1430 UPDATE_PC_AND_TOS_AND_CONTINUE(1, 2);
1431 }
1432
1433 CASE(_d2i): /* convert top of stack double to int */
1434 {
1435 jint r1 = SharedRuntime::d2i(STACK_DOUBLE(-1));
1436 MORE_STACK(-2);
1437 SET_STACK_INT(r1, 0);
1438 UPDATE_PC_AND_TOS_AND_CONTINUE(1, 1);
1439 }
1440
1441 CASE(_d2f): /* convert top of stack double to float */
1442 {
1443 jfloat r1 = VMdouble2Float(STACK_DOUBLE(-1));
1444 MORE_STACK(-2);
1445 SET_STACK_FLOAT(r1, 0);
1446 UPDATE_PC_AND_TOS_AND_CONTINUE(1, 1);
1447 }
1448
1449 CASE(_d2l): /* convert top of stack double to long */
1450 {
1451 jlong r1 = SharedRuntime::d2l(STACK_DOUBLE(-1));
1452 MORE_STACK(-2);
1453 SET_STACK_LONG(r1, 1);
1454 UPDATE_PC_AND_TOS_AND_CONTINUE(1, 2);
1455 }
1456
1457 CASE(_i2b):
1458 SET_STACK_INT(VMint2Byte(STACK_INT(-1)), -1);
1459 UPDATE_PC_AND_CONTINUE(1);
1460
1461 CASE(_i2c):
1462 SET_STACK_INT(VMint2Char(STACK_INT(-1)), -1);
1463 UPDATE_PC_AND_CONTINUE(1);
1464
1465 CASE(_i2s):
1466 SET_STACK_INT(VMint2Short(STACK_INT(-1)), -1);
1467 UPDATE_PC_AND_CONTINUE(1);
1468
1469 /* comparison operators */
1470
1471
1472 #define COMPARISON_OP(name, comparison) \
1473 CASE(_if_icmp##name): { \
1474 const bool cmp = (STACK_INT(-2) comparison STACK_INT(-1)); \
1475 int skip = cmp \
1476 ? (int16_t)Bytes::get_Java_u2(pc + 1) : 3; \
1477 address branch_pc = pc; \
1478 /* Profile branch. */ \
1479 BI_PROFILE_UPDATE_BRANCH(/*is_taken=*/cmp); \
1480 UPDATE_PC_AND_TOS(skip, -2); \
1481 DO_BACKEDGE_CHECKS(skip, branch_pc); \
1482 CONTINUE; \
1483 } \
1484 CASE(_if##name): { \
1485 const bool cmp = (STACK_INT(-1) comparison 0); \
1486 int skip = cmp \
1487 ? (int16_t)Bytes::get_Java_u2(pc + 1) : 3; \
1488 address branch_pc = pc; \
1489 /* Profile branch. */ \
1490 BI_PROFILE_UPDATE_BRANCH(/*is_taken=*/cmp); \
1491 UPDATE_PC_AND_TOS(skip, -1); \
1492 DO_BACKEDGE_CHECKS(skip, branch_pc); \
1493 CONTINUE; \
1494 }
1495
1496 #define COMPARISON_OP2(name, comparison) \
1497 COMPARISON_OP(name, comparison) \
1498 CASE(_if_acmp##name): { \
1499 const bool cmp = (STACK_OBJECT(-2) comparison STACK_OBJECT(-1)); \
1500 int skip = cmp \
1501 ? (int16_t)Bytes::get_Java_u2(pc + 1) : 3; \
1502 address branch_pc = pc; \
1503 /* Profile branch. */ \
1504 BI_PROFILE_UPDATE_BRANCH(/*is_taken=*/cmp); \
1505 UPDATE_PC_AND_TOS(skip, -2); \
1506 DO_BACKEDGE_CHECKS(skip, branch_pc); \
1507 CONTINUE; \
1508 }
1509
1510 #define NULL_COMPARISON_NOT_OP(name) \
1511 CASE(_if##name): { \
1512 const bool cmp = (!(STACK_OBJECT(-1) == NULL)); \
1513 int skip = cmp \
1514 ? (int16_t)Bytes::get_Java_u2(pc + 1) : 3; \
1515 address branch_pc = pc; \
1516 /* Profile branch. */ \
1517 BI_PROFILE_UPDATE_BRANCH(/*is_taken=*/cmp); \
1518 UPDATE_PC_AND_TOS(skip, -1); \
1519 DO_BACKEDGE_CHECKS(skip, branch_pc); \
1520 CONTINUE; \
1521 }
1522
1523 #define NULL_COMPARISON_OP(name) \
1524 CASE(_if##name): { \
1525 const bool cmp = ((STACK_OBJECT(-1) == NULL)); \
1526 int skip = cmp \
1527 ? (int16_t)Bytes::get_Java_u2(pc + 1) : 3; \
1528 address branch_pc = pc; \
1529 /* Profile branch. */ \
1530 BI_PROFILE_UPDATE_BRANCH(/*is_taken=*/cmp); \
1531 UPDATE_PC_AND_TOS(skip, -1); \
1532 DO_BACKEDGE_CHECKS(skip, branch_pc); \
1533 CONTINUE; \
1534 }
1535 COMPARISON_OP(lt, <);
1536 COMPARISON_OP(gt, >);
1537 COMPARISON_OP(le, <=);
1538 COMPARISON_OP(ge, >=);
1539 COMPARISON_OP2(eq, ==); /* include ref comparison */
1540 COMPARISON_OP2(ne, !=); /* include ref comparison */
1541 NULL_COMPARISON_OP(null);
1542 NULL_COMPARISON_NOT_OP(nonnull);
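
// In each of the macros above, "skip" is the signed 16-bit branch offset
// when the branch is taken, or 3 (the width of the if<cond> bytecode itself)
// to fall through to the next instruction.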
1543
1544 /* Goto pc at specified offset in switch table. */
1545
1546 CASE(_tableswitch): {
1547 jint* lpc = (jint*)VMalignWordUp(pc+1);
1548 int32_t key = STACK_INT(-1);
1549 int32_t low = Bytes::get_Java_u4((address)&lpc[1]);
1550 int32_t high = Bytes::get_Java_u4((address)&lpc[2]);
1551 int32_t skip;
1552 key -= low;
1553 if (((uint32_t) key > (uint32_t)(high - low))) {
1554 key = -1;
1555 skip = Bytes::get_Java_u4((address)&lpc[0]);
1556 } else {
1557 skip = Bytes::get_Java_u4((address)&lpc[key + 3]);
1558 }
1559 // Profile switch.
1560 BI_PROFILE_UPDATE_SWITCH(/*switch_index=*/key);
1561 // Does this really need a full backedge check (osr)?
1562 address branch_pc = pc;
1563 UPDATE_PC_AND_TOS(skip, -1);
1564 DO_BACKEDGE_CHECKS(skip, branch_pc);
1565 CONTINUE;
1566 }
1567
1568 /* Goto pc whose table entry matches specified key. */
1569
1570 CASE(_lookupswitch): {
1571 jint* lpc = (jint*)VMalignWordUp(pc+1);
1572 int32_t key = STACK_INT(-1);
1573 int32_t skip = Bytes::get_Java_u4((address) lpc); /* default amount */
1574 // Remember index.
1575 int index = -1;
1576 int newindex = 0;
1577 int32_t npairs = Bytes::get_Java_u4((address) &lpc[1]);
1578 while (--npairs >= 0) {
1579 lpc += 2;
1580 if (key == (int32_t)Bytes::get_Java_u4((address)lpc)) {
1581 skip = Bytes::get_Java_u4((address)&lpc[1]);
1582 index = newindex;
1583 break;
1584 }
1585 newindex += 1;
1586 }
1587 // Profile switch.
1588 BI_PROFILE_UPDATE_SWITCH(/*switch_index=*/index);
1589 address branch_pc = pc;
1590 UPDATE_PC_AND_TOS(skip, -1);
1591 DO_BACKEDGE_CHECKS(skip, branch_pc);
1592 CONTINUE;
1593 }
1594
1595 CASE(_fcmpl):
1596 CASE(_fcmpg):
1597 {
1598 SET_STACK_INT(VMfloatCompare(STACK_FLOAT(-2),
1599 STACK_FLOAT(-1),
1600 (opcode == Bytecodes::_fcmpl ? -1 : 1)),
1601 -2);
1602 UPDATE_PC_AND_TOS_AND_CONTINUE(1, -1);
1603 }
1604
1605 CASE(_dcmpl):
1606 CASE(_dcmpg):
1607 {
1608 int r = VMdoubleCompare(STACK_DOUBLE(-3),
1609 STACK_DOUBLE(-1),
1610 (opcode == Bytecodes::_dcmpl ? -1 : 1));
1611 MORE_STACK(-4); // Pop
1612 SET_STACK_INT(r, 0);
1613 UPDATE_PC_AND_TOS_AND_CONTINUE(1, 1);
1614 }
1615
1616 CASE(_lcmp):
1617 {
1618 int r = VMlongCompare(STACK_LONG(-3), STACK_LONG(-1));
1619 MORE_STACK(-4);
1620 SET_STACK_INT(r, 0);
1621 UPDATE_PC_AND_TOS_AND_CONTINUE(1, 1);
1622 }
1623
1624
1625 /* Return from a method */
1626
1627 CASE(_areturn):
1628 CASE(_ireturn):
1629 CASE(_freturn):
1630 {
1631 // Allow a safepoint before returning to frame manager.
1632 SAFEPOINT;
1633
1634 goto handle_return;
1635 }
1636
1637 CASE(_lreturn):
1638 CASE(_dreturn):
1639 {
1640 // Allow a safepoint before returning to frame manager.
1641 SAFEPOINT;
1642 goto handle_return;
1643 }
1644
1645 CASE(_return_register_finalizer): {
1646
1647 oop rcvr = LOCALS_OBJECT(0);
1648 VERIFY_OOP(rcvr);
1649 if (rcvr->klass()->has_finalizer()) {
1650 CALL_VM(InterpreterRuntime::register_finalizer(THREAD, rcvr), handle_exception);
1651 }
1652 goto handle_return;
1653 }
1654 CASE(_return): {
1655
1656 // Allow a safepoint before returning to frame manager.
1657 SAFEPOINT;
1658 goto handle_return;
1659 }
1660
1661 /* Array access byte-codes */
1662
1663 /* Every array access byte-code starts out like this */
1664 // arrayOopDesc* arrObj = (arrayOopDesc*)STACK_OBJECT(arrayOff);
1665 #define ARRAY_INTRO(arrayOff) \
1666 arrayOop arrObj = (arrayOop)STACK_OBJECT(arrayOff); \
1667 jint index = STACK_INT(arrayOff + 1); \
1668 char message[jintAsStringSize]; \
1669 CHECK_NULL(arrObj); \
1670 if ((uint32_t)index >= (uint32_t)arrObj->length()) { \
1671 sprintf(message, "%d", index); \
1672 VM_JAVA_ERROR(vmSymbols::java_lang_ArrayIndexOutOfBoundsException(), \
1673 message, note_rangeCheck_trap); \
1674 }
1675
1676 /* 32-bit loads. These handle conversion from < 32-bit types */
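/* byte, char and short elements are widened to a full 32-bit int stack slot. */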
1677 #define ARRAY_LOADTO32(T, T2, format, stackRes, extra) \
1678 { \
1679 ARRAY_INTRO(-2); \
1680 (void)extra; \
1681 SET_ ## stackRes(*(T2 *)(((address) arrObj->base(T)) + index * sizeof(T2)), \
1682 -2); \
1683 UPDATE_PC_AND_TOS_AND_CONTINUE(1, -1); \
1684 }
1685
1686 /* 64-bit loads */
1687 #define ARRAY_LOADTO64(T,T2, stackRes, extra) \
1688 { \
1689 ARRAY_INTRO(-2); \
1690 SET_ ## stackRes(*(T2 *)(((address) arrObj->base(T)) + index * sizeof(T2)), -1); \
1691 (void)extra; \
1692 UPDATE_PC_AND_CONTINUE(1); \
1693 }
1694
1695 CASE(_iaload):
1696 ARRAY_LOADTO32(T_INT, jint, "%d", STACK_INT, 0);
1697 CASE(_faload):
1698 ARRAY_LOADTO32(T_FLOAT, jfloat, "%f", STACK_FLOAT, 0);
1699 CASE(_aaload): {
1700 ARRAY_INTRO(-2);
1701 SET_STACK_OBJECT(((objArrayOop) arrObj)->obj_at(index), -2);
1702 UPDATE_PC_AND_TOS_AND_CONTINUE(1, -1);
1703 }
1704 CASE(_baload):
1705 ARRAY_LOADTO32(T_BYTE, jbyte, "%d", STACK_INT, 0);
1706 CASE(_caload):
1707 ARRAY_LOADTO32(T_CHAR, jchar, "%d", STACK_INT, 0);
1708 CASE(_saload):
1709 ARRAY_LOADTO32(T_SHORT, jshort, "%d", STACK_INT, 0);
1710 CASE(_laload):
1711 ARRAY_LOADTO64(T_LONG, jlong, STACK_LONG, 0);
1712 CASE(_daload):
1713 ARRAY_LOADTO64(T_DOUBLE, jdouble, STACK_DOUBLE, 0);
1714
1715 /* 32-bit stores. These handle conversion to < 32-bit types */
1716 #define ARRAY_STOREFROM32(T, T2, format, stackSrc, extra) \
1717 { \
1718 ARRAY_INTRO(-3); \
1719 (void)extra; \
1720 *(T2 *)(((address) arrObj->base(T)) + index * sizeof(T2)) = stackSrc( -1); \
1721 UPDATE_PC_AND_TOS_AND_CONTINUE(1, -3); \
1722 }
1723
1724 /* 64-bit stores */
1725 #define ARRAY_STOREFROM64(T, T2, stackSrc, extra) \
1726 { \
1727 ARRAY_INTRO(-4); \
1728 (void)extra; \
1729 *(T2 *)(((address) arrObj->base(T)) + index * sizeof(T2)) = stackSrc( -1); \
1730 UPDATE_PC_AND_TOS_AND_CONTINUE(1, -4); \
1731 }
1732
1733 CASE(_iastore):
1734 ARRAY_STOREFROM32(T_INT, jint, "%d", STACK_INT, 0);
1735 CASE(_fastore):
1736 ARRAY_STOREFROM32(T_FLOAT, jfloat, "%f", STACK_FLOAT, 0);
1737 /*
1738 * This one looks different because of the assignability check
1739 */
1740 CASE(_aastore): {
1741 oop rhsObject = STACK_OBJECT(-1);
1742 VERIFY_OOP(rhsObject);
1743 ARRAY_INTRO( -3);
1744 // arrObj, index are set
1745 if (rhsObject != NULL) {
1746 /* Check assignability of rhsObject into arrObj */
1747 Klass* rhsKlass = rhsObject->klass(); // EBX (subclass)
1748 Klass* elemKlass = ObjArrayKlass::cast(arrObj->klass())->element_klass(); // superklass EAX
1749 //
          // Check for compatibility. This check must not GC!!
          // Seems way more expensive now that we must dispatch.
1752 //
1753 if (rhsKlass != elemKlass && !rhsKlass->is_subtype_of(elemKlass)) { // ebx->is...
1754 // Decrement counter if subtype check failed.
1755 BI_PROFILE_SUBTYPECHECK_FAILED(rhsKlass);
1756 VM_JAVA_ERROR(vmSymbols::java_lang_ArrayStoreException(), "", note_arrayCheck_trap);
1757 }
1758 // Profile checkcast with null_seen and receiver.
1759 BI_PROFILE_UPDATE_CHECKCAST(/*null_seen=*/false, rhsKlass);
1760 } else {
1761 // Profile checkcast with null_seen and receiver.
1762 BI_PROFILE_UPDATE_CHECKCAST(/*null_seen=*/true, NULL);
1763 }
1764 ((objArrayOop) arrObj)->obj_at_put(index, rhsObject);
1765 UPDATE_PC_AND_TOS_AND_CONTINUE(1, -3);
1766 }
1767 CASE(_bastore):
1768 ARRAY_STOREFROM32(T_BYTE, jbyte, "%d", STACK_INT, 0);
1769 CASE(_castore):
1770 ARRAY_STOREFROM32(T_CHAR, jchar, "%d", STACK_INT, 0);
1771 CASE(_sastore):
1772 ARRAY_STOREFROM32(T_SHORT, jshort, "%d", STACK_INT, 0);
1773 CASE(_lastore):
1774 ARRAY_STOREFROM64(T_LONG, jlong, STACK_LONG, 0);
1775 CASE(_dastore):
1776 ARRAY_STOREFROM64(T_DOUBLE, jdouble, STACK_DOUBLE, 0);
1777
1778 CASE(_arraylength):
1779 {
1780 arrayOop ary = (arrayOop) STACK_OBJECT(-1);
1781 CHECK_NULL(ary);
1782 SET_STACK_INT(ary->length(), -1);
1783 UPDATE_PC_AND_CONTINUE(1);
1784 }
1785
1786 /* monitorenter and monitorexit for locking/unlocking an object */
1787
1788 CASE(_monitorenter): {
1789 oop lockee = STACK_OBJECT(-1);
      // dereferencing lockee ought to provoke an implicit null check
1791 CHECK_NULL(lockee);
1792 // find a free monitor or one already allocated for this object
      // if we find a matching object then we need a new monitor
      // since this is a recursive enter
1795 BasicObjectLock* limit = istate->monitor_base();
1796 BasicObjectLock* most_recent = (BasicObjectLock*) istate->stack_base();
1797 BasicObjectLock* entry = NULL;
1798 while (most_recent != limit ) {
1799 if (most_recent->obj() == NULL) entry = most_recent;
1800 else if (most_recent->obj() == lockee) break;
1801 most_recent++;
1802 }
1803 if (entry != NULL) {
1804 entry->set_obj(lockee);
        bool success = false;
1806 uintptr_t epoch_mask_in_place = (uintptr_t)markOopDesc::epoch_mask_in_place;
1807
1808 markOop mark = lockee->mark();
1809 intptr_t hash = (intptr_t) markOopDesc::no_hash;
1810 // implies UseBiasedLocking
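          // With biased locking the mark word is (roughly)
          //   [thread | epoch | age | biased_lock:1 | lock:2]
          // XOR-ing the mark with (prototype header | our thread), ignoring
          // the age bits, yields 0 iff the object is already biased towards
          // this thread with a current epoch; the non-zero cases are decoded
          // bit by bit below.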
1811 if (mark->has_bias_pattern()) {
1812 uintptr_t thread_ident;
1813 uintptr_t anticipated_bias_locking_value;
1814 thread_ident = (uintptr_t)istate->thread();
1815 anticipated_bias_locking_value =
1816 (((uintptr_t)lockee->klass()->prototype_header() | thread_ident) ^ (uintptr_t)mark) &
1817 ~((uintptr_t) markOopDesc::age_mask_in_place);
1818
1819 if (anticipated_bias_locking_value == 0) {
1820 // already biased towards this thread, nothing to do
1821 if (PrintBiasedLockingStatistics) {
1822 (* BiasedLocking::biased_lock_entry_count_addr())++;
1823 }
1824 success = true;
1825 }
1826 else if ((anticipated_bias_locking_value & markOopDesc::biased_lock_mask_in_place) != 0) {
1827 // try revoke bias
1828 markOop header = lockee->klass()->prototype_header();
1829 if (hash != markOopDesc::no_hash) {
1830 header = header->copy_set_hash(hash);
1831 }
1832 if (Atomic::cmpxchg_ptr(header, lockee->mark_addr(), mark) == mark) {
1833 if (PrintBiasedLockingStatistics)
1834 (*BiasedLocking::revoked_lock_entry_count_addr())++;
1835 }
1836 }
1837 else if ((anticipated_bias_locking_value & epoch_mask_in_place) !=0) {
1838 // try rebias
1839 markOop new_header = (markOop) ( (intptr_t) lockee->klass()->prototype_header() | thread_ident);
1840 if (hash != markOopDesc::no_hash) {
1841 new_header = new_header->copy_set_hash(hash);
1842 }
1843 if (Atomic::cmpxchg_ptr((void*)new_header, lockee->mark_addr(), mark) == mark) {
1844 if (PrintBiasedLockingStatistics)
1845 (* BiasedLocking::rebiased_lock_entry_count_addr())++;
1846 }
1847 else {
1848 CALL_VM(InterpreterRuntime::monitorenter(THREAD, entry), handle_exception);
1849 }
1850 success = true;
1851 }
1852 else {
1853 // try to bias towards thread in case object is anonymously biased
1854 markOop header = (markOop) ((uintptr_t) mark & ((uintptr_t)markOopDesc::biased_lock_mask_in_place |
1855 (uintptr_t)markOopDesc::age_mask_in_place |
1856 epoch_mask_in_place));
1857 if (hash != markOopDesc::no_hash) {
1858 header = header->copy_set_hash(hash);
1859 }
1860 markOop new_header = (markOop) ((uintptr_t) header | thread_ident);
1861 // debugging hint
1862 DEBUG_ONLY(entry->lock()->set_displaced_header((markOop) (uintptr_t) 0xdeaddead);)
1863 if (Atomic::cmpxchg_ptr((void*)new_header, lockee->mark_addr(), header) == header) {
1864 if (PrintBiasedLockingStatistics)
1865 (* BiasedLocking::anonymously_biased_lock_entry_count_addr())++;
1866 }
1867 else {
1868 CALL_VM(InterpreterRuntime::monitorenter(THREAD, entry), handle_exception);
1869 }
1870 success = true;
1871 }
1872 }
1873
1874 // traditional lightweight locking
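          // Save the unlocked mark word in the BasicLock's displaced header,
          // then try to CAS the mark to point at the BasicLock. A recursive
          // enter is recorded with a NULL displaced header instead.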
1875 if (!success) {
1876 markOop displaced = lockee->mark()->set_unlocked();
1877 entry->lock()->set_displaced_header(displaced);
1878 bool call_vm = UseHeavyMonitors;
1879 if (call_vm || Atomic::cmpxchg_ptr(entry, lockee->mark_addr(), displaced) != displaced) {
1880 // Is it simple recursive case?
1881 if (!call_vm && THREAD->is_lock_owned((address) displaced->clear_lock_bits())) {
1882 entry->lock()->set_displaced_header(NULL);
1883 } else {
1884 CALL_VM(InterpreterRuntime::monitorenter(THREAD, entry), handle_exception);
1885 }
1886 }
1887 }
1888 UPDATE_PC_AND_TOS_AND_CONTINUE(1, -1);
1889 } else {
1890 istate->set_msg(more_monitors);
1891 UPDATE_PC_AND_RETURN(0); // Re-execute
1892 }
1893 }
1894
1895 CASE(_monitorexit): {
1896 oop lockee = STACK_OBJECT(-1);
1897 CHECK_NULL(lockee);
      // dereferencing lockee ought to provoke an implicit null check
1899 // find our monitor slot
1900 BasicObjectLock* limit = istate->monitor_base();
1901 BasicObjectLock* most_recent = (BasicObjectLock*) istate->stack_base();
1902 while (most_recent != limit ) {
1903 if ((most_recent)->obj() == lockee) {
1904 BasicLock* lock = most_recent->lock();
1905 markOop header = lock->displaced_header();
1906 most_recent->set_obj(NULL);
1907 if (!lockee->mark()->has_bias_pattern()) {
1908 bool call_vm = UseHeavyMonitors;
1909 // If it isn't recursive we either must swap old header or call the runtime
1910 if (header != NULL || call_vm) {
1911 if (call_vm || Atomic::cmpxchg_ptr(header, lockee->mark_addr(), lock) != lock) {
1912 // restore object for the slow case
1913 most_recent->set_obj(lockee);
1914 CALL_VM(InterpreterRuntime::monitorexit(THREAD, most_recent), handle_exception);
1915 }
1916 }
1917 }
1918 UPDATE_PC_AND_TOS_AND_CONTINUE(1, -1);
1919 }
1920 most_recent++;
1921 }
1922 // Need to throw illegal monitor state exception
1923 CALL_VM(InterpreterRuntime::throw_illegal_monitor_state_exception(THREAD), handle_exception);
1924 ShouldNotReachHere();
1925 }
1926
1927 /* All of the non-quick opcodes. */
1928
      /* Set clobbersCpIndex true if the quickened opcode clobbers the
       * constant pool index in the instruction.
       */
1932 CASE(_getfield):
1933 CASE(_getstatic):
1934 {
1935 u2 index;
1936 ConstantPoolCacheEntry* cache;
1937 index = Bytes::get_native_u2(pc+1);
1938
1939 // QQQ Need to make this as inlined as possible. Probably need to
1940 // split all the bytecode cases out so c++ compiler has a chance
1941 // for constant prop to fold everything possible away.
1942
1943 cache = cp->entry_at(index);
1944 if (!cache->is_resolved((Bytecodes::Code)opcode)) {
1945 CALL_VM(InterpreterRuntime::resolve_from_cache(THREAD, (Bytecodes::Code)opcode),
1946 handle_exception);
1947 cache = cp->entry_at(index);
1948 }
1949
1950 if (cache->is_field_entry()) {
1951 #ifdef VM_JVMTI
1952 if (_jvmti_interp_events) {
1953 int *count_addr;
1954 oop obj;
        // Check to see if a field access watch has been set
        // before we take the time to call into the VM.
1957 count_addr = (int *)JvmtiExport::get_field_access_count_addr();
1958 if ( *count_addr > 0 ) {
1959 if ((Bytecodes::Code)opcode == Bytecodes::_getstatic) {
1960 obj = (oop)NULL;
1961 } else {
1962 obj = (oop) STACK_OBJECT(-1);
1963 VERIFY_OOP(obj);
1964 }
1965 CALL_VM(InterpreterRuntime::post_field_access(THREAD,
1966 obj,
1967 cache),
1968 handle_exception);
1969 }
1970 }
1971 #endif /* VM_JVMTI */
1972
1973 oop obj;
1974 if ((Bytecodes::Code)opcode == Bytecodes::_getstatic) {
1975 Klass* k = cache->f1_as_klass();
1976 obj = k->java_mirror();
1977 MORE_STACK(1); // Assume single slot push
1978 } else {
1979 obj = (oop) STACK_OBJECT(-1);
1980 CHECK_NULL(obj);
1981 }
1982
1983 //
1984 // Now store the result on the stack
1985 //
1986 TosState tos_type = cache->flag_state();
1987 int field_offset = cache->f2_as_index();
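          // Volatile reads use the acquire variants of the field accessors.
          // CPUs that are not multiple-copy atomic (e.g. PPC64) additionally
          // need a leading full fence to keep IRIW examples consistent.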
1988 if (cache->is_volatile()) {
1989 if (support_IRIW_for_not_multiple_copy_atomic_cpu) {
1990 OrderAccess::fence();
1991 }
1992 if (tos_type == atos) {
1993 VERIFY_OOP(obj->obj_field_acquire(field_offset));
1994 SET_STACK_OBJECT(obj->obj_field_acquire(field_offset), -1);
1995 } else if (tos_type == itos) {
1996 SET_STACK_INT(obj->int_field_acquire(field_offset), -1);
1997 } else if (tos_type == ltos) {
1998 SET_STACK_LONG(obj->long_field_acquire(field_offset), 0);
1999 MORE_STACK(1);
2000 } else if (tos_type == btos) {
2001 SET_STACK_INT(obj->byte_field_acquire(field_offset), -1);
2002 } else if (tos_type == ctos) {
2003 SET_STACK_INT(obj->char_field_acquire(field_offset), -1);
2004 } else if (tos_type == stos) {
2005 SET_STACK_INT(obj->short_field_acquire(field_offset), -1);
2006 } else if (tos_type == ftos) {
2007 SET_STACK_FLOAT(obj->float_field_acquire(field_offset), -1);
2008 } else {
2009 SET_STACK_DOUBLE(obj->double_field_acquire(field_offset), 0);
2010 MORE_STACK(1);
2011 }
2012 } else {
2013 if (tos_type == atos) {
2014 VERIFY_OOP(obj->obj_field(field_offset));
2015 SET_STACK_OBJECT(obj->obj_field(field_offset), -1);
2016 } else if (tos_type == itos) {
2017 SET_STACK_INT(obj->int_field(field_offset), -1);
2018 } else if (tos_type == ltos) {
2019 SET_STACK_LONG(obj->long_field(field_offset), 0);
2020 MORE_STACK(1);
2021 } else if (tos_type == btos) {
2022 SET_STACK_INT(obj->byte_field(field_offset), -1);
2023 } else if (tos_type == ctos) {
2024 SET_STACK_INT(obj->char_field(field_offset), -1);
2025 } else if (tos_type == stos) {
2026 SET_STACK_INT(obj->short_field(field_offset), -1);
2027 } else if (tos_type == ftos) {
2028 SET_STACK_FLOAT(obj->float_field(field_offset), -1);
2029 } else {
2030 SET_STACK_DOUBLE(obj->double_field(field_offset), 0);
2031 MORE_STACK(1);
2032 }
2033 }
2034
2035 UPDATE_PC_AND_CONTINUE(3);
      } else {
2037 // mostly copied from _invokevirtual and _invokestatic
2038 istate->set_msg(call_method);
2039 Method* callee;
2040 if ((Bytecodes::Code)opcode == Bytecodes::_getstatic) {
2041 callee = cache->f1_as_method();
2042
2043 // Profile call.
2044 BI_PROFILE_UPDATE_CALL();
        } else {
2046 // get receiver
2047 int parms = cache->parameter_size();
2048 // this works but needs a resourcemark and seems to create a vtable on every call:
2049 // Method* callee = rcvr->klass()->vtable()->method_at(cache->f2_as_index());
2050 //
2051 // this fails with an assert
2052 // InstanceKlass* rcvrKlass = InstanceKlass::cast(STACK_OBJECT(-parms)->klass());
2053 // but this works
2054 oop rcvr = STACK_OBJECT(-parms);
2055 VERIFY_OOP(rcvr);
2056 InstanceKlass* rcvrKlass = (InstanceKlass*)rcvr->klass();
2057 /*
2058 Executing this code in java.lang.String:
2059 public String(char value[]) {
2060 this.count = value.length;
2061 this.value = (char[])value.clone();
2062 }
2063 a find on rcvr->klass() reports:
2064 {type array char}{type array class}
2065 - klass: {other class}
2066
            but using InstanceKlass::cast(STACK_OBJECT(-parms)->klass()) causes an assertion failure
2068 because rcvr->klass()->is_instance_klass() == 0
2069 However it seems to have a vtable in the right location. Huh?
2070 Because vtables have the same offset for ArrayKlass and InstanceKlass.
2071 */
2072 callee = (Method*) rcvrKlass->start_of_vtable()[ cache->f2_as_index()];
2073 // Profile virtual call.
2074 BI_PROFILE_UPDATE_VIRTUALCALL(rcvr->klass());
2075 }
2076 istate->set_callee(callee);
2077 istate->set_callee_entry_point(callee->from_interpreted_entry());
2078 #ifdef VM_JVMTI
2079 if (JvmtiExport::can_post_interpreter_events() && THREAD->is_interp_only_mode()) {
2080 istate->set_callee_entry_point(callee->interpreter_entry());
2081 }
2082 #endif /* VM_JVMTI */
2083 istate->set_bcp_advance(3);
2084 UPDATE_PC_AND_RETURN(0); // I'll be back...
2085 }
2086 }
2087 CASE(_putfield):
2088 CASE(_putstatic):
2089 {
2090 u2 index = Bytes::get_native_u2(pc+1);
2091 ConstantPoolCacheEntry* cache = cp->entry_at(index);
2092 if (!cache->is_resolved((Bytecodes::Code)opcode)) {
2093 CALL_VM(InterpreterRuntime::resolve_from_cache(THREAD, (Bytecodes::Code)opcode),
2094 handle_exception);
2095 cache = cp->entry_at(index);
2096 }
2097
2098 #ifdef VM_JVMTI
2099 if (_jvmti_interp_events) {
2100 int *count_addr;
2101 oop obj;
2102 // Check to see if a field modification watch has been set
2103 // before we take the time to call into the VM.
2104 count_addr = (int *)JvmtiExport::get_field_modification_count_addr();
2105 if ( *count_addr > 0 ) {
2106 if ((Bytecodes::Code)opcode == Bytecodes::_putstatic) {
2107 obj = (oop)NULL;
2108 }
2109 else {
2110 if (cache->is_long() || cache->is_double()) {
2111 obj = (oop) STACK_OBJECT(-3);
2112 } else {
2113 obj = (oop) STACK_OBJECT(-2);
2114 }
2115 VERIFY_OOP(obj);
2116 }
2117
2118 CALL_VM(InterpreterRuntime::post_field_modification(THREAD,
2119 obj,
2120 cache,
2121 (jvalue *)STACK_SLOT(-1)),
2122 handle_exception);
2123 }
2124 }
2125 #endif /* VM_JVMTI */
2126
2127 // QQQ Need to make this as inlined as possible. Probably need to split all the bytecode cases
2128 // out so c++ compiler has a chance for constant prop to fold everything possible away.
2129
2130 oop obj;
2131 int count;
2132 TosState tos_type = cache->flag_state();
2133
2134 count = -1;
2135 if (tos_type == ltos || tos_type == dtos) {
2136 --count;
2137 }
2138 if ((Bytecodes::Code)opcode == Bytecodes::_putstatic) {
2139 Klass* k = cache->f1_as_klass();
2140 obj = k->java_mirror();
2141 } else {
2142 --count;
2143 obj = (oop) STACK_OBJECT(count);
2144 CHECK_NULL(obj);
2145 }
2146
2147 //
2148 // Now store the result
2149 //
2150 int field_offset = cache->f2_as_index();
2151 if (cache->is_volatile()) {
2152 if (tos_type == itos) {
2153 obj->release_int_field_put(field_offset, STACK_INT(-1));
2154 } else if (tos_type == atos) {
2155 VERIFY_OOP(STACK_OBJECT(-1));
2156 obj->release_obj_field_put(field_offset, STACK_OBJECT(-1));
2157 } else if (tos_type == btos) {
2158 obj->release_byte_field_put(field_offset, STACK_INT(-1));
2159 } else if (tos_type == ltos) {
2160 obj->release_long_field_put(field_offset, STACK_LONG(-1));
2161 } else if (tos_type == ctos) {
2162 obj->release_char_field_put(field_offset, STACK_INT(-1));
2163 } else if (tos_type == stos) {
2164 obj->release_short_field_put(field_offset, STACK_INT(-1));
2165 } else if (tos_type == ftos) {
2166 obj->release_float_field_put(field_offset, STACK_FLOAT(-1));
2167 } else {
2168 obj->release_double_field_put(field_offset, STACK_DOUBLE(-1));
2169 }
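          // The StoreLoad barrier keeps this volatile store from being
          // reordered with any subsequent volatile load, as required by the
          // Java memory model.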
2170 OrderAccess::storeload();
2171 } else {
2172 if (tos_type == itos) {
2173 obj->int_field_put(field_offset, STACK_INT(-1));
2174 } else if (tos_type == atos) {
2175 VERIFY_OOP(STACK_OBJECT(-1));
2176 obj->obj_field_put(field_offset, STACK_OBJECT(-1));
2177 } else if (tos_type == btos) {
2178 obj->byte_field_put(field_offset, STACK_INT(-1));
2179 } else if (tos_type == ltos) {
2180 obj->long_field_put(field_offset, STACK_LONG(-1));
2181 } else if (tos_type == ctos) {
2182 obj->char_field_put(field_offset, STACK_INT(-1));
2183 } else if (tos_type == stos) {
2184 obj->short_field_put(field_offset, STACK_INT(-1));
2185 } else if (tos_type == ftos) {
2186 obj->float_field_put(field_offset, STACK_FLOAT(-1));
2187 } else {
2188 obj->double_field_put(field_offset, STACK_DOUBLE(-1));
2189 }
2190 }
2191
2192 UPDATE_PC_AND_TOS_AND_CONTINUE(3, count);
2193 }
2194
2195 CASE(_new): {
2196 u2 index = Bytes::get_Java_u2(pc+1);
2197 ConstantPool* constants = istate->method()->constants();
2198 if (!constants->tag_at(index).is_unresolved_klass()) {
2199 // Make sure klass is initialized and doesn't have a finalizer
2200 Klass* entry = constants->slot_at(index).get_klass();
2201 InstanceKlass* ik = InstanceKlass::cast(entry);
2202 if (ik->is_initialized() && ik->can_be_fastpath_allocated() ) {
2203 size_t obj_size = ik->size_helper();
2204 oop result = NULL;
2205 // If the TLAB isn't pre-zeroed then we'll have to do it
2206 bool need_zero = !ZeroTLAB;
2207 if (UseTLAB) {
2208 result = (oop) THREAD->tlab().allocate(obj_size);
2209 }
2210 // Disable non-TLAB-based fast-path, because profiling requires that all
2211 // allocations go through InterpreterRuntime::_new() if THREAD->tlab().allocate
2212 // returns NULL.
2213 #ifndef CC_INTERP_PROFILE
2214 if (result == NULL) {
2215 need_zero = true;
2216 // Try allocate in shared eden
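              // Classic bump-the-pointer allocation: CAS the heap top forward
              // by obj_size; if another thread moved top first, reload and retry.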
2217 retry:
2218 HeapWord* compare_to = *Universe::heap()->top_addr();
2219 HeapWord* new_top = compare_to + obj_size;
2220 if (new_top <= *Universe::heap()->end_addr()) {
2221 if (Atomic::cmpxchg_ptr(new_top, Universe::heap()->top_addr(), compare_to) != compare_to) {
2222 goto retry;
2223 }
2224 result = (oop) compare_to;
2225 }
2226 }
2227 #endif
2228 if (result != NULL) {
            // Initialize the object body (if zeroing is needed) and then the header
2230 if (need_zero ) {
2231 HeapWord* to_zero = (HeapWord*) result + sizeof(oopDesc) / oopSize;
2232 obj_size -= sizeof(oopDesc) / oopSize;
2233 if (obj_size > 0 ) {
2234 memset(to_zero, 0, obj_size * HeapWordSize);
2235 }
2236 }
2237 if (UseBiasedLocking) {
2238 result->set_mark(ik->prototype_header());
2239 } else {
2240 result->set_mark(markOopDesc::prototype());
2241 }
2242 result->set_klass_gap(0);
2243 result->set_klass(ik);
2244 // Must prevent reordering of stores for object initialization
2245 // with stores that publish the new object.
2246 OrderAccess::storestore();
2247 SET_STACK_OBJECT(result, 0);
2248 UPDATE_PC_AND_TOS_AND_CONTINUE(3, 1);
2249 }
2250 }
2251 }
2252 // Slow case allocation
2253 CALL_VM(InterpreterRuntime::_new(THREAD, METHOD->constants(), index),
2254 handle_exception);
2255 // Must prevent reordering of stores for object initialization
2256 // with stores that publish the new object.
2257 OrderAccess::storestore();
2258 SET_STACK_OBJECT(THREAD->vm_result(), 0);
2259 THREAD->set_vm_result(NULL);
2260 UPDATE_PC_AND_TOS_AND_CONTINUE(3, 1);
2261 }
2262 CASE(_anewarray): {
2263 u2 index = Bytes::get_Java_u2(pc+1);
2264 jint size = STACK_INT(-1);
2265 CALL_VM(InterpreterRuntime::anewarray(THREAD, METHOD->constants(), index, size),
2266 handle_exception);
2267 // Must prevent reordering of stores for object initialization
2268 // with stores that publish the new object.
2269 OrderAccess::storestore();
2270 SET_STACK_OBJECT(THREAD->vm_result(), -1);
2271 THREAD->set_vm_result(NULL);
2272 UPDATE_PC_AND_CONTINUE(3);
2273 }
2274 CASE(_multianewarray): {
2275 jint dims = *(pc+3);
2276 jint size = STACK_INT(-1);
2277 // stack grows down, dimensions are up!
2278 jint *dimarray =
2279 (jint*)&topOfStack[dims * Interpreter::stackElementWords+
2280 Interpreter::stackElementWords-1];
      // adjust pointer to start of stack element
2282 CALL_VM(InterpreterRuntime::multianewarray(THREAD, dimarray),
2283 handle_exception);
2284 // Must prevent reordering of stores for object initialization
2285 // with stores that publish the new object.
2286 OrderAccess::storestore();
2287 SET_STACK_OBJECT(THREAD->vm_result(), -dims);
2288 THREAD->set_vm_result(NULL);
2289 UPDATE_PC_AND_TOS_AND_CONTINUE(4, -(dims-1));
2290 }
2291 CASE(_checkcast):
2292 if (STACK_OBJECT(-1) != NULL) {
2293 VERIFY_OOP(STACK_OBJECT(-1));
2294 u2 index = Bytes::get_Java_u2(pc+1);
2295 // Constant pool may have actual klass or unresolved klass. If it is
2296 // unresolved we must resolve it.
2297 if (METHOD->constants()->tag_at(index).is_unresolved_klass()) {
2298 CALL_VM(InterpreterRuntime::quicken_io_cc(THREAD), handle_exception);
2299 }
2300 Klass* klassOf = (Klass*) METHOD->constants()->slot_at(index).get_klass();
2301 Klass* objKlass = STACK_OBJECT(-1)->klass(); // ebx
2302 //
      // Check for compatibility. This check must not GC!!
2304 // Seems way more expensive now that we must dispatch.
2305 //
2306 if (objKlass != klassOf && !objKlass->is_subtype_of(klassOf)) {
2307 // Decrement counter at checkcast.
2308 BI_PROFILE_SUBTYPECHECK_FAILED(objKlass);
2309 ResourceMark rm(THREAD);
2310 const char* objName = objKlass->external_name();
2311 const char* klassName = klassOf->external_name();
2312 char* message = SharedRuntime::generate_class_cast_message(
2313 objName, klassName);
2314 VM_JAVA_ERROR(vmSymbols::java_lang_ClassCastException(), message, note_classCheck_trap);
2315 }
2316 // Profile checkcast with null_seen and receiver.
2317 BI_PROFILE_UPDATE_CHECKCAST(/*null_seen=*/false, objKlass);
2318 } else {
2319 // Profile checkcast with null_seen and receiver.
2320 BI_PROFILE_UPDATE_CHECKCAST(/*null_seen=*/true, NULL);
2321 }
2322 UPDATE_PC_AND_CONTINUE(3);
2323
2324 CASE(_instanceof):
2325 if (STACK_OBJECT(-1) == NULL) {
2326 SET_STACK_INT(0, -1);
2327 // Profile instanceof with null_seen and receiver.
2328 BI_PROFILE_UPDATE_INSTANCEOF(/*null_seen=*/true, NULL);
2329 } else {
2330 VERIFY_OOP(STACK_OBJECT(-1));
2331 u2 index = Bytes::get_Java_u2(pc+1);
2332 // Constant pool may have actual klass or unresolved klass. If it is
2333 // unresolved we must resolve it.
2334 if (METHOD->constants()->tag_at(index).is_unresolved_klass()) {
2335 CALL_VM(InterpreterRuntime::quicken_io_cc(THREAD), handle_exception);
2336 }
2337 Klass* klassOf = (Klass*) METHOD->constants()->slot_at(index).get_klass();
2338 Klass* objKlass = STACK_OBJECT(-1)->klass();
2339 //
        // Check for compatibility. This check must not GC!!
2341 // Seems way more expensive now that we must dispatch.
2342 //
2343 if ( objKlass == klassOf || objKlass->is_subtype_of(klassOf)) {
2344 SET_STACK_INT(1, -1);
2345 } else {
2346 SET_STACK_INT(0, -1);
2347 // Decrement counter at checkcast.
2348 BI_PROFILE_SUBTYPECHECK_FAILED(objKlass);
2349 }
2350 // Profile instanceof with null_seen and receiver.
2351 BI_PROFILE_UPDATE_INSTANCEOF(/*null_seen=*/false, objKlass);
2352 }
2353 UPDATE_PC_AND_CONTINUE(3);
2354
2355 CASE(_ldc_w):
2356 CASE(_ldc):
2357 {
2358 u2 index;
2359 bool wide = false;
2360 int incr = 2; // frequent case
2361 if (opcode == Bytecodes::_ldc) {
2362 index = pc[1];
2363 } else {
2364 index = Bytes::get_Java_u2(pc+1);
2365 incr = 3;
2366 wide = true;
2367 }
2368
2369 ConstantPool* constants = METHOD->constants();
2370 switch (constants->tag_at(index).value()) {
2371 case JVM_CONSTANT_Integer:
2372 SET_STACK_INT(constants->int_at(index), 0);
2373 break;
2374
2375 case JVM_CONSTANT_Float:
2376 SET_STACK_FLOAT(constants->float_at(index), 0);
2377 break;
2378
2379 case JVM_CONSTANT_String:
2380 {
2381 oop result = constants->resolved_references()->obj_at(index);
2382 if (result == NULL) {
2383 CALL_VM(InterpreterRuntime::resolve_ldc(THREAD, (Bytecodes::Code) opcode), handle_exception);
2384 SET_STACK_OBJECT(THREAD->vm_result(), 0);
2385 THREAD->set_vm_result(NULL);
2386 } else {
2387 VERIFY_OOP(result);
2388 SET_STACK_OBJECT(result, 0);
2389 }
2390 break;
2391 }
2392
2393 case JVM_CONSTANT_Class:
2394 VERIFY_OOP(constants->resolved_klass_at(index)->java_mirror());
2395 SET_STACK_OBJECT(constants->resolved_klass_at(index)->java_mirror(), 0);
2396 break;
2397
2398 case JVM_CONSTANT_UnresolvedClass:
2399 case JVM_CONSTANT_UnresolvedClassInError:
2400 CALL_VM(InterpreterRuntime::ldc(THREAD, wide), handle_exception);
2401 SET_STACK_OBJECT(THREAD->vm_result(), 0);
2402 THREAD->set_vm_result(NULL);
2403 break;
2404
2405 default: ShouldNotReachHere();
2406 }
2407 UPDATE_PC_AND_TOS_AND_CONTINUE(incr, 1);
2408 }
2409
2410 CASE(_ldc2_w):
2411 {
2412 u2 index = Bytes::get_Java_u2(pc+1);
2413
2414 ConstantPool* constants = METHOD->constants();
2415 switch (constants->tag_at(index).value()) {
2416
2417 case JVM_CONSTANT_Long:
2418 SET_STACK_LONG(constants->long_at(index), 1);
2419 break;
2420
2421 case JVM_CONSTANT_Double:
2422 SET_STACK_DOUBLE(constants->double_at(index), 1);
2423 break;
2424 default: ShouldNotReachHere();
2425 }
2426 UPDATE_PC_AND_TOS_AND_CONTINUE(3, 2);
2427 }
2428
2429 CASE(_fast_aldc_w):
2430 CASE(_fast_aldc): {
2431 u2 index;
2432 int incr;
2433 if (opcode == Bytecodes::_fast_aldc) {
2434 index = pc[1];
2435 incr = 2;
2436 } else {
2437 index = Bytes::get_native_u2(pc+1);
2438 incr = 3;
2439 }
2440
      // We are resolved if the resolved_references entry contains a non-null object (String, MethodType, etc.)
2442 // This kind of CP cache entry does not need to match the flags byte, because
2443 // there is a 1-1 relation between bytecode type and CP entry type.
2444 ConstantPool* constants = METHOD->constants();
2445 oop result = constants->resolved_references()->obj_at(index);
2446 if (result == NULL) {
2447 CALL_VM(InterpreterRuntime::resolve_ldc(THREAD, (Bytecodes::Code) opcode),
2448 handle_exception);
2449 result = THREAD->vm_result();
2450 }
2451
2452 VERIFY_OOP(result);
2453 SET_STACK_OBJECT(result, 0);
2454 UPDATE_PC_AND_TOS_AND_CONTINUE(incr, 1);
2455 }
2456
2457 CASE(_invokedynamic): {
2458
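        // The operand is a 4-byte constant pool cache index in native byte
        // order (the bytecode has been rewritten). Resolution may attach an
        // appendix argument (e.g. a MethodHandle or CallSite), which is
        // pushed on the stack as an extra trailing parameter below.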
2459 u4 index = Bytes::get_native_u4(pc+1);
2460 ConstantPoolCacheEntry* cache = cp->constant_pool()->invokedynamic_cp_cache_entry_at(index);
2461
2462 // We are resolved if the resolved_references field contains a non-null object (CallSite, etc.)
2463 // This kind of CP cache entry does not need to match the flags byte, because
2464 // there is a 1-1 relation between bytecode type and CP entry type.
2465 if (! cache->is_resolved((Bytecodes::Code) opcode)) {
2466 CALL_VM(InterpreterRuntime::resolve_from_cache(THREAD, (Bytecodes::Code)opcode),
2467 handle_exception);
2468 cache = cp->constant_pool()->invokedynamic_cp_cache_entry_at(index);
2469 }
2470
2471 Method* method = cache->f1_as_method();
2472 if (VerifyOops) method->verify();
2473
2474 if (cache->has_appendix()) {
2475 ConstantPool* constants = METHOD->constants();
2476 SET_STACK_OBJECT(cache->appendix_if_resolved(constants), 0);
2477 MORE_STACK(1);
2478 }
2479
2480 istate->set_msg(call_method);
2481 istate->set_callee(method);
2482 istate->set_callee_entry_point(method->from_interpreted_entry());
2483 istate->set_bcp_advance(5);
2484
2485 // Invokedynamic has got a call counter, just like an invokestatic -> increment!
2486 BI_PROFILE_UPDATE_CALL();
2487
2488 UPDATE_PC_AND_RETURN(0); // I'll be back...
2489 }
2490
2491 CASE(_invokehandle): {
2492
2493 u2 index = Bytes::get_native_u2(pc+1);
2494 ConstantPoolCacheEntry* cache = cp->entry_at(index);
2495
2496 if (! cache->is_resolved((Bytecodes::Code) opcode)) {
2497 CALL_VM(InterpreterRuntime::resolve_from_cache(THREAD, (Bytecodes::Code)opcode),
2498 handle_exception);
2499 cache = cp->entry_at(index);
2500 }
2501
2502 Method* method = cache->f1_as_method();
2503 if (VerifyOops) method->verify();
2504
2505 if (cache->has_appendix()) {
2506 ConstantPool* constants = METHOD->constants();
2507 SET_STACK_OBJECT(cache->appendix_if_resolved(constants), 0);
2508 MORE_STACK(1);
2509 }
2510
2511 istate->set_msg(call_method);
2512 istate->set_callee(method);
2513 istate->set_callee_entry_point(method->from_interpreted_entry());
2514 istate->set_bcp_advance(3);
2515
2516 // Invokehandle has got a call counter, just like a final call -> increment!
2517 BI_PROFILE_UPDATE_FINALCALL();
2518
2519 UPDATE_PC_AND_RETURN(0); // I'll be back...
2520 }
2521
2522 CASE(_invokeinterface): {
2523 u2 index = Bytes::get_native_u2(pc+1);
2524
2525 // QQQ Need to make this as inlined as possible. Probably need to split all the bytecode cases
2526 // out so c++ compiler has a chance for constant prop to fold everything possible away.
2527
2528 ConstantPoolCacheEntry* cache = cp->entry_at(index);
2529 if (!cache->is_resolved((Bytecodes::Code)opcode)) {
2530 CALL_VM(InterpreterRuntime::resolve_from_cache(THREAD, (Bytecodes::Code)opcode),
2531 handle_exception);
2532 cache = cp->entry_at(index);
2533 }
2534
2535 istate->set_msg(call_method);
2536
2537 // Special case of invokeinterface called for virtual method of
2538 // java.lang.Object. See cpCacheOop.cpp for details.
2539 // This code isn't produced by javac, but could be produced by
2540 // another compliant java compiler.
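      // For example, for
      //   Comparable c = ...; int h = c.hashCode();
      // a compiler may emit invokeinterface for hashCode() even though it is
      // a virtual method inherited from java.lang.Object.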
2541 if (cache->is_forced_virtual()) {
2542 Method* callee;
2543 CHECK_NULL(STACK_OBJECT(-(cache->parameter_size())));
2544 if (cache->is_vfinal()) {
2545 callee = cache->f2_as_vfinal_method();
2546 // Profile 'special case of invokeinterface' final call.
2547 BI_PROFILE_UPDATE_FINALCALL();
2548 } else {
2549 // Get receiver.
2550 int parms = cache->parameter_size();
2551 // Same comments as invokevirtual apply here.
2552 oop rcvr = STACK_OBJECT(-parms);
2553 VERIFY_OOP(rcvr);
2554 InstanceKlass* rcvrKlass = (InstanceKlass*)rcvr->klass();
2555 callee = (Method*) rcvrKlass->start_of_vtable()[ cache->f2_as_index()];
2556 // Profile 'special case of invokeinterface' virtual call.
2557 BI_PROFILE_UPDATE_VIRTUALCALL(rcvr->klass());
2558 }
2559 istate->set_callee(callee);
2560 istate->set_callee_entry_point(callee->from_interpreted_entry());
2561 #ifdef VM_JVMTI
2562 if (JvmtiExport::can_post_interpreter_events() && THREAD->is_interp_only_mode()) {
2563 istate->set_callee_entry_point(callee->interpreter_entry());
2564 }
2565 #endif /* VM_JVMTI */
2566 istate->set_bcp_advance(5);
2567 UPDATE_PC_AND_RETURN(0); // I'll be back...
2568 }
2569
2570 // this could definitely be cleaned up QQQ
2571 Method* callee;
2572 Klass* iclass = cache->f1_as_klass();
2573 // InstanceKlass* interface = (InstanceKlass*) iclass;
2574 // get receiver
2575 int parms = cache->parameter_size();
2576 oop rcvr = STACK_OBJECT(-parms);
2577 CHECK_NULL(rcvr);
2578 InstanceKlass* int2 = (InstanceKlass*) rcvr->klass();
2579 itableOffsetEntry* ki = (itableOffsetEntry*) int2->start_of_itable();
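      // Each itableOffsetEntry pairs an implemented interface with the offset
      // of that interface's method table within the receiver's itable; scan
      // the entries for the interface named by the cache entry.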
2580 int i;
2581 for ( i = 0 ; i < int2->itable_length() ; i++, ki++ ) {
2582 if (ki->interface_klass() == iclass) break;
2583 }
2584 // If the interface isn't found, this class doesn't implement this
2585 // interface. The link resolver checks this but only for the first
2586 // time this interface is called.
2587 if (i == int2->itable_length()) {
2588 VM_JAVA_ERROR(vmSymbols::java_lang_IncompatibleClassChangeError(), "", note_no_trap);
2589 }
2590 int mindex = cache->f2_as_index();
2591 itableMethodEntry* im = ki->first_method_entry(rcvr->klass());
2592 callee = im[mindex].method();
2593 if (callee == NULL) {
2594 VM_JAVA_ERROR(vmSymbols::java_lang_AbstractMethodError(), "", note_no_trap);
2595 }
2596
2597 // Profile virtual call.
2598 BI_PROFILE_UPDATE_VIRTUALCALL(rcvr->klass());
2599
2600 istate->set_callee(callee);
2601 istate->set_callee_entry_point(callee->from_interpreted_entry());
2602 #ifdef VM_JVMTI
2603 if (JvmtiExport::can_post_interpreter_events() && THREAD->is_interp_only_mode()) {
2604 istate->set_callee_entry_point(callee->interpreter_entry());
2605 }
2606 #endif /* VM_JVMTI */
2607 istate->set_bcp_advance(5);
2608 UPDATE_PC_AND_RETURN(0); // I'll be back...
2609 }
2610
2611 CASE(_invokevirtual):
2612 CASE(_invokespecial):
2613 CASE(_invokestatic): {
2614 u2 index = Bytes::get_native_u2(pc+1);
2615
2616 ConstantPoolCacheEntry* cache = cp->entry_at(index);
2617 // QQQ Need to make this as inlined as possible. Probably need to split all the bytecode cases
2618 // out so c++ compiler has a chance for constant prop to fold everything possible away.
2619
2620 if (!cache->is_resolved((Bytecodes::Code)opcode)) {
2621 CALL_VM(InterpreterRuntime::resolve_from_cache(THREAD, (Bytecodes::Code)opcode),
2622 handle_exception);
2623 cache = cp->entry_at(index);
2624 }
2625
2626 istate->set_msg(call_method);
2627 {
2628 Method* callee;
2629 if ((Bytecodes::Code)opcode == Bytecodes::_invokevirtual) {
2630 CHECK_NULL(STACK_OBJECT(-(cache->parameter_size())));
2631 if (cache->is_vfinal()) {
2632 callee = cache->f2_as_vfinal_method();
2633 // Profile final call.
2634 BI_PROFILE_UPDATE_FINALCALL();
2635 } else {
2636 // get receiver
2637 int parms = cache->parameter_size();
2638 // this works but needs a resourcemark and seems to create a vtable on every call:
2639 // Method* callee = rcvr->klass()->vtable()->method_at(cache->f2_as_index());
2640 //
2641 // this fails with an assert
2642 // InstanceKlass* rcvrKlass = InstanceKlass::cast(STACK_OBJECT(-parms)->klass());
2643 // but this works
2644 oop rcvr = STACK_OBJECT(-parms);
2645 VERIFY_OOP(rcvr);
2646 InstanceKlass* rcvrKlass = (InstanceKlass*)rcvr->klass();
2647 /*
2648 Executing this code in java.lang.String:
2649 public String(char value[]) {
2650 this.count = value.length;
2651 this.value = (char[])value.clone();
2652 }
2653
2654 a find on rcvr->klass() reports:
2655 {type array char}{type array class}
2656 - klass: {other class}
2657
          but using InstanceKlass::cast(STACK_OBJECT(-parms)->klass()) causes an assertion failure
2659 because rcvr->klass()->is_instance_klass() == 0
2660 However it seems to have a vtable in the right location. Huh?
2661 Because vtables have the same offset for ArrayKlass and InstanceKlass.
2662 */
2663 callee = (Method*) rcvrKlass->start_of_vtable()[ cache->f2_as_index()];
2664 // Profile virtual call.
2665 BI_PROFILE_UPDATE_VIRTUALCALL(rcvr->klass());
2666 }
2667 } else {
2668 if ((Bytecodes::Code)opcode == Bytecodes::_invokespecial) {
2669 CHECK_NULL(STACK_OBJECT(-(cache->parameter_size())));
2670 }
2671 callee = cache->f1_as_method();
2672
2673 // Profile call.
2674 BI_PROFILE_UPDATE_CALL();
2675 }
2676
2677 istate->set_callee(callee);
2678 istate->set_callee_entry_point(callee->from_interpreted_entry());
2679 #ifdef VM_JVMTI
2680 if (JvmtiExport::can_post_interpreter_events() && THREAD->is_interp_only_mode()) {
2681 istate->set_callee_entry_point(callee->interpreter_entry());
2682 }
2683 #endif /* VM_JVMTI */
2684 istate->set_bcp_advance(3);
2685 UPDATE_PC_AND_RETURN(0); // I'll be back...
2686 }
2687 }
2688
2689 /* Allocate memory for a new java object. */
2690
2691 CASE(_newarray): {
2692 BasicType atype = (BasicType) *(pc+1);
2693 jint size = STACK_INT(-1);
2694 CALL_VM(InterpreterRuntime::newarray(THREAD, atype, size),
2695 handle_exception);
2696 // Must prevent reordering of stores for object initialization
2697 // with stores that publish the new object.
2698 OrderAccess::storestore();
2699 SET_STACK_OBJECT(THREAD->vm_result(), -1);
2700 THREAD->set_vm_result(NULL);
2701
2702 UPDATE_PC_AND_CONTINUE(2);
2703 }
2704
2705 /* Throw an exception. */
2706
2707 CASE(_athrow): {
2708 oop except_oop = STACK_OBJECT(-1);
2709 CHECK_NULL(except_oop);
2710 // set pending_exception so we use common code
2711 THREAD->set_pending_exception(except_oop, NULL, 0);
2712 goto handle_exception;
2713 }
2714
2715 /* goto and jsr. They are exactly the same except jsr pushes
2716 * the address of the next instruction first.
2717 */
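      /* Historically javac compiled try-finally into jsr/ret subroutines;
       * modern compilers inline the finally block instead, and class files of
       * version 51.0 or newer may not use these opcodes at all.
       */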
2718
2719 CASE(_jsr): {
2720 /* push bytecode index on stack */
2721 SET_STACK_ADDR(((address)pc - (intptr_t)(istate->method()->code_base()) + 3), 0);
2722 MORE_STACK(1);
2723 /* FALL THROUGH */
2724 }
2725
2726 CASE(_goto):
2727 {
2728 int16_t offset = (int16_t)Bytes::get_Java_u2(pc + 1);
2729 // Profile jump.
2730 BI_PROFILE_UPDATE_JUMP();
2731 address branch_pc = pc;
2732 UPDATE_PC(offset);
2733 DO_BACKEDGE_CHECKS(offset, branch_pc);
2734 CONTINUE;
2735 }
2736
2737 CASE(_jsr_w): {
2738 /* push return address on the stack */
2739 SET_STACK_ADDR(((address)pc - (intptr_t)(istate->method()->code_base()) + 5), 0);
2740 MORE_STACK(1);
2741 /* FALL THROUGH */
2742 }
2743
2744 CASE(_goto_w):
2745 {
2746 int32_t offset = Bytes::get_Java_u4(pc + 1);
2747 // Profile jump.
2748 BI_PROFILE_UPDATE_JUMP();
2749 address branch_pc = pc;
2750 UPDATE_PC(offset);
2751 DO_BACKEDGE_CHECKS(offset, branch_pc);
2752 CONTINUE;
2753 }
2754
2755 /* return from a jsr or jsr_w */
2756
2757 CASE(_ret): {
2758 // Profile ret.
2759 BI_PROFILE_UPDATE_RET(/*bci=*/((int)(intptr_t)(LOCALS_ADDR(pc[1]))));
2760 // Now, update the pc.
2761 pc = istate->method()->code_base() + (intptr_t)(LOCALS_ADDR(pc[1]));
2762 UPDATE_PC_AND_CONTINUE(0);
2763 }
2764
2765 /* debugger breakpoint */
2766
2767 CASE(_breakpoint): {
2768 Bytecodes::Code original_bytecode;
2769 DECACHE_STATE();
2770 SET_LAST_JAVA_FRAME();
2771 original_bytecode = InterpreterRuntime::get_original_bytecode_at(THREAD,
2772 METHOD, pc);
2773 RESET_LAST_JAVA_FRAME();
2774 CACHE_STATE();
2775 if (THREAD->has_pending_exception()) goto handle_exception;
2776 CALL_VM(InterpreterRuntime::_breakpoint(THREAD, METHOD, pc),
2777 handle_exception);
2778
2779 opcode = (jubyte)original_bytecode;
2780 goto opcode_switch;
2781 }
2782
2783 DEFAULT:
2784 fatal("Unimplemented opcode %d = %s", opcode,
2785 Bytecodes::name((Bytecodes::Code)opcode));
2786 goto finish;
2787
2788 } /* switch(opc) */
2789
2790
2791 #ifdef USELABELS
2792 check_for_exception:
2793 #endif
2794 {
2795 if (!THREAD->has_pending_exception()) {
2796 CONTINUE;
2797 }
2798 /* We will be gcsafe soon, so flush our state. */
2799 DECACHE_PC();
2800 goto handle_exception;
2801 }
2802 do_continue: ;
2803
2804 } /* while (1) interpreter loop */
2805
2806
  // An exception exists in the thread state; see whether this activation can handle it.
2808 handle_exception: {
2809
2810 HandleMarkCleaner __hmc(THREAD);
2811 Handle except_oop(THREAD, THREAD->pending_exception());
2812 // Prevent any subsequent HandleMarkCleaner in the VM
2813 // from freeing the except_oop handle.
2814 HandleMark __hm(THREAD);
2815
2816 THREAD->clear_pending_exception();
2817 assert(except_oop(), "No exception to process");
2818 intptr_t continuation_bci;
2819 // expression stack is emptied
2820 topOfStack = istate->stack_base() - Interpreter::stackElementWords;
2821 CALL_VM(continuation_bci = (intptr_t)InterpreterRuntime::exception_handler_for_exception(THREAD, except_oop()),
2822 handle_exception);
2823
2824 except_oop = THREAD->vm_result();
2825 THREAD->set_vm_result(NULL);
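      // A non-negative continuation_bci means a handler for this exception
      // was found in the current method; otherwise unwind this activation
      // and rethrow in the caller.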
2826 if (continuation_bci >= 0) {
2827 // Place exception on top of stack
2828 SET_STACK_OBJECT(except_oop(), 0);
2829 MORE_STACK(1);
2830 pc = METHOD->code_base() + continuation_bci;
2831 if (TraceExceptions) {
2832 ttyLocker ttyl;
2833 ResourceMark rm;
2834 tty->print_cr("Exception <%s> (" INTPTR_FORMAT ")", except_oop->print_value_string(), p2i(except_oop()));
2835 tty->print_cr(" thrown in interpreter method <%s>", METHOD->print_value_string());
2836 tty->print_cr(" at bci %d, continuing at %d for thread " INTPTR_FORMAT,
2837 (int)(istate->bcp() - METHOD->code_base()),
2838 (int)continuation_bci, p2i(THREAD));
2839 }
2840 // for AbortVMOnException flag
2841 Exceptions::debug_check_abort(except_oop);
2842
2843 // Update profiling data.
2844 BI_PROFILE_ALIGN_TO_CURRENT_BCI();
2845 goto run;
2846 }
2847 if (TraceExceptions) {
2848 ttyLocker ttyl;
2849 ResourceMark rm;
2850 tty->print_cr("Exception <%s> (" INTPTR_FORMAT ")", except_oop->print_value_string(), p2i(except_oop()));
2851 tty->print_cr(" thrown in interpreter method <%s>", METHOD->print_value_string());
2852 tty->print_cr(" at bci %d, unwinding for thread " INTPTR_FORMAT,
2853 (int)(istate->bcp() - METHOD->code_base()),
2854 p2i(THREAD));
2855 }
2856 // for AbortVMOnException flag
2857 Exceptions::debug_check_abort(except_oop);
2858
2859 // No handler in this activation, unwind and try again
2860 THREAD->set_pending_exception(except_oop(), NULL, 0);
2861 goto handle_return;
2862 } // handle_exception:
2863
2864 // Return from an interpreter invocation with the result of the interpretation
2865 // on the top of the Java Stack (or a pending exception)
2866
2867 handle_Pop_Frame: {
2868
2869 // We don't really do anything special here except we must be aware
2870 // that we can get here without ever locking the method (if sync).
2871 // Also we skip the notification of the exit.
2872
2873 istate->set_msg(popping_frame);
  // Clear pending so that while the pop is in process
  // we don't start another one if a call_vm is done.
2876 THREAD->clr_pop_frame_pending();
  // Let the interpreter (only) see that we're in the process of popping a frame
2878 THREAD->set_pop_frame_in_process();
2879
2880 goto handle_return;
2881
2882 } // handle_Pop_Frame
2883
2884 // ForceEarlyReturn ends a method, and returns to the caller with a return value
2885 // given by the invoker of the early return.
2886 handle_Early_Return: {
2887
2888 istate->set_msg(early_return);
2889
2890 // Clear expression stack.
2891 topOfStack = istate->stack_base() - Interpreter::stackElementWords;
2892
2893 JvmtiThreadState *ts = THREAD->jvmti_thread_state();
2894
2895 // Push the value to be returned.
2896 switch (istate->method()->result_type()) {
2897 case T_BOOLEAN:
2898 case T_SHORT:
2899 case T_BYTE:
2900 case T_CHAR:
2901 case T_INT:
2902 SET_STACK_INT(ts->earlyret_value().i, 0);
2903 MORE_STACK(1);
2904 break;
2905 case T_LONG:
2906 SET_STACK_LONG(ts->earlyret_value().j, 1);
2907 MORE_STACK(2);
2908 break;
2909 case T_FLOAT:
2910 SET_STACK_FLOAT(ts->earlyret_value().f, 0);
2911 MORE_STACK(1);
2912 break;
2913 case T_DOUBLE:
2914 SET_STACK_DOUBLE(ts->earlyret_value().d, 1);
2915 MORE_STACK(2);
2916 break;
2917 case T_ARRAY:
2918 case T_OBJECT:
2919 SET_STACK_OBJECT(ts->earlyret_oop(), 0);
2920 MORE_STACK(1);
2921 break;
2922 }
2923
2924 ts->clr_earlyret_value();
2925 ts->set_earlyret_oop(NULL);
2926 ts->clr_earlyret_pending();
2927
2928 // Fall through to handle_return.
2929
2930 } // handle_Early_Return
2931
2932 handle_return: {
2933 // A storestore barrier is required to order initialization of
2934 // final fields with publishing the reference to the object that
2935 // holds the field. Without the barrier the value of final fields
2936 // can be observed to change.
2937 OrderAccess::storestore();
2938
2939 DECACHE_STATE();
2940
2941 bool suppress_error = istate->msg() == popping_frame || istate->msg() == early_return;
2942 bool suppress_exit_event = THREAD->has_pending_exception() || istate->msg() == popping_frame;
2943 Handle original_exception(THREAD, THREAD->pending_exception());
2944 Handle illegal_state_oop(THREAD, NULL);
2945
2946 // We'd like a HandleMark here to prevent any subsequent HandleMarkCleaner
2947 // in any following VM entries from freeing our live handles, but illegal_state_oop
2948 // isn't really allocated yet and so doesn't become live until later and
  // in unpredictable places. Instead we must protect the places where we enter the
  // VM. It would be much simpler (and safer) if we could allocate a real handle with
  // a NULL oop in it and then overwrite the oop later as needed. Unfortunately,
  // this isn't possible.
2953
2954 THREAD->clear_pending_exception();
2955
2956 //
  // As far as we are concerned we have returned. If we have a pending exception,
  // it will be returned as this invocation's result. However if we get any
2959 // exception(s) while checking monitor state one of those IllegalMonitorStateExceptions
2960 // will be our final result (i.e. monitor exception trumps a pending exception).
2961 //
2962
2963 // If we never locked the method (or really passed the point where we would have),
2964 // there is no need to unlock it (or look for other monitors), since that
2965 // could not have happened.
2966
2967 if (THREAD->do_not_unlock()) {
2968
2969 // Never locked, reset the flag now because obviously any caller must
2970 // have passed their point of locking for us to have gotten here.
2971
2972 THREAD->clr_do_not_unlock();
2973 } else {
2974 // At this point we consider that we have returned. We now check that the
2975 // locks were properly block structured. If we find that they were not
2976 // used properly we will return with an illegal monitor exception.
2977 // The exception is checked by the caller not the callee since this
2978 // checking is considered to be part of the invocation and therefore
2979 // in the callers scope (JVM spec 8.13).
2980 //
2981 // Another weird thing to watch for is if the method was locked
2982 // recursively and then not exited properly. This means we must
    // examine all the entries in reverse time (and stack) order and
2984 // unlock as we find them. If we find the method monitor before
2985 // we are at the initial entry then we should throw an exception.
    // It is not clear that the template-based interpreter does this
    // correctly.
2988
2989 BasicObjectLock* base = istate->monitor_base();
2990 BasicObjectLock* end = (BasicObjectLock*) istate->stack_base();
2991 bool method_unlock_needed = METHOD->is_synchronized();
    // We know the initial monitor was used for the method, so don't check that
    // slot in the loop.
2994 if (method_unlock_needed) base--;
2995
    // Check all the monitors to see that they are unlocked. Install an exception if one is found to be locked.
2997 while (end < base) {
2998 oop lockee = end->obj();
2999 if (lockee != NULL) {
3000 BasicLock* lock = end->lock();
3001 markOop header = lock->displaced_header();
3002 end->set_obj(NULL);
3003
3004 if (!lockee->mark()->has_bias_pattern()) {
3005 // If it isn't recursive we either must swap old header or call the runtime
3006 if (header != NULL) {
3007 if (Atomic::cmpxchg_ptr(header, lockee->mark_addr(), lock) != lock) {
3008 // restore object for the slow case
3009 end->set_obj(lockee);
3010 {
3011 // Prevent any HandleMarkCleaner from freeing our live handles
3012 HandleMark __hm(THREAD);
3013 CALL_VM_NOCHECK(InterpreterRuntime::monitorexit(THREAD, end));
3014 }
3015 }
3016 }
3017 }
3018 // One error is plenty
3019 if (illegal_state_oop() == NULL && !suppress_error) {
3020 {
3021 // Prevent any HandleMarkCleaner from freeing our live handles
3022 HandleMark __hm(THREAD);
3023 CALL_VM_NOCHECK(InterpreterRuntime::throw_illegal_monitor_state_exception(THREAD));
3024 }
3025 assert(THREAD->has_pending_exception(), "Lost our exception!");
3026 illegal_state_oop = THREAD->pending_exception();
3027 THREAD->clear_pending_exception();
3028 }
3029 }
3030 end++;
3031 }
3032 // Unlock the method if needed
3033 if (method_unlock_needed) {
3034 if (base->obj() == NULL) {
        // The method is already unlocked; this is not good.
3036 if (illegal_state_oop() == NULL && !suppress_error) {
3037 {
3038 // Prevent any HandleMarkCleaner from freeing our live handles
3039 HandleMark __hm(THREAD);
3040 CALL_VM_NOCHECK(InterpreterRuntime::throw_illegal_monitor_state_exception(THREAD));
3041 }
3042 assert(THREAD->has_pending_exception(), "Lost our exception!");
3043 illegal_state_oop = THREAD->pending_exception();
3044 THREAD->clear_pending_exception();
3045 }
3046 } else {
3047 //
        // The initial monitor is always used for the method.
        // However, if that slot no longer holds the oop for the method, the method
        // was unlocked and the slot reused by something that wasn't unlocked!
3051 //
3052 // deopt can come in with rcvr dead because c2 knows
3053 // its value is preserved in the monitor. So we can't use locals[0] at all
3054 // and must use first monitor slot.
3055 //
3056 oop rcvr = base->obj();
3057 if (rcvr == NULL) {
3058 if (!suppress_error) {
3059 VM_JAVA_ERROR_NO_JUMP(vmSymbols::java_lang_NullPointerException(), "", note_nullCheck_trap);
3060 illegal_state_oop = THREAD->pending_exception();
3061 THREAD->clear_pending_exception();
3062 }
3063 } else if (UseHeavyMonitors) {
3064 {
3065 // Prevent any HandleMarkCleaner from freeing our live handles.
3066 HandleMark __hm(THREAD);
3067 CALL_VM_NOCHECK(InterpreterRuntime::monitorexit(THREAD, base));
3068 }
3069 if (THREAD->has_pending_exception()) {
3070 if (!suppress_error) illegal_state_oop = THREAD->pending_exception();
3071 THREAD->clear_pending_exception();
3072 }
3073 } else {
3074 BasicLock* lock = base->lock();
3075 markOop header = lock->displaced_header();
3076 base->set_obj(NULL);
3077
3078 if (!rcvr->mark()->has_bias_pattern()) {
3079 base->set_obj(NULL);
3080 // If it isn't recursive we either must swap old header or call the runtime
3081 if (header != NULL) {
3082 if (Atomic::cmpxchg_ptr(header, rcvr->mark_addr(), lock) != lock) {
3083 // restore object for the slow case
3084 base->set_obj(rcvr);
3085 {
3086 // Prevent any HandleMarkCleaner from freeing our live handles
3087 HandleMark __hm(THREAD);
3088 CALL_VM_NOCHECK(InterpreterRuntime::monitorexit(THREAD, base));
3089 }
3090 if (THREAD->has_pending_exception()) {
3091 if (!suppress_error) illegal_state_oop = THREAD->pending_exception();
3092 THREAD->clear_pending_exception();
3093 }
3094 }
3095 }
3096 }
3097 }
3098 }
3099 }
3100 }
3101 // Clear the do_not_unlock flag now.
3102 THREAD->clr_do_not_unlock();
3103
3104 //
3105 // Notify jvmti/jvmdi
3106 //
3107 // NOTE: we do not notify a method_exit if we have a pending exception,
3108 // including an exception we generate for unlocking checks. In the former
3109 // case, JVMDI has already been notified by our call for the exception handler
3110 // and in both cases as far as JVMDI is concerned we have already returned.
3111 // If we notify it again JVMDI will be all confused about how many frames
3112 // are still on the stack (4340444).
3113 //
  // NOTE Further! It turns out that the JVMTI spec in fact expects to see
3115 // method_exit events whenever we leave an activation unless it was done
3116 // for popframe. This is nothing like jvmdi. However we are passing the
3117 // tests at the moment (apparently because they are jvmdi based) so rather
3118 // than change this code and possibly fail tests we will leave it alone
3119 // (with this note) in anticipation of changing the vm and the tests
3120 // simultaneously.
3121
3122
3123 //
3124 suppress_exit_event = suppress_exit_event || illegal_state_oop() != NULL;
3125
3126
3127
3128 #ifdef VM_JVMTI
3129 if (_jvmti_interp_events) {
3130 // Whenever JVMTI puts a thread in interp_only_mode, method
3131 // entry/exit events are sent for that thread to track stack depth.
3132 if ( !suppress_exit_event && THREAD->is_interp_only_mode() ) {
3133 {
3134 // Prevent any HandleMarkCleaner from freeing our live handles
3135 HandleMark __hm(THREAD);
3136 CALL_VM_NOCHECK(InterpreterRuntime::post_method_exit(THREAD));
3137 }
3138 }
3139 }
3140 #endif /* VM_JVMTI */
3141
3142 //
3143 // See if we are returning any exception
3144 // A pending exception that was pending prior to a possible popping frame
3145 // overrides the popping frame.
3146 //
3147 assert(!suppress_error || (suppress_error && illegal_state_oop() == NULL), "Error was not suppressed");
3148 if (illegal_state_oop() != NULL || original_exception() != NULL) {
3149 // Inform the frame manager we have no result.
3150 istate->set_msg(throwing_exception);
3151 if (illegal_state_oop() != NULL)
3152 THREAD->set_pending_exception(illegal_state_oop(), NULL, 0);
3153 else
3154 THREAD->set_pending_exception(original_exception(), NULL, 0);
3155 UPDATE_PC_AND_RETURN(0);
3156 }
3157
3158 if (istate->msg() == popping_frame) {
      // Make it simpler on the assembly code and set the message for the frame pop.
3161 if (istate->prev() == NULL) {
3162 // We must be returning to a deoptimized frame (because popframe only happens between
3163 // two interpreted frames). We need to save the current arguments in C heap so that
3164 // the deoptimized frame when it restarts can copy the arguments to its expression
3165 // stack and re-execute the call. We also have to notify deoptimization that this
3166 // has occurred and to pick the preserved args copy them to the deoptimized frame's
3167 // java expression stack. Yuck.
3168 //
3169 THREAD->popframe_preserve_args(in_ByteSize(METHOD->size_of_parameters() * wordSize),
3170 LOCALS_SLOT(METHOD->size_of_parameters() - 1));
3171 THREAD->set_popframe_condition_bit(JavaThread::popframe_force_deopt_reexecution_bit);
3172 }
3173 } else {
3174 istate->set_msg(return_from_method);
3175 }
3176
3177 // Normal return
3178 // Advance the pc and return to frame manager
3179 UPDATE_PC_AND_RETURN(1);
3180 } /* handle_return: */
3181
3182 // This is really a fatal error return
3183
3184 finish:
3185 DECACHE_TOS();
3186 DECACHE_PC();
3187
3188 return;
3189 }
3190
3191 /*
3192 * All the code following this point is only produced once and is not present
3193 * in the JVMTI version of the interpreter
3194 */
3195
3196 #ifndef VM_JVMTI
3197
// This constructor should only be used to construct the object to signal
3199 // interpreter initialization. All other instances should be created by
3200 // the frame manager.
3201 BytecodeInterpreter::BytecodeInterpreter(messages msg) {
3202 if (msg != initialize) ShouldNotReachHere();
3203 _msg = msg;
3204 _self_link = this;
3205 _prev_link = NULL;
3206 }
3207
3208 // Inline static functions for Java Stack and Local manipulation
3209
// The implementations are platform dependent. We have to worry about alignment
// issues on some machines, which can vary even on the same platform depending on
// whether it is an LP64 machine.
3213 address BytecodeInterpreter::stack_slot(intptr_t *tos, int offset) {
3214 return (address) tos[Interpreter::expr_index_at(-offset)];
3215 }
3216
3217 jint BytecodeInterpreter::stack_int(intptr_t *tos, int offset) {
3218 return *((jint*) &tos[Interpreter::expr_index_at(-offset)]);
3219 }
3220
3221 jfloat BytecodeInterpreter::stack_float(intptr_t *tos, int offset) {
3222 return *((jfloat *) &tos[Interpreter::expr_index_at(-offset)]);
3223 }
3224
3225 oop BytecodeInterpreter::stack_object(intptr_t *tos, int offset) {
3226 return cast_to_oop(tos [Interpreter::expr_index_at(-offset)]);
3227 }
3228
3229 jdouble BytecodeInterpreter::stack_double(intptr_t *tos, int offset) {
3230 return ((VMJavaVal64*) &tos[Interpreter::expr_index_at(-offset)])->d;
3231 }
3232
3233 jlong BytecodeInterpreter::stack_long(intptr_t *tos, int offset) {
3234 return ((VMJavaVal64 *) &tos[Interpreter::expr_index_at(-offset)])->l;
3235 }

// only used for value types
void BytecodeInterpreter::set_stack_slot(intptr_t *tos, address value,
                                         int offset) {
  *((address *)&tos[Interpreter::expr_index_at(-offset)]) = value;
}

void BytecodeInterpreter::set_stack_int(intptr_t *tos, int value,
                                        int offset) {
  *((jint *)&tos[Interpreter::expr_index_at(-offset)]) = value;
}

void BytecodeInterpreter::set_stack_float(intptr_t *tos, jfloat value,
                                          int offset) {
  *((jfloat *)&tos[Interpreter::expr_index_at(-offset)]) = value;
}

void BytecodeInterpreter::set_stack_object(intptr_t *tos, oop value,
                                           int offset) {
  *((oop *)&tos[Interpreter::expr_index_at(-offset)]) = value;
}
// Needs to be platform dependent on the 32-bit platforms.
void BytecodeInterpreter::set_stack_double(intptr_t *tos, jdouble value,
                                           int offset) {
  ((VMJavaVal64*)&tos[Interpreter::expr_index_at(-offset)])->d = value;
}

void BytecodeInterpreter::set_stack_double_from_addr(intptr_t *tos,
                                                     address addr, int offset) {
  (((VMJavaVal64*)&tos[Interpreter::expr_index_at(-offset)])->d =
                        ((VMJavaVal64*)addr)->d);
}

void BytecodeInterpreter::set_stack_long(intptr_t *tos, jlong value,
                                         int offset) {
  // Poison the unused half of the two-slot value so that stale reads of it
  // stand out in a debugger.
  ((VMJavaVal64*)&tos[Interpreter::expr_index_at(-offset+1)])->l = 0xdeedbeeb;
  ((VMJavaVal64*)&tos[Interpreter::expr_index_at(-offset)])->l = value;
}

void BytecodeInterpreter::set_stack_long_from_addr(intptr_t *tos,
                                                   address addr, int offset) {
  ((VMJavaVal64*)&tos[Interpreter::expr_index_at(-offset+1)])->l = 0xdeedbeeb;  // poison unused half
  ((VMJavaVal64*)&tos[Interpreter::expr_index_at(-offset)])->l =
                        ((VMJavaVal64*)addr)->l;
}
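
// Usage sketch: category-2 (long/double) values occupy two expression-stack
// slots and are read and written through the 64-bit macros. Conceptually, an
// ladd handler is:
//
//   jlong lhs = STACK_LONG(-3);             // lower operand, slots -4/-3
//   jlong rhs = STACK_LONG(-1);             // upper operand, slots -2/-1
//   SET_STACK_LONG(lhs + rhs, -3);          // result lands in slots -4/-3
//   UPDATE_PC_AND_TOS_AND_CONTINUE(1, -2);  // advance pc, pop two slots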

// Locals

address BytecodeInterpreter::locals_slot(intptr_t* locals, int offset) {
  return (address)locals[Interpreter::local_index_at(-offset)];
}
jint BytecodeInterpreter::locals_int(intptr_t* locals, int offset) {
  return (jint)locals[Interpreter::local_index_at(-offset)];
}
jfloat BytecodeInterpreter::locals_float(intptr_t* locals, int offset) {
  // Reinterpret the slot bits (matching set_locals_float below) rather than
  // value-converting the intptr_t, which would produce a wrong result.
  return *((jfloat*)&locals[Interpreter::local_index_at(-offset)]);
}
oop BytecodeInterpreter::locals_object(intptr_t* locals, int offset) {
  return cast_to_oop(locals[Interpreter::local_index_at(-offset)]);
}
jdouble BytecodeInterpreter::locals_double(intptr_t* locals, int offset) {
  return ((VMJavaVal64*)&locals[Interpreter::local_index_at(-(offset+1))])->d;
}
jlong BytecodeInterpreter::locals_long(intptr_t* locals, int offset) {
  return ((VMJavaVal64*)&locals[Interpreter::local_index_at(-(offset+1))])->l;
}

// Returns the address of the 64-bit (two-slot) locals value at the given offset.
address BytecodeInterpreter::locals_long_at(intptr_t* locals, int offset) {
  return ((address)&locals[Interpreter::local_index_at(-(offset+1))]);
}
address BytecodeInterpreter::locals_double_at(intptr_t* locals, int offset) {
  return ((address)&locals[Interpreter::local_index_at(-(offset+1))]);
}
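
// Usage sketch: handlers address locals by zero-based index through the
// LOCALS_* / SET_LOCALS_* macros, which bind the current locals pointer.
// Conceptually, an iinc handler is (reg and incr are stand-ins for the
// operands decoded from the bytecode stream):
//
//   SET_LOCALS_INT(LOCALS_INT(reg) + incr, reg);
//   UPDATE_PC_AND_CONTINUE(3);               // iinc is a 3-byte instruction
//
// Note the 64-bit flavors address the pair of slots at offset and offset+1,
// hence the local_index_at(-(offset+1)) above.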

// Used for local value or returnAddress
void BytecodeInterpreter::set_locals_slot(intptr_t *locals,
                                          address value, int offset) {
  *((address*)&locals[Interpreter::local_index_at(-offset)]) = value;
}
void BytecodeInterpreter::set_locals_int(intptr_t *locals,
                                         jint value, int offset) {
  *((jint *)&locals[Interpreter::local_index_at(-offset)]) = value;
}
void BytecodeInterpreter::set_locals_float(intptr_t *locals,
                                           jfloat value, int offset) {
  *((jfloat *)&locals[Interpreter::local_index_at(-offset)]) = value;
}
void BytecodeInterpreter::set_locals_object(intptr_t *locals,
                                            oop value, int offset) {
  *((oop *)&locals[Interpreter::local_index_at(-offset)]) = value;
}
void BytecodeInterpreter::set_locals_double(intptr_t *locals,
                                            jdouble value, int offset) {
  ((VMJavaVal64*)&locals[Interpreter::local_index_at(-(offset+1))])->d = value;
}
void BytecodeInterpreter::set_locals_long(intptr_t *locals,
                                          jlong value, int offset) {
  ((VMJavaVal64*)&locals[Interpreter::local_index_at(-(offset+1))])->l = value;
}
void BytecodeInterpreter::set_locals_double_from_addr(intptr_t *locals,
                                                      address addr, int offset) {
  ((VMJavaVal64*)&locals[Interpreter::local_index_at(-(offset+1))])->d = ((VMJavaVal64*)addr)->d;
}
void BytecodeInterpreter::set_locals_long_from_addr(intptr_t *locals,
                                                    address addr, int offset) {
  ((VMJavaVal64*)&locals[Interpreter::local_index_at(-(offset+1))])->l = ((VMJavaVal64*)addr)->l;
}

// Raw one-slot copy from the expression stack into the locals area; used for
// astore, where the slot may hold an oop or a returnAddress.
void BytecodeInterpreter::astore(intptr_t* tos, int stack_offset,
                                 intptr_t* locals, int locals_offset) {
  intptr_t value = tos[Interpreter::expr_index_at(-stack_offset)];
  locals[Interpreter::local_index_at(-locals_offset)] = value;
}

// Copy one raw slot within the expression stack.
void BytecodeInterpreter::copy_stack_slot(intptr_t *tos, int from_offset,
                                          int to_offset) {
  tos[Interpreter::expr_index_at(-to_offset)] =
                      (intptr_t)tos[Interpreter::expr_index_at(-from_offset)];
}

void BytecodeInterpreter::dup(intptr_t *tos) {
  copy_stack_slot(tos, -1, 0);
}

void BytecodeInterpreter::dup2(intptr_t *tos) {
  copy_stack_slot(tos, -2, 0);
  copy_stack_slot(tos, -1, 1);
}

void BytecodeInterpreter::dup_x1(intptr_t *tos) {
  /* insert top word two down */
  copy_stack_slot(tos, -1, 0);
  copy_stack_slot(tos, -2, -1);
  copy_stack_slot(tos, 0, -2);
}

void BytecodeInterpreter::dup_x2(intptr_t *tos) {
  /* insert top word three down */
  copy_stack_slot(tos, -1, 0);
  copy_stack_slot(tos, -2, -1);
  copy_stack_slot(tos, -3, -2);
  copy_stack_slot(tos, 0, -3);
}

void BytecodeInterpreter::dup2_x1(intptr_t *tos) {
  /* insert top 2 slots three down */
  copy_stack_slot(tos, -1, 1);
  copy_stack_slot(tos, -2, 0);
  copy_stack_slot(tos, -3, -1);
  copy_stack_slot(tos, 1, -2);
  copy_stack_slot(tos, 0, -3);
}

void BytecodeInterpreter::dup2_x2(intptr_t *tos) {
  /* insert top 2 slots four down */
  copy_stack_slot(tos, -1, 1);
  copy_stack_slot(tos, -2, 0);
  copy_stack_slot(tos, -3, -1);
  copy_stack_slot(tos, -4, -2);
  copy_stack_slot(tos, 1, -3);
  copy_stack_slot(tos, 0, -4);
}
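
// Worked example (illustrative): with the operand stack holding ..., c, b, a
// (a on top, so a is at offset -1 and b at offset -2), dup_x1 produces the
// JVMS-specified layout
//
//   before: ..., c, b, a
//   after : ..., c, a, b, a
//
// Offsets 0 and 1 above the current top are scratch destinations; the
// dispatch loop bumps topOfStack afterwards. These helpers move raw slots
// only; the verifier has already ensured the operand categories are legal.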

void BytecodeInterpreter::swap(intptr_t *tos) {
  // swap top two elements
  intptr_t val = tos[Interpreter::expr_index_at(1)];
  // Copy -2 entry to -1
  copy_stack_slot(tos, -2, -1);
  // Store saved -1 entry into -2
  tos[Interpreter::expr_index_at(2)] = val;
}

// --------------------------------------------------------------------------------
// Non-product code
#ifndef PRODUCT

const char* BytecodeInterpreter::C_msg(BytecodeInterpreter::messages msg) {
  switch (msg) {
    case BytecodeInterpreter::no_request:          return("no_request");
    case BytecodeInterpreter::initialize:          return("initialize");
    // status message to C++ interpreter
    case BytecodeInterpreter::method_entry:        return("method_entry");
    case BytecodeInterpreter::method_resume:       return("method_resume");
    case BytecodeInterpreter::got_monitors:        return("got_monitors");
    case BytecodeInterpreter::rethrow_exception:   return("rethrow_exception");
    // requests to frame manager from C++ interpreter
    case BytecodeInterpreter::call_method:         return("call_method");
    case BytecodeInterpreter::return_from_method:  return("return_from_method");
    case BytecodeInterpreter::more_monitors:       return("more_monitors");
    case BytecodeInterpreter::throwing_exception:  return("throwing_exception");
    case BytecodeInterpreter::popping_frame:       return("popping_frame");
    case BytecodeInterpreter::do_osr:              return("do_osr");
    // deopt
    case BytecodeInterpreter::deopt_resume:        return("deopt_resume");
    case BytecodeInterpreter::deopt_resume2:       return("deopt_resume2");
    default:                                       return("BAD MSG");
  }
}

void BytecodeInterpreter::print() {
  tty->print_cr("thread: " INTPTR_FORMAT, (uintptr_t) this->_thread);
  tty->print_cr("bcp: " INTPTR_FORMAT, (uintptr_t) this->_bcp);
  tty->print_cr("locals: " INTPTR_FORMAT, (uintptr_t) this->_locals);
  tty->print_cr("constants: " INTPTR_FORMAT, (uintptr_t) this->_constants);
  {
    ResourceMark rm;
    char *method_name = _method->name_and_sig_as_C_string();
    tty->print_cr("method: " INTPTR_FORMAT "[ %s ]", (uintptr_t) this->_method, method_name);
  }
  tty->print_cr("mdx: " INTPTR_FORMAT, (uintptr_t) this->_mdx);
  tty->print_cr("stack: " INTPTR_FORMAT, (uintptr_t) this->_stack);
  tty->print_cr("msg: %s", C_msg(this->_msg));
  tty->print_cr("result_to_call._callee: " INTPTR_FORMAT, (uintptr_t) this->_result._to_call._callee);
  tty->print_cr("result_to_call._callee_entry_point: " INTPTR_FORMAT, (uintptr_t) this->_result._to_call._callee_entry_point);
  tty->print_cr("result_to_call._bcp_advance: %d ", this->_result._to_call._bcp_advance);
  tty->print_cr("osr._osr_buf: " INTPTR_FORMAT, (uintptr_t) this->_result._osr._osr_buf);
  tty->print_cr("osr._osr_entry: " INTPTR_FORMAT, (uintptr_t) this->_result._osr._osr_entry);
  tty->print_cr("prev_link: " INTPTR_FORMAT, (uintptr_t) this->_prev_link);
  tty->print_cr("native_mirror: " INTPTR_FORMAT, (uintptr_t) p2i(this->_oop_temp));
  tty->print_cr("stack_base: " INTPTR_FORMAT, (uintptr_t) this->_stack_base);
  tty->print_cr("stack_limit: " INTPTR_FORMAT, (uintptr_t) this->_stack_limit);
  tty->print_cr("monitor_base: " INTPTR_FORMAT, (uintptr_t) this->_monitor_base);
#ifdef SPARC
  tty->print_cr("last_Java_pc: " INTPTR_FORMAT, (uintptr_t) this->_last_Java_pc);
  tty->print_cr("frame_bottom: " INTPTR_FORMAT, (uintptr_t) this->_frame_bottom);
  tty->print_cr("&native_fresult: " INTPTR_FORMAT, (uintptr_t) &this->_native_fresult);
  tty->print_cr("native_lresult: " INTPTR_FORMAT, (uintptr_t) this->_native_lresult);
#endif
#if !defined(ZERO) && defined(PPC)
  tty->print_cr("last_Java_fp: " INTPTR_FORMAT, (uintptr_t) this->_last_Java_fp);
#endif // !ZERO && PPC
  tty->print_cr("self_link: " INTPTR_FORMAT, (uintptr_t) this->_self_link);
}

extern "C" {
  // Hook for invoking BytecodeInterpreter::print() on an interpreter state
  // pointer from a native debugger, e.g. from gdb: call PI((uintptr_t)istate)
  void PI(uintptr_t arg) {
    ((BytecodeInterpreter*)arg)->print();
  }
}

#endif // PRODUCT

#endif // VM_JVMTI
#endif // CC_INTERP
--- EOF ---