rev 48494 : 8195112: x86 (32 bit): implementation for Thread-local handshakes
Reviewed-by:
1 /*
2 * Copyright (c) 2003, 2017, Oracle and/or its affiliates. All rights reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 #include "precompiled.hpp"
26 #include "asm/macroAssembler.hpp"
27 #include "interpreter/bytecodeHistogram.hpp"
28 #include "interpreter/interp_masm.hpp"
29 #include "interpreter/interpreter.hpp"
30 #include "interpreter/interpreterRuntime.hpp"
31 #include "interpreter/templateInterpreterGenerator.hpp"
32 #include "interpreter/templateTable.hpp"
33 #include "oops/arrayOop.hpp"
34 #include "oops/methodData.hpp"
35 #include "oops/method.hpp"
36 #include "oops/oop.inline.hpp"
37 #include "prims/jvmtiExport.hpp"
38 #include "prims/jvmtiThreadState.hpp"
39 #include "runtime/arguments.hpp"
40 #include "runtime/deoptimization.hpp"
41 #include "runtime/frame.inline.hpp"
42 #include "runtime/sharedRuntime.hpp"
43 #include "runtime/stubRoutines.hpp"
44 #include "runtime/synchronizer.hpp"
45 #include "runtime/timer.hpp"
46 #include "runtime/vframeArray.hpp"
47 #include "utilities/debug.hpp"
48 #include "utilities/macros.hpp"
49
50 #define __ _masm->
51
52 // Size of interpreter code. Increase if too small. Interpreter will
53 // fail with a guarantee ("not enough space for interpreter generation")
54 // if too small.
55 // Run with -XX:+PrintInterpreter to get the VM to print out the size.
56 // Max size with JVMTI
57 #ifdef AMD64
58 int TemplateInterpreter::InterpreterCodeSize = JVMCI_ONLY(268) NOT_JVMCI(256) * 1024;
59 #else
60 int TemplateInterpreter::InterpreterCodeSize = 224 * 1024;
61 #endif // AMD64
62
63 // Global Register Names
64 static const Register rbcp = LP64_ONLY(r13) NOT_LP64(rsi);
65 static const Register rlocals = LP64_ONLY(r14) NOT_LP64(rdi);
66
67 const int method_offset = frame::interpreter_frame_method_offset * wordSize;
68 const int bcp_offset = frame::interpreter_frame_bcp_offset * wordSize;
69 const int locals_offset = frame::interpreter_frame_locals_offset * wordSize;
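// These constants are byte offsets of interpreter frame slots relative to
// rbp; the slot indices (negative, since the frame grows toward lower
// addresses) are defined in frame_x86.hpp.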
70
71
72 //-----------------------------------------------------------------------------
73
74 address TemplateInterpreterGenerator::generate_StackOverflowError_handler() {
75 address entry = __ pc();
76
77 #ifdef ASSERT
78 {
79 Label L;
80 __ lea(rax, Address(rbp,
81 frame::interpreter_frame_monitor_block_top_offset *
82 wordSize));
83 __ cmpptr(rax, rsp); // rax = maximal rsp for current rbp (stack
84 // grows toward lower addresses)
85 __ jcc(Assembler::aboveEqual, L); // check if frame is complete
86 __ stop ("interpreter frame not set up");
87 __ bind(L);
88 }
89 #endif // ASSERT
90 // Restore bcp under the assumption that the current frame is still
91 // interpreted
92 __ restore_bcp();
93
94 // expression stack must be empty before entering the VM if an
95 // exception happened
96 __ empty_expression_stack();
97 // throw exception
98 __ call_VM(noreg,
99 CAST_FROM_FN_PTR(address,
100 InterpreterRuntime::throw_StackOverflowError));
101 return entry;
102 }
103
104 address TemplateInterpreterGenerator::generate_ArrayIndexOutOfBounds_handler(
105 const char* name) {
106 address entry = __ pc();
107 // expression stack must be empty before entering the VM if an
108 // exception happened
109 __ empty_expression_stack();
110 // setup parameters
111 // ??? convention: expect aberrant index in register ebx
112 Register rarg = NOT_LP64(rax) LP64_ONLY(c_rarg1);
113 __ lea(rarg, ExternalAddress((address)name));
114 __ call_VM(noreg,
115 CAST_FROM_FN_PTR(address,
116 InterpreterRuntime::
117 throw_ArrayIndexOutOfBoundsException),
118 rarg, rbx);
119 return entry;
120 }
121
122 address TemplateInterpreterGenerator::generate_ClassCastException_handler() {
123 address entry = __ pc();
124
125 // object is at TOS
126 Register rarg = NOT_LP64(rax) LP64_ONLY(c_rarg1);
127 __ pop(rarg);
128
129 // expression stack must be empty before entering the VM if an
130 // exception happened
131 __ empty_expression_stack();
132
133 __ call_VM(noreg,
134 CAST_FROM_FN_PTR(address,
135 InterpreterRuntime::
136 throw_ClassCastException),
137 rarg);
138 return entry;
139 }
140
141 address TemplateInterpreterGenerator::generate_exception_handler_common(
142 const char* name, const char* message, bool pass_oop) {
143 assert(!pass_oop || message == NULL, "either oop or message but not both");
144 address entry = __ pc();
145
146 Register rarg = NOT_LP64(rax) LP64_ONLY(c_rarg1);
147 Register rarg2 = NOT_LP64(rbx) LP64_ONLY(c_rarg2);
148
149 if (pass_oop) {
150 // object is at TOS
151 __ pop(rarg2);
152 }
153 // expression stack must be empty before entering the VM if an
154 // exception happened
155 __ empty_expression_stack();
156 // setup parameters
157 __ lea(rarg, ExternalAddress((address)name));
158 if (pass_oop) {
159 __ call_VM(rax, CAST_FROM_FN_PTR(address,
160 InterpreterRuntime::
161 create_klass_exception),
162 rarg, rarg2);
163 } else {
164 __ lea(rarg2, ExternalAddress((address)message));
165 __ call_VM(rax,
166 CAST_FROM_FN_PTR(address, InterpreterRuntime::create_exception),
167 rarg, rarg2);
168 }
169 // throw exception
170 __ jump(ExternalAddress(Interpreter::throw_exception_entry()));
171 return entry;
172 }
173
174 address TemplateInterpreterGenerator::generate_return_entry_for(TosState state, int step, size_t index_size) {
175 address entry = __ pc();
176
177 #ifndef _LP64
178 #ifdef COMPILER2
179 // The FPU stack is clean if UseSSE >= 2 but must be cleaned in other cases
180 if ((state == ftos && UseSSE < 1) || (state == dtos && UseSSE < 2)) {
181 for (int i = 1; i < 8; i++) {
182 __ ffree(i);
183 }
184 } else if (UseSSE < 2) {
185 __ empty_FPU_stack();
186 }
187 #endif // COMPILER2
188 if ((state == ftos && UseSSE < 1) || (state == dtos && UseSSE < 2)) {
189 __ MacroAssembler::verify_FPU(1, "generate_return_entry_for compiled");
190 } else {
191 __ MacroAssembler::verify_FPU(0, "generate_return_entry_for compiled");
192 }
193
194 if (state == ftos) {
195 __ MacroAssembler::verify_FPU(UseSSE >= 1 ? 0 : 1, "generate_return_entry_for in interpreter");
196 } else if (state == dtos) {
197 __ MacroAssembler::verify_FPU(UseSSE >= 2 ? 0 : 1, "generate_return_entry_for in interpreter");
198 }
199 #endif // _LP64
200
201 // Restore stack bottom in case i2c adjusted stack
202 __ movptr(rsp, Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize));
203 // and NULL it as marker that esp is now tos until next java call
204 __ movptr(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize), (int32_t)NULL_WORD);
205
206 __ restore_bcp();
207 __ restore_locals();
208
209 if (state == atos) {
210 Register mdp = rbx;
211 Register tmp = rcx;
212 __ profile_return_type(mdp, rax, tmp);
213 }
214
215 const Register cache = rbx;
216 const Register index = rcx;
217 __ get_cache_and_index_at_bcp(cache, index, 1, index_size);
218
219 const Register flags = cache;
220 __ movl(flags, Address(cache, index, Address::times_ptr, ConstantPoolCache::base_offset() + ConstantPoolCacheEntry::flags_offset()));
221 __ andl(flags, ConstantPoolCacheEntry::parameter_size_mask);
222 __ lea(rsp, Address(rsp, flags, Interpreter::stackElementScale()));
223
224 const Register java_thread = NOT_LP64(rcx) LP64_ONLY(r15_thread);
225 if (JvmtiExport::can_pop_frame()) {
226 NOT_LP64(__ get_thread(java_thread));
227 __ check_and_handle_popframe(java_thread);
228 }
229 if (JvmtiExport::can_force_early_return()) {
230 NOT_LP64(__ get_thread(java_thread));
231 __ check_and_handle_earlyret(java_thread);
232 }
233
234 __ dispatch_next(state, step);
235
236 return entry;
237 }
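// In outline, the return entry above does the following (a C-like sketch;
// names follow the code above):
//
//   rsp = interpreter_frame_last_sp;    // undo any i2c stack extension
//   interpreter_frame_last_sp = NULL;   // rsp is tos again until the next Java call
//   parameter_size = cp_cache_entry.flags & parameter_size_mask;
//   rsp += parameter_size * Interpreter::stackElementSize;  // pop callee arguments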
238
239
240 address TemplateInterpreterGenerator::generate_deopt_entry_for(TosState state, int step, address continuation) {
241 address entry = __ pc();
242
243 #ifndef _LP64
244 if (state == ftos) {
245 __ MacroAssembler::verify_FPU(UseSSE >= 1 ? 0 : 1, "generate_deopt_entry_for in interpreter");
246 } else if (state == dtos) {
247 __ MacroAssembler::verify_FPU(UseSSE >= 2 ? 0 : 1, "generate_deopt_entry_for in interpreter");
248 }
249 #endif // _LP64
250
251 // NULL last_sp until next java call
252 __ movptr(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize), (int32_t)NULL_WORD);
253 __ restore_bcp();
254 __ restore_locals();
255 const Register thread = NOT_LP64(rcx) LP64_ONLY(r15_thread);
256 NOT_LP64(__ get_thread(thread));
257 #if INCLUDE_JVMCI
258 // Check if we need to take lock at entry of synchronized method. This can
259 // only occur on method entry so emit it only for vtos with step 0.
260 if ((EnableJVMCI || UseAOT) && state == vtos && step == 0) {
261 Label L;
262 __ cmpb(Address(thread, JavaThread::pending_monitorenter_offset()), 0);
263 __ jcc(Assembler::zero, L);
264 // Clear flag.
265 __ movb(Address(thread, JavaThread::pending_monitorenter_offset()), 0);
266 // Satisfy calling convention for lock_method().
267 __ get_method(rbx);
268 // Take lock.
269 lock_method();
270 __ bind(L);
271 } else {
272 #ifdef ASSERT
273 if (EnableJVMCI) {
274 Label L;
275 __ cmpb(Address(r15_thread, JavaThread::pending_monitorenter_offset()), 0);
276 __ jccb(Assembler::zero, L);
277 __ stop("unexpected pending monitor in deopt entry");
278 __ bind(L);
279 }
280 #endif
281 }
282 #endif
283 // handle exceptions
284 {
285 Label L;
286 __ cmpptr(Address(thread, Thread::pending_exception_offset()), (int32_t) NULL_WORD);
287 __ jcc(Assembler::zero, L);
288 __ call_VM(noreg,
289 CAST_FROM_FN_PTR(address,
290 InterpreterRuntime::throw_pending_exception));
291 __ should_not_reach_here();
292 __ bind(L);
293 }
294 if (continuation == NULL) {
295 __ dispatch_next(state, step);
296 } else {
297 __ jump_to_entry(continuation);
298 }
299 return entry;
300 }
301
302 address TemplateInterpreterGenerator::generate_result_handler_for(
303 BasicType type) {
304 address entry = __ pc();
305 switch (type) {
306 case T_BOOLEAN: __ c2bool(rax); break;
307 #ifndef _LP64
308 case T_CHAR : __ andptr(rax, 0xFFFF); break;
309 #else
310 case T_CHAR : __ movzwl(rax, rax); break;
311 #endif // _LP64
312 case T_BYTE : __ sign_extend_byte(rax); break;
313 case T_SHORT : __ sign_extend_short(rax); break;
314 case T_INT : /* nothing to do */ break;
315 case T_LONG : /* nothing to do */ break;
316 case T_VOID : /* nothing to do */ break;
317 #ifndef _LP64
318 case T_DOUBLE :
319 case T_FLOAT :
320 { const Register t = InterpreterRuntime::SignatureHandlerGenerator::temp();
321 __ pop(t); // remove return address first
322 // Must return a result for interpreter or compiler. In SSE
323 // mode, results are returned in xmm0 and the FPU stack must
324 // be empty.
325 if (type == T_FLOAT && UseSSE >= 1) {
326 // Load ST0
327 __ fld_d(Address(rsp, 0));
328 // Store as float and empty fpu stack
329 __ fstp_s(Address(rsp, 0));
330 // and reload
331 __ movflt(xmm0, Address(rsp, 0));
332 } else if (type == T_DOUBLE && UseSSE >= 2 ) {
333 __ movdbl(xmm0, Address(rsp, 0));
334 } else {
335 // restore ST0
336 __ fld_d(Address(rsp, 0));
337 }
338 // and pop the temp
339 __ addptr(rsp, 2 * wordSize);
340 __ push(t); // restore return address
341 }
342 break;
343 #else
344 case T_FLOAT : /* nothing to do */ break;
345 case T_DOUBLE : /* nothing to do */ break;
346 #endif // _LP64
347
348 case T_OBJECT :
349 // retrieve result from frame
350 __ movptr(rax, Address(rbp, frame::interpreter_frame_oop_temp_offset*wordSize));
351 // and verify it
352 __ verify_oop(rax);
353 break;
354 default : ShouldNotReachHere();
355 }
356 __ ret(0); // return from result handler
357 return entry;
358 }
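// For example, the T_BOOLEAN handler above normalizes the low byte of rax to
// 0 or 1 (c2bool), and on 32 bit the T_FLOAT/T_DOUBLE handlers move the value
// between the FPU stack and xmm0 depending on UseSSE, so callers always find
// the result where the calling convention expects it.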
359
360 address TemplateInterpreterGenerator::generate_safept_entry_for(
361 TosState state,
362 address runtime_entry) {
363 address entry = __ pc();
364 __ push(state);
365 __ call_VM(noreg, runtime_entry);
366 __ dispatch_via(vtos, Interpreter::_normal_table.table_for(vtos));
367 return entry;
368 }
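// This stub is how the interpreter participates in safepoints and, with this
// change, thread-local handshakes: when a stop is requested, the active
// dispatch table is swapped for the safepoint table, whose entries funnel
// into the code above; runtime_entry (normally InterpreterRuntime::at_safepoint,
// see the shared templateInterpreterGenerator.cpp) blocks the thread, after
// which dispatch resumes through the normal vtos table.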
369
370
371
372 // Helpers for commoning out cases in the various types of method entries.
373 //
374
375
376 // increment invocation count & check for overflow
377 //
378 // Note: checking for negative value instead of overflow
379 // so we have a 'sticky' overflow test
380 //
381 // rbx: method
382 // rcx: invocation counter
383 //
384 void TemplateInterpreterGenerator::generate_counter_incr(
385 Label* overflow,
386 Label* profile_method,
387 Label* profile_method_continue) {
388 Label done;
389 // Note: In tiered we increment either counters in Method* or in MDO depending if we're profiling or not.
390 if (TieredCompilation) {
391 int increment = InvocationCounter::count_increment;
392 Label no_mdo;
393 if (ProfileInterpreter) {
394 // Are we profiling?
395 __ movptr(rax, Address(rbx, Method::method_data_offset()));
396 __ testptr(rax, rax);
397 __ jccb(Assembler::zero, no_mdo);
398 // Increment counter in the MDO
399 const Address mdo_invocation_counter(rax, in_bytes(MethodData::invocation_counter_offset()) +
400 in_bytes(InvocationCounter::counter_offset()));
401 const Address mask(rax, in_bytes(MethodData::invoke_mask_offset()));
402 __ increment_mask_and_jump(mdo_invocation_counter, increment, mask, rcx, false, Assembler::zero, overflow);
403 __ jmp(done);
404 }
405 __ bind(no_mdo);
406 // Increment counter in MethodCounters
407 const Address invocation_counter(rax,
408 MethodCounters::invocation_counter_offset() +
409 InvocationCounter::counter_offset());
410 __ get_method_counters(rbx, rax, done);
411 const Address mask(rax, in_bytes(MethodCounters::invoke_mask_offset()));
412 __ increment_mask_and_jump(invocation_counter, increment, mask, rcx,
413 false, Assembler::zero, overflow);
414 __ bind(done);
415 } else { // not TieredCompilation
416 const Address backedge_counter(rax,
417 MethodCounters::backedge_counter_offset() +
418 InvocationCounter::counter_offset());
419 const Address invocation_counter(rax,
420 MethodCounters::invocation_counter_offset() +
421 InvocationCounter::counter_offset());
422
423 __ get_method_counters(rbx, rax, done);
424
425 if (ProfileInterpreter) {
426 __ incrementl(Address(rax,
427 MethodCounters::interpreter_invocation_counter_offset()));
428 }
429 // Update standard invocation counters
430 __ movl(rcx, invocation_counter);
431 __ incrementl(rcx, InvocationCounter::count_increment);
432 __ movl(invocation_counter, rcx); // save invocation count
433
434 __ movl(rax, backedge_counter); // load backedge counter
435 __ andl(rax, InvocationCounter::count_mask_value); // mask out the status bits
436
437 __ addl(rcx, rax); // add both counters
438
439 // profile_method is non-null only for interpreted methods, so
440 // profile_method != NULL implies !native_call
441
442 if (ProfileInterpreter && profile_method != NULL) {
443 // Test to see if we should create a method data oop
444 __ movptr(rax, Address(rbx, Method::method_counters_offset()));
445 __ cmp32(rcx, Address(rax, in_bytes(MethodCounters::interpreter_profile_limit_offset())));
446 __ jcc(Assembler::less, *profile_method_continue);
447
448 // if no method data exists, go to profile_method
449 __ test_method_data_pointer(rax, *profile_method);
450 }
451
452 __ movptr(rax, Address(rbx, Method::method_counters_offset()));
453 __ cmp32(rcx, Address(rax, in_bytes(MethodCounters::interpreter_invocation_limit_offset())));
454 __ jcc(Assembler::aboveEqual, *overflow);
455 __ bind(done);
456 }
457 }
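// A sketch of the tiered-path helper increment_mask_and_jump (see
// macroAssembler_x86.cpp), in C-like form:
//
//   counter += InvocationCounter::count_increment;
//   if ((counter & mask) == 0) goto *overflow;  // mask strips the status bits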
458
459 void TemplateInterpreterGenerator::generate_counter_overflow(Label& do_continue) {
460
461 // Asm interpreter on entry
462 // r14/rdi - locals
463 // r13/rsi - bcp
464 // rbx - method
465 // rdx - cpool --- DOES NOT APPEAR TO BE TRUE
466 // rbp - interpreter frame
467
468 // On return (i.e. jump to entry_point) [ back to invocation of interpreter ]
469 // Everything as it was on entry
470 // rdx is not restored. Doesn't appear to really be set.
471
472 // InterpreterRuntime::frequency_counter_overflow takes two
473 // arguments, the first (thread) is passed by call_VM, the second
474 // indicates if the counter overflow occurs at a backwards branch
475 // (NULL bcp). We pass zero for it. The call returns the address
476 // of the verified entry point for the method or NULL if the
477 // compilation did not complete (either went background or bailed
478 // out).
479 Register rarg = NOT_LP64(rax) LP64_ONLY(c_rarg1);
480 __ movl(rarg, 0);
481 __ call_VM(noreg,
482 CAST_FROM_FN_PTR(address,
483 InterpreterRuntime::frequency_counter_overflow),
484 rarg);
485
486 __ movptr(rbx, Address(rbp, method_offset)); // restore Method*
487 // Preserve invariant that r13/r14 contain bcp/locals of sender frame
488 // and jump to the interpreted entry.
489 __ jmp(do_continue, relocInfo::none);
490 }
491
492 // See if we've got enough room on the stack for locals plus overhead below
493 // JavaThread::stack_overflow_limit(). If not, throw a StackOverflowError
494 // without going through the signal handler, i.e., reserved and yellow zones
495 // will not be made usable. The shadow zone must suffice to handle the
496 // overflow.
497 // The expression stack grows down incrementally, so the normal guard
498 // page mechanism will work for that.
499 //
500 // NOTE: Since the additional locals are also always pushed (this wasn't
501 // obvious in generate_fixed_frame), the guard should work for them
502 // too.
503 //
504 // Args:
505 // rdx: number of additional locals this frame needs (what we must check)
506 // rbx: Method*
507 //
508 // Kills:
509 // rax
510 void TemplateInterpreterGenerator::generate_stack_overflow_check(void) {
511
512 // monitor entry size: see picture of stack in frame_x86.hpp
513 const int entry_size = frame::interpreter_frame_monitor_size() * wordSize;
514
515 // total overhead size: entry_size + (saved rbp through expr stack
516 // bottom). be sure to change this if you add/subtract anything
517 // to/from the overhead area
518 const int overhead_size =
519 -(frame::interpreter_frame_initial_sp_offset * wordSize) + entry_size;
520
521 const int page_size = os::vm_page_size();
522
523 Label after_frame_check;
524
525 // see if the frame is greater than one page in size. If so,
526 // then we need to verify there is enough stack space remaining
527 // for the additional locals.
528 __ cmpl(rdx, (page_size - overhead_size) / Interpreter::stackElementSize);
529 __ jcc(Assembler::belowEqual, after_frame_check);
530
531 // compute rsp as if this were going to be the last frame on
532 // the stack before the red zone
533
534 Label after_frame_check_pop;
535 const Register thread = NOT_LP64(rsi) LP64_ONLY(r15_thread);
536 #ifndef _LP64
537 __ push(thread);
538 __ get_thread(thread);
539 #endif
540
541 const Address stack_limit(thread, JavaThread::stack_overflow_limit_offset());
542
543 // locals + overhead, in bytes
544 __ mov(rax, rdx);
545 __ shlptr(rax, Interpreter::logStackElementSize); // Convert additional-locals count to bytes.
546 __ addptr(rax, overhead_size);
547
548 #ifdef ASSERT
549 Label limit_okay;
550 // Verify that thread stack overflow limit is non-zero.
551 __ cmpptr(stack_limit, (int32_t)NULL_WORD);
552 __ jcc(Assembler::notEqual, limit_okay);
553 __ stop("stack overflow limit is zero");
554 __ bind(limit_okay);
555 #endif
556
557 // Add locals/frame size to stack limit.
558 __ addptr(rax, stack_limit);
559
560 // Check against the current stack bottom.
561 __ cmpptr(rsp, rax);
562
563 __ jcc(Assembler::above, after_frame_check_pop);
564 NOT_LP64(__ pop(rsi)); // get saved bcp
565
566 // Restore sender's sp as SP. This is necessary if the sender's
567 // frame is an extended compiled frame (see gen_c2i_adapter())
568 // and safer anyway in case of JSR292 adaptations.
569
570 __ pop(rax); // return address must be moved if SP is changed
571 __ mov(rsp, rbcp);
572 __ push(rax);
573
574 // Note: the restored frame is not necessarily interpreted.
575 // Use the shared runtime version of the StackOverflowError.
576 assert(StubRoutines::throw_StackOverflowError_entry() != NULL, "stub not yet generated");
577 __ jump(ExternalAddress(StubRoutines::throw_StackOverflowError_entry()));
578 // all done with frame size check
579 __ bind(after_frame_check_pop);
580 NOT_LP64(__ pop(rsi));
581
582 // all done with frame size check
583 __ bind(after_frame_check);
584 }
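// Net effect of the code above, as a C-like sketch (names as used above):
//
//   if (additional_locals <= (page_size - overhead_size) / stackElementSize ||
//       rsp > stack_overflow_limit + locals_bytes + overhead_size) {
//     // frame fits; fall through
//   } else {
//     goto StubRoutines::throw_StackOverflowError_entry();
//   }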
585
586 // Allocate monitor and lock method (asm interpreter)
587 //
588 // Args:
589 // rbx: Method*
590 // r14/rdi: locals
591 //
592 // Kills:
593 // rax
594 // c_rarg0, c_rarg1, c_rarg2, c_rarg3, ...(param regs)
595 // rscratch1, rscratch2 (scratch regs)
596 void TemplateInterpreterGenerator::lock_method() {
597 // synchronize method
598 const Address access_flags(rbx, Method::access_flags_offset());
599 const Address monitor_block_top(
600 rbp,
601 frame::interpreter_frame_monitor_block_top_offset * wordSize);
602 const int entry_size = frame::interpreter_frame_monitor_size() * wordSize;
603
604 #ifdef ASSERT
605 {
606 Label L;
607 __ movl(rax, access_flags);
608 __ testl(rax, JVM_ACC_SYNCHRONIZED);
609 __ jcc(Assembler::notZero, L);
610 __ stop("method doesn't need synchronization");
611 __ bind(L);
612 }
613 #endif // ASSERT
614
615 // get synchronization object
616 {
617 Label done;
618 __ movl(rax, access_flags);
619 __ testl(rax, JVM_ACC_STATIC);
620 // get receiver (assume this is the frequent case)
621 __ movptr(rax, Address(rlocals, Interpreter::local_offset_in_bytes(0)));
622 __ jcc(Assembler::zero, done);
623 __ load_mirror(rax, rbx);
624
625 #ifdef ASSERT
626 {
627 Label L;
628 __ testptr(rax, rax);
629 __ jcc(Assembler::notZero, L);
630 __ stop("synchronization object is NULL");
631 __ bind(L);
632 }
633 #endif // ASSERT
634
635 __ bind(done);
636 }
637
638 // add space for monitor & lock
639 __ subptr(rsp, entry_size); // add space for a monitor entry
640 __ movptr(monitor_block_top, rsp); // set new monitor block top
641 // store object
642 __ movptr(Address(rsp, BasicObjectLock::obj_offset_in_bytes()), rax);
643 const Register lockreg = NOT_LP64(rdx) LP64_ONLY(c_rarg1);
644 __ movptr(lockreg, rsp); // object address
645 __ lock_object(lockreg);
646 }
647
648 // Generate a fixed interpreter frame. The setup is identical for
649 // interpreted methods and for native methods, hence the shared code.
650 //
651 // Args:
652 // rax: return address
653 // rbx: Method*
654 // r14/rdi: pointer to locals
655 // r13/rsi: sender sp
656 // rdx: cp cache
657 void TemplateInterpreterGenerator::generate_fixed_frame(bool native_call) {
658 // initialize fixed part of activation frame
659 __ push(rax); // save return address
660 __ enter(); // save old & set new rbp
661 __ push(rbcp); // set sender sp
662 __ push((int)NULL_WORD); // leave last_sp as null
663 __ movptr(rbcp, Address(rbx, Method::const_offset())); // get ConstMethod*
664 __ lea(rbcp, Address(rbcp, ConstMethod::codes_offset())); // get codebase
665 __ push(rbx); // save Method*
666 // Get mirror and store it in the frame as GC root for this Method*
667 __ load_mirror(rdx, rbx);
668 __ push(rdx);
669 if (ProfileInterpreter) {
670 Label method_data_continue;
671 __ movptr(rdx, Address(rbx, in_bytes(Method::method_data_offset())));
672 __ testptr(rdx, rdx);
673 __ jcc(Assembler::zero, method_data_continue);
674 __ addptr(rdx, in_bytes(MethodData::data_offset()));
675 __ bind(method_data_continue);
676 __ push(rdx); // set the mdp (method data pointer)
677 } else {
678 __ push(0);
679 }
680
681 __ movptr(rdx, Address(rbx, Method::const_offset()));
682 __ movptr(rdx, Address(rdx, ConstMethod::constants_offset()));
683 __ movptr(rdx, Address(rdx, ConstantPool::cache_offset_in_bytes()));
684 __ push(rdx); // set constant pool cache
685 __ push(rlocals); // set locals pointer
686 if (native_call) {
687 __ push(0); // no bcp
688 } else {
689 __ push(rbcp); // set bcp
690 }
691 __ push(0); // reserve word for pointer to expression stack bottom
692 __ movptr(Address(rsp, 0), rsp); // set expression stack bottom
693 }
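// Resulting fixed frame, from high to low addresses (slot indices per
// frame_x86.hpp; a sketch, not normative):
//
//   [ return address            ]
//   [ saved rbp                 ] <-- rbp
//   [ sender sp                 ]  -1
//   [ last_sp (NULL)            ]  -2
//   [ Method*                   ]  -3
//   [ mirror (GC root)          ]  -4
//   [ mdp or 0                  ]  -5
//   [ ConstantPoolCache*        ]  -6
//   [ locals pointer            ]  -7
//   [ bcp (0 for native)        ]  -8
//   [ expression stack bottom   ] <-- rsp  -9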
694
695 // End of helpers
696
697 // Method entry for java.lang.ref.Reference.get.
698 address TemplateInterpreterGenerator::generate_Reference_get_entry(void) {
699 #if INCLUDE_ALL_GCS
700 // Code: _aload_0, _getfield, _areturn
701 // parameter size = 1
702 //
703 // The code that gets generated by this routine is split into 2 parts:
704 // 1. The "intrinsified" code for G1 (or any SATB based GC),
705 // 2. The slow path - which is an expansion of the regular method entry.
706 //
707 // Notes:-
708 // * In the G1 code we do not check whether we need to block for
709 // a safepoint. If G1 is enabled then we must execute the specialized
710 // code for Reference.get (except when the Reference object is null)
711 // so that we can log the value in the referent field with an SATB
712 // update buffer.
713 // If the code for the getfield template is modified so that the
714 // G1 pre-barrier code is executed when the current method is
715 // Reference.get() then going through the normal method entry
716 // will be fine.
717 // * The G1 code can, however, check the receiver object (the instance
718 // of java.lang.Reference) and jump to the slow path if null. If the
719 // Reference object is null then we obviously cannot fetch the referent
720 // and so we don't need to call the G1 pre-barrier. Thus we can use the
721 // regular method entry code to generate the NPE.
722 //
723 // rbx: Method*
724
725 // r13: senderSP; must be preserved for the slow path, set SP to it on the fast path
726
727 address entry = __ pc();
728
729 const int referent_offset = java_lang_ref_Reference::referent_offset;
730 guarantee(referent_offset > 0, "referent offset not initialized");
731
732 if (UseG1GC) {
733 Label slow_path;
734 // rbx: method
735
736 // Check if local 0 != NULL
737 // If the receiver is null then it is OK to jump to the slow path.
738 __ movptr(rax, Address(rsp, wordSize));
739
740 __ testptr(rax, rax);
741 __ jcc(Assembler::zero, slow_path);
742
743 // rax: local 0
744 // rbx: method (but can be used as scratch now)
745 // rdx: scratch
746 // rdi: scratch
747
748 // Preserve the sender sp in case the pre-barrier
749 // calls the runtime
750 NOT_LP64(__ push(rsi));
751
752 // Generate the G1 pre-barrier code to log the value of
753 // the referent field in an SATB buffer.
754
755 // Load the value of the referent field.
756 const Address field_address(rax, referent_offset);
757 __ load_heap_oop(rax, field_address);
758
759 const Register sender_sp = NOT_LP64(rsi) LP64_ONLY(r13);
760 const Register thread = NOT_LP64(rcx) LP64_ONLY(r15_thread);
761 NOT_LP64(__ get_thread(thread));
762
763 // Generate the G1 pre-barrier code to log the value of
764 // the referent field in an SATB buffer.
765 __ g1_write_barrier_pre(noreg /* obj */,
766 rax /* pre_val */,
767 thread /* thread */,
768 rbx /* tmp */,
769 true /* tosca_live */,
770 true /* expand_call */);
771
772 // _areturn
773 NOT_LP64(__ pop(rsi)); // get sender sp
774 __ pop(rdi); // get return address
775 __ mov(rsp, sender_sp); // set sp to sender sp
776 __ jmp(rdi);
777 __ ret(0);
778
779 // generate a vanilla interpreter entry as the slow path
780 __ bind(slow_path);
781 __ jump_to_entry(Interpreter::entry_for_kind(Interpreter::zerolocals));
782 return entry;
783 }
784 #endif // INCLUDE_ALL_GCS
785
786 // If G1 is not enabled then attempt to go through the accessor entry point
787 // Reference.get is an accessor
788 return NULL;
789 }
790
791 void TemplateInterpreterGenerator::bang_stack_shadow_pages(bool native_call) {
792 // Quick & dirty stack overflow checking: bang the stack & handle trap.
793 // Note that we do the banging after the frame is setup, since the exception
794 // handling code expects to find a valid interpreter frame on the stack.
795 // Doing the banging earlier fails if the caller frame is not an interpreter
796 // frame.
797 // (Also, the exception throwing code expects to unlock any synchronized
798 // method receiver, so do the banging after locking the receiver.)
799
800 // Bang each page in the shadow zone. We can't assume it's been done for
801 // an interpreter frame with greater than a page of locals, so each page
802 // needs to be checked. Only true for non-native.
803 if (UseStackBanging) {
804 const int page_size = os::vm_page_size();
805 const int n_shadow_pages = ((int)JavaThread::stack_shadow_zone_size()) / page_size;
806 const int start_page = native_call ? n_shadow_pages : 1;
807 for (int pages = start_page; pages <= n_shadow_pages; pages++) {
808 __ bang_stack_with_offset(pages*page_size);
809 }
810 }
811 }
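// For example (illustrative numbers only): with 4K pages and a 20-page shadow
// zone, a Java entry touches every offset from 1*4K to 20*4K below rsp, while
// a native entry, whose frame cannot grow afterwards, only bangs the
// outermost page.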
812
813 // Interpreter stub for calling a native method. (asm interpreter)
814 // This sets up a somewhat different looking stack for calling the
815 // native method than the typical interpreter frame setup.
816 address TemplateInterpreterGenerator::generate_native_entry(bool synchronized) {
817 // determine code generation flags
818 bool inc_counter = UseCompiler || CountCompiledCalls || LogTouchedMethods;
819
820 // rbx: Method*
821 // rbcp: sender sp
822
823 address entry_point = __ pc();
824
825 const Address constMethod (rbx, Method::const_offset());
826 const Address access_flags (rbx, Method::access_flags_offset());
827 const Address size_of_parameters(rcx, ConstMethod::
828 size_of_parameters_offset());
829
830
831 // get parameter size (always needed)
832 __ movptr(rcx, constMethod);
833 __ load_unsigned_short(rcx, size_of_parameters);
834
835 // native calls don't need the stack size check since they have no
836 // expression stack; the arguments are already on the stack, and
837 // we only add a handful of words to it
838
839 // rbx: Method*
840 // rcx: size of parameters
841 // rbcp: sender sp
842 __ pop(rax); // get return address
843
844 // for natives the size of locals is zero
845
846 // compute beginning of parameters
847 __ lea(rlocals, Address(rsp, rcx, Interpreter::stackElementScale(), -wordSize));
848
849 // add 2 zero-initialized slots for native calls
850 // initialize result_handler slot
851 __ push((int) NULL_WORD);
852 // slot for oop temp
853 // (static native method holder mirror/jni oop result)
854 __ push((int) NULL_WORD);
855
856 // initialize fixed part of activation frame
857 generate_fixed_frame(true);
858
859 // make sure method is native & not abstract
860 #ifdef ASSERT
861 __ movl(rax, access_flags);
862 {
863 Label L;
864 __ testl(rax, JVM_ACC_NATIVE);
865 __ jcc(Assembler::notZero, L);
866 __ stop("tried to execute non-native method as native");
867 __ bind(L);
868 }
869 {
870 Label L;
871 __ testl(rax, JVM_ACC_ABSTRACT);
872 __ jcc(Assembler::zero, L);
873 __ stop("tried to execute abstract method in interpreter");
874 __ bind(L);
875 }
876 #endif
877
878 // Since at this point in the method invocation the exception handler
879 // would try to exit the monitor of a synchronized method which hasn't
880 // been entered yet, we set the thread local variable
881 // _do_not_unlock_if_synchronized to true. The remove_activation will
882 // check this flag.
883
884 const Register thread1 = NOT_LP64(rax) LP64_ONLY(r15_thread);
885 NOT_LP64(__ get_thread(thread1));
886 const Address do_not_unlock_if_synchronized(thread1,
887 in_bytes(JavaThread::do_not_unlock_if_synchronized_offset()));
888 __ movbool(do_not_unlock_if_synchronized, true);
889
890 // increment invocation count & check for overflow
891 Label invocation_counter_overflow;
892 if (inc_counter) {
893 generate_counter_incr(&invocation_counter_overflow, NULL, NULL);
894 }
895
896 Label continue_after_compile;
897 __ bind(continue_after_compile);
898
899 bang_stack_shadow_pages(true);
900
901 // reset the _do_not_unlock_if_synchronized flag
902 NOT_LP64(__ get_thread(thread1));
903 __ movbool(do_not_unlock_if_synchronized, false);
904
905 // check for synchronized methods
906 // Must happen AFTER invocation_counter check and stack overflow check,
907 // so the method is not locked if either check triggers.
908 if (synchronized) {
909 lock_method();
910 } else {
911 // no synchronization necessary
912 #ifdef ASSERT
913 {
914 Label L;
915 __ movl(rax, access_flags);
916 __ testl(rax, JVM_ACC_SYNCHRONIZED);
917 __ jcc(Assembler::zero, L);
918 __ stop("method needs synchronization");
919 __ bind(L);
920 }
921 #endif
922 }
923
924 // start execution
925 #ifdef ASSERT
926 {
927 Label L;
928 const Address monitor_block_top(rbp,
929 frame::interpreter_frame_monitor_block_top_offset * wordSize);
930 __ movptr(rax, monitor_block_top);
931 __ cmpptr(rax, rsp);
932 __ jcc(Assembler::equal, L);
933 __ stop("broken stack frame setup in interpreter");
934 __ bind(L);
935 }
936 #endif
937
938 // jvmti support
939 __ notify_method_entry();
940
941 // work registers
942 const Register method = rbx;
943 const Register thread = NOT_LP64(rdi) LP64_ONLY(r15_thread);
944 const Register t = NOT_LP64(rcx) LP64_ONLY(r11);
945
946 // allocate space for parameters
947 __ get_method(method);
948 __ movptr(t, Address(method, Method::const_offset()));
949 __ load_unsigned_short(t, Address(t, ConstMethod::size_of_parameters_offset()));
950
951 #ifndef _LP64
952 __ shlptr(t, Interpreter::logStackElementSize); // Convert parameter count to bytes.
953 __ addptr(t, 2*wordSize); // allocate two more slots for JNIEnv and possible mirror
954 __ subptr(rsp, t);
955 __ andptr(rsp, -(StackAlignmentInBytes)); // gcc needs 16 byte aligned stacks to do XMM intrinsics
956 #else
957 __ shll(t, Interpreter::logStackElementSize);
958
959 __ subptr(rsp, t);
960 __ subptr(rsp, frame::arg_reg_save_area_bytes); // windows
961 __ andptr(rsp, -16); // must be 16 byte boundary (see amd64 ABI)
962 #endif // _LP64
963
964 // get signature handler
965 {
966 Label L;
967 __ movptr(t, Address(method, Method::signature_handler_offset()));
968 __ testptr(t, t);
969 __ jcc(Assembler::notZero, L);
970 __ call_VM(noreg,
971 CAST_FROM_FN_PTR(address,
972 InterpreterRuntime::prepare_native_call),
973 method);
974 __ get_method(method);
975 __ movptr(t, Address(method, Method::signature_handler_offset()));
976 __ bind(L);
977 }
978
979 // call signature handler
980 assert(InterpreterRuntime::SignatureHandlerGenerator::from() == rlocals,
981 "adjust this code");
982 assert(InterpreterRuntime::SignatureHandlerGenerator::to() == rsp,
983 "adjust this code");
984 assert(InterpreterRuntime::SignatureHandlerGenerator::temp() == NOT_LP64(t) LP64_ONLY(rscratch1),
985 "adjust this code");
986
987 // The generated handlers do not touch RBX (the method oop).
988 // However, large signatures cannot be cached and are generated
989 // each time here. The slow-path generator can do a GC on return,
990 // so we must reload it after the call.
991 __ call(t);
992 __ get_method(method); // slow path can do a GC, reload RBX
993
994
995 // result handler is in rax
996 // set result handler
997 __ movptr(Address(rbp,
998 (frame::interpreter_frame_result_handler_offset) * wordSize),
999 rax);
1000
1001 // pass mirror handle if static call
1002 {
1003 Label L;
1004 __ movl(t, Address(method, Method::access_flags_offset()));
1005 __ testl(t, JVM_ACC_STATIC);
1006 __ jcc(Assembler::zero, L);
1007 // get mirror
1008 __ load_mirror(t, method);
1009 // copy mirror into activation frame
1010 __ movptr(Address(rbp, frame::interpreter_frame_oop_temp_offset * wordSize),
1011 t);
1012 // pass handle to mirror
1013 #ifndef _LP64
1014 __ lea(t, Address(rbp, frame::interpreter_frame_oop_temp_offset * wordSize));
1015 __ movptr(Address(rsp, wordSize), t);
1016 #else
1017 __ lea(c_rarg1,
1018 Address(rbp, frame::interpreter_frame_oop_temp_offset * wordSize));
1019 #endif // _LP64
1020 __ bind(L);
1021 }
1022
1023 // get native function entry point
1024 {
1025 Label L;
1026 __ movptr(rax, Address(method, Method::native_function_offset()));
1027 ExternalAddress unsatisfied(SharedRuntime::native_method_throw_unsatisfied_link_error_entry());
1028 __ cmpptr(rax, unsatisfied.addr());
1029 __ jcc(Assembler::notEqual, L);
1030 __ call_VM(noreg,
1031 CAST_FROM_FN_PTR(address,
1032 InterpreterRuntime::prepare_native_call),
1033 method);
1034 __ get_method(method);
1035 __ movptr(rax, Address(method, Method::native_function_offset()));
1036 __ bind(L);
1037 }
1038
1039 // pass JNIEnv
1040 #ifndef _LP64
1041 __ get_thread(thread);
1042 __ lea(t, Address(thread, JavaThread::jni_environment_offset()));
1043 __ movptr(Address(rsp, 0), t);
1044
1045 // set_last_Java_frame_before_call
1046 // It is enough that the pc()
1047 // points into the right code segment. It does not have to be the correct return pc.
1048 __ set_last_Java_frame(thread, noreg, rbp, __ pc());
1049 #else
1050 __ lea(c_rarg0, Address(r15_thread, JavaThread::jni_environment_offset()));
1051
1052 // It is enough that the pc() points into the right code
1053 // segment. It does not have to be the correct return pc.
1054 __ set_last_Java_frame(rsp, rbp, (address) __ pc());
1055 #endif // _LP64
1056
1057 // change thread state
1058 #ifdef ASSERT
1059 {
1060 Label L;
1061 __ movl(t, Address(thread, JavaThread::thread_state_offset()));
1062 __ cmpl(t, _thread_in_Java);
1063 __ jcc(Assembler::equal, L);
1064 __ stop("Wrong thread state in native stub");
1065 __ bind(L);
1066 }
1067 #endif
1068
1069 // Change state to native
1070
1071 __ movl(Address(thread, JavaThread::thread_state_offset()),
1072 _thread_in_native);
1073
1074 // Call the native method.
1075 __ call(rax);
1076 // 32: result potentially in rdx:rax or ST0
1077 // 64: result potentially in rax or xmm0
1078
1079 // Verify or restore cpu control state after JNI call
1080 __ restore_cpu_control_state_after_jni();
1081
1082 // NOTE: The order of these pushes is known to frame::interpreter_frame_result
1083 // in order to extract the result of a method call. If the order of these
1084 // pushes changes or anything else is added to the stack then the code in
1085 // interpreter_frame_result must also change.
1086
1087 #ifndef _LP64
1088 // save potential result in ST(0) & rdx:rax
1089 // (if result handler is the T_FLOAT or T_DOUBLE handler, result must be in ST0 -
1090 // the check is necessary to avoid potential Intel FPU overflow problems by saving/restoring 'empty' FPU registers)
1091 // It is safe to do this push because state is _thread_in_native and return address will be found
1092 // via _last_native_pc and not via _last_java_sp
1093
1094 // NOTE: the order of these push(es) is known to frame::interpreter_frame_result.
1095 // If the order changes or anything else is added to the stack the code in
1096 // interpreter_frame_result will have to be changed.
1097
1098 { Label L;
1099 Label push_double;
1100 ExternalAddress float_handler(AbstractInterpreter::result_handler(T_FLOAT));
1101 ExternalAddress double_handler(AbstractInterpreter::result_handler(T_DOUBLE));
1102 __ cmpptr(Address(rbp, (frame::interpreter_frame_oop_temp_offset + 1)*wordSize),
1103 float_handler.addr());
1104 __ jcc(Assembler::equal, push_double);
1105 __ cmpptr(Address(rbp, (frame::interpreter_frame_oop_temp_offset + 1)*wordSize),
1106 double_handler.addr());
1107 __ jcc(Assembler::notEqual, L);
1108 __ bind(push_double);
1109 __ push_d(); // FP values are returned using the FPU, so push FPU contents (even if UseSSE > 0).
1110 __ bind(L);
1111 }
1112 #else
1113 __ push(dtos);
1114 #endif // _LP64
1115
1116 __ push(ltos);
1117
1118 // change thread state
1119 NOT_LP64(__ get_thread(thread));
1120 __ movl(Address(thread, JavaThread::thread_state_offset()),
1121 _thread_in_native_trans);
1122
1123 if (os::is_MP()) {
1124 if (UseMembar) {
1125 // Force this write out before the read below
1126 __ membar(Assembler::Membar_mask_bits(
1127 Assembler::LoadLoad | Assembler::LoadStore |
1128 Assembler::StoreLoad | Assembler::StoreStore));
1129 } else {
1130 // Write serialization page so VM thread can do a pseudo remote membar.
1131 // We use the current thread pointer to calculate a thread specific
1132 // offset to write to within the page. This minimizes bus traffic
1133 // due to cache line collision.
1134 __ serialize_memory(thread, rcx);
1135 }
1136 }
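// Either way the point is store-load ordering: the _thread_in_native_trans
// store above must become visible to the VM thread before this thread reads
// the safepoint/handshake state below, or a safepoint request could be
// missed.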
1137
1138 #ifndef _LP64
1139 if (AlwaysRestoreFPU) {
1140 // Make sure the control word is correct.
1141 __ fldcw(ExternalAddress(StubRoutines::addr_fpu_cntrl_wrd_std()));
1142 }
1143 #endif // _LP64
1144
1145 // check for safepoint operation in progress and/or pending suspend requests
1146 {
1147 Label Continue;
1148 Label slow_path;
1149
1150 #ifndef _LP64
1151 __ safepoint_poll(slow_path);
1152 #else
1153 __ safepoint_poll(slow_path, r15_thread, rscratch1);
1154 #endif
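// safepoint_poll is the heart of this change for 32 bit: like the 64-bit
// version, it is expected to test the per-thread polling word when
// ThreadLocalHandshakes is enabled (so individual threads can be stopped)
// and otherwise compare the global SafepointSynchronize state; the 32-bit
// overload takes no thread/scratch registers, so presumably it locates the
// current thread itself.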
1155
1156 __ cmpl(Address(thread, JavaThread::suspend_flags_offset()), 0);
1157 __ jcc(Assembler::equal, Continue);
1158 __ bind(slow_path);
1159
1160 // Don't use call_VM as it will see a possible pending exception
1161 // and forward it and never return here preventing us from
1162 // clearing _last_native_pc down below. Also can't use
1163 // call_VM_leaf either as it will check to see if r13 & r14 are
1164 // preserved and correspond to the bcp/locals pointers. So we do a
1165 // runtime call by hand.
1166 //
1167 #ifndef _LP64
1168 __ push(thread);
1169 __ call(RuntimeAddress(CAST_FROM_FN_PTR(address,
1170 JavaThread::check_special_condition_for_native_trans)));
1171 __ increment(rsp, wordSize);
1172 __ get_thread(thread);
1173 #else
1174 __ mov(c_rarg0, r15_thread);
1175 __ mov(r12, rsp); // remember sp (can only use r12 if not using call_VM)
1176 __ subptr(rsp, frame::arg_reg_save_area_bytes); // windows
1177 __ andptr(rsp, -16); // align stack as required by ABI
1178 __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, JavaThread::check_special_condition_for_native_trans)));
1179 __ mov(rsp, r12); // restore sp
1180 __ reinit_heapbase();
1181 #endif // _LP64
1182 __ bind(Continue);
1183 }
1184
1185 // change thread state
1186 __ movl(Address(thread, JavaThread::thread_state_offset()), _thread_in_Java);
1187
1188 // reset_last_Java_frame
1189 __ reset_last_Java_frame(thread, true);
1190
1191 if (CheckJNICalls) {
1192 // clear_pending_jni_exception_check
1193 __ movptr(Address(thread, JavaThread::pending_jni_exception_check_fn_offset()), NULL_WORD);
1194 }
1195
1196 // reset handle block
1197 __ movptr(t, Address(thread, JavaThread::active_handles_offset()));
1198 __ movl(Address(t, JNIHandleBlock::top_offset_in_bytes()), (int32_t)NULL_WORD);
1199
1200 // If the result is an oop, unbox it and store it in the frame where GC
1201 // will see it; the result handler will pick it up
1202
1203 {
1204 Label no_oop, not_weak, store_result;
1205 __ lea(t, ExternalAddress(AbstractInterpreter::result_handler(T_OBJECT)));
1206 __ cmpptr(t, Address(rbp, frame::interpreter_frame_result_handler_offset*wordSize));
1207 __ jcc(Assembler::notEqual, no_oop);
1208 // retrieve result
1209 __ pop(ltos);
1210 // Unbox oop result, e.g. JNIHandles::resolve value.
1211 __ resolve_jobject(rax /* value */,
1212 thread /* thread */,
1213 t /* tmp */);
1214 __ movptr(Address(rbp, frame::interpreter_frame_oop_temp_offset*wordSize), rax);
1215 // keep stack depth as expected by pushing oop which will eventually be discarded
1216 __ push(ltos);
1217 __ bind(no_oop);
1218 }
1219
1220
1221 {
1222 Label no_reguard;
1223 __ cmpl(Address(thread, JavaThread::stack_guard_state_offset()),
1224 JavaThread::stack_guard_yellow_reserved_disabled);
1225 __ jcc(Assembler::notEqual, no_reguard);
1226
1227 __ pusha(); // XXX only save smashed registers
1228 #ifndef _LP64
1229 __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, SharedRuntime::reguard_yellow_pages)));
1230 __ popa();
1231 #else
1232 __ mov(r12, rsp); // remember sp (can only use r12 if not using call_VM)
1233 __ subptr(rsp, frame::arg_reg_save_area_bytes); // windows
1234 __ andptr(rsp, -16); // align stack as required by ABI
1235 __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, SharedRuntime::reguard_yellow_pages)));
1236 __ mov(rsp, r12); // restore sp
1237 __ popa(); // XXX only restore smashed registers
1238 __ reinit_heapbase();
1239 #endif // _LP64
1240
1241 __ bind(no_reguard);
1242 }
1243
1244
1245 // The method register is junk from after the thread_in_native transition
1246 // until here. Also can't call_VM until the bcp has been
1247 // restored. Need bcp for throwing exception below so get it now.
1248 __ get_method(method);
1249
1250 // restore to have legal interpreter frame, i.e., bci == 0 <=> code_base()
1251 __ movptr(rbcp, Address(method, Method::const_offset())); // get ConstMethod*
1252 __ lea(rbcp, Address(rbcp, ConstMethod::codes_offset())); // get codebase
1253
1254 // handle exceptions (exception handling will handle unlocking!)
1255 {
1256 Label L;
1257 __ cmpptr(Address(thread, Thread::pending_exception_offset()), (int32_t) NULL_WORD);
1258 __ jcc(Assembler::zero, L);
1259 // Note: At some point we may want to unify this with the code
1260 // used in call_VM_base(); i.e., we should use the
1261 // StubRoutines::forward_exception code. For now this doesn't work
1262 // here because the rsp is not correctly set at this point.
1263 __ MacroAssembler::call_VM(noreg,
1264 CAST_FROM_FN_PTR(address,
1265 InterpreterRuntime::throw_pending_exception));
1266 __ should_not_reach_here();
1267 __ bind(L);
1268 }
1269
1270 // do unlocking if necessary
1271 {
1272 Label L;
1273 __ movl(t, Address(method, Method::access_flags_offset()));
1274 __ testl(t, JVM_ACC_SYNCHRONIZED);
1275 __ jcc(Assembler::zero, L);
1276 // the code below should be shared with interpreter macro
1277 // assembler implementation
1278 {
1279 Label unlock;
1280 // BasicObjectLock will be first in list, since this is a
1281 // synchronized method. However, need to check that the object
1282 // has not been unlocked by an explicit monitorexit bytecode.
1283 const Address monitor(rbp,
1284 (intptr_t)(frame::interpreter_frame_initial_sp_offset *
1285 wordSize - (int)sizeof(BasicObjectLock)));
1286
1287 const Register regmon = NOT_LP64(rdx) LP64_ONLY(c_rarg1);
1288
1289 // the monitor is expected in c_rarg1 for the slow unlock path
1290 __ lea(regmon, monitor); // address of first monitor
1291
1292 __ movptr(t, Address(regmon, BasicObjectLock::obj_offset_in_bytes()));
1293 __ testptr(t, t);
1294 __ jcc(Assembler::notZero, unlock);
1295
1296 // Entry already unlocked, need to throw exception
1297 __ MacroAssembler::call_VM(noreg,
1298 CAST_FROM_FN_PTR(address,
1299 InterpreterRuntime::throw_illegal_monitor_state_exception));
1300 __ should_not_reach_here();
1301
1302 __ bind(unlock);
1303 __ unlock_object(regmon);
1304 }
1305 __ bind(L);
1306 }
1307
1308 // jvmti support
1309 // Note: This must happen _after_ handling/throwing any exceptions since
1310 // the exception handler code notifies the runtime of method exits
1311 // too. If this happens before, method entry/exit notifications are
1312 // not properly paired (was bug - gri 11/22/99).
1313 __ notify_method_exit(vtos, InterpreterMacroAssembler::NotifyJVMTI);
1314
1315 // restore potential result in edx:eax, call result handler to
1316 // restore potential result in ST0 & handle result
1317
1318 __ pop(ltos);
1319 LP64_ONLY( __ pop(dtos));
1320
1321 __ movptr(t, Address(rbp,
1322 (frame::interpreter_frame_result_handler_offset) * wordSize));
1323 __ call(t);
1324
1325 // remove activation
1326 __ movptr(t, Address(rbp,
1327 frame::interpreter_frame_sender_sp_offset *
1328 wordSize)); // get sender sp
1329 __ leave(); // remove frame anchor
1330 __ pop(rdi); // get return address
1331 __ mov(rsp, t); // set sp to sender sp
1332 __ jmp(rdi);
1333
1334 if (inc_counter) {
1335 // Handle overflow of counter and compile method
1336 __ bind(invocation_counter_overflow);
1337 generate_counter_overflow(continue_after_compile);
1338 }
1339
1340 return entry_point;
1341 }
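// Thread-state protocol used by this entry, in outline:
//
//   _thread_in_Java
//     -> _thread_in_native        (anchor set via set_last_Java_frame, so the
//                                  VM can walk the stack while we are native)
//     -> call native function
//     -> _thread_in_native_trans  (membar/serialize, then safepoint_poll)
//     -> _thread_in_Java
//
// The poll during _thread_in_native_trans is what lets safepoints and, with
// this change, thread-local handshakes catch threads returning from native
// code.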
1342
1343 // Abstract method entry
1344 // Attempt to execute abstract method. Throw exception
1345 address TemplateInterpreterGenerator::generate_abstract_entry(void) {
1346
1347 address entry_point = __ pc();
1348
1349 // abstract method entry
1350
1351 // pop return address, reset last_sp to NULL
1352 __ empty_expression_stack();
1353 __ restore_bcp(); // rsi must be correct for exception handler (was destroyed)
1354 __ restore_locals(); // make sure locals pointer is correct as well (was destroyed)
1355
1356 // throw exception
1357 __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_AbstractMethodError));
1358 // the call_VM checks for exception, so we should never return here.
1359 __ should_not_reach_here();
1360
1361 return entry_point;
1362 }
1363
1364 //
1365 // Generic interpreted method entry to (asm) interpreter
1366 //
1367 address TemplateInterpreterGenerator::generate_normal_entry(bool synchronized) {
1368 // determine code generation flags
1369 bool inc_counter = UseCompiler || CountCompiledCalls || LogTouchedMethods;
1370
1371 // ebx: Method*
1372 // rbcp: sender sp
1373 address entry_point = __ pc();
1374
1375 const Address constMethod(rbx, Method::const_offset());
1376 const Address access_flags(rbx, Method::access_flags_offset());
1377 const Address size_of_parameters(rdx,
1378 ConstMethod::size_of_parameters_offset());
1379 const Address size_of_locals(rdx, ConstMethod::size_of_locals_offset());
1380
1381
1382 // get parameter size (always needed)
1383 __ movptr(rdx, constMethod);
1384 __ load_unsigned_short(rcx, size_of_parameters);
1385
1386 // rbx: Method*
1387 // rcx: size of parameters
1388 // rbcp: sender_sp (could differ from sp+wordSize if we were called via c2i )
1389
1390 __ load_unsigned_short(rdx, size_of_locals); // get size of locals in words
1391 __ subl(rdx, rcx); // rdx = no. of additional locals
1392
1393 // YYY
1394 // __ incrementl(rdx);
1395 // __ andl(rdx, -2);
1396
1397 // see if we've got enough room on the stack for locals plus overhead.
1398 generate_stack_overflow_check();
1399
1400 // get return address
1401 __ pop(rax);
1402
1403 // compute beginning of parameters
1404 __ lea(rlocals, Address(rsp, rcx, Interpreter::stackElementScale(), -wordSize));
1405
1406 // rdx - # of additional locals
1407 // allocate space for locals
1408 // explicitly initialize locals
1409 {
1410 Label exit, loop;
1411 __ testl(rdx, rdx);
1412 __ jcc(Assembler::lessEqual, exit); // do nothing if rdx <= 0
1413 __ bind(loop);
1414 __ push((int) NULL_WORD); // initialize local variables
1415 __ decrementl(rdx); // until everything initialized
1416 __ jcc(Assembler::greater, loop);
1417 __ bind(exit);
1418 }
1419
1420 // initialize fixed part of activation frame
1421 generate_fixed_frame(false);
1422
1423 // make sure method is not native & not abstract
1424 #ifdef ASSERT
1425 __ movl(rax, access_flags);
1426 {
1427 Label L;
1428 __ testl(rax, JVM_ACC_NATIVE);
1429 __ jcc(Assembler::zero, L);
1430 __ stop("tried to execute native method as non-native");
1431 __ bind(L);
1432 }
1433 {
1434 Label L;
1435 __ testl(rax, JVM_ACC_ABSTRACT);
1436 __ jcc(Assembler::zero, L);
1437 __ stop("tried to execute abstract method in interpreter");
1438 __ bind(L);
1439 }
1440 #endif
1441
1442 // Since at this point in the method invocation the exception
1443 // handler would try to exit the monitor of a synchronized method
1444 // which hasn't been entered yet, we set the thread local variable
1445 // _do_not_unlock_if_synchronized to true. The remove_activation
1446 // will check this flag.
1447
1448 const Register thread = NOT_LP64(rax) LP64_ONLY(r15_thread);
1449 NOT_LP64(__ get_thread(thread));
1450 const Address do_not_unlock_if_synchronized(thread,
1451 in_bytes(JavaThread::do_not_unlock_if_synchronized_offset()));
1452 __ movbool(do_not_unlock_if_synchronized, true);
1453
1454 __ profile_parameters_type(rax, rcx, rdx);
1455 // increment invocation count & check for overflow
1456 Label invocation_counter_overflow;
1457 Label profile_method;
1458 Label profile_method_continue;
1459 if (inc_counter) {
1460 generate_counter_incr(&invocation_counter_overflow,
1461 &profile_method,
1462 &profile_method_continue);
1463 if (ProfileInterpreter) {
1464 __ bind(profile_method_continue);
1465 }
1466 }
1467
1468 Label continue_after_compile;
1469 __ bind(continue_after_compile);
1470
1471 // bang the stack shadow pages before any locking or execution
1472 bang_stack_shadow_pages(false);
1473
1474 // reset the _do_not_unlock_if_synchronized flag
1475 NOT_LP64(__ get_thread(thread));
1476 __ movbool(do_not_unlock_if_synchronized, false);
1477
1478 // check for synchronized methods
1479 // Must happen AFTER invocation_counter check and stack overflow check,
1480 // so the method is not locked if either check triggers.
1481 if (synchronized) {
1482 // Allocate monitor and lock method
1483 lock_method();
1484 } else {
1485 // no synchronization necessary
1486 #ifdef ASSERT
1487 {
1488 Label L;
1489 __ movl(rax, access_flags);
1490 __ testl(rax, JVM_ACC_SYNCHRONIZED);
1491 __ jcc(Assembler::zero, L);
1492 __ stop("method needs synchronization");
1493 __ bind(L);
1494 }
1495 #endif
1496 }
1497
1498 // start execution
1499 #ifdef ASSERT
1500 {
1501 Label L;
1502 const Address monitor_block_top (rbp,
1503 frame::interpreter_frame_monitor_block_top_offset * wordSize);
1504 __ movptr(rax, monitor_block_top);
1505 __ cmpptr(rax, rsp);
1506 __ jcc(Assembler::equal, L);
1507 __ stop("broken stack frame setup in interpreter");
1508 __ bind(L);
1509 }
1510 #endif
1511
1512 // jvmti support
1513 __ notify_method_entry();
1514
1515 __ dispatch_next(vtos);
1516
1517 // invocation counter overflow
1518 if (inc_counter) {
1519 if (ProfileInterpreter) {
1520 // We have decided to profile this method in the interpreter
1521 __ bind(profile_method);
1522 __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::profile_method));
1523 __ set_method_data_pointer_for_bcp();
1524 __ get_method(rbx);
1525 __ jmp(profile_method_continue);
1526 }
1527 // Handle overflow of counter and compile method
1528 __ bind(invocation_counter_overflow);
1529 generate_counter_overflow(continue_after_compile);
1530 }
1531
1532 return entry_point;
1533 }
1534
1535 //-----------------------------------------------------------------------------
1536 // Exceptions
1537
1538 void TemplateInterpreterGenerator::generate_throw_exception() {
1539 // Entry point in previous activation (i.e., if the caller was
1540 // interpreted)
1541 Interpreter::_rethrow_exception_entry = __ pc();
1542 // Restore sp to interpreter_frame_last_sp even though we are going
1543 // to empty the expression stack for the exception processing.
1544 __ movptr(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize), (int32_t)NULL_WORD);
1545 // rax: exception
1546 // rdx: return address/pc that threw exception
1547 __ restore_bcp(); // r13/rsi points to call/send
1548 __ restore_locals();
1549 LP64_ONLY(__ reinit_heapbase()); // restore r12 as heapbase.
1550 // Entry point for exceptions thrown within interpreter code
1551 Interpreter::_throw_exception_entry = __ pc();
1552 // expression stack is undefined here
1553 // rax: exception
1554 // r13/rsi: exception bcp
1555 __ verify_oop(rax);
1556 Register rarg = NOT_LP64(rax) LP64_ONLY(c_rarg1);
1557 LP64_ONLY(__ mov(c_rarg1, rax));
1558
1559 // expression stack must be empty before entering the VM in case of
1560 // an exception
1561 __ empty_expression_stack();
1562 // find exception handler address and preserve exception oop
1563 __ call_VM(rdx,
1564 CAST_FROM_FN_PTR(address,
1565 InterpreterRuntime::exception_handler_for_exception),
1566 rarg);
1567 // rax: exception handler entry point
1568 // rdx: preserved exception oop
1569 // r13/rsi: bcp for exception handler
1570 __ push_ptr(rdx); // push exception which is now the only value on the stack
1571 __ jmp(rax); // jump to exception handler (may be _remove_activation_entry!)
1572
1573 // If the exception is not handled in the current frame the frame is
1574 // removed and the exception is rethrown (i.e. exception
1575 // continuation is _rethrow_exception).
1576 //
1577 // Note: At this point the bci is still the bci for the instruction
1578 // which caused the exception and the expression stack is
1579 // empty. Thus, for any VM calls at this point, GC will find a legal
1580 // oop map (with empty expression stack).
1581
1582 // In current activation
1583 // tos: exception
1584 // esi: exception bcp
1585
1586 //
1587 // JVMTI PopFrame support
1588 //
1589
1590 Interpreter::_remove_activation_preserving_args_entry = __ pc();
1591 __ empty_expression_stack();
1592 // Set the popframe_processing bit in pending_popframe_condition
1593 // indicating that we are currently handling popframe, so that
1594 // call_VMs that may happen later do not trigger new popframe
1595 // handling cycles.
1596 const Register thread = NOT_LP64(rcx) LP64_ONLY(r15_thread);
1597 NOT_LP64(__ get_thread(thread));
1598 __ movl(rdx, Address(thread, JavaThread::popframe_condition_offset()));
1599 __ orl(rdx, JavaThread::popframe_processing_bit);
1600 __ movl(Address(thread, JavaThread::popframe_condition_offset()), rdx);
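// In C++ terms the three instructions above amount to (a sketch, assuming
// the usual JavaThread accessors):
//   thread->set_popframe_condition(thread->popframe_condition() |
//                                  JavaThread::popframe_processing_bit);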
1601
1602 {
1603 // Check to see whether we are returning to a deoptimized frame.
1604 // (The PopFrame call ensures that the caller of the popped frame is
1605 // either interpreted or compiled, and deoptimizes it first if it was compiled.)
1606 // In this case, we can't call dispatch_next() after the frame is
1607 // popped, but instead must save the incoming arguments and restore
1608 // them after deoptimization has occurred.
1609 //
1610 // Note that we don't compare the return PC against the
1611 // deoptimization blob's unpack entry because of the presence of
1612 // adapter frames in C2.
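// Sketch of the test below: InterpreterRuntime::interpreter_contains(pc)
// reports whether the caller's return address lies inside interpreter code.
// Nonzero means the caller is interpreted and dispatch can continue there;
// zero means the caller was compiled and, per the PopFrame protocol above,
// has been deoptimized, so its arguments must be preserved across the
// deoptimization instead.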
1613 Label caller_not_deoptimized;
1614 Register rarg = NOT_LP64(rdx) LP64_ONLY(c_rarg1);
1615 __ movptr(rarg, Address(rbp, frame::return_addr_offset * wordSize));
1616 __ super_call_VM_leaf(CAST_FROM_FN_PTR(address,
1617 InterpreterRuntime::interpreter_contains), rarg);
1618 __ testl(rax, rax);
1619 __ jcc(Assembler::notZero, caller_not_deoptimized);
1620
1621 // Compute size of arguments for saving when returning to
1622 // deoptimized caller
1623 __ get_method(rax);
1624 __ movptr(rax, Address(rax, Method::const_offset()));
1625 __ load_unsigned_short(rax, Address(rax, in_bytes(ConstMethod::
1626 size_of_parameters_offset())));
1627 __ shll(rax, Interpreter::logStackElementSize);
1628 __ restore_locals();
1629 __ subptr(rlocals, rax);
1630 __ addptr(rlocals, wordSize);
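// Worked example (a sketch): for a method with three parameter slots, rax
// becomes 3 << logStackElementSize bytes and rlocals is rewound from local
// slot 0 to the lowest argument word, so [rlocals, rlocals + rax) covers
// exactly the incoming arguments to be copied aside.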
1631 // Save these arguments
1632 NOT_LP64(__ get_thread(thread));
1633 __ super_call_VM_leaf(CAST_FROM_FN_PTR(address,
1634 Deoptimization::
1635 popframe_preserve_args),
1636 thread, rax, rlocals);
1637
1638 __ remove_activation(vtos, rdx,
1639 /* throw_monitor_exception */ false,
1640 /* install_monitor_exception */ false,
1641 /* notify_jvmdi */ false);
1642
1643 // Inform deoptimization that it is responsible for restoring
1644 // these arguments
1645 NOT_LP64(__ get_thread(thread));
1646 __ movl(Address(thread, JavaThread::popframe_condition_offset()),
1647 JavaThread::popframe_force_deopt_reexecution_bit);
1648
1649 // Continue in deoptimization handler
1650 __ jmp(rdx);
1651
1652 __ bind(caller_not_deoptimized);
1653 }
1654
1655 __ remove_activation(vtos, rdx, /* rdx result (retaddr) is not used */
1656 /* throw_monitor_exception */ false,
1657 /* install_monitor_exception */ false,
1658 /* notify_jvmdi */ false);
1659
1660 // Finish with popframe handling
1661 // A previous I2C followed by a deoptimization might have moved the
1662 // outgoing arguments further up the stack. PopFrame expects the
1663 // mutations to those outgoing arguments to be preserved and other
1664 // constraints basically require this frame to look exactly as
1665 // though it had previously invoked an interpreted activation with
1666 // no space between the top of the expression stack (current
1667 // last_sp) and the top of stack. Rather than force deopt to
1668 // maintain this kind of invariant all the time, we call a small
1669 // fixup routine to move the mutated arguments onto the top of our
1670 // expression stack if necessary.
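// Outline of the fixup (a sketch): the leaf call below passes the current
// rsp and the saved interpreter_frame_last_sp to
// InterpreterRuntime::popframe_move_outgoing_args(thread, src, dest), which
// copies the mutated outgoing arguments into place. Because the PC at the
// call site lies in interpreter code rather than a stub, last_Java_frame
// has to be set and reset by hand around the call.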
1671 #ifndef _LP64
1672 __ mov(rax, rsp);
1673 __ movptr(rbx, Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize));
1674 __ get_thread(thread);
1675 // PC must point into interpreter here
1676 __ set_last_Java_frame(thread, noreg, rbp, __ pc());
1677 __ super_call_VM_leaf(CAST_FROM_FN_PTR(address, InterpreterRuntime::popframe_move_outgoing_args), thread, rax, rbx);
1678 __ get_thread(thread);
1679 #else
1680 __ mov(c_rarg1, rsp);
1681 __ movptr(c_rarg2, Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize));
1682 // PC must point into interpreter here
1683 __ set_last_Java_frame(noreg, rbp, __ pc());
1684 __ super_call_VM_leaf(CAST_FROM_FN_PTR(address, InterpreterRuntime::popframe_move_outgoing_args), r15_thread, c_rarg1, c_rarg2);
1685 #endif
1686 __ reset_last_Java_frame(thread, true);
1687
1688 // Restore the last_sp and null it out
1689 __ movptr(rsp, Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize));
1690 __ movptr(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize), (int32_t)NULL_WORD);
1691
1692 __ restore_bcp();
1693 __ restore_locals();
1694 // The method data pointer was incremented already during
1695 // call profiling. We have to restore the mdp for the current bcp.
1696 if (ProfileInterpreter) {
1697 __ set_method_data_pointer_for_bcp();
1698 }
1699
1700 // Clear the popframe condition flag
1701 NOT_LP64(__ get_thread(thread));
1702 __ movl(Address(thread, JavaThread::popframe_condition_offset()),
1703 JavaThread::popframe_inactive);
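// C++-level equivalent (a sketch, assuming the usual accessor):
//   thread->set_popframe_condition(JavaThread::popframe_inactive);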
1704
1705 #if INCLUDE_JVMTI
1706 {
1707 Label L_done;
1708 const Register local0 = rlocals;
1709
1710 __ cmpb(Address(rbcp, 0), Bytecodes::_invokestatic);
1711 __ jcc(Assembler::notEqual, L_done);
1712
1713 // The member name argument must be restored if _invokestatic is re-executed after a PopFrame call.
1714 // Detect such a case in the InterpreterRuntime function and return the member name argument, or NULL.
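// Sketch of the contract assumed here: the runtime call below receives
// (member_name_candidate, method, bcp) and yields the MemberName oop that
// must be put back into local slot 0 when the _invokestatic is a
// MemberName-based linker call about to be re-executed, or NULL when
// nothing needs restoring.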
1715
1716 __ get_method(rdx);
1717 __ movptr(rax, Address(local0, 0));
1718 __ call_VM(rax, CAST_FROM_FN_PTR(address, InterpreterRuntime::member_name_arg_or_null), rax, rdx, rbcp);
1719
1720 __ testptr(rax, rax);
1721 __ jcc(Assembler::zero, L_done);
1722
1723 __ movptr(Address(local0, 0), rax); // restore the MemberName in local slot 0
1724 __ bind(L_done);
1725 }
1726 #endif // INCLUDE_JVMTI
1727
1728 __ dispatch_next(vtos);
1729 // end of PopFrame support
1730
1731 Interpreter::_remove_activation_entry = __ pc();
1732
1733 // preserve exception over this code sequence
1734 __ pop_ptr(rax);
1735 NOT_LP64(__ get_thread(thread));
1736 __ movptr(Address(thread, JavaThread::vm_result_offset()), rax);
1737 // remove the activation (without doing throws on illegalMonitorExceptions)
1738 __ remove_activation(vtos, rdx, false, true, false);
1739 // restore exception
1740 NOT_LP64(__ get_thread(thread));
1741 __ get_vm_result(rax, thread);
1742
1743 // In between activations - previous activation type unknown yet
1744 // compute continuation point - the continuation point expects the
1745 // following registers set up:
1746 //
1747 // rax: exception
1748 // rdx: return address/pc that threw exception
1749 // rsp: expression stack of caller
1750 // rbp: ebp of caller
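// SharedRuntime::exception_handler_for_return_address(thread, ret_pc) maps
// the caller's return PC to the matching continuation (a sketch of the
// dispatch): the interpreter's rethrow entry for an interpreted caller, the
// nmethod's exception handler for a compiled one. rax and rdx are only
// preserved across the call by the surrounding pushes.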
1751 __ push(rax); // save exception
1752 __ push(rdx); // save return address
1753 __ super_call_VM_leaf(CAST_FROM_FN_PTR(address,
1754 SharedRuntime::exception_handler_for_return_address),
1755 thread, rdx);
1756 __ mov(rbx, rax); // save exception handler
1757 __ pop(rdx); // restore return address
1758 __ pop(rax); // restore exception
1759 // Note that an "issuing PC" is actually the next PC after the call
1760 __ jmp(rbx); // jump to exception
1761 // handler of caller
1762 }
1763
1764
1765 //
1766 // JVMTI ForceEarlyReturn support
1767 //
1768 address TemplateInterpreterGenerator::generate_earlyret_entry_for(TosState state) {
1769 address entry = __ pc();
1770
1771 __ restore_bcp();
1772 __ restore_locals();
1773 __ empty_expression_stack();
1774 __ load_earlyret_value(state); // on 32 bit, long results come back in rdx:rax, so don't reuse rdx
1775
1776 const Register thread = NOT_LP64(rcx) LP64_ONLY(r15_thread);
1777 NOT_LP64(__ get_thread(thread));
1778 __ movptr(rcx, Address(thread, JavaThread::jvmti_thread_state_offset()));
1779 Address cond_addr(rcx, JvmtiThreadState::earlyret_state_offset());
1780
1781 // Clear the earlyret state
1782 __ movl(cond_addr, JvmtiThreadState::earlyret_inactive);
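// At this point ForceEarlyReturn has already parked the forced return value
// in the JvmtiThreadState and load_earlyret_value has moved it into the
// tosca registers, so the frame can be unwound as if the method had
// returned normally.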
1783
1784 __ remove_activation(state, rsi,
1785 false, /* throw_monitor_exception */
1786 false, /* install_monitor_exception */
1787 true); /* notify_jvmdi */
1788 __ jmp(rsi);
1789
1790 return entry;
1791 } // end of ForceEarlyReturn support
1792
1793
1794 //-----------------------------------------------------------------------------
1795 // Helper for vtos entry point generation
1796
1797 void TemplateInterpreterGenerator::set_vtos_entry_points(Template* t,
1798 address& bep,
1799 address& cep,
1800 address& sep,
1801 address& aep,
1802 address& iep,
1803 address& lep,
1804 address& fep,
1805 address& dep,
1806 address& vep) {
1807 assert(t->is_valid() && t->tos_in() == vtos, "illegal template");
1808 Label L;
1809 aep = __ pc(); __ push_ptr(); __ jmp(L);
1810 #ifndef _LP64
1811 fep = __ pc(); __ push(ftos); __ jmp(L);
1812 dep = __ pc(); __ push(dtos); __ jmp(L);
1813 #else
1814 fep = __ pc(); __ push_f(xmm0); __ jmp(L);
1815 dep = __ pc(); __ push_d(xmm0); __ jmp(L);
1816 #endif // _LP64
1817 lep = __ pc(); __ push_l(); __ jmp(L);
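// btos, ctos and stos values are already int-sized on the expression stack,
// so byte, char and short can all share the int entry point: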
1818 bep = cep = sep =
1819 iep = __ pc(); __ push_i();
1820 vep = __ pc();
1821 __ bind(L);
1822 generate_and_dispatch(t);
1823 }
1824
1825 //-----------------------------------------------------------------------------
1826
1827 // Non-product code
1828 #ifndef PRODUCT
1829
1830 address TemplateInterpreterGenerator::generate_trace_code(TosState state) {
1831 address entry = __ pc();
1832
1833 #ifndef _LP64
1834 // prepare expression stack
1835 __ pop(rcx); // pop return address so expression stack is 'pure'
1836 __ push(state); // save tosca
1837
1838 // pass tosca registers as arguments & call tracer
1839 __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::trace_bytecode), rcx, rax, rdx);
1840 __ mov(rcx, rax); // make sure return address is not destroyed by pop(state)
1841 __ pop(state); // restore tosca
1842
1843 // return
1844 __ jmp(rcx);
1845 #else
1846 __ push(state);
1847 __ push(c_rarg0);
1848 __ push(c_rarg1);
1849 __ push(c_rarg2);
1850 __ push(c_rarg3);
1851 __ mov(c_rarg2, rax); // Pass itos
1852 #ifdef _WIN64
1853 __ movflt(xmm3, xmm0); // Pass ftos
1854 #endif
1855 __ call_VM(noreg,
1856 CAST_FROM_FN_PTR(address, InterpreterRuntime::trace_bytecode),
1857 c_rarg1, c_rarg2, c_rarg3);
1858 __ pop(c_rarg3);
1859 __ pop(c_rarg2);
1860 __ pop(c_rarg1);
1861 __ pop(c_rarg0);
1862 __ pop(state);
1863 __ ret(0); // return from result handler
1864 #endif // _LP64
1865
1866 return entry;
1867 }
1868
1869 void TemplateInterpreterGenerator::count_bytecode() {
1870 __ incrementl(ExternalAddress((address) &BytecodeCounter::_counter_value));
1871 }
1872
1873 void TemplateInterpreterGenerator::histogram_bytecode(Template* t) {
1874 __ incrementl(ExternalAddress((address) &BytecodeHistogram::_counters[t->bytecode()]));
1875 }
1876
1877 void TemplateInterpreterGenerator::histogram_bytecode_pair(Template* t) {
1878 __ mov32(rbx, ExternalAddress((address) &BytecodePairHistogram::_index));
1879 __ shrl(rbx, BytecodePairHistogram::log2_number_of_codes);
1880 __ orl(rbx,
1881 ((int) t->bytecode()) <<
1882 BytecodePairHistogram::log2_number_of_codes);
1883 __ mov32(ExternalAddress((address) &BytecodePairHistogram::_index), rbx);
1884 __ lea(rscratch1, ExternalAddress((address) BytecodePairHistogram::_counters));
1885 __ incrementl(Address(rscratch1, rbx, Address::times_4));
1886 }
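// Index arithmetic sketch, assuming log2_number_of_codes == 8: the new
// index is (old_index >> 8) | (current_bytecode << 8), i.e. the low byte
// holds the previous bytecode and the high byte the current one, and the
// matching _counters slot is incremented.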
1887
1888
1889 void TemplateInterpreterGenerator::trace_bytecode(Template* t) {
1890 // Call a little run-time stub to avoid blow-up for each bytecode.
1891 // The run-time stub saves the right registers, depending on
1892 // the tosca in-state for the given template.
1893
1894 assert(Interpreter::trace_code(t->tos_in()) != NULL,
1895 "entry must have been generated");
1896 #ifndef _LP64
1897 __ call(RuntimeAddress(Interpreter::trace_code(t->tos_in())));
1898 #else
1899 __ mov(r12, rsp); // remember sp (can only use r12 if not using call_VM)
1900 __ andptr(rsp, -16); // align stack as required by ABI
1901 __ call(RuntimeAddress(Interpreter::trace_code(t->tos_in())));
1902 __ mov(rsp, r12); // restore sp
1903 __ reinit_heapbase();
1904 #endif // _LP64
1905 }
1906
1907
1908 void TemplateInterpreterGenerator::stop_interpreter_at() {
1909 Label L;
1910 __ cmp32(ExternalAddress((address) &BytecodeCounter::_counter_value),
1911 StopInterpreterAt);
1912 __ jcc(Assembler::notEqual, L);
1913 __ int3();
1914 __ bind(L);
1915 }
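// Usage sketch: in a debug build, -XX:StopInterpreterAt=N plants this int3
// breakpoint so a native debugger stops once the global bytecode counter
// (see count_bytecode above) reaches N.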
1916 #endif // !PRODUCT