/*
 * Copyright (c) 2003, 2016, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "asm/macroAssembler.hpp"
#include "interpreter/bytecodeHistogram.hpp"
#include "interpreter/interp_masm.hpp"
#include "interpreter/interpreter.hpp"
#include "interpreter/interpreterRuntime.hpp"
#include "interpreter/templateInterpreterGenerator.hpp"
#include "interpreter/templateTable.hpp"
#include "oops/arrayOop.hpp"
#include "oops/methodData.hpp"
#include "oops/method.hpp"
#include "oops/oop.inline.hpp"
#include "prims/jvmtiExport.hpp"
#include "prims/jvmtiThreadState.hpp"
#include "runtime/arguments.hpp"
#include "runtime/deoptimization.hpp"
#include "runtime/frame.inline.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubRoutines.hpp"
#include "runtime/synchronizer.hpp"
#include "runtime/timer.hpp"
#include "runtime/vframeArray.hpp"
#include "utilities/debug.hpp"
#include "utilities/macros.hpp"

#define __ _masm->

// Size of interpreter code.  Increase if too small.  The interpreter will
// fail with a guarantee ("not enough space for interpreter generation")
// if the size is too small.
// Run with -XX:+PrintInterpreter to get the VM to print out the size.
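// (Depending on the build, PrintInterpreter may be a diagnostic flag, in
// which case -XX:+UnlockDiagnosticVMOptions may be needed as well.)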
// Max size with JVMTI
#ifdef AMD64
int TemplateInterpreter::InterpreterCodeSize = 256 * 1024;
#else
int TemplateInterpreter::InterpreterCodeSize = 224 * 1024;
#endif // AMD64

// Global Register Names
static const Register rbcp    = LP64_ONLY(r13) NOT_LP64(rsi);
static const Register rlocals = LP64_ONLY(r14) NOT_LP64(rdi);

const int method_offset = frame::interpreter_frame_method_offset * wordSize;
const int bcp_offset    = frame::interpreter_frame_bcp_offset    * wordSize;
const int locals_offset = frame::interpreter_frame_locals_offset * wordSize;


//-----------------------------------------------------------------------------

address TemplateInterpreterGenerator::generate_StackOverflowError_handler() {
  address entry = __ pc();

#ifdef ASSERT
  {
    Label L;
    __ lea(rax, Address(rbp,
                        frame::interpreter_frame_monitor_block_top_offset *
                        wordSize));
    __ cmpptr(rax, rsp); // rax = maximal rsp for current rbp (stack
                         // grows negative)
    __ jcc(Assembler::aboveEqual, L); // check if frame is complete
    __ stop ("interpreter frame not set up");
    __ bind(L);
  }
#endif // ASSERT
  // Restore bcp under the assumption that the current frame is still
  // interpreted
  __ restore_bcp();

  // expression stack must be empty before entering the VM if an
  // exception happened
  __ empty_expression_stack();
  // throw exception
  __ call_VM(noreg,
             CAST_FROM_FN_PTR(address,
                              InterpreterRuntime::throw_StackOverflowError));
  return entry;
}

address TemplateInterpreterGenerator::generate_ArrayIndexOutOfBounds_handler(
        const char* name) {
  address entry = __ pc();
  // expression stack must be empty before entering the VM if an
  // exception happened
  __ empty_expression_stack();
  // setup parameters
  // ??? convention: expect aberrant index in register ebx
  Register rarg = NOT_LP64(rax) LP64_ONLY(c_rarg1);
  __ lea(rarg, ExternalAddress((address)name));
  __ call_VM(noreg,
             CAST_FROM_FN_PTR(address,
                              InterpreterRuntime::
                              throw_ArrayIndexOutOfBoundsException),
             rarg, rbx);
  return entry;
}

address TemplateInterpreterGenerator::generate_ClassCastException_handler() {
  address entry = __ pc();

  // object is at TOS
  Register rarg = NOT_LP64(rax) LP64_ONLY(c_rarg1);
  __ pop(rarg);

  // expression stack must be empty before entering the VM if an
  // exception happened
  __ empty_expression_stack();

  __ call_VM(noreg,
             CAST_FROM_FN_PTR(address,
                              InterpreterRuntime::
                              throw_ClassCastException),
             rarg);
  return entry;
}

address TemplateInterpreterGenerator::generate_exception_handler_common(
        const char* name, const char* message, bool pass_oop) {
  assert(!pass_oop || message == NULL, "either oop or message but not both");
  address entry = __ pc();

  Register rarg = NOT_LP64(rax) LP64_ONLY(c_rarg1);
  Register rarg2 = NOT_LP64(rbx) LP64_ONLY(c_rarg2);

  if (pass_oop) {
    // object is at TOS
    __ pop(rarg2);
  }
  // expression stack must be empty before entering the VM if an
  // exception happened
  __ empty_expression_stack();
  // setup parameters
  __ lea(rarg, ExternalAddress((address)name));
  if (pass_oop) {
    __ call_VM(rax, CAST_FROM_FN_PTR(address,
                                     InterpreterRuntime::
                                     create_klass_exception),
               rarg, rarg2);
  } else {
    __ lea(rarg2, ExternalAddress((address)message));
    __ call_VM(rax,
               CAST_FROM_FN_PTR(address, InterpreterRuntime::create_exception),
               rarg, rarg2);
  }
  // throw exception
  __ jump(ExternalAddress(Interpreter::throw_exception_entry()));
  return entry;
}


address TemplateInterpreterGenerator::generate_continuation_for(TosState state) {
  address entry = __ pc();
  // NULL last_sp until next java call
  __ movptr(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize), (int32_t)NULL_WORD);
  __ dispatch_next(state);
  return entry;
}


address TemplateInterpreterGenerator::generate_return_entry_for(TosState state, int step, size_t index_size) {
  address entry = __ pc();

#ifndef _LP64
#ifdef COMPILER2
  // The FPU stack is clean if UseSSE >= 2 but must be cleaned in other cases
  if ((state == ftos && UseSSE < 1) || (state == dtos && UseSSE < 2)) {
    for (int i = 1; i < 8; i++) {
      __ ffree(i);
    }
  } else if (UseSSE < 2) {
    __ empty_FPU_stack();
  }
#endif // COMPILER2
  if ((state == ftos && UseSSE < 1) || (state == dtos && UseSSE < 2)) {
    __ MacroAssembler::verify_FPU(1, "generate_return_entry_for compiled");
  } else {
    __ MacroAssembler::verify_FPU(0, "generate_return_entry_for compiled");
  }

  if (state == ftos) {
    __ MacroAssembler::verify_FPU(UseSSE >= 1 ? 0 : 1, "generate_return_entry_for in interpreter");
  } else if (state == dtos) {
    __ MacroAssembler::verify_FPU(UseSSE >= 2 ? 0 : 1, "generate_return_entry_for in interpreter");
  }
#endif // _LP64

  // Restore stack bottom in case i2c adjusted stack
  __ movptr(rsp, Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize));
  // and NULL it as marker that esp is now tos until next java call
  __ movptr(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize), (int32_t)NULL_WORD);

  __ restore_bcp();
  __ restore_locals();

  if (state == atos) {
    Register mdp = rbx;
    Register tmp = rcx;
    __ profile_return_type(mdp, rax, tmp);
  }

  const Register cache = rbx;
  const Register index = rcx;
  __ get_cache_and_index_at_bcp(cache, index, 1, index_size);

  const Register flags = cache;
  __ movl(flags, Address(cache, index, Address::times_ptr, ConstantPoolCache::base_offset() + ConstantPoolCacheEntry::flags_offset()));
  __ andl(flags, ConstantPoolCacheEntry::parameter_size_mask);
  __ lea(rsp, Address(rsp, flags, Interpreter::stackElementScale()));
  __ dispatch_next(state, step);

  return entry;
}


address TemplateInterpreterGenerator::generate_deopt_entry_for(TosState state, int step) {
  address entry = __ pc();

#ifndef _LP64
  if (state == ftos) {
    __ MacroAssembler::verify_FPU(UseSSE >= 1 ? 0 : 1, "generate_deopt_entry_for in interpreter");
  } else if (state == dtos) {
    __ MacroAssembler::verify_FPU(UseSSE >= 2 ? 0 : 1, "generate_deopt_entry_for in interpreter");
  }
#endif // _LP64

  // NULL last_sp until next java call
  __ movptr(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize), (int32_t)NULL_WORD);
  __ restore_bcp();
  __ restore_locals();
  const Register thread = NOT_LP64(rcx) LP64_ONLY(r15_thread);
  NOT_LP64(__ get_thread(thread));
#if INCLUDE_JVMCI
  // Check if we need to take lock at entry of synchronized method.
  if (UseJVMCICompiler) {
    Label L;
    __ cmpb(Address(thread, JavaThread::pending_monitorenter_offset()), 0);
    __ jcc(Assembler::zero, L);
    // Clear flag.
    __ movb(Address(thread, JavaThread::pending_monitorenter_offset()), 0);
    // Satisfy calling convention for lock_method().
    __ get_method(rbx);
    // Take lock.
    lock_method();
    __ bind(L);
  }
#endif
  // handle exceptions
  {
    Label L;
    __ cmpptr(Address(thread, Thread::pending_exception_offset()), (int32_t) NULL_WORD);
    __ jcc(Assembler::zero, L);
    __ call_VM(noreg,
               CAST_FROM_FN_PTR(address,
                                InterpreterRuntime::throw_pending_exception));
    __ should_not_reach_here();
    __ bind(L);
  }
  __ dispatch_next(state, step);
  return entry;
}

address TemplateInterpreterGenerator::generate_result_handler_for(
        BasicType type) {
  address entry = __ pc();
  switch (type) {
  case T_BOOLEAN: __ c2bool(rax);            break;
#ifndef _LP64
  case T_CHAR   : __ andptr(rax, 0xFFFF);    break;
#else
  case T_CHAR   : __ movzwl(rax, rax);       break;
#endif // _LP64
  case T_BYTE   : __ sign_extend_byte(rax);  break;
  case T_SHORT  : __ sign_extend_short(rax); break;
  case T_INT    : /* nothing to do */        break;
  case T_LONG   : /* nothing to do */        break;
  case T_VOID   : /* nothing to do */        break;
#ifndef _LP64
  case T_DOUBLE :
  case T_FLOAT  :
    { const Register t = InterpreterRuntime::SignatureHandlerGenerator::temp();
      __ pop(t);                            // remove return address first
      // Must return a result for interpreter or compiler. In SSE
      // mode, results are returned in xmm0 and the FPU stack must
      // be empty.
      if (type == T_FLOAT && UseSSE >= 1) {
        // Load ST0
        __ fld_d(Address(rsp, 0));
        // Store as float and empty fpu stack
        __ fstp_s(Address(rsp, 0));
        // and reload
        __ movflt(xmm0, Address(rsp, 0));
      } else if (type == T_DOUBLE && UseSSE >= 2 ) {
        __ movdbl(xmm0, Address(rsp, 0));
      } else {
        // restore ST0
        __ fld_d(Address(rsp, 0));
      }
      // and pop the temp
      __ addptr(rsp, 2 * wordSize);
      __ push(t);                           // restore return address
    }
    break;
#else
  case T_FLOAT  : /* nothing to do */        break;
  case T_DOUBLE : /* nothing to do */        break;
#endif // _LP64

  case T_OBJECT :
    // retrieve result from frame
    __ movptr(rax, Address(rbp, frame::interpreter_frame_oop_temp_offset*wordSize));
    // and verify it
    __ verify_oop(rax);
    break;
  default       : ShouldNotReachHere();
  }
  __ ret(0);                                   // return from result handler
  return entry;
}

address TemplateInterpreterGenerator::generate_safept_entry_for(
        TosState state,
        address runtime_entry) {
  address entry = __ pc();
  __ push(state);
  __ call_VM(noreg, runtime_entry);
  __ dispatch_via(vtos, Interpreter::_normal_table.table_for(vtos));
  return entry;
}



// Helpers for commoning out cases in the various types of method entries.
//


// increment invocation count & check for overflow
//
// Note: checking for negative value instead of overflow
//       so we have a 'sticky' overflow test
//
// rbx: method
// rcx: invocation counter
//
void TemplateInterpreterGenerator::generate_counter_incr(
        Label* overflow,
        Label* profile_method,
        Label* profile_method_continue) {
  Label done;
  // Note: In tiered we increment either counters in Method* or in MDO depending on whether we're profiling or not.
  if (TieredCompilation) {
    int increment = InvocationCounter::count_increment;
    Label no_mdo;
    if (ProfileInterpreter) {
      // Are we profiling?
      __ movptr(rax, Address(rbx, Method::method_data_offset()));
      __ testptr(rax, rax);
      __ jccb(Assembler::zero, no_mdo);
      // Increment counter in the MDO
      const Address mdo_invocation_counter(rax, in_bytes(MethodData::invocation_counter_offset()) +
                                                in_bytes(InvocationCounter::counter_offset()));
      const Address mask(rax, in_bytes(MethodData::invoke_mask_offset()));
      __ increment_mask_and_jump(mdo_invocation_counter, increment, mask, rcx, false, Assembler::zero, overflow);
      __ jmp(done);
    }
    __ bind(no_mdo);
    // Increment counter in MethodCounters
    const Address invocation_counter(rax,
                  MethodCounters::invocation_counter_offset() +
                  InvocationCounter::counter_offset());
    __ get_method_counters(rbx, rax, done);
    const Address mask(rax, in_bytes(MethodCounters::invoke_mask_offset()));
    __ increment_mask_and_jump(invocation_counter, increment, mask, rcx,
                               false, Assembler::zero, overflow);
    __ bind(done);
  } else { // not TieredCompilation
    const Address backedge_counter(rax,
                  MethodCounters::backedge_counter_offset() +
                  InvocationCounter::counter_offset());
    const Address invocation_counter(rax,
                  MethodCounters::invocation_counter_offset() +
                  InvocationCounter::counter_offset());

    __ get_method_counters(rbx, rax, done);

    if (ProfileInterpreter) {
      __ incrementl(Address(rax,
              MethodCounters::interpreter_invocation_counter_offset()));
    }
    // Update standard invocation counters
    __ movl(rcx, invocation_counter);
    __ incrementl(rcx, InvocationCounter::count_increment);
    __ movl(invocation_counter, rcx); // save invocation count

    __ movl(rax, backedge_counter);   // load backedge counter
    __ andl(rax, InvocationCounter::count_mask_value); // mask out the status bits

    __ addl(rcx, rax);                // add both counters

    // profile_method is non-null only for interpreted method so
    // profile_method != NULL == !native_call

    if (ProfileInterpreter && profile_method != NULL) {
      // Test to see if we should create a method data oop
      __ movptr(rax, Address(rbx, Method::method_counters_offset()));
      __ cmp32(rcx, Address(rax, in_bytes(MethodCounters::interpreter_profile_limit_offset())));
      __ jcc(Assembler::less, *profile_method_continue);

      // if no method data exists, go to profile_method
      __ test_method_data_pointer(rax, *profile_method);
    }

    __ movptr(rax, Address(rbx, Method::method_counters_offset()));
    __ cmp32(rcx, Address(rax, in_bytes(MethodCounters::interpreter_invocation_limit_offset())));
    __ jcc(Assembler::aboveEqual, *overflow);
    __ bind(done);
  }
}

void TemplateInterpreterGenerator::generate_counter_overflow(Label& do_continue) {

  // Asm interpreter on entry
  // r14/rdi - locals
  // r13/rsi - bcp
  // rbx     - method
  // rdx     - cpool --- DOES NOT APPEAR TO BE TRUE
  // rbp     - interpreter frame

  // On return (i.e. jump to entry_point) [ back to invocation of interpreter ]
  // Everything as it was on entry
  // rdx is not restored. Doesn't appear to really be set.

  // InterpreterRuntime::frequency_counter_overflow takes two
  // arguments, the first (thread) is passed by call_VM, the second
  // indicates if the counter overflow occurs at a backwards branch
  // (NULL bcp).  We pass zero for it.  The call returns the address
  // of the verified entry point for the method or NULL if the
  // compilation did not complete (either went background or bailed
  // out).
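  // Note that the returned entry point is not used here: we simply restore
  // the Method* below and jump back to do_continue, so execution resumes in
  // the interpreter regardless of whether a compiled version now exists.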
  Register rarg = NOT_LP64(rax) LP64_ONLY(c_rarg1);
  __ movl(rarg, 0);
  __ call_VM(noreg,
             CAST_FROM_FN_PTR(address,
                              InterpreterRuntime::frequency_counter_overflow),
             rarg);

  __ movptr(rbx, Address(rbp, method_offset));   // restore Method*
  // Preserve invariant that r13/r14 contain bcp/locals of sender frame
  // and jump to the interpreted entry.
  __ jmp(do_continue, relocInfo::none);
}

// See if we've got enough room on the stack for locals plus overhead below
// JavaThread::stack_overflow_limit(). If not, throw a StackOverflowError
// without going through the signal handler, i.e., reserved and yellow zones
// will not be made usable. The shadow zone must suffice to handle the
// overflow.
// The expression stack grows down incrementally, so the normal guard
// page mechanism will work for that.
//
// NOTE: Since the additional locals are also always pushed (wasn't
// obvious in generate_fixed_frame), the guard should work for them
// too.
//
// Args:
//      rdx: number of additional locals this frame needs (what we must check)
//      rbx: Method*
//
// Kills:
//      rax
void TemplateInterpreterGenerator::generate_stack_overflow_check(void) {

  // monitor entry size: see picture of stack in frame_x86.hpp
  const int entry_size = frame::interpreter_frame_monitor_size() * wordSize;

  // total overhead size: entry_size + (saved rbp through expr stack
  // bottom).  be sure to change this if you add/subtract anything
  // to/from the overhead area
  const int overhead_size =
    -(frame::interpreter_frame_initial_sp_offset * wordSize) + entry_size;

  const int page_size = os::vm_page_size();

  Label after_frame_check;

  // see if the frame is greater than one page in size. If so,
  // then we need to verify there is enough stack space remaining
  // for the additional locals.
  __ cmpl(rdx, (page_size - overhead_size) / Interpreter::stackElementSize);
  __ jcc(Assembler::belowEqual, after_frame_check);

  // compute rsp as if this were going to be the last frame on
  // the stack before the red zone

  Label after_frame_check_pop;
  const Register thread = NOT_LP64(rsi) LP64_ONLY(r15_thread);
#ifndef _LP64
  __ push(thread);
  __ get_thread(thread);
#endif

  const Address stack_limit(thread, JavaThread::stack_overflow_limit_offset());

  // locals + overhead, in bytes
  __ mov(rax, rdx);
  __ shlptr(rax, Interpreter::logStackElementSize); // Convert parameter count to bytes.
  __ addptr(rax, overhead_size);

#ifdef ASSERT
  Label limit_okay;
  // Verify that thread stack overflow limit is non-zero.
  __ cmpptr(stack_limit, (int32_t)NULL_WORD);
  __ jcc(Assembler::notEqual, limit_okay);
  __ stop("stack overflow limit is zero");
  __ bind(limit_okay);
#endif

  // Add locals/frame size to stack limit.
  __ addptr(rax, stack_limit);

  // Check against the current stack bottom.
  __ cmpptr(rsp, rax);

  __ jcc(Assembler::above, after_frame_check_pop);
  NOT_LP64(__ pop(rsi));  // get saved bcp

  // Restore sender's sp as SP. This is necessary if the sender's
  // frame is an extended compiled frame (see gen_c2i_adapter())
  // and safer anyway in case of JSR292 adaptations.

  __ pop(rax); // return address must be moved if SP is changed
  __ mov(rsp, rbcp);
  __ push(rax);

  // Note: the restored frame is not necessarily interpreted.
  // Use the shared runtime version of the StackOverflowError.
  assert(StubRoutines::throw_StackOverflowError_entry() != NULL, "stub not yet generated");
  __ jump(ExternalAddress(StubRoutines::throw_StackOverflowError_entry()));
  // all done with frame size check
  __ bind(after_frame_check_pop);
  NOT_LP64(__ pop(rsi));

  // all done with frame size check
  __ bind(after_frame_check);
}

// Allocate monitor and lock method (asm interpreter)
//
// Args:
//      rbx: Method*
//      r14/rdi: locals
//
// Kills:
//      rax
//      c_rarg0, c_rarg1, c_rarg2, c_rarg3, ...(param regs)
//      rscratch1, rscratch2 (scratch regs)
void TemplateInterpreterGenerator::lock_method() {
  // synchronize method
  const Address access_flags(rbx, Method::access_flags_offset());
  const Address monitor_block_top(
        rbp,
        frame::interpreter_frame_monitor_block_top_offset * wordSize);
  const int entry_size = frame::interpreter_frame_monitor_size() * wordSize;

#ifdef ASSERT
  {
    Label L;
    __ movl(rax, access_flags);
    __ testl(rax, JVM_ACC_SYNCHRONIZED);
    __ jcc(Assembler::notZero, L);
    __ stop("method doesn't need synchronization");
    __ bind(L);
  }
#endif // ASSERT

  // get synchronization object
  {
    Label done;
    __ movl(rax, access_flags);
    __ testl(rax, JVM_ACC_STATIC);
    // get receiver (assume this is frequent case)
    __ movptr(rax, Address(rlocals, Interpreter::local_offset_in_bytes(0)));
    __ jcc(Assembler::zero, done);
    __ load_mirror(rax, rbx);

#ifdef ASSERT
    {
      Label L;
      __ testptr(rax, rax);
      __ jcc(Assembler::notZero, L);
      __ stop("synchronization object is NULL");
      __ bind(L);
    }
#endif // ASSERT

    __ bind(done);
    oopDesc::bs()->interpreter_write_barrier(_masm, rax);
  }

  // add space for monitor & lock
  __ subptr(rsp, entry_size); // add space for a monitor entry
  __ movptr(monitor_block_top, rsp);  // set new monitor block top
  // store object
  __ shenandoah_store_addr_check(rax);
  __ movptr(Address(rsp, BasicObjectLock::obj_offset_in_bytes()), rax);
  const Register lockreg = NOT_LP64(rdx) LP64_ONLY(c_rarg1);
  __ movptr(lockreg, rsp); // object address
  __ lock_object(lockreg);
}

// Generate a fixed interpreter frame. This is identical setup for
// interpreted methods and for native methods hence the shared code.
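// The slots pushed below line up with the frame::interpreter_frame_*_offset
// constants used elsewhere in this file (method, locals, bcp, last_sp, etc.).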
//
// Args:
//      rax: return address
//      rbx: Method*
//      r14/rdi: pointer to locals
//      r13/rsi: sender sp
//      rdx: cp cache
void TemplateInterpreterGenerator::generate_fixed_frame(bool native_call) {
  // initialize fixed part of activation frame
  __ push(rax);            // save return address
  __ enter();              // save old & set new rbp
  __ push(rbcp);           // set sender sp
  __ push((int)NULL_WORD); // leave last_sp as null
  __ movptr(rbcp, Address(rbx, Method::const_offset()));    // get ConstMethod*
  __ lea(rbcp, Address(rbcp, ConstMethod::codes_offset())); // get codebase
  __ push(rbx);            // save Method*
  // Get mirror and store it in the frame as GC root for this Method*
  __ load_mirror(rdx, rbx);
  __ push(rdx);
  if (ProfileInterpreter) {
    Label method_data_continue;
    __ movptr(rdx, Address(rbx, in_bytes(Method::method_data_offset())));
    __ testptr(rdx, rdx);
    __ jcc(Assembler::zero, method_data_continue);
    __ addptr(rdx, in_bytes(MethodData::data_offset()));
    __ bind(method_data_continue);
    __ push(rdx);      // set the mdp (method data pointer)
  } else {
    __ push(0);
  }

  __ movptr(rdx, Address(rbx, Method::const_offset()));
  __ movptr(rdx, Address(rdx, ConstMethod::constants_offset()));
  __ movptr(rdx, Address(rdx, ConstantPool::cache_offset_in_bytes()));
  __ push(rdx); // set constant pool cache
  __ push(rlocals); // set locals pointer
  if (native_call) {
    __ push(0); // no bcp
  } else {
    __ push(rbcp); // set bcp
  }
  __ push(0); // reserve word for pointer to expression stack bottom
  __ movptr(Address(rsp, 0), rsp); // set expression stack bottom
}

// End of helpers

// Method entry for java.lang.ref.Reference.get.
address TemplateInterpreterGenerator::generate_Reference_get_entry(void) {
#if INCLUDE_ALL_GCS
  // Code: _aload_0, _getfield, _areturn
  // parameter size = 1
  //
  // The code that gets generated by this routine is split into 2 parts:
  //    1. The "intrinsified" code for G1 (or any SATB based GC),
  //    2. The slow path - which is an expansion of the regular method entry.
  //
  // Notes:-
  // * In the G1 code we do not check whether we need to block for
  //   a safepoint. If G1 is enabled then we must execute the specialized
  //   code for Reference.get (except when the Reference object is null)
  //   so that we can log the value in the referent field with an SATB
  //   update buffer.
  //   If the code for the getfield template is modified so that the
  //   G1 pre-barrier code is executed when the current method is
  //   Reference.get() then going through the normal method entry
  //   will be fine.
  // * The G1 code can, however, check the receiver object (the instance
  //   of java.lang.Reference) and jump to the slow path if null. If the
  //   Reference object is null then we obviously cannot fetch the referent
  //   and so we don't need to call the G1 pre-barrier. Thus we can use the
  //   regular method entry code to generate the NPE.
  //
  // rbx: Method*

  // r13: senderSP must preserve for slow path, set SP to it on fast path

  address entry = __ pc();

  const int referent_offset = java_lang_ref_Reference::referent_offset;
  guarantee(referent_offset > 0, "referent offset not initialized");

  if (UseG1GC || UseShenandoahGC) {
    Label slow_path;
    // rbx: method

    // Check if local 0 != NULL
    // If the receiver is null then it is OK to jump to the slow path.
    __ movptr(rax, Address(rsp, wordSize));

    __ testptr(rax, rax);
    __ jcc(Assembler::zero, slow_path);

    oopDesc::bs()->interpreter_read_barrier_not_null(_masm, rax);

    // rax: local 0
    // rbx: method (but can be used as scratch now)
    // rdx: scratch
    // rdi: scratch

    // Preserve the sender sp in case the pre-barrier
    // calls the runtime
    NOT_LP64(__ push(rsi));

    // Generate the G1 pre-barrier code to log the value of
    // the referent field in an SATB buffer.

    // Load the value of the referent field.
    const Address field_address(rax, referent_offset);
    __ load_heap_oop(rax, field_address);

    const Register sender_sp = NOT_LP64(rsi) LP64_ONLY(r13);
    const Register thread = NOT_LP64(rcx) LP64_ONLY(r15_thread);
    NOT_LP64(__ get_thread(thread));

    // Generate the G1 pre-barrier code to log the value of
    // the referent field in an SATB buffer.
    __ g1_write_barrier_pre(noreg /* obj */,
                            rax /* pre_val */,
                            thread /* thread */,
                            rbx /* tmp */,
                            true /* tosca_live */,
                            true /* expand_call */);

    // _areturn
    NOT_LP64(__ pop(rsi));      // get sender sp
    __ pop(rdi);                // get return address
    __ mov(rsp, sender_sp);     // set sp to sender sp
    __ jmp(rdi);
    __ ret(0);

    // generate a vanilla interpreter entry as the slow path
    __ bind(slow_path);
    __ jump_to_entry(Interpreter::entry_for_kind(Interpreter::zerolocals));
    return entry;
  }
#endif // INCLUDE_ALL_GCS

  // If G1 is not enabled then attempt to go through the accessor entry point
  // Reference.get is an accessor
  return NULL;
}

void TemplateInterpreterGenerator::bang_stack_shadow_pages(bool native_call) {
  // Quick & dirty stack overflow checking: bang the stack & handle trap.
  // Note that we do the banging after the frame is setup, since the exception
  // handling code expects to find a valid interpreter frame on the stack.
  // Doing the banging earlier fails if the caller frame is not an interpreter
  // frame.
  // (Also, the exception throwing code expects to unlock any synchronized
  // method receiver, so do the banging after locking the receiver.)

  // Bang each page in the shadow zone. We can't assume it's been done for
  // an interpreter frame with greater than a page of locals, so each page
  // needs to be checked.  Only true for non-native.
  if (UseStackBanging) {
    const int page_size = os::vm_page_size();
    const int n_shadow_pages = ((int)JavaThread::stack_shadow_zone_size()) / page_size;
    const int start_page = native_call ? n_shadow_pages : 1;
    for (int pages = start_page; pages <= n_shadow_pages; pages++) {
      __ bang_stack_with_offset(pages*page_size);
    }
  }
}

// Interpreter stub for calling a native method. (asm interpreter)
// This sets up a somewhat different looking stack for calling the
// native method than the typical interpreter frame setup.
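//
// In outline, the entry below copies the parameters, builds the fixed frame,
// optionally locks the receiver, calls the signature handler, transitions the
// thread to _thread_in_native around the JNI call, checks for safepoints and
// pending exceptions on the way back, unlocks if needed, runs the result
// handler, and finally removes the activation.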
address TemplateInterpreterGenerator::generate_native_entry(bool synchronized) {
  // determine code generation flags
  bool inc_counter = UseCompiler || CountCompiledCalls || LogTouchedMethods;

  // rbx: Method*
  // rbcp: sender sp

  address entry_point = __ pc();

  const Address constMethod       (rbx, Method::const_offset());
  const Address access_flags      (rbx, Method::access_flags_offset());
  const Address size_of_parameters(rcx, ConstMethod::
                                        size_of_parameters_offset());


  // get parameter size (always needed)
  __ movptr(rcx, constMethod);
  __ load_unsigned_short(rcx, size_of_parameters);

  // native calls don't need the stack size check since they have no
  // expression stack and the arguments are already on the stack and
  // we only add a handful of words to the stack

  // rbx: Method*
  // rcx: size of parameters
  // rbcp: sender sp
  __ pop(rax);                                       // get return address

  // for natives the size of locals is zero

  // compute beginning of parameters
  __ lea(rlocals, Address(rsp, rcx, Interpreter::stackElementScale(), -wordSize));

  // add 2 zero-initialized slots for native calls
  // initialize result_handler slot
  __ push((int) NULL_WORD);
  // slot for oop temp
  // (static native method holder mirror/jni oop result)
  __ push((int) NULL_WORD);

  // initialize fixed part of activation frame
  generate_fixed_frame(true);

  // make sure method is native & not abstract
#ifdef ASSERT
  __ movl(rax, access_flags);
  {
    Label L;
    __ testl(rax, JVM_ACC_NATIVE);
    __ jcc(Assembler::notZero, L);
    __ stop("tried to execute non-native method as native");
    __ bind(L);
  }
  {
    Label L;
    __ testl(rax, JVM_ACC_ABSTRACT);
    __ jcc(Assembler::zero, L);
    __ stop("tried to execute abstract method in interpreter");
    __ bind(L);
  }
#endif

  // Since at this point in the method invocation the exception handler
  // would try to exit the monitor of synchronized methods which hasn't
  // been entered yet, we set the thread local variable
  // _do_not_unlock_if_synchronized to true. The remove_activation will
  // check this flag.

  const Register thread1 = NOT_LP64(rax) LP64_ONLY(r15_thread);
  NOT_LP64(__ get_thread(thread1));
  const Address do_not_unlock_if_synchronized(thread1,
        in_bytes(JavaThread::do_not_unlock_if_synchronized_offset()));
  __ movbool(do_not_unlock_if_synchronized, true);

  // increment invocation count & check for overflow
  Label invocation_counter_overflow;
  if (inc_counter) {
    generate_counter_incr(&invocation_counter_overflow, NULL, NULL);
  }

  Label continue_after_compile;
  __ bind(continue_after_compile);

  bang_stack_shadow_pages(true);

  // reset the _do_not_unlock_if_synchronized flag
  NOT_LP64(__ get_thread(thread1));
  __ movbool(do_not_unlock_if_synchronized, false);

  // check for synchronized methods
  // Must happen AFTER invocation_counter check and stack overflow check,
  // so the method is not locked if the counter overflows.
  if (synchronized) {
    lock_method();
  } else {
    // no synchronization necessary
#ifdef ASSERT
    {
      Label L;
      __ movl(rax, access_flags);
      __ testl(rax, JVM_ACC_SYNCHRONIZED);
      __ jcc(Assembler::zero, L);
      __ stop("method needs synchronization");
      __ bind(L);
    }
#endif
  }

  // start execution
#ifdef ASSERT
  {
    Label L;
    const Address monitor_block_top(rbp,
                 frame::interpreter_frame_monitor_block_top_offset * wordSize);
    __ movptr(rax, monitor_block_top);
    __ cmpptr(rax, rsp);
    __ jcc(Assembler::equal, L);
    __ stop("broken stack frame setup in interpreter");
    __ bind(L);
  }
#endif

  // jvmti support
  __ notify_method_entry();

  // work registers
  const Register method = rbx;
  const Register thread = NOT_LP64(rdi) LP64_ONLY(r15_thread);
  const Register t      = NOT_LP64(rcx) LP64_ONLY(r11);

  // allocate space for parameters
  __ get_method(method);
  __ movptr(t, Address(method, Method::const_offset()));
  __ load_unsigned_short(t, Address(t, ConstMethod::size_of_parameters_offset()));

#ifndef _LP64
  __ shlptr(t, Interpreter::logStackElementSize); // Convert parameter count to bytes.
  __ addptr(t, 2*wordSize);     // allocate two more slots for JNIEnv and possible mirror
  __ subptr(rsp, t);
  __ andptr(rsp, -(StackAlignmentInBytes)); // gcc needs 16 byte aligned stacks to do XMM intrinsics
#else
  __ shll(t, Interpreter::logStackElementSize);

  __ subptr(rsp, t);
  __ subptr(rsp, frame::arg_reg_save_area_bytes); // windows
  __ andptr(rsp, -16); // must be 16 byte boundary (see amd64 ABI)
#endif // _LP64

  // get signature handler
  {
    Label L;
    __ movptr(t, Address(method, Method::signature_handler_offset()));
    __ testptr(t, t);
    __ jcc(Assembler::notZero, L);
    __ call_VM(noreg,
               CAST_FROM_FN_PTR(address,
                                InterpreterRuntime::prepare_native_call),
               method);
    __ get_method(method);
    __ movptr(t, Address(method, Method::signature_handler_offset()));
    __ bind(L);
  }

  // call signature handler
  assert(InterpreterRuntime::SignatureHandlerGenerator::from() == rlocals,
         "adjust this code");
  assert(InterpreterRuntime::SignatureHandlerGenerator::to() == rsp,
         "adjust this code");
  assert(InterpreterRuntime::SignatureHandlerGenerator::temp() == NOT_LP64(t) LP64_ONLY(rscratch1),
         "adjust this code");

  // The generated handlers do not touch RBX (the method oop).
  // However, large signatures cannot be cached and are generated
  // each time here.  The slow-path generator can do a GC on return,
  // so we must reload it after the call.
  __ call(t);
  __ get_method(method);        // slow path can do a GC, reload RBX


  // result handler is in rax
  // set result handler
  __ movptr(Address(rbp,
                    (frame::interpreter_frame_result_handler_offset) * wordSize),
            rax);

  // pass mirror handle if static call
  {
    Label L;
    __ movl(t, Address(method, Method::access_flags_offset()));
    __ testl(t, JVM_ACC_STATIC);
    __ jcc(Assembler::zero, L);
    // get mirror
    __ load_mirror(t, method);
    // copy mirror into activation frame
    __ movptr(Address(rbp, frame::interpreter_frame_oop_temp_offset * wordSize),
              t);
    // pass handle to mirror
#ifndef _LP64
    __ lea(t, Address(rbp, frame::interpreter_frame_oop_temp_offset * wordSize));
    __ movptr(Address(rsp, wordSize), t);
#else
    __ lea(c_rarg1,
           Address(rbp, frame::interpreter_frame_oop_temp_offset * wordSize));
#endif // _LP64
    __ bind(L);
  }

  // get native function entry point
  {
    Label L;
    __ movptr(rax, Address(method, Method::native_function_offset()));
    ExternalAddress unsatisfied(SharedRuntime::native_method_throw_unsatisfied_link_error_entry());
    __ cmpptr(rax, unsatisfied.addr());
    __ jcc(Assembler::notEqual, L);
    __ call_VM(noreg,
               CAST_FROM_FN_PTR(address,
                                InterpreterRuntime::prepare_native_call),
               method);
    __ get_method(method);
    __ movptr(rax, Address(method, Method::native_function_offset()));
    __ bind(L);
  }

  // pass JNIEnv
#ifndef _LP64
  __ get_thread(thread);
  __ lea(t, Address(thread, JavaThread::jni_environment_offset()));
  __ movptr(Address(rsp, 0), t);

  // set_last_Java_frame_before_call
  // It is enough that the pc()
  // points into the right code segment. It does not have to be the correct return pc.
  __ set_last_Java_frame(thread, noreg, rbp, __ pc());
#else
  __ lea(c_rarg0, Address(r15_thread, JavaThread::jni_environment_offset()));

  // It is enough that the pc() points into the right code
  // segment. It does not have to be the correct return pc.
  __ set_last_Java_frame(rsp, rbp, (address) __ pc());
#endif // _LP64

  // change thread state
#ifdef ASSERT
  {
    Label L;
    __ movl(t, Address(thread, JavaThread::thread_state_offset()));
    __ cmpl(t, _thread_in_Java);
    __ jcc(Assembler::equal, L);
    __ stop("Wrong thread state in native stub");
    __ bind(L);
  }
#endif

  // Change state to native

  __ movl(Address(thread, JavaThread::thread_state_offset()),
          _thread_in_native);

  // Call the native method.
  __ call(rax);
  // 32: result potentially in rdx:rax or ST0
  // 64: result potentially in rax or xmm0

  // Verify or restore cpu control state after JNI call
  __ restore_cpu_control_state_after_jni();

  // NOTE: The order of these pushes is known to frame::interpreter_frame_result
  // in order to extract the result of a method call. If the order of these
  // pushes change or anything else is added to the stack then the code in
  // interpreter_frame_result must also change.

#ifndef _LP64
  // save potential result in ST(0) & rdx:rax
  // (if result handler is the T_FLOAT or T_DOUBLE handler, result must be in ST0 -
  // the check is necessary to avoid potential Intel FPU overflow problems by saving/restoring 'empty' FPU registers)
  // It is safe to do this push because state is _thread_in_native and return address will be found
  // via _last_native_pc and not via _last_java_sp

  // NOTE: the order of these push(es) is known to frame::interpreter_frame_result.
  // If the order changes or anything else is added to the stack the code in
  // interpreter_frame_result will have to be changed.

  { Label L;
    Label push_double;
    ExternalAddress float_handler(AbstractInterpreter::result_handler(T_FLOAT));
    ExternalAddress double_handler(AbstractInterpreter::result_handler(T_DOUBLE));
    __ cmpptr(Address(rbp, (frame::interpreter_frame_oop_temp_offset + 1)*wordSize),
              float_handler.addr());
    __ jcc(Assembler::equal, push_double);
    __ cmpptr(Address(rbp, (frame::interpreter_frame_oop_temp_offset + 1)*wordSize),
              double_handler.addr());
    __ jcc(Assembler::notEqual, L);
    __ bind(push_double);
    __ push_d(); // FP values are returned using the FPU, so push FPU contents (even if UseSSE > 0).
    __ bind(L);
  }
#else
  __ push(dtos);
#endif // _LP64

  __ push(ltos);

  // change thread state
  NOT_LP64(__ get_thread(thread));
  __ movl(Address(thread, JavaThread::thread_state_offset()),
          _thread_in_native_trans);

  if (os::is_MP()) {
    if (UseMembar) {
      // Force this write out before the read below
      __ membar(Assembler::Membar_mask_bits(
                  Assembler::LoadLoad | Assembler::LoadStore |
                  Assembler::StoreLoad | Assembler::StoreStore));
    } else {
      // Write serialization page so VM thread can do a pseudo remote membar.
      // We use the current thread pointer to calculate a thread specific
      // offset to write to within the page. This minimizes bus traffic
      // due to cache line collision.
      __ serialize_memory(thread, rcx);
    }
  }

#ifndef _LP64
  if (AlwaysRestoreFPU) {
    // Make sure the control word is correct.
    __ fldcw(ExternalAddress(StubRoutines::addr_fpu_cntrl_wrd_std()));
  }
#endif // _LP64

  // check for safepoint operation in progress and/or pending suspend requests
  {
    Label Continue;
    __ cmp32(ExternalAddress(SafepointSynchronize::address_of_state()),
             SafepointSynchronize::_not_synchronized);

    Label L;
    __ jcc(Assembler::notEqual, L);
    __ cmpl(Address(thread, JavaThread::suspend_flags_offset()), 0);
    __ jcc(Assembler::equal, Continue);
    __ bind(L);

    // Don't use call_VM as it will see a possible pending exception
    // and forward it and never return here preventing us from
    // clearing _last_native_pc down below. Also can't use
    // call_VM_leaf either as it will check to see if r13 & r14 are
    // preserved and correspond to the bcp/locals pointers. So we do a
    // runtime call by hand.
    //
#ifndef _LP64
    __ push(thread);
    __ call(RuntimeAddress(CAST_FROM_FN_PTR(address,
                                            JavaThread::check_special_condition_for_native_trans)));
    __ increment(rsp, wordSize);
    __ get_thread(thread);
#else
    __ mov(c_rarg0, r15_thread);
    __ mov(r12, rsp); // remember sp (can only use r12 if not using call_VM)
    __ subptr(rsp, frame::arg_reg_save_area_bytes); // windows
    __ andptr(rsp, -16); // align stack as required by ABI
    __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, JavaThread::check_special_condition_for_native_trans)));
    __ mov(rsp, r12); // restore sp
    __ reinit_heapbase();
#endif // _LP64
    __ bind(Continue);
  }

  // change thread state
  __ movl(Address(thread, JavaThread::thread_state_offset()), _thread_in_Java);

  // reset_last_Java_frame
  __ reset_last_Java_frame(thread, true);

  // reset handle block
  __ movptr(t, Address(thread, JavaThread::active_handles_offset()));
  __ movl(Address(t, JNIHandleBlock::top_offset_in_bytes()), (int32_t)NULL_WORD);

  // If result is an oop unbox and store it in frame where gc will see it
  // and result handler will pick it up

  {
    Label no_oop, store_result;
    __ lea(t, ExternalAddress(AbstractInterpreter::result_handler(T_OBJECT)));
    __ cmpptr(t, Address(rbp, frame::interpreter_frame_result_handler_offset*wordSize));
    __ jcc(Assembler::notEqual, no_oop);
    // retrieve result
    __ pop(ltos);
    __ testptr(rax, rax);
    __ jcc(Assembler::zero, store_result);
    __ movptr(rax, Address(rax, 0));
    __ bind(store_result);
    __ movptr(Address(rbp, frame::interpreter_frame_oop_temp_offset*wordSize), rax);
    // keep stack depth as expected by pushing oop which will eventually be discarded
    __ push(ltos);
    __ bind(no_oop);
  }


  {
    Label no_reguard;
    __ cmpl(Address(thread, JavaThread::stack_guard_state_offset()),
            JavaThread::stack_guard_yellow_reserved_disabled);
    __ jcc(Assembler::notEqual, no_reguard);

    __ pusha(); // XXX only save smashed registers
#ifndef _LP64
    __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, SharedRuntime::reguard_yellow_pages)));
    __ popa();
#else
    __ mov(r12, rsp); // remember sp (can only use r12 if not using call_VM)
    __ subptr(rsp, frame::arg_reg_save_area_bytes); // windows
    __ andptr(rsp, -16); // align stack as required by ABI
    __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, SharedRuntime::reguard_yellow_pages)));
    __ mov(rsp, r12); // restore sp
    __ popa(); // XXX only restore smashed registers
    __ reinit_heapbase();
#endif // _LP64

    __ bind(no_reguard);
  }


  // The method register is junk from after the thread_in_native transition
  // until here.  Also can't call_VM until the bcp has been
  // restored.  Need bcp for throwing exception below so get it now.
  __ get_method(method);

  // restore to have legal interpreter frame, i.e., bci == 0 <=> code_base()
  __ movptr(rbcp, Address(method, Method::const_offset()));   // get ConstMethod*
  __ lea(rbcp, Address(rbcp, ConstMethod::codes_offset()));   // get codebase

  // handle exceptions (exception handling will handle unlocking!)
  {
    Label L;
    __ cmpptr(Address(thread, Thread::pending_exception_offset()), (int32_t) NULL_WORD);
    __ jcc(Assembler::zero, L);
    // Note: At some point we may want to unify this with the code
    // used in call_VM_base(); i.e., we should use the
    // StubRoutines::forward_exception code. For now this doesn't work
    // here because the rsp is not correctly set at this point.
    __ MacroAssembler::call_VM(noreg,
                               CAST_FROM_FN_PTR(address,
                               InterpreterRuntime::throw_pending_exception));
    __ should_not_reach_here();
    __ bind(L);
  }

  // do unlocking if necessary
  {
    Label L;
    __ movl(t, Address(method, Method::access_flags_offset()));
    __ testl(t, JVM_ACC_SYNCHRONIZED);
    __ jcc(Assembler::zero, L);
    // the code below should be shared with interpreter macro
    // assembler implementation
    {
      Label unlock;
      // BasicObjectLock will be first in list, since this is a
      // synchronized method. However, need to check that the object
      // has not been unlocked by an explicit monitorexit bytecode.
      const Address monitor(rbp,
                            (intptr_t)(frame::interpreter_frame_initial_sp_offset *
                                       wordSize - (int)sizeof(BasicObjectLock)));

      const Register regmon = NOT_LP64(rdx) LP64_ONLY(c_rarg1);

      // monitor expected in c_rarg1 for slow unlock path
      __ lea(regmon, monitor); // address of first monitor

      __ movptr(t, Address(regmon, BasicObjectLock::obj_offset_in_bytes()));
      __ shenandoah_store_addr_check(t); // Invariant
      __ testptr(t, t);
      __ jcc(Assembler::notZero, unlock);

      // Entry already unlocked, need to throw exception
      __ MacroAssembler::call_VM(noreg,
                                 CAST_FROM_FN_PTR(address,
                   InterpreterRuntime::throw_illegal_monitor_state_exception));
      __ should_not_reach_here();

      __ bind(unlock);
      __ unlock_object(regmon);
    }
    __ bind(L);
  }

  // jvmti support
  // Note: This must happen _after_ handling/throwing any exceptions since
  //       the exception handler code notifies the runtime of method exits
  //       too. If this happens before, method entry/exit notifications are
  //       not properly paired (was bug - gri 11/22/99).
  __ notify_method_exit(vtos, InterpreterMacroAssembler::NotifyJVMTI);

  // restore potential result in edx:eax, call result handler to
  // restore potential result in ST0 & handle result

  __ pop(ltos);
  LP64_ONLY( __ pop(dtos));

  __ movptr(t, Address(rbp,
                       (frame::interpreter_frame_result_handler_offset) * wordSize));
  __ call(t);

  // remove activation
  __ movptr(t, Address(rbp,
                       frame::interpreter_frame_sender_sp_offset *
                       wordSize)); // get sender sp
  __ leave();                                // remove frame anchor
  __ pop(rdi);                               // get return address
  __ mov(rsp, t);                            // set sp to sender sp
  __ jmp(rdi);

  if (inc_counter) {
    // Handle overflow of counter and compile method
    __ bind(invocation_counter_overflow);
    generate_counter_overflow(continue_after_compile);
  }

  return entry_point;
}

// Abstract method entry
// Attempt to execute abstract method. Throw exception
address TemplateInterpreterGenerator::generate_abstract_entry(void) {

  address entry_point = __ pc();

  // abstract method entry

  //  pop return address, reset last_sp to NULL
  __ empty_expression_stack();
  __ restore_bcp();      // rsi must be correct for exception handler   (was destroyed)
  __ restore_locals();   // make sure locals pointer is correct as well (was destroyed)

  // throw exception
  __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_AbstractMethodError));
  // the call_VM checks for exception, so we should never return here.
  __ should_not_reach_here();

  return entry_point;
}

//
// Generic interpreted method entry to (asm) interpreter
//
address TemplateInterpreterGenerator::generate_normal_entry(bool synchronized) {
  // determine code generation flags
  bool inc_counter = UseCompiler || CountCompiledCalls || LogTouchedMethods;

  // ebx: Method*
  // rbcp: sender sp
  address entry_point = __ pc();

  const Address constMethod(rbx, Method::const_offset());
  const Address access_flags(rbx, Method::access_flags_offset());
  const Address size_of_parameters(rdx,
                                   ConstMethod::size_of_parameters_offset());
  const Address size_of_locals(rdx, ConstMethod::size_of_locals_offset());


  // get parameter size (always needed)
  __ movptr(rdx, constMethod);
  __ load_unsigned_short(rcx, size_of_parameters);

  // rbx: Method*
  // rcx: size of parameters
  // rbcp: sender_sp (could differ from sp+wordSize if we were called via c2i )

  __ load_unsigned_short(rdx, size_of_locals); // get size of locals in words
  __ subl(rdx, rcx); // rdx = no. of additional locals

  // YYY
  //   __ incrementl(rdx);
  //   __ andl(rdx, -2);

  // see if we've got enough room on the stack for locals plus overhead.
  generate_stack_overflow_check();

  // get return address
  __ pop(rax);

  // compute beginning of parameters
  __ lea(rlocals, Address(rsp, rcx, Interpreter::stackElementScale(), -wordSize));

  // rdx - # of additional locals
  // allocate space for locals
  // explicitly initialize locals
  {
    Label exit, loop;
    __ testl(rdx, rdx);
    __ jcc(Assembler::lessEqual, exit); // do nothing if rdx <= 0
    __ bind(loop);
    __ push((int) NULL_WORD); // initialize local variables
    __ decrementl(rdx); // until everything initialized
    __ jcc(Assembler::greater, loop);
    __ bind(exit);
  }

  // initialize fixed part of activation frame
  generate_fixed_frame(false);

  // make sure method is not native & not abstract
#ifdef ASSERT
  __ movl(rax, access_flags);
  {
    Label L;
    __ testl(rax, JVM_ACC_NATIVE);
    __ jcc(Assembler::zero, L);
    __ stop("tried to execute native method as non-native");
    __ bind(L);
  }
  {
    Label L;
    __ testl(rax, JVM_ACC_ABSTRACT);
    __ jcc(Assembler::zero, L);
    __ stop("tried to execute abstract method in interpreter");
    __ bind(L);
  }
#endif

  // Since at this point in the method invocation the exception
  // handler would try to exit the monitor of synchronized methods
  // which hasn't been entered yet, we set the thread local variable
  // _do_not_unlock_if_synchronized to true. The remove_activation
  // will check this flag.

  const Register thread = NOT_LP64(rax) LP64_ONLY(r15_thread);
  NOT_LP64(__ get_thread(thread));
  const Address do_not_unlock_if_synchronized(thread,
        in_bytes(JavaThread::do_not_unlock_if_synchronized_offset()));
  __ movbool(do_not_unlock_if_synchronized, true);

  __ profile_parameters_type(rax, rcx, rdx);
  // increment invocation count & check for overflow
  Label invocation_counter_overflow;
  Label profile_method;
  Label profile_method_continue;
  if (inc_counter) {
    generate_counter_incr(&invocation_counter_overflow,
                          &profile_method,
                          &profile_method_continue);
    if (ProfileInterpreter) {
      __ bind(profile_method_continue);
    }
  }

  Label continue_after_compile;
  __ bind(continue_after_compile);

  // check for synchronized interpreted methods
  bang_stack_shadow_pages(false);

  // reset the _do_not_unlock_if_synchronized flag
  NOT_LP64(__ get_thread(thread));
  __ movbool(do_not_unlock_if_synchronized, false);

  // check for synchronized methods
  // Must happen AFTER invocation_counter check and stack overflow check,
  // so the method is not locked if the counter overflows.
  if (synchronized) {
    // Allocate monitor and lock method
    lock_method();
  } else {
    // no synchronization necessary
#ifdef ASSERT
    {
      Label L;
      __ movl(rax, access_flags);
      __ testl(rax, JVM_ACC_SYNCHRONIZED);
      __ jcc(Assembler::zero, L);
      __ stop("method needs synchronization");
      __ bind(L);
    }
#endif
  }

  // start execution
#ifdef ASSERT
  {
    Label L;
    const Address monitor_block_top(rbp,
                 frame::interpreter_frame_monitor_block_top_offset * wordSize);
    __ movptr(rax, monitor_block_top);
    __ cmpptr(rax, rsp);
    __ jcc(Assembler::equal, L);
    __ stop("broken stack frame setup in interpreter");
    __ bind(L);
  }
#endif

  // jvmti support
  __ notify_method_entry();

  __ dispatch_next(vtos);

  // invocation counter overflow
  if (inc_counter) {
    if (ProfileInterpreter) {
      // We have decided to profile this method in the interpreter
      __ bind(profile_method);
      __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::profile_method));
      __ set_method_data_pointer_for_bcp();
      __ get_method(rbx);
      __ jmp(profile_method_continue);
    }
    // Handle overflow of counter and compile method
    __ bind(invocation_counter_overflow);
    generate_counter_overflow(continue_after_compile);
  }

  return entry_point;
}

//-----------------------------------------------------------------------------
// Exceptions

void TemplateInterpreterGenerator::generate_throw_exception() {
  // Entry point in previous activation (i.e., if the caller was
  // interpreted)
  Interpreter::_rethrow_exception_entry = __ pc();
  // Restore sp to interpreter_frame_last_sp even though we are going
  // to empty the expression stack for the exception processing.
  __ movptr(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize), (int32_t)NULL_WORD);
  // rax: exception
  // rdx: return address/pc that threw exception
  __ restore_bcp();    // r13/rsi points to call/send
  __ restore_locals();
  LP64_ONLY(__ reinit_heapbase());  // restore r12 as heapbase.
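  // Control falls through from the rethrow entry into the throw entry below.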
  // Entry point for exceptions thrown within interpreter code
  Interpreter::_throw_exception_entry = __ pc();
  // expression stack is undefined here
  // rax: exception
  // r13/rsi: exception bcp
  __ verify_oop(rax);
  Register rarg = NOT_LP64(rax) LP64_ONLY(c_rarg1);
  LP64_ONLY(__ mov(c_rarg1, rax));

  // expression stack must be empty before entering the VM in case of
  // an exception
  __ empty_expression_stack();
  // find exception handler address and preserve exception oop
  __ call_VM(rdx,
             CAST_FROM_FN_PTR(address,
                          InterpreterRuntime::exception_handler_for_exception),
             rarg);
  // rax: exception handler entry point
  // rdx: preserved exception oop
  // r13/rsi: bcp for exception handler
  __ push_ptr(rdx); // push exception which is now the only value on the stack
  __ jmp(rax); // jump to exception handler (may be _remove_activation_entry!)

  // If the exception is not handled in the current frame the frame is
  // removed and the exception is rethrown (i.e. exception
  // continuation is _rethrow_exception).
  //
  // Note: At this point the bci is still the bci for the instruction
  // which caused the exception and the expression stack is
  // empty. Thus, for any VM calls at this point, GC will find a legal
  // oop map (with empty expression stack).

  // In current activation
  // tos: exception
  // esi: exception bcp

  //
  // JVMTI PopFrame support
  //

  Interpreter::_remove_activation_preserving_args_entry = __ pc();
  __ empty_expression_stack();
  // Set the popframe_processing bit in pending_popframe_condition
  // indicating that we are currently handling popframe, so that
  // call_VMs that may happen later do not trigger new popframe
  // handling cycles.
  const Register thread = NOT_LP64(rcx) LP64_ONLY(r15_thread);
  NOT_LP64(__ get_thread(thread));
  __ movl(rdx, Address(thread, JavaThread::popframe_condition_offset()));
  __ orl(rdx, JavaThread::popframe_processing_bit);
  __ movl(Address(thread, JavaThread::popframe_condition_offset()), rdx);

  {
    // Check to see whether we are returning to a deoptimized frame.
    // (The PopFrame call ensures that the caller of the popped frame is
    // either interpreted or compiled and deoptimizes it if compiled.)
    // In this case, we can't call dispatch_next() after the frame is
    // popped, but instead must save the incoming arguments and restore
    // them after deoptimization has occurred.
    //
    // Note that we don't compare the return PC against the
    // deoptimization blob's unpack entry because of the presence of
    // adapter frames in C2.
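    // Instead, InterpreterRuntime::interpreter_contains() is used below to
    // test whether the caller's return address points back into interpreter
    // code; if it does not, the caller is treated as deoptimized.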
    Label caller_not_deoptimized;
    Register rarg = NOT_LP64(rdx) LP64_ONLY(c_rarg1);
    __ movptr(rarg, Address(rbp, frame::return_addr_offset * wordSize));
    __ super_call_VM_leaf(CAST_FROM_FN_PTR(address,
                               InterpreterRuntime::interpreter_contains), rarg);
    __ testl(rax, rax);
    __ jcc(Assembler::notZero, caller_not_deoptimized);

    // Compute size of arguments for saving when returning to
    // deoptimized caller
    __ get_method(rax);
    __ movptr(rax, Address(rax, Method::const_offset()));
    __ load_unsigned_short(rax, Address(rax, in_bytes(ConstMethod::
                                                size_of_parameters_offset())));
    __ shll(rax, Interpreter::logStackElementSize);
    __ restore_locals();
    __ subptr(rlocals, rax);
    __ addptr(rlocals, wordSize);
    // Save these arguments
    NOT_LP64(__ get_thread(thread));
    __ super_call_VM_leaf(CAST_FROM_FN_PTR(address,
                                           Deoptimization::
                                           popframe_preserve_args),
                          thread, rax, rlocals);

    __ remove_activation(vtos, rdx,
                         /* throw_monitor_exception */ false,
                         /* install_monitor_exception */ false,
                         /* notify_jvmdi */ false);

    // Inform deoptimization that it is responsible for restoring
    // these arguments
    NOT_LP64(__ get_thread(thread));
    __ movl(Address(thread, JavaThread::popframe_condition_offset()),
            JavaThread::popframe_force_deopt_reexecution_bit);

    // Continue in deoptimization handler
    __ jmp(rdx);

    __ bind(caller_not_deoptimized);
  }

  __ remove_activation(vtos, rdx, /* rdx result (retaddr) is not used */
                       /* throw_monitor_exception */ false,
                       /* install_monitor_exception */ false,
                       /* notify_jvmdi */ false);

  // Finish with popframe handling
  // A previous I2C followed by a deoptimization might have moved the
  // outgoing arguments further up the stack. PopFrame expects the
  // mutations to those outgoing arguments to be preserved and other
  // constraints basically require this frame to look exactly as
  // though it had previously invoked an interpreted activation with
  // no space between the top of the expression stack (current
  // last_sp) and the top of stack. Rather than force deopt to
  // maintain this kind of invariant all the time we call a small
  // fixup routine to move the mutated arguments onto the top of our
  // expression stack if necessary.
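  // The fixup routine is InterpreterRuntime::popframe_move_outgoing_args,
  // called below as a leaf with the current rsp and the saved last_sp;
  // last_Java_frame is set up by hand around the call (the PC must point
  // into the interpreter) and reset once the arguments have been moved.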
#ifndef _LP64
  __ mov(rax, rsp);
  __ movptr(rbx, Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize));
  __ get_thread(thread);
  // PC must point into interpreter here
  __ set_last_Java_frame(thread, noreg, rbp, __ pc());
  __ super_call_VM_leaf(CAST_FROM_FN_PTR(address, InterpreterRuntime::popframe_move_outgoing_args), thread, rax, rbx);
  __ get_thread(thread);
#else
  __ mov(c_rarg1, rsp);
  __ movptr(c_rarg2, Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize));
  // PC must point into interpreter here
  __ set_last_Java_frame(noreg, rbp, __ pc());
  __ super_call_VM_leaf(CAST_FROM_FN_PTR(address, InterpreterRuntime::popframe_move_outgoing_args), r15_thread, c_rarg1, c_rarg2);
#endif
  __ reset_last_Java_frame(thread, true);

  // Restore the last_sp and null it out
  __ movptr(rsp, Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize));
  __ movptr(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize), (int32_t)NULL_WORD);

  __ restore_bcp();
  __ restore_locals();
  // The method data pointer was incremented already during
  // call profiling. We have to restore the mdp for the current bcp.
  if (ProfileInterpreter) {
    __ set_method_data_pointer_for_bcp();
  }

  // Clear the popframe condition flag
  NOT_LP64(__ get_thread(thread));
  __ movl(Address(thread, JavaThread::popframe_condition_offset()),
          JavaThread::popframe_inactive);

#if INCLUDE_JVMTI
  {
    Label L_done;
    const Register local0 = rlocals;

    __ cmpb(Address(rbcp, 0), Bytecodes::_invokestatic);
    __ jcc(Assembler::notEqual, L_done);

    // The member name argument must be restored if _invokestatic is re-executed after a PopFrame call.
    // Detect such a case in the InterpreterRuntime function and return the member name argument, or NULL.
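    //
    // The VM call below passes the first local (the candidate MemberName),
    // the current method and the bcp; it returns the MemberName argument
    // that has to be re-installed, or NULL, in which case the jump to
    // L_done leaves the locals untouched.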
    __ get_method(rdx);
    __ movptr(rax, Address(local0, 0));
    __ call_VM(rax, CAST_FROM_FN_PTR(address, InterpreterRuntime::member_name_arg_or_null), rax, rdx, rbcp);

    __ testptr(rax, rax);
    __ jcc(Assembler::zero, L_done);

    __ movptr(Address(rbx, 0), rax);
    __ bind(L_done);
  }
#endif // INCLUDE_JVMTI

  __ dispatch_next(vtos);
  // end of PopFrame support

  Interpreter::_remove_activation_entry = __ pc();

  // preserve exception over this code sequence
  __ pop_ptr(rax);
  NOT_LP64(__ get_thread(thread));
  __ movptr(Address(thread, JavaThread::vm_result_offset()), rax);
  // remove the activation (without doing throws on illegalMonitorExceptions)
  __ remove_activation(vtos, rdx, false, true, false);
  // restore exception
  NOT_LP64(__ get_thread(thread));
  __ get_vm_result(rax, thread);

  // In between activations - previous activation type unknown yet
  // compute continuation point - the continuation point expects the
  // following registers set up:
  //
  // rax: exception
  // rdx: return address/pc that threw exception
  // rsp: expression stack of caller
  // rbp: ebp of caller
  __ push(rax);                                  // save exception
  __ push(rdx);                                  // save return address
  __ super_call_VM_leaf(CAST_FROM_FN_PTR(address,
                          SharedRuntime::exception_handler_for_return_address),
                        thread, rdx);
  __ mov(rbx, rax);                              // save exception handler
  __ pop(rdx);                                   // restore return address
  __ pop(rax);                                   // restore exception
  // Note that an "issuing PC" is actually the next PC after the call
  __ jmp(rbx);                                   // jump to exception
                                                 // handler of caller
}


//
// JVMTI ForceEarlyReturn support
//
address TemplateInterpreterGenerator::generate_earlyret_entry_for(TosState state) {
  address entry = __ pc();

  __ restore_bcp();
  __ restore_locals();
  __ empty_expression_stack();
  __ load_earlyret_value(state);  // 32 bits returns value in rdx, so don't reuse

  const Register thread = NOT_LP64(rcx) LP64_ONLY(r15_thread);
  NOT_LP64(__ get_thread(thread));
  __ movptr(rcx, Address(thread, JavaThread::jvmti_thread_state_offset()));
  Address cond_addr(rcx, JvmtiThreadState::earlyret_state_offset());

  // Clear the earlyret state
  __ movl(cond_addr, JvmtiThreadState::earlyret_inactive);

  __ remove_activation(state, rsi,
                       false, /* throw_monitor_exception */
                       false, /* install_monitor_exception */
                       true); /* notify_jvmdi */
  __ jmp(rsi);

  return entry;
} // end of ForceEarlyReturn support


//-----------------------------------------------------------------------------
// Helper for vtos entry point generation

void TemplateInterpreterGenerator::set_vtos_entry_points(Template* t,
                                                         address& bep,
                                                         address& cep,
                                                         address& sep,
                                                         address& aep,
                                                         address& iep,
                                                         address& lep,
                                                         address& fep,
                                                         address& dep,
                                                         address& vep) {
  assert(t->is_valid() && t->tos_in() == vtos, "illegal template");
  Label L;
  aep = __ pc();  __ push_ptr();   __ jmp(L);
#ifndef _LP64
  fep = __ pc();  __ push(ftos);   __ jmp(L);
  dep = __ pc();  __ push(dtos);   __ jmp(L);
#else
  fep = __ pc();  __ push_f(xmm0); __ jmp(L);
  dep = __ pc();  __ push_d(xmm0); __ jmp(L);
#endif // _LP64
  lep = __ pc();  __ push_l();     __ jmp(L);
  bep = cep = sep =
  iep = __ pc();  __ push_i();
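  // byte, char and short results are already widened to int in tosca, so
  // bep, cep and sep share the itos entry above; vep follows without a
  // push since there is no value to save for vtos.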
  vep = __ pc();
  __ bind(L);
  generate_and_dispatch(t);
}

//-----------------------------------------------------------------------------

// Non-product code
#ifndef PRODUCT

address TemplateInterpreterGenerator::generate_trace_code(TosState state) {
  address entry = __ pc();

#ifndef _LP64
  // prepare expression stack
  __ pop(rcx);          // pop return address so expression stack is 'pure'
  __ push(state);       // save tosca

  // pass tosca registers as arguments & call tracer
  __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::trace_bytecode), rcx, rax, rdx);
  __ mov(rcx, rax);     // make sure return address is not destroyed by pop(state)
  __ pop(state);        // restore tosca

  // return
  __ jmp(rcx);
#else
  __ push(state);
  __ push(c_rarg0);
  __ push(c_rarg1);
  __ push(c_rarg2);
  __ push(c_rarg3);
  __ mov(c_rarg2, rax);  // Pass itos
#ifdef _WIN64
  __ movflt(xmm3, xmm0); // Pass ftos
#endif
  __ call_VM(noreg,
             CAST_FROM_FN_PTR(address, InterpreterRuntime::trace_bytecode),
             c_rarg1, c_rarg2, c_rarg3);
  __ pop(c_rarg3);
  __ pop(c_rarg2);
  __ pop(c_rarg1);
  __ pop(c_rarg0);
  __ pop(state);
  __ ret(0);                                   // return from result handler
#endif // _LP64

  return entry;
}

void TemplateInterpreterGenerator::count_bytecode() {
  __ incrementl(ExternalAddress((address) &BytecodeCounter::_counter_value));
}

void TemplateInterpreterGenerator::histogram_bytecode(Template* t) {
  __ incrementl(ExternalAddress((address) &BytecodeHistogram::_counters[t->bytecode()]));
}

void TemplateInterpreterGenerator::histogram_bytecode_pair(Template* t) {
  // _index holds the previous pair index; shifting right by
  // log2_number_of_codes leaves the previous bytecode, and or-ing in
  // (current << log2_number_of_codes) forms the new pair index used to
  // address _counters.
  __ mov32(rbx, ExternalAddress((address) &BytecodePairHistogram::_index));
  __ shrl(rbx, BytecodePairHistogram::log2_number_of_codes);
  __ orl(rbx,
         ((int) t->bytecode()) <<
         BytecodePairHistogram::log2_number_of_codes);
  __ mov32(ExternalAddress((address) &BytecodePairHistogram::_index), rbx);
  __ lea(rscratch1, ExternalAddress((address) BytecodePairHistogram::_counters));
  __ incrementl(Address(rscratch1, rbx, Address::times_4));
}


void TemplateInterpreterGenerator::trace_bytecode(Template* t) {
  // Call a little run-time stub to avoid blow-up for each bytecode.
  // The run-time stub saves the right registers, depending on
  // the tosca in-state for the given template.

  assert(Interpreter::trace_code(t->tos_in()) != NULL,
         "entry must have been generated");
#ifndef _LP64
  __ call(RuntimeAddress(Interpreter::trace_code(t->tos_in())));
#else
  __ mov(r12, rsp);    // remember sp (can only use r12 if not using call_VM)
  __ andptr(rsp, -16); // align stack as required by ABI
  __ call(RuntimeAddress(Interpreter::trace_code(t->tos_in())));
  __ mov(rsp, r12);    // restore sp
  __ reinit_heapbase();
#endif // _LP64
}


void TemplateInterpreterGenerator::stop_interpreter_at() {
  Label L;
  __ cmp32(ExternalAddress((address) &BytecodeCounter::_counter_value),
           StopInterpreterAt);
  __ jcc(Assembler::notEqual, L);
  __ int3();
  __ bind(L);
}
#endif // !PRODUCT