/*
 * Copyright (c) 2003, 2015, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2014, Red Hat Inc. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "asm/macroAssembler.hpp"
#include "interpreter/bytecodeHistogram.hpp"
#include "interpreter/interpreter.hpp"
#include "interpreter/interpreterGenerator.hpp"
#include "interpreter/interpreterRuntime.hpp"
#include "interpreter/interp_masm.hpp"
#include "interpreter/templateTable.hpp"
#include "interpreter/bytecodeTracer.hpp"
#include "oops/arrayOop.hpp"
#include "oops/methodData.hpp"
#include "oops/method.hpp"
#include "oops/oop.inline.hpp"
#include "prims/jvmtiExport.hpp"
#include "prims/jvmtiThreadState.hpp"
#include "runtime/arguments.hpp"
#include "runtime/deoptimization.hpp"
#include "runtime/frame.inline.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubRoutines.hpp"
#include "runtime/synchronizer.hpp"
#include "runtime/timer.hpp"
#include "runtime/vframeArray.hpp"
#include "utilities/debug.hpp"
#include <sys/types.h>

#ifndef PRODUCT
#include "oops/method.hpp"
#endif // !PRODUCT

#ifdef BUILTIN_SIM
#include "../../../../../../simulator/simulator.hpp"
#endif

#define __ _masm->

#ifndef CC_INTERP

//-----------------------------------------------------------------------------

extern "C" void entry(CodeBuffer*);

//-----------------------------------------------------------------------------

address TemplateInterpreterGenerator::generate_StackOverflowError_handler() {
  address entry = __ pc();

#ifdef ASSERT
  {
    Label L;
    __ ldr(rscratch1, Address(rfp,
                              frame::interpreter_frame_monitor_block_top_offset *
                              wordSize));
    __ mov(rscratch2, sp);
    __ cmp(rscratch1, rscratch2); // maximal rsp for current rfp (stack
                                  // grows negative)
    __ br(Assembler::HS, L);      // check if frame is complete
    __ stop("interpreter frame not set up");
    __ bind(L);
  }
#endif // ASSERT
  // Restore bcp under the assumption that the current frame is still
  // interpreted
  __ restore_bcp();

  // expression stack must be empty before entering the VM if an
  // exception happened
  __ empty_expression_stack();
  // throw exception
  __ call_VM(noreg,
             CAST_FROM_FN_PTR(address,
                              InterpreterRuntime::throw_StackOverflowError));
  return entry;
}

address TemplateInterpreterGenerator::generate_ArrayIndexOutOfBounds_handler(
        const char* name) {
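  // Note: unlike the StackOverflowError handler above there is no
  // restore_bcp() here; this entry is presumably only reached with the
  // interpreter frame already fully set up.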
  address entry = __ pc();
  // expression stack must be empty before entering the VM if an
  // exception happened
  __ empty_expression_stack();
  // setup parameters
  // ??? convention: expect aberrant index in register r1
  __ movw(c_rarg2, r1);
  __ mov(c_rarg1, (address)name);
  __ call_VM(noreg,
             CAST_FROM_FN_PTR(address,
                              InterpreterRuntime::
                              throw_ArrayIndexOutOfBoundsException),
             c_rarg1, c_rarg2);
  return entry;
}

address TemplateInterpreterGenerator::generate_ClassCastException_handler() {
  address entry = __ pc();

  // object is at TOS
  __ pop(c_rarg1);

  // expression stack must be empty before entering the VM if an
  // exception happened
  __ empty_expression_stack();

  __ call_VM(noreg,
             CAST_FROM_FN_PTR(address,
                              InterpreterRuntime::
                              throw_ClassCastException),
             c_rarg1);
  return entry;
}

address TemplateInterpreterGenerator::generate_exception_handler_common(
        const char* name, const char* message, bool pass_oop) {
  assert(!pass_oop || message == NULL, "either oop or message but not both");
  address entry = __ pc();
  if (pass_oop) {
    // object is at TOS
    __ pop(c_rarg2);
  }
  // expression stack must be empty before entering the VM if an
  // exception happened
  __ empty_expression_stack();
  // setup parameters
  __ lea(c_rarg1, Address((address)name));
  if (pass_oop) {
    __ call_VM(r0, CAST_FROM_FN_PTR(address,
                                    InterpreterRuntime::
                                    create_klass_exception),
               c_rarg1, c_rarg2);
  } else {
    // Kind of lame: ExternalAddress can't take NULL because
    // external_word_Relocation will assert.
    if (message != NULL) {
      __ lea(c_rarg2, Address((address)message));
    } else {
      __ mov(c_rarg2, NULL_WORD);
    }
    __ call_VM(r0,
               CAST_FROM_FN_PTR(address, InterpreterRuntime::create_exception),
               c_rarg1, c_rarg2);
  }
  // throw exception
  __ b(address(Interpreter::throw_exception_entry()));
  return entry;
}

address TemplateInterpreterGenerator::generate_continuation_for(TosState state) {
  address entry = __ pc();
  // NULL last_sp until next java call
  __ str(zr, Address(rfp, frame::interpreter_frame_last_sp_offset * wordSize));
  __ dispatch_next(state);
  return entry;
}

address TemplateInterpreterGenerator::generate_return_entry_for(TosState state, int step, size_t index_size) {
  address entry = __ pc();

  // Restore stack bottom in case i2c adjusted stack
  __ ldr(esp, Address(rfp, frame::interpreter_frame_last_sp_offset * wordSize));
  // and NULL it as marker that esp is now tos until next java call
  __ str(zr, Address(rfp, frame::interpreter_frame_last_sp_offset * wordSize));
  __ restore_bcp();
  __ restore_locals();
  __ restore_constant_pool_cache();
  __ get_method(rmethod);

  // Pop N words from the stack
  __ get_cache_and_index_at_bcp(r1, r2, 1, index_size);
  __ ldr(r1, Address(r1, ConstantPoolCache::base_offset() + ConstantPoolCacheEntry::flags_offset()));
  __ andr(r1, r1, ConstantPoolCacheEntry::parameter_size_mask);

  __ add(esp, esp, r1, Assembler::LSL, 3);

  // Restore machine SP
  __ ldr(rscratch1, Address(rmethod, Method::const_offset()));
  __ ldrh(rscratch1, Address(rscratch1, ConstMethod::max_stack_offset()));
  __ add(rscratch1, rscratch1, frame::interpreter_frame_monitor_size() + 2);
  __ ldr(rscratch2,
         Address(rfp, frame::interpreter_frame_initial_sp_offset * wordSize));
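  // In effect: sp = (initial_sp - (max_stack + monitor_size + 2) * wordSize)
  // rounded down to a 16-byte boundary; the "+ 2" looks like slop so the
  // rounding below cannot eat into the frame.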
  __ sub(rscratch1, rscratch2, rscratch1, ext::uxtw, 3);
  __ andr(sp, rscratch1, -16);

#ifndef PRODUCT
  // tell the simulator that the method has been reentered
  if (NotifySimulator) {
    __ notify(Assembler::method_reentry);
  }
#endif
  __ get_dispatch();
  __ dispatch_next(state, step);

  return entry;
}

address TemplateInterpreterGenerator::generate_deopt_entry_for(TosState state,
                                                               int step) {
  address entry = __ pc();
  __ restore_bcp();
  __ restore_locals();
  __ restore_constant_pool_cache();
  __ get_method(rmethod);

  // handle exceptions
  {
    Label L;
    __ ldr(rscratch1, Address(rthread, Thread::pending_exception_offset()));
    __ cbz(rscratch1, L);
    __ call_VM(noreg,
               CAST_FROM_FN_PTR(address,
                                InterpreterRuntime::throw_pending_exception));
    __ should_not_reach_here();
    __ bind(L);
  }

  __ get_dispatch();

  // Calculate stack limit
  __ ldr(rscratch1, Address(rmethod, Method::const_offset()));
  __ ldrh(rscratch1, Address(rscratch1, ConstMethod::max_stack_offset()));
  __ add(rscratch1, rscratch1, frame::interpreter_frame_monitor_size() + 2);
  __ ldr(rscratch2,
         Address(rfp, frame::interpreter_frame_initial_sp_offset * wordSize));
  __ sub(rscratch1, rscratch2, rscratch1, ext::uxtx, 3);
  __ andr(sp, rscratch1, -16);

  // Restore expression stack pointer
  __ ldr(esp, Address(rfp, frame::interpreter_frame_last_sp_offset * wordSize));
  // NULL last_sp until next java call
  __ str(zr, Address(rfp, frame::interpreter_frame_last_sp_offset * wordSize));

  __ dispatch_next(state, step);
  return entry;
}


int AbstractInterpreter::BasicType_as_index(BasicType type) {
  int i = 0;
  switch (type) {
    case T_BOOLEAN: i = 0; break;
    case T_CHAR   : i = 1; break;
    case T_BYTE   : i = 2; break;
    case T_SHORT  : i = 3; break;
    case T_INT    : i = 4; break;
    case T_LONG   : i = 5; break;
    case T_VOID   : i = 6; break;
    case T_FLOAT  : i = 7; break;
    case T_DOUBLE : i = 8; break;
    case T_OBJECT : i = 9; break;
    case T_ARRAY  : i = 9; break;
    default       : ShouldNotReachHere();
  }
  assert(0 <= i && i < AbstractInterpreter::number_of_result_handlers,
         "index out of bounds");
  return i;
}


address TemplateInterpreterGenerator::generate_result_handler_for(
        BasicType type) {
  address entry = __ pc();
  switch (type) {
  case T_BOOLEAN: __ uxtb(r0, r0);     break;
  case T_CHAR   : __ uxth(r0, r0);     break;
  case T_BYTE   : __ sxtb(r0, r0);     break;
  case T_SHORT  : __ sxth(r0, r0);     break;
  case T_INT    : __ uxtw(r0, r0);     break; // FIXME: We almost certainly don't need this
  case T_LONG   : /* nothing to do */  break;
  case T_VOID   : /* nothing to do */  break;
  case T_FLOAT  : /* nothing to do */  break;
  case T_DOUBLE : /* nothing to do */  break;
  case T_OBJECT :
    // retrieve result from frame
    __ ldr(r0, Address(rfp, frame::interpreter_frame_oop_temp_offset * wordSize));
    // and verify it
    __ verify_oop(r0);
    break;
  default       : ShouldNotReachHere();
  }
  __ ret(lr);  // return from result handler
  return entry;
}

address TemplateInterpreterGenerator::generate_safept_entry_for(
        TosState state,
        address runtime_entry) {
  address entry = __ pc();
  __ push(state);
  __ call_VM(noreg, runtime_entry);
  __ membar(Assembler::AnyAny);
  __ dispatch_via(vtos, Interpreter::_normal_table.table_for(vtos));
  return entry;
}

// Helpers for commoning out cases in the various types of method entries.
//


// increment invocation count & check for overflow
//
// Note: checking for negative value instead of overflow
//       so we have a 'sticky' overflow test
//
// rmethod: method
//
void InterpreterGenerator::generate_counter_incr(
        Label* overflow,
        Label* profile_method,
        Label* profile_method_continue) {
  Label done;
  // Note: In tiered we increment either counters in Method* or in MDO,
  // depending on whether we're profiling or not.
  if (TieredCompilation) {
    int increment = InvocationCounter::count_increment;
    Label no_mdo;
    if (ProfileInterpreter) {
      // Are we profiling?
      __ ldr(r0, Address(rmethod, Method::method_data_offset()));
      __ cbz(r0, no_mdo);
      // Increment counter in the MDO
      const Address mdo_invocation_counter(r0, in_bytes(MethodData::invocation_counter_offset()) +
                                               in_bytes(InvocationCounter::counter_offset()));
      const Address mask(r0, in_bytes(MethodData::invoke_mask_offset()));
      __ increment_mask_and_jump(mdo_invocation_counter, increment, mask, rscratch1, rscratch2, false, Assembler::EQ, overflow);
      __ b(done);
    }
    __ bind(no_mdo);
    // Increment counter in MethodCounters
    const Address invocation_counter(rscratch2,
                  MethodCounters::invocation_counter_offset() +
                  InvocationCounter::counter_offset());
    __ get_method_counters(rmethod, rscratch2, done);
    const Address mask(rscratch2, in_bytes(MethodCounters::invoke_mask_offset()));
    __ increment_mask_and_jump(invocation_counter, increment, mask, rscratch1, r1, false, Assembler::EQ, overflow);
    __ bind(done);
  } else { // not TieredCompilation
    const Address backedge_counter(rscratch2,
                  MethodCounters::backedge_counter_offset() +
                  InvocationCounter::counter_offset());
    const Address invocation_counter(rscratch2,
                  MethodCounters::invocation_counter_offset() +
                  InvocationCounter::counter_offset());

    __ get_method_counters(rmethod, rscratch2, done);

    if (ProfileInterpreter) { // %%% Merge this into MethodData*
      __ ldrw(r1, Address(rscratch2, MethodCounters::interpreter_invocation_counter_offset()));
      __ addw(r1, r1, 1);
      __ strw(r1, Address(rscratch2, MethodCounters::interpreter_invocation_counter_offset()));
    }
    // Update standard invocation counters
    __ ldrw(r1, invocation_counter);
    __ ldrw(r0, backedge_counter);

    __ addw(r1, r1, InvocationCounter::count_increment);
    __ andw(r0, r0, InvocationCounter::count_mask_value);

    __ strw(r1, invocation_counter);
    __ addw(r0, r0, r1);  // add both counters

    // profile_method is non-null only for interpreted methods, so
    // profile_method != NULL == !native_call

    if (ProfileInterpreter && profile_method != NULL) {
      // Test to see if we should create a method data oop
      __ ldr(rscratch2, Address(rmethod, Method::method_counters_offset()));
      __ ldrw(rscratch2, Address(rscratch2, in_bytes(MethodCounters::interpreter_profile_limit_offset())));
      __ cmpw(r0, rscratch2);
      __ br(Assembler::LT, *profile_method_continue);

      // if no method data exists, go to profile_method
      __ test_method_data_pointer(r0, *profile_method);
    }

    {
      __ ldr(rscratch2, Address(rmethod, Method::method_counters_offset()));
      __ ldrw(rscratch2, Address(rscratch2, in_bytes(MethodCounters::interpreter_invocation_limit_offset())));
      __ cmpw(r0, rscratch2);
      __ br(Assembler::HS, *overflow);
    }
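    // Fall through: counters were bumped but neither the profiling limit
    // nor the compilation-trigger limit was hit.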
    __ bind(done);
  }
}

void InterpreterGenerator::generate_counter_overflow(Label* do_continue) {

  // Asm interpreter on entry
  // On return (i.e. jump to entry_point) [ back to invocation of interpreter ]
  // Everything as it was on entry

  // InterpreterRuntime::frequency_counter_overflow takes two
  // arguments, the first (thread) is passed by call_VM, the second
  // indicates if the counter overflow occurs at a backwards branch
  // (NULL bcp). We pass zero for it. The call returns the address
  // of the verified entry point for the method or NULL if the
  // compilation did not complete (either went background or bailed
  // out).
  __ mov(c_rarg1, 0);
  __ call_VM(noreg,
             CAST_FROM_FN_PTR(address,
                              InterpreterRuntime::frequency_counter_overflow),
             c_rarg1);

  __ b(*do_continue);
}

// See if we've got enough room on the stack for locals plus overhead.
// The expression stack grows down incrementally, so the normal guard
// page mechanism will work for that.
//
// Note: the additional locals are also always pushed (this wasn't
// obvious in generate_method_entry), so the guard should work for
// them too.
//
// Args:
//   r3: number of additional locals this frame needs (what we must check)
//   rmethod: Method*
//
// Kills:
//   r0
void InterpreterGenerator::generate_stack_overflow_check(void) {

  // monitor entry size: see picture of stack set
  // (generate_method_entry) and frame_aarch64.hpp
  const int entry_size = frame::interpreter_frame_monitor_size() * wordSize;

  // total overhead size: entry_size + (saved rbp through expr stack
  // bottom). be sure to change this if you add/subtract anything
  // to/from the overhead area
  const int overhead_size =
    -(frame::interpreter_frame_initial_sp_offset * wordSize) + entry_size;

  const int page_size = os::vm_page_size();

  Label after_frame_check;

  // see if the frame is greater than one page in size. If so,
  // then we need to verify there is enough stack space remaining
  // for the additional locals.
  //
  // Note that we use SUBS rather than CMP here because the immediate
  // field of this instruction may overflow. SUBS can cope with this
  // because it is a macro that will expand to some number of MOV
  // instructions and a register operation.
  __ subs(rscratch1, r3, (page_size - overhead_size) / Interpreter::stackElementSize);
  __ br(Assembler::LS, after_frame_check);

  // compute rsp as if this were going to be the last frame on
  // the stack before the red zone

  const Address stack_base(rthread, Thread::stack_base_offset());
  const Address stack_size(rthread, Thread::stack_size_offset());

  // locals + overhead, in bytes
  __ mov(r0, overhead_size);
  __ add(r0, r0, r3, Assembler::LSL, Interpreter::logStackElementSize);  // 2 slots per parameter.
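  // r0 now holds the prospective frame size in bytes:
  //   overhead_size + additional_locals * Interpreter::stackElementSize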

  __ ldr(rscratch1, stack_base);
  __ ldr(rscratch2, stack_size);

#ifdef ASSERT
  Label stack_base_okay, stack_size_okay;
  // verify that thread stack base is non-zero
  __ cbnz(rscratch1, stack_base_okay);
  __ stop("stack base is zero");
  __ bind(stack_base_okay);
  // verify that thread stack size is non-zero
  __ cbnz(rscratch2, stack_size_okay);
  __ stop("stack size is zero");
  __ bind(stack_size_okay);
#endif

  // Add stack base to locals and subtract stack size
  __ sub(rscratch1, rscratch1, rscratch2); // Stack limit
  __ add(r0, r0, rscratch1);

  // Use the maximum number of pages we might bang.
  const int max_pages = StackShadowPages > (StackRedPages+StackYellowPages) ? StackShadowPages :
                        (StackRedPages+StackYellowPages);

  // add in the red and yellow zone sizes
  __ add(r0, r0, max_pages * page_size * 2);

  // check against the current stack bottom
  __ cmp(sp, r0);
  __ br(Assembler::HI, after_frame_check);

  // Remove the incoming args, peeling the machine SP back to where it
  // was in the caller. This is not strictly necessary, but unless we
  // do so the stack frame may have a garbage FP; this ensures a
  // correct call stack that we can always unwind. The ANDR should be
  // unnecessary because the sender SP in r13 is always aligned, but
  // it doesn't hurt.
  __ andr(sp, r13, -16);

  // Note: the restored frame is not necessarily interpreted.
  // Use the shared runtime version of the StackOverflowError.
  assert(StubRoutines::throw_StackOverflowError_entry() != NULL, "stub not yet generated");
  __ far_jump(RuntimeAddress(StubRoutines::throw_StackOverflowError_entry()));

  // all done with frame size check
  __ bind(after_frame_check);
}

// Allocate monitor and lock method (asm interpreter)
//
// Args:
//   rmethod: Method*
//   rlocals: locals
//
// Kills:
//   r0
//   c_rarg0, c_rarg1, c_rarg2, c_rarg3, ...(param regs)
//   rscratch1, rscratch2 (scratch regs)
void InterpreterGenerator::lock_method(void) {
  // synchronize method
  const Address access_flags(rmethod, Method::access_flags_offset());
  const Address monitor_block_top(
        rfp,
        frame::interpreter_frame_monitor_block_top_offset * wordSize);
  const int entry_size = frame::interpreter_frame_monitor_size() * wordSize;

#ifdef ASSERT
  {
    Label L;
    __ ldrw(r0, access_flags);
    __ tst(r0, JVM_ACC_SYNCHRONIZED);
    __ br(Assembler::NE, L);
    __ stop("method doesn't need synchronization");
    __ bind(L);
  }
#endif // ASSERT

  // get synchronization object
  {
    const int mirror_offset = in_bytes(Klass::java_mirror_offset());
    Label done;
    __ ldrw(r0, access_flags);
    __ tst(r0, JVM_ACC_STATIC);
    // get receiver (assume this is frequent case)
    __ ldr(r0, Address(rlocals, Interpreter::local_offset_in_bytes(0)));
    __ br(Assembler::EQ, done);
    __ ldr(r0, Address(rmethod, Method::const_offset()));
    __ ldr(r0, Address(r0, ConstMethod::constants_offset()));
    __ ldr(r0, Address(r0,
                       ConstantPool::pool_holder_offset_in_bytes()));
    __ ldr(r0, Address(r0, mirror_offset));

#ifdef ASSERT
    {
      Label L;
      __ cbnz(r0, L);
      __ stop("synchronization object is NULL");
      __ bind(L);
    }
#endif // ASSERT

    __ bind(done);
  }

  // add space for monitor & lock
  __ sub(sp, sp, entry_size);  // add space for a monitor entry
  __ sub(esp, esp, entry_size);
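  // The new monitor entry now occupies [esp, esp + entry_size); record it
  // as the monitor block top so stack walkers and unlock code can find it.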
  __ mov(rscratch1, esp);
  __ str(rscratch1, monitor_block_top);  // set new monitor block top
  // store object
  __ str(r0, Address(esp, BasicObjectLock::obj_offset_in_bytes()));
  __ mov(c_rarg1, esp); // object address
  __ lock_object(c_rarg1);
}

// Generate a fixed interpreter frame. This is identical setup for
// interpreted methods and for native methods hence the shared code.
//
// Args:
//   lr: return address
//   rmethod: Method*
//   rlocals: pointer to locals
//   rcpool: cp cache
//   stack_pointer: previous sp
void TemplateInterpreterGenerator::generate_fixed_frame(bool native_call) {
  // initialize fixed part of activation frame
  if (native_call) {
    __ sub(esp, sp, 12 * wordSize);
    __ mov(rbcp, zr);
    __ stp(esp, zr, Address(__ pre(sp, -12 * wordSize)));
    // add 2 zero-initialized slots for native calls
    __ stp(zr, zr, Address(sp, 10 * wordSize));
  } else {
    __ sub(esp, sp, 10 * wordSize);
    __ ldr(rscratch1, Address(rmethod, Method::const_offset()));     // get ConstMethod
    __ add(rbcp, rscratch1, in_bytes(ConstMethod::codes_offset()));  // get codebase
    __ stp(esp, rbcp, Address(__ pre(sp, -10 * wordSize)));
  }

  if (ProfileInterpreter) {
    Label method_data_continue;
    __ ldr(rscratch1, Address(rmethod, Method::method_data_offset()));
    __ cbz(rscratch1, method_data_continue);
    __ lea(rscratch1, Address(rscratch1, in_bytes(MethodData::data_offset())));
    __ bind(method_data_continue);
    __ stp(rscratch1, rmethod, Address(sp, 4 * wordSize));  // save Method* and mdp (method data pointer)
  } else {
    __ stp(zr, rmethod, Address(sp, 4 * wordSize));         // save Method* (no mdp)
  }

  __ ldr(rcpool, Address(rmethod, Method::const_offset()));
  __ ldr(rcpool, Address(rcpool, ConstMethod::constants_offset()));
  __ ldr(rcpool, Address(rcpool, ConstantPool::cache_offset_in_bytes()));
  __ stp(rlocals, rcpool, Address(sp, 2 * wordSize));

  __ stp(rfp, lr, Address(sp, 8 * wordSize));
  __ lea(rfp, Address(sp, 8 * wordSize));

  // set sender sp
  // leave last_sp as null
  __ stp(zr, r13, Address(sp, 6 * wordSize));

  // Move SP out of the way
  if (! native_call) {
    __ ldr(rscratch1, Address(rmethod, Method::const_offset()));
    __ ldrh(rscratch1, Address(rscratch1, ConstMethod::max_stack_offset()));
    __ add(rscratch1, rscratch1, frame::interpreter_frame_monitor_size() + 2);
    __ sub(rscratch1, sp, rscratch1, ext::uxtw, 3);
    __ andr(sp, rscratch1, -16);
  }
}

// End of helpers

// Various method entries
//------------------------------------------------------------------------------------------------------------------------
//
//

// Method entry for java.lang.ref.Reference.get.
address InterpreterGenerator::generate_Reference_get_entry(void) {
#if INCLUDE_ALL_GCS
  // Code: _aload_0, _getfield, _areturn
  // parameter size = 1
  //
  // The code that gets generated by this routine is split into 2 parts:
  //    1. The "intrinsified" code for G1 (or any SATB based GC),
  //    2. The slow path - which is an expansion of the regular method entry.
  //
  // Notes:-
  // * In the G1 code we do not check whether we need to block for
  //   a safepoint.
  //   If G1 is enabled then we must execute the specialized
  //   code for Reference.get (except when the Reference object is null)
  //   so that we can log the value in the referent field with an SATB
  //   update buffer.
  //   If the code for the getfield template is modified so that the
  //   G1 pre-barrier code is executed when the current method is
  //   Reference.get() then going through the normal method entry
  //   will be fine.
  // * The G1 code can, however, check the receiver object (the instance
  //   of java.lang.Reference) and jump to the slow path if null. If the
  //   Reference object is null then we obviously cannot fetch the referent
  //   and so we don't need to call the G1 pre-barrier. Thus we can use the
  //   regular method entry code to generate the NPE.
  //
  // This code is based on generate_accessor_entry.
  //
  // rmethod: Method*
  // r13: senderSP must be preserved for slow path, set SP to it on fast path

  address entry = __ pc();

  const int referent_offset = java_lang_ref_Reference::referent_offset;
  guarantee(referent_offset > 0, "referent offset not initialized");

  if (UseG1GC) {
    Label slow_path;
    const Register local_0 = c_rarg0;
    // Check if local 0 != NULL
    // If the receiver is null then it is OK to jump to the slow path.
    __ ldr(local_0, Address(esp, 0));
    __ cbz(local_0, slow_path);


    // Load the value of the referent field.
    const Address field_address(local_0, referent_offset);
    __ load_heap_oop(local_0, field_address);

    // Generate the G1 pre-barrier code to log the value of
    // the referent field in an SATB buffer.
    __ enter(); // g1_write may call runtime
    __ g1_write_barrier_pre(noreg /* obj */,
                            local_0 /* pre_val */,
                            rthread /* thread */,
                            rscratch2 /* tmp */,
                            true /* tosca_live */,
                            true /* expand_call */);
    __ leave();
    // areturn
    __ andr(sp, r13, -16);  // done with stack
    __ ret(lr);

    // generate a vanilla interpreter entry as the slow path
    __ bind(slow_path);
    __ jump_to_entry(Interpreter::entry_for_kind(Interpreter::zerolocals));
    return entry;
  }
#endif // INCLUDE_ALL_GCS

  // If G1 is not enabled then attempt to go through the accessor entry point
  // Reference.get is an accessor
  return generate_accessor_entry();
}

/**
 * Method entry for static native methods:
 *   int java.util.zip.CRC32.update(int crc, int b)
 */
address InterpreterGenerator::generate_CRC32_update_entry() {
  if (UseCRC32Intrinsics) {
    address entry = __ pc();

    // rmethod: Method*
    // r13: senderSP must be preserved for slow path
    // esp: args

    Label slow_path;
    // If we need a safepoint check, generate full interpreter entry.
    ExternalAddress state(SafepointSynchronize::address_of_state());
    unsigned long offset;
    __ adrp(rscratch1, ExternalAddress(SafepointSynchronize::address_of_state()), offset);
    __ ldrw(rscratch1, Address(rscratch1, offset));
    assert(SafepointSynchronize::_not_synchronized == 0, "rewrite this code");
    __ cbnz(rscratch1, slow_path);

    // We don't generate local frame and don't align stack because
    // we call stub code and there is no safepoint on this path.
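    // update_byte_crc32 presumably implements the standard table-driven
    // byte step:
    //   crc = table[(crc ^ b) & 0xff] ^ (crc >> 8)
    // while the two ornw instructions below supply the ~crc pre- and
    // post-conditioning that java.util.zip.CRC32 requires.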

    // Load parameters
    const Register crc = c_rarg0;  // crc
    const Register val = c_rarg1;  // source java byte value
    const Register tbl = c_rarg2;  // scratch

    // Arguments are reversed on java expression stack
    __ ldrw(val, Address(esp, 0));         // byte value
    __ ldrw(crc, Address(esp, wordSize));  // Initial CRC

    __ adrp(tbl, ExternalAddress(StubRoutines::crc_table_addr()), offset);
    __ add(tbl, tbl, offset);

    __ ornw(crc, zr, crc); // ~crc
    __ update_byte_crc32(crc, val, tbl);
    __ ornw(crc, zr, crc); // ~crc

    // result in c_rarg0

    __ andr(sp, r13, -16);
    __ ret(lr);

    // generate a vanilla native entry as the slow path
    __ bind(slow_path);
    __ jump_to_entry(Interpreter::entry_for_kind(Interpreter::native));
    return entry;
  }
  return NULL;
}

/**
 * Method entry for static native methods:
 *   int java.util.zip.CRC32.updateBytes(int crc, byte[] b, int off, int len)
 *   int java.util.zip.CRC32.updateByteBuffer(int crc, long buf, int off, int len)
 */
address InterpreterGenerator::generate_CRC32_updateBytes_entry(AbstractInterpreter::MethodKind kind) {
  if (UseCRC32Intrinsics) {
    address entry = __ pc();

    // rmethod: Method*
    // r13: senderSP must be preserved for slow path

    Label slow_path;
    // If we need a safepoint check, generate full interpreter entry.
    ExternalAddress state(SafepointSynchronize::address_of_state());
    unsigned long offset;
    __ adrp(rscratch1, ExternalAddress(SafepointSynchronize::address_of_state()), offset);
    __ ldrw(rscratch1, Address(rscratch1, offset));
    assert(SafepointSynchronize::_not_synchronized == 0, "rewrite this code");
    __ cbnz(rscratch1, slow_path);

    // We don't generate local frame and don't align stack because
    // we call stub code and there is no safepoint on this path.

    // Load parameters
    const Register crc = c_rarg0;  // crc
    const Register buf = c_rarg1;  // source java byte array address
    const Register len = c_rarg2;  // length
    const Register off = len;      // offset (never overlaps with 'len')

    // Arguments are reversed on java expression stack
    // Calculate address of start element
    if (kind == Interpreter::java_util_zip_CRC32_updateByteBuffer) {
      __ ldr(buf, Address(esp, 2*wordSize));   // long buf
      __ ldrw(off, Address(esp, wordSize));    // offset
      __ add(buf, buf, off);                   // + offset
      __ ldrw(crc, Address(esp, 4*wordSize));  // Initial CRC
    } else {
      __ ldr(buf, Address(esp, 2*wordSize));   // byte[] array
      __ add(buf, buf, arrayOopDesc::base_offset_in_bytes(T_BYTE)); // + header size
      __ ldrw(off, Address(esp, wordSize));    // offset
      __ add(buf, buf, off);                   // + offset
      __ ldrw(crc, Address(esp, 3*wordSize));  // Initial CRC
    }
    // Can now load 'len' since we're finished with 'off'
    __ ldrw(len, Address(esp, 0x0)); // Length

    __ andr(sp, r13, -16); // Restore the caller's SP

    // We are frameless so we can just jump to the stub.
    __ b(CAST_FROM_FN_PTR(address, StubRoutines::updateBytesCRC32()));

    // generate a vanilla native entry as the slow path
    __ bind(slow_path);
    __ jump_to_entry(Interpreter::entry_for_kind(Interpreter::native));
    return entry;
  }
  return NULL;
}

void InterpreterGenerator::bang_stack_shadow_pages(bool native_call) {
  // Bang each page in the shadow zone. We can't assume it's been done for
  // an interpreter frame with greater than a page of locals, so each page
  // needs to be checked. Only true for non-native.
  if (UseStackBanging) {
    const int start_page = native_call ? StackShadowPages : 1;
    const int page_size = os::vm_page_size();
    for (int pages = start_page; pages <= StackShadowPages ; pages++) {
      __ sub(rscratch2, sp, pages*page_size);
      __ str(zr, Address(rscratch2));
    }
  }
}


// Interpreter stub for calling a native method. (asm interpreter)
// This sets up a somewhat different looking stack for calling the
// native method than the typical interpreter frame setup.
address InterpreterGenerator::generate_native_entry(bool synchronized) {
  // determine code generation flags
  bool inc_counter = UseCompiler || CountCompiledCalls || LogTouchedMethods;

  // r1: Method*
  // rscratch1: sender sp

  address entry_point = __ pc();

  const Address constMethod       (rmethod, Method::const_offset());
  const Address access_flags      (rmethod, Method::access_flags_offset());
  const Address size_of_parameters(r2, ConstMethod::
                                       size_of_parameters_offset());

  // get parameter size (always needed)
  __ ldr(r2, constMethod);
  __ load_unsigned_short(r2, size_of_parameters);

  // native calls don't need the stack size check since they have no
  // expression stack and the arguments are already on the stack and
  // we only add a handful of words to the stack

  // rmethod: Method*
  // r2: size of parameters
  // rscratch1: sender sp

  // for natives the size of locals is zero

  // compute beginning of parameters (rlocals)
  __ add(rlocals, esp, r2, ext::uxtx, 3);
  __ add(rlocals, rlocals, -wordSize);

  // Pull SP back to minimum size: this avoids holes in the stack
  __ andr(sp, esp, -16);

  // initialize fixed part of activation frame
  generate_fixed_frame(true);
#ifndef PRODUCT
  // tell the simulator that a method has been entered
  if (NotifySimulator) {
    __ notify(Assembler::method_entry);
  }
#endif

  // make sure method is native & not abstract
#ifdef ASSERT
  __ ldrw(r0, access_flags);
  {
    Label L;
    __ tst(r0, JVM_ACC_NATIVE);
    __ br(Assembler::NE, L);
    __ stop("tried to execute non-native method as native");
    __ bind(L);
  }
  {
    Label L;
    __ tst(r0, JVM_ACC_ABSTRACT);
    __ br(Assembler::EQ, L);
    __ stop("tried to execute abstract method in interpreter");
    __ bind(L);
  }
#endif

  // Since at this point in the method invocation the exception
  // handler would try to exit the monitor of synchronized methods
  // which hasn't been entered yet, we set the thread local variable
  // _do_not_unlock_if_synchronized to true. The remove_activation
  // will check this flag.

  const Address do_not_unlock_if_synchronized(rthread,
        in_bytes(JavaThread::do_not_unlock_if_synchronized_offset()));
  __ mov(rscratch2, true);
  __ strb(rscratch2, do_not_unlock_if_synchronized);

  // increment invocation count & check for overflow
  Label invocation_counter_overflow;
  if (inc_counter) {
    generate_counter_incr(&invocation_counter_overflow, NULL, NULL);
  }

  Label continue_after_compile;
  __ bind(continue_after_compile);

  bang_stack_shadow_pages(true);

  // reset the _do_not_unlock_if_synchronized flag
  __ strb(zr, do_not_unlock_if_synchronized);

  // check for synchronized methods
  // Must happen AFTER invocation_counter check and stack overflow check,
  // so the method is not locked if the counter overflows.
  if (synchronized) {
    lock_method();
  } else {
    // no synchronization necessary
#ifdef ASSERT
    {
      Label L;
      __ ldrw(r0, access_flags);
      __ tst(r0, JVM_ACC_SYNCHRONIZED);
      __ br(Assembler::EQ, L);
      __ stop("method needs synchronization");
      __ bind(L);
    }
#endif
  }

  // start execution
#ifdef ASSERT
  {
    Label L;
    const Address monitor_block_top(rfp,
                 frame::interpreter_frame_monitor_block_top_offset * wordSize);
    __ ldr(rscratch1, monitor_block_top);
    __ cmp(esp, rscratch1);
    __ br(Assembler::EQ, L);
    __ stop("broken stack frame setup in interpreter");
    __ bind(L);
  }
#endif

  // jvmti support
  __ notify_method_entry();

  // work registers
  const Register t = r17;
  const Register result_handler = r19;

  // allocate space for parameters
  __ ldr(t, Address(rmethod, Method::const_offset()));
  __ load_unsigned_short(t, Address(t, ConstMethod::size_of_parameters_offset()));

  __ sub(rscratch1, esp, t, ext::uxtx, Interpreter::logStackElementSize);
  __ andr(sp, rscratch1, -16);
  __ mov(esp, rscratch1);

  // get signature handler
  {
    Label L;
    __ ldr(t, Address(rmethod, Method::signature_handler_offset()));
    __ cbnz(t, L);
    __ call_VM(noreg,
               CAST_FROM_FN_PTR(address,
                                InterpreterRuntime::prepare_native_call),
               rmethod);
    __ ldr(t, Address(rmethod, Method::signature_handler_offset()));
    __ bind(L);
  }

  // call signature handler
  assert(InterpreterRuntime::SignatureHandlerGenerator::from() == rlocals,
         "adjust this code");
  assert(InterpreterRuntime::SignatureHandlerGenerator::to() == sp,
         "adjust this code");
  assert(InterpreterRuntime::SignatureHandlerGenerator::temp() == rscratch1,
         "adjust this code");

  // The generated handlers do not touch rmethod (the method).
  // However, large signatures cannot be cached and are generated
  // each time here. The slow-path generator can do a GC on return,
  // so we must reload it after the call.
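  // Per the asserts above, the handler reads the Java arguments from
  // rlocals and materializes them at sp / in the C argument registers.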
  __ blr(t);
  __ get_method(rmethod);  // slow path can do a GC, reload rmethod


  // result handler is in r0
  // set result handler
  __ mov(result_handler, r0);
  // pass mirror handle if static call
  {
    Label L;
    const int mirror_offset = in_bytes(Klass::java_mirror_offset());
    __ ldrw(t, Address(rmethod, Method::access_flags_offset()));
    __ tst(t, JVM_ACC_STATIC);
    __ br(Assembler::EQ, L);
    // get mirror
    __ ldr(t, Address(rmethod, Method::const_offset()));
    __ ldr(t, Address(t, ConstMethod::constants_offset()));
    __ ldr(t, Address(t, ConstantPool::pool_holder_offset_in_bytes()));
    __ ldr(t, Address(t, mirror_offset));
    // copy mirror into activation frame
    __ str(t, Address(rfp, frame::interpreter_frame_oop_temp_offset * wordSize));
    // pass handle to mirror
    __ add(c_rarg1, rfp, frame::interpreter_frame_oop_temp_offset * wordSize);
    __ bind(L);
  }

  // get native function entry point in r10
  {
    Label L;
    __ ldr(r10, Address(rmethod, Method::native_function_offset()));
    address unsatisfied = (SharedRuntime::native_method_throw_unsatisfied_link_error_entry());
    __ mov(rscratch2, unsatisfied);
    __ ldr(rscratch2, rscratch2);
    __ cmp(r10, rscratch2);
    __ br(Assembler::NE, L);
    __ call_VM(noreg,
               CAST_FROM_FN_PTR(address,
                                InterpreterRuntime::prepare_native_call),
               rmethod);
    __ get_method(rmethod);
    __ ldr(r10, Address(rmethod, Method::native_function_offset()));
    __ bind(L);
  }

  // pass JNIEnv
  __ add(c_rarg0, rthread, in_bytes(JavaThread::jni_environment_offset()));

  // It is enough that the pc() points into the right code
  // segment. It does not have to be the correct return pc.
  __ set_last_Java_frame(esp, rfp, (address)NULL, rscratch1);

  // change thread state
#ifdef ASSERT
  {
    Label L;
    __ ldrw(t, Address(rthread, JavaThread::thread_state_offset()));
    __ cmp(t, _thread_in_Java);
    __ br(Assembler::EQ, L);
    __ stop("Wrong thread state in native stub");
    __ bind(L);
  }
#endif

  // Change state to native
  __ mov(rscratch1, _thread_in_native);
  __ lea(rscratch2, Address(rthread, JavaThread::thread_state_offset()));
  __ stlrw(rscratch1, rscratch2);

  // Call the native method.
  __ blrt(r10, rscratch1);
  __ maybe_isb();
  __ get_method(rmethod);
  // result potentially in r0 or v0

  // make room for the pushes we're about to do
  __ sub(rscratch1, esp, 4 * wordSize);
  __ andr(sp, rscratch1, -16);

  // NOTE: The order of these pushes is known to frame::interpreter_frame_result
  // in order to extract the result of a method call. If the order of these
  // pushes change or anything else is added to the stack then the code in
  // interpreter_frame_result must also change.
  __ push(dtos);
  __ push(ltos);

  // change thread state
  __ mov(rscratch1, _thread_in_native_trans);
  __ lea(rscratch2, Address(rthread, JavaThread::thread_state_offset()));
  __ stlrw(rscratch1, rscratch2);

  if (os::is_MP()) {
    if (UseMembar) {
      // Force this write out before the read below
      __ dsb(Assembler::SY);
    } else {
      // Write serialization page so VM thread can do a pseudo remote membar.
      // We use the current thread pointer to calculate a thread specific
      // offset to write to within the page. This minimizes bus traffic
      // due to cache line collision.
      __ serialize_memory(rthread, rscratch2);
    }
  }
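  // Classic native-transition handshake: with the state set to
  // _thread_in_native_trans and the write made visible above, we can now
  // safely poll the safepoint state and any pending suspend requests.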

  // check for safepoint operation in progress and/or pending suspend requests
  {
    Label Continue;
    {
      unsigned long offset;
      __ adrp(rscratch2, SafepointSynchronize::address_of_state(), offset);
      __ ldrw(rscratch2, Address(rscratch2, offset));
    }
    assert(SafepointSynchronize::_not_synchronized == 0,
           "SafepointSynchronize::_not_synchronized");
    Label L;
    __ cbnz(rscratch2, L);
    __ ldrw(rscratch2, Address(rthread, JavaThread::suspend_flags_offset()));
    __ cbz(rscratch2, Continue);
    __ bind(L);

    // Don't use call_VM as it will see a possible pending exception
    // and forward it and never return here preventing us from
    // clearing _last_native_pc down below. So we do a runtime call by
    // hand.
    //
    __ mov(c_rarg0, rthread);
    __ mov(rscratch2, CAST_FROM_FN_PTR(address, JavaThread::check_special_condition_for_native_trans));
    __ blrt(rscratch2, 1, 0, 0);
    __ maybe_isb();
    __ get_method(rmethod);
    __ reinit_heapbase();
    __ bind(Continue);
  }

  // change thread state
  __ mov(rscratch1, _thread_in_Java);
  __ lea(rscratch2, Address(rthread, JavaThread::thread_state_offset()));
  __ stlrw(rscratch1, rscratch2);

  // reset_last_Java_frame
  __ reset_last_Java_frame(true, true);

  // reset handle block
  __ ldr(t, Address(rthread, JavaThread::active_handles_offset()));
  __ str(zr, Address(t, JNIHandleBlock::top_offset_in_bytes()));

  // If result is an oop unbox and store it in frame where gc will see it
  // and result handler will pick it up

  {
    Label no_oop, store_result;
    __ adr(t, ExternalAddress(AbstractInterpreter::result_handler(T_OBJECT)));
    __ cmp(t, result_handler);
    __ br(Assembler::NE, no_oop);
    // retrieve result
    __ pop(ltos);
    __ cbz(r0, store_result);
    __ ldr(r0, Address(r0, 0));
    __ bind(store_result);
    __ str(r0, Address(rfp, frame::interpreter_frame_oop_temp_offset*wordSize));
    // keep stack depth as expected by pushing oop which will eventually be discarded
    __ push(ltos);
    __ bind(no_oop);
  }

  {
    Label no_reguard;
    __ lea(rscratch1, Address(rthread, in_bytes(JavaThread::stack_guard_state_offset())));
    __ ldrb(rscratch1, Address(rscratch1));
    __ cmp(rscratch1, JavaThread::stack_guard_yellow_disabled);
    __ br(Assembler::NE, no_reguard);

    __ pusha(); // XXX only save smashed registers
    __ mov(c_rarg0, rthread);
    __ mov(rscratch2, CAST_FROM_FN_PTR(address, SharedRuntime::reguard_yellow_pages));
    __ blrt(rscratch2, 0, 0, 0);
    __ popa(); // XXX only restore smashed registers
    __ bind(no_reguard);
  }

  // The method register is junk from after the thread_in_native transition
  // until here. Also can't call_VM until the bcp has been
  // restored. Need bcp for throwing exception below so get it now.
  __ get_method(rmethod);

  // restore bcp to have legal interpreter frame, i.e., bci == 0 <=>
  // rbcp == code_base()
  __ ldr(rbcp, Address(rmethod, Method::const_offset()));     // get ConstMethod*
  __ add(rbcp, rbcp, in_bytes(ConstMethod::codes_offset()));  // get codebase
  // handle exceptions (exception handling will handle unlocking!)
  {
    Label L;
    __ ldr(rscratch1, Address(rthread, Thread::pending_exception_offset()));
    __ cbz(rscratch1, L);
    // Note: At some point we may want to unify this with the code
    // used in call_VM_base(); i.e., we should use the
    // StubRoutines::forward_exception code. For now this doesn't work
    // here because the rsp is not correctly set at this point.
    __ MacroAssembler::call_VM(noreg,
                               CAST_FROM_FN_PTR(address,
                               InterpreterRuntime::throw_pending_exception));
    __ should_not_reach_here();
    __ bind(L);
  }

  // do unlocking if necessary
  {
    Label L;
    __ ldrw(t, Address(rmethod, Method::access_flags_offset()));
    __ tst(t, JVM_ACC_SYNCHRONIZED);
    __ br(Assembler::EQ, L);
    // the code below should be shared with interpreter macro
    // assembler implementation
    {
      Label unlock;
      // BasicObjectLock will be first in list, since this is a
      // synchronized method. However, need to check that the object
      // has not been unlocked by an explicit monitorexit bytecode.

      // monitor expect in c_rarg1 for slow unlock path
      __ lea (c_rarg1, Address(rfp,   // address of first monitor
                               (intptr_t)(frame::interpreter_frame_initial_sp_offset *
                                          wordSize - sizeof(BasicObjectLock))));

      __ ldr(t, Address(c_rarg1, BasicObjectLock::obj_offset_in_bytes()));
      __ cbnz(t, unlock);

      // Entry already unlocked, need to throw exception
      __ MacroAssembler::call_VM(noreg,
                                 CAST_FROM_FN_PTR(address,
                   InterpreterRuntime::throw_illegal_monitor_state_exception));
      __ should_not_reach_here();

      __ bind(unlock);
      __ unlock_object(c_rarg1);
    }
    __ bind(L);
  }

  // jvmti support
  // Note: This must happen _after_ handling/throwing any exceptions since
  //       the exception handler code notifies the runtime of method exits
  //       too. If this happens before, method entry/exit notifications are
  //       not properly paired (was bug - gri 11/22/99).
  __ notify_method_exit(vtos, InterpreterMacroAssembler::NotifyJVMTI);

  // restore potential result in r0:d0, call result handler to
  // restore potential result and handle the result

  __ pop(ltos);
  __ pop(dtos);

  __ blr(result_handler);

  // remove activation
  __ ldr(esp, Address(rfp,
                      frame::interpreter_frame_sender_sp_offset *
                      wordSize)); // get sender sp
  // remove frame anchor
  __ leave();

  // restore sender sp
  __ mov(sp, esp);

  __ ret(lr);

  if (inc_counter) {
    // Handle overflow of counter and compile method
    __ bind(invocation_counter_overflow);
    generate_counter_overflow(&continue_after_compile);
  }

  return entry_point;
}

//
// Generic interpreted method entry to (asm) interpreter
//
address InterpreterGenerator::generate_normal_entry(bool synchronized) {
  // determine code generation flags
  bool inc_counter = UseCompiler || CountCompiledCalls || LogTouchedMethods;

  // rscratch1: sender sp
  address entry_point = __ pc();

  const Address constMethod(rmethod, Method::const_offset());
  const Address access_flags(rmethod, Method::access_flags_offset());
  const Address size_of_parameters(r3,
                                   ConstMethod::size_of_parameters_offset());
  const Address size_of_locals(r3, ConstMethod::size_of_locals_offset());

  // get parameter size (always needed)
  // need to load the const method first
  __ ldr(r3, constMethod);
  __ load_unsigned_short(r2, size_of_parameters);

  // r2: size of parameters

  __ load_unsigned_short(r3, size_of_locals);  // get size of locals in words
  __ sub(r3, r3, r2);                          // r3 = no. of additional locals
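  // Example: a method with max_locals == 5 and 2 parameters leaves
  // r3 == 3 extra slots, which are allocated and zero-initialized below.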

  // see if we've got enough room on the stack for locals plus overhead.
  generate_stack_overflow_check();

  // compute beginning of parameters (rlocals)
  __ add(rlocals, esp, r2, ext::uxtx, 3);
  __ sub(rlocals, rlocals, wordSize);

  // Make room for locals
  __ sub(rscratch1, esp, r3, ext::uxtx, 3);
  __ andr(sp, rscratch1, -16);

  // r3 - # of additional locals
  // allocate space for locals
  // explicitly initialize locals
  {
    Label exit, loop;
    __ ands(zr, r3, r3);
    __ br(Assembler::LE, exit); // do nothing if r3 <= 0
    __ bind(loop);
    __ str(zr, Address(__ post(rscratch1, wordSize)));
    __ sub(r3, r3, 1); // until everything initialized
    __ cbnz(r3, loop);
    __ bind(exit);
  }

  // And the base dispatch table
  __ get_dispatch();

  // initialize fixed part of activation frame
  generate_fixed_frame(false);
#ifndef PRODUCT
  // tell the simulator that a method has been entered
  if (NotifySimulator) {
    __ notify(Assembler::method_entry);
  }
#endif
  // make sure method is not native & not abstract
#ifdef ASSERT
  __ ldrw(r0, access_flags);
  {
    Label L;
    __ tst(r0, JVM_ACC_NATIVE);
    __ br(Assembler::EQ, L);
    __ stop("tried to execute native method as non-native");
    __ bind(L);
  }
  {
    Label L;
    __ tst(r0, JVM_ACC_ABSTRACT);
    __ br(Assembler::EQ, L);
    __ stop("tried to execute abstract method in interpreter");
    __ bind(L);
  }
#endif

  // Since at this point in the method invocation the exception
  // handler would try to exit the monitor of synchronized methods
  // which hasn't been entered yet, we set the thread local variable
  // _do_not_unlock_if_synchronized to true. The remove_activation
  // will check this flag.

  const Address do_not_unlock_if_synchronized(rthread,
        in_bytes(JavaThread::do_not_unlock_if_synchronized_offset()));
  __ mov(rscratch2, true);
  __ strb(rscratch2, do_not_unlock_if_synchronized);

  // increment invocation count & check for overflow
  Label invocation_counter_overflow;
  Label profile_method;
  Label profile_method_continue;
  if (inc_counter) {
    generate_counter_incr(&invocation_counter_overflow,
                          &profile_method,
                          &profile_method_continue);
    if (ProfileInterpreter) {
      __ bind(profile_method_continue);
    }
  }

  Label continue_after_compile;
  __ bind(continue_after_compile);

  bang_stack_shadow_pages(false);

  // reset the _do_not_unlock_if_synchronized flag
  __ strb(zr, do_not_unlock_if_synchronized);

  // check for synchronized methods
  // Must happen AFTER invocation_counter check and stack overflow check,
  // so the method is not locked if the counter overflows.
  if (synchronized) {
    // Allocate monitor and lock method
    lock_method();
  } else {
    // no synchronization necessary
#ifdef ASSERT
    {
      Label L;
      __ ldrw(r0, access_flags);
      __ tst(r0, JVM_ACC_SYNCHRONIZED);
      __ br(Assembler::EQ, L);
      __ stop("method needs synchronization");
      __ bind(L);
    }
#endif
  }

  // start execution
#ifdef ASSERT
  {
    Label L;
    const Address monitor_block_top (rfp,
                 frame::interpreter_frame_monitor_block_top_offset * wordSize);
    __ ldr(rscratch1, monitor_block_top);
    __ cmp(esp, rscratch1);
    __ br(Assembler::EQ, L);
    __ stop("broken stack frame setup in interpreter");
    __ bind(L);
  }
#endif

  // jvmti support
  __ notify_method_entry();

  __ dispatch_next(vtos);

  // invocation counter overflow
  if (inc_counter) {
    if (ProfileInterpreter) {
      // We have decided to profile this method in the interpreter
      __ bind(profile_method);
      __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::profile_method));
      __ set_method_data_pointer_for_bcp();
      // don't think we need this
      __ get_method(r1);
      __ b(profile_method_continue);
    }
    // Handle overflow of counter and compile method
    __ bind(invocation_counter_overflow);
    generate_counter_overflow(&continue_after_compile);
  }

  return entry_point;
}

// These should never be compiled since the interpreter will prefer
// the compiled version to the intrinsic version.
bool AbstractInterpreter::can_be_compiled(methodHandle m) {
  switch (method_kind(m)) {
  case Interpreter::java_lang_math_sin   : // fall thru
  case Interpreter::java_lang_math_cos   : // fall thru
  case Interpreter::java_lang_math_tan   : // fall thru
  case Interpreter::java_lang_math_abs   : // fall thru
  case Interpreter::java_lang_math_log   : // fall thru
  case Interpreter::java_lang_math_log10 : // fall thru
  case Interpreter::java_lang_math_sqrt  : // fall thru
  case Interpreter::java_lang_math_pow   : // fall thru
  case Interpreter::java_lang_math_exp   :
    return false;
  default:
    return true;
  }
}

// How much stack a method activation needs in words.
int AbstractInterpreter::size_top_interpreter_activation(Method* method) {
  const int entry_size = frame::interpreter_frame_monitor_size();

  // total overhead size: entry_size + (saved rfp thru expr stack
  // bottom). be sure to change this if you add/subtract anything
  // to/from the overhead area
  const int overhead_size =
    -(frame::interpreter_frame_initial_sp_offset) + entry_size;

  const int stub_code = frame::entry_frame_after_call_words;
  const int method_stack = (method->max_locals() + method->max_stack()) *
                           Interpreter::stackElementWords;
  return (overhead_size + method_stack + stub_code);
}

// asm based interpreter deoptimization helpers
int AbstractInterpreter::size_activation(int max_stack,
                                         int temps,
                                         int extra_args,
                                         int monitors,
                                         int callee_params,
                                         int callee_locals,
                                         bool is_top_frame) {
  // Note: This calculation must exactly parallel the frame setup
  // in InterpreterGenerator::generate_method_entry.

  // fixed size of an interpreter frame:
  int overhead = frame::sender_sp_offset -
                 frame::interpreter_frame_initial_sp_offset;
  // Our locals were accounted for by the caller (or last_frame_adjust
  // on the transition). Since the callee parameters already account
  // for the callee's params we only need to account for the extra
  // locals.
  int size = overhead +
             (callee_locals - callee_params) * Interpreter::stackElementWords +
             monitors * frame::interpreter_frame_monitor_size() +
             temps * Interpreter::stackElementWords + extra_args;

  // On AArch64 we always keep the stack pointer 16-aligned, so we
  // must round up here.
  size = round_to(size, 2);

  return size;
}

void AbstractInterpreter::layout_activation(Method* method,
                                            int tempcount,
                                            int popframe_extra_args,
                                            int moncount,
                                            int caller_actual_parameters,
                                            int callee_param_count,
                                            int callee_locals,
                                            frame* caller,
                                            frame* interpreter_frame,
                                            bool is_top_frame,
                                            bool is_bottom_frame) {
  // The frame interpreter_frame is guaranteed to be the right size,
  // as determined by a previous call to the size_activation() method.
  // It is also guaranteed to be walkable even though it is in a
  // skeletal state

  int max_locals = method->max_locals() * Interpreter::stackElementWords;
  int extra_locals = (method->max_locals() - method->size_of_parameters()) *
    Interpreter::stackElementWords;

#ifdef ASSERT
  assert(caller->sp() == interpreter_frame->sender_sp(), "Frame not properly walkable");
#endif

  interpreter_frame->interpreter_frame_set_method(method);
  // NOTE the difference in using sender_sp and
  // interpreter_frame_sender_sp. interpreter_frame_sender_sp is
  // the original sp of the caller (the unextended_sp) and
  // sender_sp is fp+8/16 (32bit/64bit) XXX
  intptr_t* locals = interpreter_frame->sender_sp() + max_locals - 1;

#ifdef ASSERT
  if (caller->is_interpreted_frame()) {
    assert(locals < caller->fp() + frame::interpreter_frame_initial_sp_offset, "bad placement");
  }
#endif

  interpreter_frame->interpreter_frame_set_locals(locals);
  BasicObjectLock* montop = interpreter_frame->interpreter_frame_monitor_begin();
  BasicObjectLock* monbot = montop - moncount;
  interpreter_frame->interpreter_frame_set_monitor_end(monbot);

  // Set last_sp
  intptr_t* esp = (intptr_t*) monbot -
    tempcount*Interpreter::stackElementWords -
    popframe_extra_args;
  interpreter_frame->interpreter_frame_set_last_sp(esp);

  // All frames but the initial (oldest) interpreter frame we fill in have
  // a value for sender_sp that allows walking the stack but isn't
  // truly correct. Correct the value here.
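  // i.e. give interpreter_frame_sender_sp the value (caller->sp() +
  // extra_locals) that a fully executing frame would have recorded, rather
  // than the skeleton's walkable-but-approximate value.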
  if (extra_locals != 0 &&
      interpreter_frame->sender_sp() ==
      interpreter_frame->interpreter_frame_sender_sp()) {
    interpreter_frame->set_interpreter_frame_sender_sp(caller->sp() +
                                                       extra_locals);
  }
  *interpreter_frame->interpreter_frame_cache_addr() =
    method->constants()->cache();
}


//-----------------------------------------------------------------------------
// Exceptions

void TemplateInterpreterGenerator::generate_throw_exception() {
  // Entry point in previous activation (i.e., if the caller was
  // interpreted)
  Interpreter::_rethrow_exception_entry = __ pc();
  // Restore sp to interpreter_frame_last_sp even though we are going
  // to empty the expression stack for the exception processing.
  __ str(zr, Address(rfp, frame::interpreter_frame_last_sp_offset * wordSize));
  // r0: exception
  // r3: return address/pc that threw exception
  __ restore_bcp();    // rbcp points to call/send
  __ restore_locals();
  __ restore_constant_pool_cache();
  __ reinit_heapbase();  // restore rheapbase as heapbase.
  __ get_dispatch();

#ifndef PRODUCT
  // tell the simulator that the caller method has been reentered
  if (NotifySimulator) {
    __ get_method(rmethod);
    __ notify(Assembler::method_reentry);
  }
#endif
  // Entry point for exceptions thrown within interpreter code
  Interpreter::_throw_exception_entry = __ pc();
  // If we came here via a NullPointerException on the receiver of a
  // method, rmethod may be corrupt.
  __ get_method(rmethod);
  // expression stack is undefined here
  // r0: exception
  // rbcp: exception bcp
  __ verify_oop(r0);
  __ mov(c_rarg1, r0);

  // expression stack must be empty before entering the VM in case of
  // an exception
  __ empty_expression_stack();
  // find exception handler address and preserve exception oop
  __ call_VM(r3,
             CAST_FROM_FN_PTR(address,
                          InterpreterRuntime::exception_handler_for_exception),
             c_rarg1);

  // Calculate stack limit
  __ ldr(rscratch1, Address(rmethod, Method::const_offset()));
  __ ldrh(rscratch1, Address(rscratch1, ConstMethod::max_stack_offset()));
  __ add(rscratch1, rscratch1, frame::interpreter_frame_monitor_size() + 4);
  __ ldr(rscratch2,
         Address(rfp, frame::interpreter_frame_initial_sp_offset * wordSize));
  __ sub(rscratch1, rscratch2, rscratch1, ext::uxtx, 3);
  __ andr(sp, rscratch1, -16);

  // r0: exception handler entry point
  // r3: preserved exception oop
  // rbcp: bcp for exception handler
  __ push_ptr(r3); // push exception which is now the only value on the stack
  __ br(r0); // jump to exception handler (may be _remove_activation_entry!)

  // If the exception is not handled in the current frame the frame is
  // removed and the exception is rethrown (i.e. exception
  // continuation is _rethrow_exception).
  //
  // Note: At this point the bci is still the bci for the instruction
  // which caused the exception and the expression stack is
  // empty. Thus, for any VM calls at this point, GC will find a legal
  // oop map (with empty expression stack).

  //
  // JVMTI PopFrame support
  //

  Interpreter::_remove_activation_preserving_args_entry = __ pc();
  __ empty_expression_stack();
  // Set the popframe_processing bit in pending_popframe_condition
  // indicating that we are currently handling popframe, so that
  // call_VMs that may happen later do not trigger new popframe
  // handling cycles.
  __ ldrw(r3, Address(rthread, JavaThread::popframe_condition_offset()));
  __ orr(r3, r3, JavaThread::popframe_processing_bit);
  __ strw(r3, Address(rthread, JavaThread::popframe_condition_offset()));

  {
    // Check to see whether we are returning to a deoptimized frame.
    // (The PopFrame call ensures that the caller of the popped frame is
    // either interpreted or compiled and deoptimizes it if compiled.)
    // In this case, we can't call dispatch_next() after the frame is
    // popped, but instead must save the incoming arguments and restore
    // them after deoptimization has occurred.
    //
    // Note that we don't compare the return PC against the
    // deoptimization blob's unpack entry because of the presence of
    // adapter frames in C2.
    Label caller_not_deoptimized;
    __ ldr(c_rarg1, Address(rfp, frame::return_addr_offset * wordSize));
    __ super_call_VM_leaf(CAST_FROM_FN_PTR(address,
                               InterpreterRuntime::interpreter_contains), c_rarg1);
    __ cbnz(r0, caller_not_deoptimized);

    // Compute size of arguments for saving when returning to
    // deoptimized caller
    __ get_method(r0);
    __ ldr(r0, Address(r0, Method::const_offset()));
    __ load_unsigned_short(r0, Address(r0, in_bytes(ConstMethod::
                                                    size_of_parameters_offset())));
    __ lsl(r0, r0, Interpreter::logStackElementSize);
    __ restore_locals(); // XXX do we need this?
    __ sub(rlocals, rlocals, r0);
    __ add(rlocals, rlocals, wordSize);
    // Save these arguments
    __ super_call_VM_leaf(CAST_FROM_FN_PTR(address,
                                           Deoptimization::
                                           popframe_preserve_args),
                          rthread, r0, rlocals);

    __ remove_activation(vtos,
                         /* throw_monitor_exception */ false,
                         /* install_monitor_exception */ false,
                         /* notify_jvmdi */ false);

    // Inform deoptimization that it is responsible for restoring
    // these arguments
    __ mov(rscratch1, JavaThread::popframe_force_deopt_reexecution_bit);
    __ strw(rscratch1, Address(rthread, JavaThread::popframe_condition_offset()));

    // Continue in deoptimization handler
    __ ret(lr);

    __ bind(caller_not_deoptimized);
  }

  __ remove_activation(vtos,
                       /* throw_monitor_exception */ false,
                       /* install_monitor_exception */ false,
                       /* notify_jvmdi */ false);

  // Restore the last_sp and null it out
  __ ldr(esp, Address(rfp, frame::interpreter_frame_last_sp_offset * wordSize));
  __ str(zr, Address(rfp, frame::interpreter_frame_last_sp_offset * wordSize));

  __ restore_bcp();
  __ restore_locals();
  __ restore_constant_pool_cache();
  __ get_method(rmethod);

  // The method data pointer was incremented already during
  // call profiling.  We have to restore the mdp for the current bcp.
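  // (set_method_data_pointer_for_bcp() recomputes the mdp from the
  // method's MethodData and the current bcp rather than trying to undo
  // the increments made while profiling the call.)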
  if (ProfileInterpreter) {
    __ set_method_data_pointer_for_bcp();
  }

  // Clear the popframe condition flag
  __ strw(zr, Address(rthread, JavaThread::popframe_condition_offset()));
  assert(JavaThread::popframe_inactive == 0, "fix popframe_inactive");

#if INCLUDE_JVMTI
  {
    Label L_done;

    // Skip the restoration below unless the current bytecode is
    // _invokestatic (the ldrb result must feed the compare, and we
    // fall through only on a match).
    __ ldrb(rscratch1, Address(rbcp, 0));
    __ cmpw(rscratch1, Bytecodes::_invokestatic);
    __ br(Assembler::NE, L_done);

    // The member name argument must be restored if _invokestatic is
    // re-executed after a PopFrame call.  Detect such a case in the
    // InterpreterRuntime function and return the member name argument,
    // or NULL.

    __ ldr(c_rarg0, Address(rlocals, 0));
    __ call_VM(r0, CAST_FROM_FN_PTR(address, InterpreterRuntime::member_name_arg_or_null), c_rarg0, rmethod, rbcp);

    __ cbz(r0, L_done);

    __ str(r0, Address(esp, 0));
    __ bind(L_done);
  }
#endif // INCLUDE_JVMTI

  // Restore machine SP
  __ ldr(rscratch1, Address(rmethod, Method::const_offset()));
  __ ldrh(rscratch1, Address(rscratch1, ConstMethod::max_stack_offset()));
  __ add(rscratch1, rscratch1, frame::interpreter_frame_monitor_size() + 4);
  __ ldr(rscratch2,
         Address(rfp, frame::interpreter_frame_initial_sp_offset * wordSize));
  __ sub(rscratch1, rscratch2, rscratch1, ext::uxtw, 3);
  __ andr(sp, rscratch1, -16);

  __ dispatch_next(vtos);
  // end of PopFrame support

  Interpreter::_remove_activation_entry = __ pc();

  // preserve exception over this code sequence
  __ pop_ptr(r0);
  __ str(r0, Address(rthread, JavaThread::vm_result_offset()));
  // remove the activation (without doing throws on illegalMonitorExceptions)
  __ remove_activation(vtos, false, true, false);
  // restore exception
  __ get_vm_result(r0, rthread);

  // In between activations - previous activation type unknown yet
  // compute continuation point - the continuation point expects the
  // following registers set up:
  //
  // r0: exception
  // lr: return address/pc that threw exception
  // rsp: expression stack of caller
  // rfp: fp of caller
  // FIXME: There's no point saving LR here because VM calls don't trash it
  __ stp(r0, lr, Address(__ pre(sp, -2 * wordSize)));  // save exception & return address
  __ super_call_VM_leaf(CAST_FROM_FN_PTR(address,
                          SharedRuntime::exception_handler_for_return_address),
                        rthread, lr);
  __ mov(r1, r0);                                  // save exception handler
  __ ldp(r0, lr, Address(__ post(sp, 2 * wordSize)));  // restore exception & return address
  // We might be returning to a deopt handler that expects r3 to
  // contain the exception pc
  __ mov(r3, lr);
  // Note that an "issuing PC" is actually the next PC after the call
  __ br(r1);                                       // jump to exception
                                                   // handler of caller
}


//
// JVMTI ForceEarlyReturn support
//
address TemplateInterpreterGenerator::generate_earlyret_entry_for(TosState state) {
  address entry = __ pc();

  __ restore_bcp();
  __ restore_locals();
  __ empty_expression_stack();
  __ load_earlyret_value(state);

  __ ldr(rscratch1, Address(rthread, JavaThread::jvmti_thread_state_offset()));
  Address cond_addr(rscratch1, JvmtiThreadState::earlyret_state_offset());

  // Clear the earlyret state
  assert(JvmtiThreadState::earlyret_inactive == 0, "should be");
  __ str(zr, cond_addr);
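
  // Ordering note: the earlyret state is cleared before the frame is
  // torn down, so any VM activity inside remove_activation below sees
  // the early-return request as already consumed; the value to return
  // was captured above by load_earlyret_value.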
  __ remove_activation(state,
                       false, /* throw_monitor_exception */
                       false, /* install_monitor_exception */
                       true); /* notify_jvmdi */
  __ ret(lr);

  return entry;
} // end of ForceEarlyReturn support



//-----------------------------------------------------------------------------
// Helper for vtos entry point generation

void TemplateInterpreterGenerator::set_vtos_entry_points(Template* t,
                                                         address& bep,
                                                         address& cep,
                                                         address& sep,
                                                         address& aep,
                                                         address& iep,
                                                         address& lep,
                                                         address& fep,
                                                         address& dep,
                                                         address& vep) {
  assert(t->is_valid() && t->tos_in() == vtos, "illegal template");
  Label L;
  aep = __ pc();  __ push_ptr();  __ b(L);
  fep = __ pc();  __ push_f();    __ b(L);
  dep = __ pc();  __ push_d();    __ b(L);
  lep = __ pc();  __ push_l();    __ b(L);
  bep = cep = sep =
  iep = __ pc();  __ push_i();
  vep = __ pc();
  __ bind(L);
  generate_and_dispatch(t);
}

//-----------------------------------------------------------------------------
// Generation of individual instructions

// helpers for generate_and_dispatch


InterpreterGenerator::InterpreterGenerator(StubQueue* code)
  : TemplateInterpreterGenerator(code) {
   generate_all(); // down here so it can be "virtual"
}

//-----------------------------------------------------------------------------

// Non-product code
#ifndef PRODUCT
address TemplateInterpreterGenerator::generate_trace_code(TosState state) {
  address entry = __ pc();

  __ push(lr);
  __ push(state);
  __ push(RegSet::range(r0, r15), sp);
  __ mov(c_rarg2, r0);  // Pass itos
  __ call_VM(noreg,
             CAST_FROM_FN_PTR(address, SharedRuntime::trace_bytecode),
             c_rarg1, c_rarg2, c_rarg3);
  __ pop(RegSet::range(r0, r15), sp);
  __ pop(state);
  __ pop(lr);
  __ ret(lr);                                   // return from result handler

  return entry;
}

void TemplateInterpreterGenerator::count_bytecode() {
  Register rscratch3 = r0;
  __ push(rscratch1);
  __ push(rscratch2);
  __ push(rscratch3);
  Label L;
  __ mov(rscratch2, (address) &BytecodeCounter::_counter_value);
  __ bind(L);
  // Retry loop: load-exclusive, increment, store-exclusive until the
  // store succeeds (stxr writes a zero status into rscratch3 on
  // success, nonzero on failure).
  __ ldxr(rscratch1, rscratch2);
  __ add(rscratch1, rscratch1, 1);
  __ stxr(rscratch3, rscratch1, rscratch2);
  __ cbnzw(rscratch3, L);
  __ pop(rscratch3);
  __ pop(rscratch2);
  __ pop(rscratch1);
}

void TemplateInterpreterGenerator::histogram_bytecode(Template* t) { ; }

void TemplateInterpreterGenerator::histogram_bytecode_pair(Template* t) { ; }


void TemplateInterpreterGenerator::trace_bytecode(Template* t) {
  // Call a little run-time stub to avoid blow-up for each bytecode.
  // The run-time stub saves the right registers, depending on
  // the tosca in-state for the given template.
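  // (There is one trace stub per TosState: generate_trace_code(state)
  // above is the generator, and Interpreter::trace_code(t->tos_in())
  // selects the stub matching this template's incoming tos state, so
  // the per-bytecode cost is a single bl to that stub.)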

  assert(Interpreter::trace_code(t->tos_in()) != NULL,
         "entry must have been generated");
  __ bl(Interpreter::trace_code(t->tos_in()));
  __ reinit_heapbase();
}


void TemplateInterpreterGenerator::stop_interpreter_at() {
  Label L;
  __ push(rscratch1);
  __ mov(rscratch1, (address) &BytecodeCounter::_counter_value);
  __ ldr(rscratch1, Address(rscratch1));
  __ mov(rscratch2, StopInterpreterAt);
  __ cmpw(rscratch1, rscratch2);
  __ br(Assembler::NE, L);
  __ brk(0);
  __ bind(L);
  __ pop(rscratch1);
}

#ifdef BUILTIN_SIM

#include <sys/mman.h>
#include <unistd.h>

extern "C" {
  static int PAGESIZE = getpagesize();
  int is_mapped_address(u_int64_t address)
  {
    address = (address & ~((u_int64_t)PAGESIZE - 1));
    if (msync((void *)address, PAGESIZE, MS_ASYNC) == 0) {
      return true;
    }
    if (errno != ENOMEM) {
      return true;
    }
    return false;
  }

  void bccheck1(u_int64_t pc, u_int64_t fp, char *method, int *bcidx, int *framesize, char *decode)
  {
    if (method != 0) {
      method[0] = '\0';
    }
    if (bcidx != 0) {
      *bcidx = -2;
    }
    if (decode != 0) {
      decode[0] = 0;
    }

    if (framesize != 0) {
      *framesize = -1;
    }

    if (Interpreter::contains((address)pc)) {
      AArch64Simulator *sim = AArch64Simulator::get_current(UseSimulatorCache, DisableBCCheck);
      Method* meth;
      address bcp;
      if (fp) {
#define FRAME_SLOT_METHOD 3
#define FRAME_SLOT_BCP 7
        meth = (Method*)sim->getMemory()->loadU64(fp - (FRAME_SLOT_METHOD << 3));
        bcp = (address)sim->getMemory()->loadU64(fp - (FRAME_SLOT_BCP << 3));
#undef FRAME_SLOT_METHOD
#undef FRAME_SLOT_BCP
      } else {
        meth = (Method*)sim->getCPUState().xreg(RMETHOD, 0);
        bcp = (address)sim->getCPUState().xreg(RBCP, 0);
      }
      if (meth->is_native()) {
        return;
      }
      if (method && meth->is_method()) {
        ResourceMark rm;
        method[0] = 'I';
        method[1] = ' ';
        meth->name_and_sig_as_C_string(method + 2, 398);
      }
      if (bcidx) {
        if (meth->contains(bcp)) {
          *bcidx = meth->bci_from(bcp);
        } else {
          *bcidx = -2;
        }
      }
      if (decode) {
        if (!BytecodeTracer::closure()) {
          BytecodeTracer::set_closure(BytecodeTracer::std_closure());
        }
        stringStream str(decode, 400);
        BytecodeTracer::trace(meth, bcp, &str);
      }
    } else {
      if (method) {
        CodeBlob *cb = CodeCache::find_blob((address)pc);
        if (cb != NULL) {
          if (cb->is_nmethod()) {
            ResourceMark rm;
            nmethod* nm = (nmethod*)cb;
            method[0] = 'C';
            method[1] = ' ';
            nm->method()->name_and_sig_as_C_string(method + 2, 398);
          } else if (cb->is_adapter_blob()) {
            strcpy(method, "B adapter blob");
          } else if (cb->is_runtime_stub()) {
            strcpy(method, "B runtime stub");
          } else if (cb->is_exception_stub()) {
            strcpy(method, "B exception stub");
          } else if (cb->is_deoptimization_stub()) {
            strcpy(method, "B deoptimization stub");
          } else if (cb->is_safepoint_stub()) {
            strcpy(method, "B safepoint stub");
          } else if (cb->is_uncommon_trap_stub()) {
            strcpy(method, "B uncommon trap stub");
          } else if (cb->contains((address)StubRoutines::call_stub())) {
            strcpy(method, "B call stub");
          } else {
            strcpy(method, "B unknown blob : ");
            strcat(method, cb->name());
          }
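          // (The first character written into the method buffer tags
          // where the pc was found: 'I' for interpreter frames handled
          // above, 'C' for compiled nmethods, 'B' for all other blobs.)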
          if (framesize != NULL) {
            *framesize = cb->frame_size();
          }
        }
      }
    }
  }


  JNIEXPORT void bccheck(u_int64_t pc, u_int64_t fp, char *method, int *bcidx, int *framesize, char *decode)
  {
    bccheck1(pc, fp, method, bcidx, framesize, decode);
  }
}

#endif // BUILTIN_SIM
#endif // !PRODUCT
#endif // ! CC_INTERP