rev 55858 : 8228649: [PPC64] SA reads wrong slots from interpreter frames
Summary: Make frame layout consistent between dbg and product build and implement offsets accordingly.
Reviewed-by: goetz, gromero
/*
 * Copyright (c) 2014, 2019, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2015, 2019, SAP SE. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "asm/macroAssembler.inline.hpp"
#include "gc/shared/barrierSetAssembler.hpp"
#include "interpreter/bytecodeHistogram.hpp"
#include "interpreter/interpreter.hpp"
#include "interpreter/interpreterRuntime.hpp"
#include "interpreter/interp_masm.hpp"
#include "interpreter/templateInterpreterGenerator.hpp"
#include "interpreter/templateTable.hpp"
#include "oops/arrayOop.hpp"
#include "oops/methodData.hpp"
#include "oops/method.hpp"
#include "oops/oop.inline.hpp"
#include "prims/jvmtiExport.hpp"
#include "prims/jvmtiThreadState.hpp"
#include "runtime/arguments.hpp"
#include "runtime/deoptimization.hpp"
#include "runtime/frame.inline.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubRoutines.hpp"
#include "runtime/synchronizer.hpp"
#include "runtime/timer.hpp"
#include "runtime/vframeArray.hpp"
#include "utilities/debug.hpp"
#include "utilities/macros.hpp"

#undef __
#define __ _masm->

// Size of interpreter code. Increase if too small. The interpreter will
// fail with a guarantee ("not enough space for interpreter generation")
// if too small.
// Run with +PrintInterpreter to get the VM to print out the size.
// Max size with JVMTI.
int TemplateInterpreter::InterpreterCodeSize = 256*K;

#ifdef PRODUCT
#define BLOCK_COMMENT(str) /* nothing */
#else
#define BLOCK_COMMENT(str) __ block_comment(str)
#endif

#define BIND(label) __ bind(label); BLOCK_COMMENT(#label ":")

//-----------------------------------------------------------------------------

address TemplateInterpreterGenerator::generate_slow_signature_handler() {
  // Slow_signature handler that respects the PPC C calling conventions.
  //
  // We get called by the native entry code with our output register
  // area == 8. First we call InterpreterRuntime::get_result_handler
  // to copy the pointer to the signature string temporarily to the
  // first C-argument and to return the result_handler in
  // R3_RET. Since native_entry will copy the jni-pointer to the
  // first C-argument slot later on, it is OK to occupy this slot
  // temporarily. Then we copy the argument list on the java
  // expression stack into native varargs format on the native stack
  // and load arguments into argument registers. Integer arguments in
  // the varargs vector will be sign-extended to 8 bytes.
  //
  // On entry:
  //   R3_ARG1        - intptr_t* Address of java argument list in memory.
  //   R15_prev_state - BytecodeInterpreter* Address of interpreter state for
  //                    this method
  //   R19_method
  //
  // On exit (just before return instruction):
  //   R3_RET            - contains the address of the result_handler.
  //   R4_ARG2           - is not updated for static methods and contains "this" otherwise.
  //   R5_ARG3-R10_ARG8  - When the (i-2)th Java argument is not of type float or double,
  //                       ARGi contains this argument. Otherwise, ARGi is not updated.
  //   F1_ARG1-F13_ARG13 - contain the first 13 arguments of type float or double.

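  // Each case in the dispatch tables at the end of this stub consists of
  // 2 instructions (8 bytes); this constant scales an index into those tables.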
  const int LogSizeOfTwoInstructions = 3;

  // FIXME: use Argument:: GL: Argument names different numbers!
  const int max_fp_register_arguments  = 13;
  const int max_int_register_arguments = 6;  // first 2 are reserved

  const Register arg_java       = R21_tmp1;
  const Register arg_c          = R22_tmp2;
  const Register signature      = R23_tmp3;  // is string
  const Register sig_byte       = R24_tmp4;
  const Register fpcnt          = R25_tmp5;
  const Register argcnt         = R26_tmp6;
  const Register intSlot        = R27_tmp7;
  const Register target_sp      = R28_tmp8;
  const FloatRegister floatSlot = F0;

  address entry = __ function_entry();

  __ save_LR_CR(R0);
  __ save_nonvolatile_gprs(R1_SP, _spill_nonvolatiles_neg(r14));
  // We use target_sp for storing arguments in the C frame.
  __ mr(target_sp, R1_SP);
  __ push_frame_reg_args_nonvolatiles(0, R11_scratch1);

  __ mr(arg_java, R3_ARG1);

  __ call_VM_leaf(CAST_FROM_FN_PTR(address, InterpreterRuntime::get_signature), R16_thread, R19_method);

  // Signature is in R3_RET. Signature is callee saved.
  __ mr(signature, R3_RET);

  // Get the result handler.
  __ call_VM_leaf(CAST_FROM_FN_PTR(address, InterpreterRuntime::get_result_handler), R16_thread, R19_method);

  {
    Label L;
    // test if static
    // _access_flags._flags must be at offset 0.
    // TODO PPC port: requires change in shared code.
    //assert(in_bytes(AccessFlags::flags_offset()) == 0,
    //       "MethodDesc._access_flags == MethodDesc._access_flags._flags");
    // _access_flags must be a 32 bit value.
    assert(sizeof(AccessFlags) == 4, "wrong size");
    __ lwa(R11_scratch1/*access_flags*/, method_(access_flags));
    // testbit with condition register.
    __ testbitdi(CCR0, R0, R11_scratch1/*access_flags*/, JVM_ACC_STATIC_BIT);
    __ btrue(CCR0, L);
    // For non-static functions, pass "this" in R4_ARG2 and copy it
    // to 2nd C-arg slot.
    // We need to box the Java object here, so we use arg_java
    // (address of current Java stack slot) as argument and don't
    // dereference it as in case of ints, floats, etc.
    __ mr(R4_ARG2, arg_java);
    __ addi(arg_java, arg_java, -BytesPerWord);
    __ std(R4_ARG2, _abi(carg_2), target_sp);
    __ bind(L);
  }

  // Will be incremented directly after loop_start. argcnt=0
  // corresponds to 3rd C argument.
  __ li(argcnt, -1);
  // arg_c points to 3rd C argument
  __ addi(arg_c, target_sp, _abi(carg_3));
  // no floating-point args parsed so far
  __ li(fpcnt, 0);

  Label move_intSlot_to_ARG, move_floatSlot_to_FARG;
  Label loop_start, loop_end;
  Label do_int, do_long, do_float, do_double, do_dontreachhere, do_object, do_array, do_boxed;

  // signature points to '(' at entry
#ifdef ASSERT
  __ lbz(sig_byte, 0, signature);
  __ cmplwi(CCR0, sig_byte, '(');
  __ bne(CCR0, do_dontreachhere);
#endif

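  // Parse the signature, dispatching on one type character per loop iteration.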
  __ bind(loop_start);

  __ addi(argcnt, argcnt, 1);
  __ lbzu(sig_byte, 1, signature);

  __ cmplwi(CCR0, sig_byte, ')'); // end of signature
  __ beq(CCR0, loop_end);

  __ cmplwi(CCR0, sig_byte, 'B'); // byte
  __ beq(CCR0, do_int);

  __ cmplwi(CCR0, sig_byte, 'C'); // char
  __ beq(CCR0, do_int);

  __ cmplwi(CCR0, sig_byte, 'D'); // double
  __ beq(CCR0, do_double);

  __ cmplwi(CCR0, sig_byte, 'F'); // float
  __ beq(CCR0, do_float);

  __ cmplwi(CCR0, sig_byte, 'I'); // int
  __ beq(CCR0, do_int);

  __ cmplwi(CCR0, sig_byte, 'J'); // long
  __ beq(CCR0, do_long);

  __ cmplwi(CCR0, sig_byte, 'S'); // short
  __ beq(CCR0, do_int);

  __ cmplwi(CCR0, sig_byte, 'Z'); // boolean
  __ beq(CCR0, do_int);

  __ cmplwi(CCR0, sig_byte, 'L'); // object
  __ beq(CCR0, do_object);

  __ cmplwi(CCR0, sig_byte, '['); // array
  __ beq(CCR0, do_array);

  //  __ cmplwi(CCR0, sig_byte, 'V'); // void cannot appear since we do not parse the return type
  //  __ beq(CCR0, do_void);

  __ bind(do_dontreachhere);

  __ unimplemented("ShouldNotReachHere in slow_signature_handler", 120);

  __ bind(do_array);

  {
    Label start_skip, end_skip;

    __ bind(start_skip);
    __ lbzu(sig_byte, 1, signature);
    __ cmplwi(CCR0, sig_byte, '[');
    __ beq(CCR0, start_skip); // skip further brackets
    __ cmplwi(CCR0, sig_byte, '9');
    __ bgt(CCR0, end_skip);   // no optional size
    __ cmplwi(CCR0, sig_byte, '0');
    __ bge(CCR0, start_skip); // skip optional size
    __ bind(end_skip);

    __ cmplwi(CCR0, sig_byte, 'L');
    __ beq(CCR0, do_object);  // for arrays of objects, the name of the object must be skipped
    __ b(do_boxed);           // otherwise, go directly to do_boxed
  }

  __ bind(do_object);
  {
    Label L;
    __ bind(L);
    __ lbzu(sig_byte, 1, signature);
    __ cmplwi(CCR0, sig_byte, ';');
    __ bne(CCR0, L);
  }
  // Need to box the Java object here, so we use arg_java (address of
  // current Java stack slot) as argument and don't dereference it as
  // in case of ints, floats, etc.
  Label do_null;
  __ bind(do_boxed);
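  // Pass a null reference as NULL (0); otherwise pass the address of the stack slot.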
  __ ld(R0, 0, arg_java);
  __ cmpdi(CCR0, R0, 0);
  __ li(intSlot, 0);
  __ beq(CCR0, do_null);
  __ mr(intSlot, arg_java);
  __ bind(do_null);
  __ std(intSlot, 0, arg_c);
  __ addi(arg_java, arg_java, -BytesPerWord);
  __ addi(arg_c, arg_c, BytesPerWord);
  __ cmplwi(CCR0, argcnt, max_int_register_arguments);
  __ blt(CCR0, move_intSlot_to_ARG);
  __ b(loop_start);

  __ bind(do_int);
  __ lwa(intSlot, 0, arg_java);
  __ std(intSlot, 0, arg_c);
  __ addi(arg_java, arg_java, -BytesPerWord);
  __ addi(arg_c, arg_c, BytesPerWord);
  __ cmplwi(CCR0, argcnt, max_int_register_arguments);
  __ blt(CCR0, move_intSlot_to_ARG);
  __ b(loop_start);

  __ bind(do_long);
  __ ld(intSlot, -BytesPerWord, arg_java);
  __ std(intSlot, 0, arg_c);
  __ addi(arg_java, arg_java, - 2 * BytesPerWord);
  __ addi(arg_c, arg_c, BytesPerWord);
  __ cmplwi(CCR0, argcnt, max_int_register_arguments);
  __ blt(CCR0, move_intSlot_to_ARG);
  __ b(loop_start);

  __ bind(do_float);
  __ lfs(floatSlot, 0, arg_java);
#if defined(LINUX)
  // Linux uses ELF ABI. Both original ELF and ELFv2 ABIs have float
  // in the least significant word of an argument slot.
#if defined(VM_LITTLE_ENDIAN)
  __ stfs(floatSlot, 0, arg_c);
#else
  __ stfs(floatSlot, 4, arg_c);
#endif
#elif defined(AIX)
  // Although AIX runs on a big endian CPU, float is in the most significant
  // word of an argument slot.
  __ stfs(floatSlot, 0, arg_c);
#else
#error "unknown OS"
#endif
  __ addi(arg_java, arg_java, -BytesPerWord);
  __ addi(arg_c, arg_c, BytesPerWord);
  __ cmplwi(CCR0, fpcnt, max_fp_register_arguments);
  __ blt(CCR0, move_floatSlot_to_FARG);
  __ b(loop_start);

  __ bind(do_double);
  __ lfd(floatSlot, - BytesPerWord, arg_java);
  __ stfd(floatSlot, 0, arg_c);
  __ addi(arg_java, arg_java, - 2 * BytesPerWord);
  __ addi(arg_c, arg_c, BytesPerWord);
  __ cmplwi(CCR0, fpcnt, max_fp_register_arguments);
  __ blt(CCR0, move_floatSlot_to_FARG);
  __ b(loop_start);

  __ bind(loop_end);

  __ pop_frame();
  __ restore_nonvolatile_gprs(R1_SP, _spill_nonvolatiles_neg(r14));
  __ restore_LR_CR(R0);

  __ blr();

  Label move_int_arg, move_float_arg;
  __ bind(move_int_arg); // each case must consist of 2 instructions (otherwise adapt LogSizeOfTwoInstructions)
  __ mr(R5_ARG3, intSlot);  __ b(loop_start);
  __ mr(R6_ARG4, intSlot);  __ b(loop_start);
  __ mr(R7_ARG5, intSlot);  __ b(loop_start);
  __ mr(R8_ARG6, intSlot);  __ b(loop_start);
  __ mr(R9_ARG7, intSlot);  __ b(loop_start);
  __ mr(R10_ARG8, intSlot); __ b(loop_start);

  __ bind(move_float_arg); // each case must consist of 2 instructions (otherwise adapt LogSizeOfTwoInstructions)
  __ fmr(F1_ARG1, floatSlot);   __ b(loop_start);
  __ fmr(F2_ARG2, floatSlot);   __ b(loop_start);
  __ fmr(F3_ARG3, floatSlot);   __ b(loop_start);
  __ fmr(F4_ARG4, floatSlot);   __ b(loop_start);
  __ fmr(F5_ARG5, floatSlot);   __ b(loop_start);
  __ fmr(F6_ARG6, floatSlot);   __ b(loop_start);
  __ fmr(F7_ARG7, floatSlot);   __ b(loop_start);
  __ fmr(F8_ARG8, floatSlot);   __ b(loop_start);
  __ fmr(F9_ARG9, floatSlot);   __ b(loop_start);
  __ fmr(F10_ARG10, floatSlot); __ b(loop_start);
  __ fmr(F11_ARG11, floatSlot); __ b(loop_start);
  __ fmr(F12_ARG12, floatSlot); __ b(loop_start);
  __ fmr(F13_ARG13, floatSlot); __ b(loop_start);

  __ bind(move_intSlot_to_ARG);
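  // Computed branch into the move_int_arg table: argcnt * 8 bytes (2 instructions per case).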
  __ sldi(R0, argcnt, LogSizeOfTwoInstructions);
  __ load_const(R11_scratch1, move_int_arg); // Label must be bound here.
  __ add(R11_scratch1, R0, R11_scratch1);
  __ mtctr(R11_scratch1/*branch_target*/);
  __ bctr();

  __ bind(move_floatSlot_to_FARG);
  __ sldi(R0, fpcnt, LogSizeOfTwoInstructions);
  __ addi(fpcnt, fpcnt, 1);
  __ load_const(R11_scratch1, move_float_arg); // Label must be bound here.
  __ add(R11_scratch1, R0, R11_scratch1);
  __ mtctr(R11_scratch1/*branch_target*/);
  __ bctr();

  return entry;
}

address TemplateInterpreterGenerator::generate_result_handler_for(BasicType type) {
  //
  // Registers alive
  //   R3_RET
  //   LR
  //
  // Registers updated
  //   R3_RET
  //

  Label done;
  address entry = __ pc();

  switch (type) {
  case T_BOOLEAN:
    // convert !=0 to 1
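    // (x | -x) has the most significant bit set iff x != 0; shift it down to bit 0.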
    __ neg(R0, R3_RET);
    __ orr(R0, R3_RET, R0);
    __ srwi(R3_RET, R0, 31);
    break;
  case T_BYTE:
    // sign extend 8 bits
    __ extsb(R3_RET, R3_RET);
    break;
  case T_CHAR:
    // zero extend 16 bits
    __ clrldi(R3_RET, R3_RET, 48);
    break;
  case T_SHORT:
    // sign extend 16 bits
    __ extsh(R3_RET, R3_RET);
    break;
  case T_INT:
    // sign extend 32 bits
    __ extsw(R3_RET, R3_RET);
    break;
  case T_LONG:
    break;
  case T_OBJECT:
    // JNIHandles::resolve result.
    __ resolve_jobject(R3_RET, R11_scratch1, R31, /* needs_frame */ true); // kills R31
    break;
  case T_FLOAT:
    break;
  case T_DOUBLE:
    break;
  case T_VOID:
    break;
  default: ShouldNotReachHere();
  }

  BIND(done);
  __ blr();

  return entry;
}

// Abstract method entry.
//
address TemplateInterpreterGenerator::generate_abstract_entry(void) {
  address entry = __ pc();

  //
  // Registers alive
  //   R16_thread - JavaThread*
  //   R19_method - callee's method (method to be invoked)
  //   R1_SP      - SP prepared such that caller's outgoing args are near top
  //   LR         - return address to caller
  //
  // Stack layout at this point:
  //
  //   0       [TOP_IJAVA_FRAME_ABI]         <-- R1_SP
  //           alignment (optional)
  //           [outgoing Java arguments]
  //           ...
  //   PARENT  [PARENT_IJAVA_FRAME_ABI]
  //           ...
  //

  // Can't use call_VM here because we have not set up a new
  // interpreter state. Make the call to the vm and make it look like
  // our caller set up the JavaFrameAnchor.
  __ set_top_ijava_frame_at_SP_as_last_Java_frame(R1_SP, R12_scratch2/*tmp*/);

  // Push a new C frame and save LR.
  __ save_LR_CR(R0);
  __ push_frame_reg_args(0, R11_scratch1);

  // This is not a leaf but we have a JavaFrameAnchor now and we will
  // check (create) exceptions afterward so this is ok.
  __ call_VM_leaf(CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_AbstractMethodErrorWithMethod),
                  R16_thread, R19_method);

  // Pop the C frame and restore LR.
  __ pop_frame();
  __ restore_LR_CR(R0);

  // Reset JavaFrameAnchor from call_VM_leaf above.
  __ reset_last_Java_frame();

  // We don't know our caller, so jump to the general forward exception stub,
  // which will also pop our full frame off. Satisfy the interface of
  // SharedRuntime::generate_forward_exception()
  __ load_const_optimized(R11_scratch1, StubRoutines::forward_exception_entry(), R0);
  __ mtctr(R11_scratch1);
  __ bctr();

  return entry;
}

// Interpreter intrinsic for WeakReference.get().
// 1. Don't push a full blown frame and go on dispatching, but fetch the value
//    into R8 and return quickly.
// 2. If G1 is active we *must* execute this intrinsic for correctness:
//    It contains a GC barrier which puts the reference into the satb buffer
//    to indicate that someone holds a strong reference to the object the
//    weak ref points to!
address TemplateInterpreterGenerator::generate_Reference_get_entry(void) {
  // Code: _aload_0, _getfield, _areturn
  // parameter size = 1
  //
  // The code that gets generated by this routine is split into 2 parts:
  //    1. the "intrinsified" code for G1 (or any SATB based GC),
  //    2. the slow path - which is an expansion of the regular method entry.
  //
  // Notes:
  //    * In the G1 code we do not check whether we need to block for
  //      a safepoint. If G1 is enabled then we must execute the specialized
  //      code for Reference.get (except when the Reference object is null)
  //      so that we can log the value in the referent field with an SATB
  //      update buffer.
  //      If the code for the getfield template is modified so that the
  //      G1 pre-barrier code is executed when the current method is
  //      Reference.get() then going through the normal method entry
  //      will be fine.
  //    * The G1 code can, however, check the receiver object (the instance
  //      of java.lang.Reference) and jump to the slow path if null. If the
  //      Reference object is null then we obviously cannot fetch the referent
  //      and so we don't need to call the G1 pre-barrier. Thus we can use the
  //      regular method entry code to generate the NPE.
  //

  address entry = __ pc();

  const int referent_offset = java_lang_ref_Reference::referent_offset;
  guarantee(referent_offset > 0, "referent offset not initialized");

  Label slow_path;

  // Debugging not possible, so can't use __ skip_if_jvmti_mode(slow_path, GR31_SCRATCH);

  // In the G1 code we don't check if we need to reach a safepoint. We
  // continue and the thread will safepoint at the next bytecode dispatch.

  // If the receiver is null then it is OK to jump to the slow path.
  __ ld(R3_RET, Interpreter::stackElementSize, R15_esp); // get receiver

  // Check if receiver == NULL and go to the slow path.
  __ cmpdi(CCR0, R3_RET, 0);
  __ beq(CCR0, slow_path);

  __ load_heap_oop(R3_RET, referent_offset, R3_RET,
                   /* non-volatile temp */ R31, R11_scratch1, true, ON_WEAK_OOP_REF);

  // Generate the G1 pre-barrier code to log the value of
  // the referent field in an SATB buffer. Note with
  // these parameters the pre-barrier does not generate
  // the load of the previous value.

  // Restore caller sp for c2i case (from compiled) and for resized sender frame (from interpreted).
  __ resize_frame_absolute(R21_sender_SP, R11_scratch1, R0);

  __ blr();

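  // Slow path: fall back to the regular (zerolocals) method entry.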
  __ bind(slow_path);
  __ jump_to_entry(Interpreter::entry_for_kind(Interpreter::zerolocals), R11_scratch1);
  return entry;
}

address TemplateInterpreterGenerator::generate_StackOverflowError_handler() {
  address entry = __ pc();

  // Expression stack must be empty before entering the VM if an
  // exception happened.
  __ empty_expression_stack();
  // Throw exception.
  __ call_VM(noreg,
             CAST_FROM_FN_PTR(address,
                              InterpreterRuntime::throw_StackOverflowError));
  return entry;
}

address TemplateInterpreterGenerator::generate_ArrayIndexOutOfBounds_handler() {
  address entry = __ pc();
  __ empty_expression_stack();
  // R4_ARG2 already contains the array.
  // Index is in R17_tos.
  __ mr(R5_ARG3, R17_tos);
  __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_ArrayIndexOutOfBoundsException), R4_ARG2, R5_ARG3);
  return entry;
}

address TemplateInterpreterGenerator::generate_ClassCastException_handler() {
  address entry = __ pc();
  // Expression stack must be empty before entering the VM if an
  // exception happened.
  __ empty_expression_stack();

  // Load exception object.
  // Thread will be loaded to R3_ARG1.
  __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_ClassCastException), R17_tos);
#ifdef ASSERT
  // Above call must not return here since exception pending.
  __ should_not_reach_here();
#endif
  return entry;
}

address TemplateInterpreterGenerator::generate_exception_handler_common(const char* name, const char* message, bool pass_oop) {
  address entry = __ pc();
  //__ untested("generate_exception_handler_common");
  Register Rexception = R17_tos;

  // Expression stack must be empty before entering the VM if an exception happened.
  __ empty_expression_stack();

  __ load_const_optimized(R4_ARG2, (address) name, R11_scratch1);
  if (pass_oop) {
    __ mr(R5_ARG3, Rexception);
    __ call_VM(Rexception, CAST_FROM_FN_PTR(address, InterpreterRuntime::create_klass_exception), false);
  } else {
    __ load_const_optimized(R5_ARG3, (address) message, R11_scratch1);
    __ call_VM(Rexception, CAST_FROM_FN_PTR(address, InterpreterRuntime::create_exception), false);
  }

  // Throw exception.
  __ mr(R3_ARG1, Rexception);
  __ load_const_optimized(R11_scratch1, Interpreter::throw_exception_entry(), R12_scratch2);
  __ mtctr(R11_scratch1);
  __ bctr();

  return entry;
}

// This entry is returned to when a call returns to the interpreter.
// When we arrive here, we expect that the callee stack frame is already popped.
address TemplateInterpreterGenerator::generate_return_entry_for(TosState state, int step, size_t index_size) {
  address entry = __ pc();

  // Move the value out of the return register back to the TOS cache of current frame.
  switch (state) {
  case ltos:
  case btos:
  case ztos:
  case ctos:
  case stos:
  case atos:
  case itos: __ mr(R17_tos, R3_RET); break;   // RET -> TOS cache
  case ftos:
  case dtos: __ fmr(F15_ftos, F1_RET); break; // RET -> TOS cache
  case vtos: break;                           // Nothing to do, this was a void return.
  default  : ShouldNotReachHere();
  }

  __ restore_interpreter_state(R11_scratch1); // Sets R11_scratch1 = fp.
  __ ld(R12_scratch2, _ijava_state_neg(top_frame_sp), R11_scratch1);
  __ resize_frame_absolute(R12_scratch2, R11_scratch1, R0);

  // Compiled code destroys templateTableBase, reload.
  __ load_const_optimized(R25_templateTableBase, (address)Interpreter::dispatch_table((TosState)0), R12_scratch2);

  if (state == atos) {
    __ profile_return_type(R3_RET, R11_scratch1, R12_scratch2);
  }

  const Register cache = R11_scratch1;
  const Register size  = R12_scratch2;
  __ get_cache_and_index_at_bcp(cache, 1, index_size);

  // Get least significant byte of 64 bit value:
#if defined(VM_LITTLE_ENDIAN)
  __ lbz(size, in_bytes(ConstantPoolCache::base_offset() + ConstantPoolCacheEntry::flags_offset()), cache);
#else
  __ lbz(size, in_bytes(ConstantPoolCache::base_offset() + ConstantPoolCacheEntry::flags_offset()) + 7, cache);
#endif
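  // That byte of the flags holds the parameter size in slots; use it to pop
  // the call's arguments off the expression stack.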
  __ sldi(size, size, Interpreter::logStackElementSize);
  __ add(R15_esp, R15_esp, size);

  __ check_and_handle_popframe(R11_scratch1);
  __ check_and_handle_earlyret(R11_scratch1);

  __ dispatch_next(state, step);
  return entry;
}

address TemplateInterpreterGenerator::generate_deopt_entry_for(TosState state, int step, address continuation) {
  address entry = __ pc();
  // If state != vtos, we're returning from a native method, which put its result
  // into the result register. So move the value out of the return register back
  // to the TOS cache of current frame.

  switch (state) {
  case ltos:
  case btos:
  case ztos:
  case ctos:
  case stos:
  case atos:
  case itos: __ mr(R17_tos, R3_RET); break;   // RET -> TOS cache
  case ftos:
  case dtos: __ fmr(F15_ftos, F1_RET); break; // RET -> TOS cache
  case vtos: break;                           // Nothing to do, this was a void return.
  default  : ShouldNotReachHere();
  }

  // Load LcpoolCache @@@ should be already set!
  __ get_constant_pool_cache(R27_constPoolCache);

  // Handle a pending exception, fall through if none.
  __ check_and_forward_exception(R11_scratch1, R12_scratch2);

  // Start executing bytecodes.
  if (continuation == NULL) {
    __ dispatch_next(state, step);
  } else {
    __ jump_to_entry(continuation, R11_scratch1);
  }

  return entry;
}

address TemplateInterpreterGenerator::generate_safept_entry_for(TosState state, address runtime_entry) {
  address entry = __ pc();

  __ push(state);
  __ call_VM(noreg, runtime_entry);
  __ dispatch_via(vtos, Interpreter::_normal_table.table_for(vtos));

  return entry;
}

// Helpers for commoning out cases in the various type of method entries.

// Increment invocation count & check for overflow.
//
// Note: checking for negative value instead of overflow
//       so we have a 'sticky' overflow test.
//
void TemplateInterpreterGenerator::generate_counter_incr(Label* overflow, Label* profile_method, Label* profile_method_continue) {
  // Note: In tiered we increment either counters in method or in MDO depending if we're profiling or not.
  Register Rscratch1   = R11_scratch1;
  Register Rscratch2   = R12_scratch2;
  Register R3_counters = R3_ARG1;
  Label done;

  if (TieredCompilation) {
    const int increment = InvocationCounter::count_increment;
    Label no_mdo;
    if (ProfileInterpreter) {
      const Register Rmdo = R3_counters;
      // If no method data exists, go to profile_continue.
      __ ld(Rmdo, in_bytes(Method::method_data_offset()), R19_method);
      __ cmpdi(CCR0, Rmdo, 0);
      __ beq(CCR0, no_mdo);

      // Increment invocation counter in the MDO.
      const int mdo_ic_offs = in_bytes(MethodData::invocation_counter_offset()) + in_bytes(InvocationCounter::counter_offset());
      __ lwz(Rscratch2, mdo_ic_offs, Rmdo);
      __ lwz(Rscratch1, in_bytes(MethodData::invoke_mask_offset()), Rmdo);
      __ addi(Rscratch2, Rscratch2, increment);
      __ stw(Rscratch2, mdo_ic_offs, Rmdo);
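      // If (counter & mask) == 0, the counter has reached the notification threshold.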
      __ and_(Rscratch1, Rscratch2, Rscratch1);
      __ bne(CCR0, done);
      __ b(*overflow);
    }

    // Increment counter in MethodCounters*.
    const int mo_ic_offs = in_bytes(MethodCounters::invocation_counter_offset()) + in_bytes(InvocationCounter::counter_offset());
    __ bind(no_mdo);
    __ get_method_counters(R19_method, R3_counters, done);
    __ lwz(Rscratch2, mo_ic_offs, R3_counters);
    __ lwz(Rscratch1, in_bytes(MethodCounters::invoke_mask_offset()), R3_counters);
    __ addi(Rscratch2, Rscratch2, increment);
    __ stw(Rscratch2, mo_ic_offs, R3_counters);
    __ and_(Rscratch1, Rscratch2, Rscratch1);
    __ beq(CCR0, *overflow);

    __ bind(done);

  } else {

    // Update standard invocation counters.
    Register Rsum_ivc_bec = R4_ARG2;
    __ get_method_counters(R19_method, R3_counters, done);
    __ increment_invocation_counter(R3_counters, Rsum_ivc_bec, R12_scratch2);
    // Increment interpreter invocation counter.
    if (ProfileInterpreter) {  // %%% Merge this into methodDataOop.
      __ lwz(R12_scratch2, in_bytes(MethodCounters::interpreter_invocation_counter_offset()), R3_counters);
      __ addi(R12_scratch2, R12_scratch2, 1);
      __ stw(R12_scratch2, in_bytes(MethodCounters::interpreter_invocation_counter_offset()), R3_counters);
    }
    // Check if we must create a method data obj.
    if (ProfileInterpreter && profile_method != NULL) {
      const Register profile_limit = Rscratch1;
      __ lwz(profile_limit, in_bytes(MethodCounters::interpreter_profile_limit_offset()), R3_counters);
      // Test to see if we should create a method data oop.
      __ cmpw(CCR0, Rsum_ivc_bec, profile_limit);
      __ blt(CCR0, *profile_method_continue);
      // If no method data exists, go to profile_method.
      __ test_method_data_pointer(*profile_method);
    }
    // Finally check for counter overflow.
    if (overflow) {
      const Register invocation_limit = Rscratch1;
      __ lwz(invocation_limit, in_bytes(MethodCounters::interpreter_invocation_limit_offset()), R3_counters);
      __ cmpw(CCR0, Rsum_ivc_bec, invocation_limit);
      __ bge(CCR0, *overflow);
    }

    __ bind(done);
  }
}

// Generate code to initiate compilation on invocation counter overflow.
void TemplateInterpreterGenerator::generate_counter_overflow(Label& continue_entry) {
  // Generate code to initiate compilation on the counter overflow.

  // InterpreterRuntime::frequency_counter_overflow takes one argument,
  // which indicates if the counter overflow occurs at a backwards branch (NULL bcp).
  // We pass zero in.
  // The call returns the address of the verified entry point for the method or NULL
  // if the compilation did not complete (either went background or bailed out).
  //
  // Unlike the C++ interpreter above: Check exceptions!
  // Assumption: Caller must set the flag "do_not_unlock_if_synchronized" if the monitor of a sync'ed
  // method has not yet been created. Thus, no unlocking of a non-existing monitor can occur.

  __ li(R4_ARG2, 0);
  __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::frequency_counter_overflow), R4_ARG2, true);

  // Returns verified_entry_point or NULL.
  // We ignore it in any case.
  __ b(continue_entry);
}

// See if we've got enough room on the stack for locals plus overhead below
// JavaThread::stack_overflow_limit(). If not, throw a StackOverflowError
// without going through the signal handler, i.e., reserved and yellow zones
// will not be made usable. The shadow zone must suffice to handle the
// overflow.
//
// Kills Rmem_frame_size, Rscratch1.
void TemplateInterpreterGenerator::generate_stack_overflow_check(Register Rmem_frame_size, Register Rscratch1) {
  Label done;
  assert_different_registers(Rmem_frame_size, Rscratch1);

  BLOCK_COMMENT("stack_overflow_check_with_compare {");
  __ sub(Rmem_frame_size, R1_SP, Rmem_frame_size);
  __ ld(Rscratch1, thread_(stack_overflow_limit));
  __ cmpld(CCR0/*is_stack_overflow*/, Rmem_frame_size, Rscratch1);
  __ bgt(CCR0/*is_stack_overflow*/, done);

  // The stack overflows. Load target address of the runtime stub and call it.
  assert(StubRoutines::throw_StackOverflowError_entry() != NULL, "generated in wrong order");
  __ load_const_optimized(Rscratch1, (StubRoutines::throw_StackOverflowError_entry()), R0);
  __ mtctr(Rscratch1);
  // Restore caller_sp (c2i adapter may exist, but no shrinking of interpreted caller frame).
#ifdef ASSERT
  Label frame_not_shrunk;
  __ cmpld(CCR0, R1_SP, R21_sender_SP);
  __ ble(CCR0, frame_not_shrunk);
  __ stop("frame shrunk", 0x546);
  __ bind(frame_not_shrunk);
  __ ld(Rscratch1, 0, R1_SP);
  __ ld(R0, 0, R21_sender_SP);
  __ cmpd(CCR0, R0, Rscratch1);
  __ asm_assert_eq("backlink", 0x547);
#endif // ASSERT
  __ mr(R1_SP, R21_sender_SP);
  __ bctr();

  __ align(32, 12);
  __ bind(done);
  BLOCK_COMMENT("} stack_overflow_check_with_compare");
}

// Lock the current method, interpreter register window must be set up!
void TemplateInterpreterGenerator::lock_method(Register Rflags, Register Rscratch1, Register Rscratch2, bool flags_preloaded) {
  const Register Robj_to_lock = Rscratch2;

  {
    if (!flags_preloaded) {
      __ lwz(Rflags, method_(access_flags));
    }

#ifdef ASSERT
    // Check if method needs synchronization.
    {
      Label Lok;
      __ testbitdi(CCR0, R0, Rflags, JVM_ACC_SYNCHRONIZED_BIT);
      __ btrue(CCR0, Lok);
      __ stop("method doesn't need synchronization");
      __ bind(Lok);
    }
#endif // ASSERT
  }

  // Get synchronization object to Rscratch2.
  {
    Label Lstatic;
    Label Ldone;

    __ testbitdi(CCR0, R0, Rflags, JVM_ACC_STATIC_BIT);
    __ btrue(CCR0, Lstatic);

    // Non-static case: load receiver obj from stack and we're done.
    __ ld(Robj_to_lock, R18_locals);
    __ b(Ldone);

    __ bind(Lstatic); // Static case: Lock the java mirror
    // Load mirror from interpreter frame.
    __ ld(Robj_to_lock, _abi(callers_sp), R1_SP);
    __ ld(Robj_to_lock, _ijava_state_neg(mirror), Robj_to_lock);

    __ bind(Ldone);
    __ verify_oop(Robj_to_lock);
  }

  // Got the oop to lock => execute!
  __ add_monitor_to_stack(true, Rscratch1, R0);

  __ std(Robj_to_lock, BasicObjectLock::obj_offset_in_bytes(), R26_monitor);
  __ lock_object(R26_monitor, Robj_to_lock);
}

// Generate a fixed interpreter frame for pure interpreter
// and I2N native transition frames.
//
// Before (stack grows downwards):
//
//         |  ...         |
//         |------------- |
//         |  java arg0   |
//         |  ...         |
//         |  java argn   |
//         |              |   <-   R15_esp
//         |              |
//         |--------------|
//         | abi_112      |
//         |              |   <-   R1_SP
//         |==============|
//
//
// After:
//
//         |  ...         |
//         |  java arg0   |   <-   R18_locals
//         |  ...         |
//         |  java argn   |
//         |--------------|
//         |              |
//         |  java locals |
//         |              |
//         |--------------|
//         |  abi_48      |
//         |==============|
//         |              |
//         |   istate     |
//         |              |
//         |--------------|
//         |   monitor    |   <-   R26_monitor
//         |--------------|
//         |              |   <-   R15_esp
//         |  expression  |
//         |  stack       |
//         |              |
//         |--------------|
//         |              |
//         | abi_112      |   <-   R1_SP
//         |==============|
//
// The topmost frame needs an ABI space of 112 bytes. This space is needed,
// since we call to C. The C function may spill its arguments to the caller
// frame. When we call to Java, we don't need these spill slots. In order to save
// space on the stack, we resize the caller frame. However, the Java locals reside
// in the caller frame and the frame has to be increased. The frame_size for the
// current frame was calculated based on max_stack as size for the expression
// stack. At the call, just a part of the expression stack might be used.
// We don't want to waste this space and cut the frame back accordingly.
// The resulting amount for resizing is calculated as follows:
// resize =   (number_of_locals - number_of_arguments) * slot_size
//          + (R1_SP - R15_esp) + 48
//
// The size for the callee frame is calculated:
// framesize = 112 + max_stack + monitor + state_size
//
// maxstack:   Max number of slots on the expression stack, loaded from the method.
// monitor:    We statically reserve room for one monitor object.
// state_size: We save the current state of the interpreter to this area.
//
void TemplateInterpreterGenerator::generate_fixed_frame(bool native_call, Register Rsize_of_parameters, Register Rsize_of_locals) {
  Register parent_frame_resize = R6_ARG4, // Frame will grow by this number of bytes.
           top_frame_size      = R7_ARG5,
           Rconst_method       = R8_ARG6;

  assert_different_registers(Rsize_of_parameters, Rsize_of_locals, parent_frame_resize, top_frame_size);

  __ ld(Rconst_method, method_(const));
  __ lhz(Rsize_of_parameters /* number of params */,
         in_bytes(ConstMethod::size_of_parameters_offset()), Rconst_method);
  if (native_call) {
    // If we're calling a native method, we reserve space for the worst-case signature
    // handler varargs vector, which is max(Argument::n_register_parameters, parameter_count+2).
    // We add two slots to the parameter_count, one for the jni
    // environment and one for a possible native mirror.
    Label skip_native_calculate_max_stack;
    __ addi(top_frame_size, Rsize_of_parameters, 2);
    __ cmpwi(CCR0, top_frame_size, Argument::n_register_parameters);
    __ bge(CCR0, skip_native_calculate_max_stack);
    __ li(top_frame_size, Argument::n_register_parameters);
    __ bind(skip_native_calculate_max_stack);
    __ sldi(Rsize_of_parameters, Rsize_of_parameters, Interpreter::logStackElementSize);
    __ sldi(top_frame_size, top_frame_size, Interpreter::logStackElementSize);
    __ sub(parent_frame_resize, R1_SP, R15_esp); // <0, off by Interpreter::stackElementSize!
    assert(Rsize_of_locals == noreg, "Rsize_of_locals not initialized"); // Only relevant value is Rsize_of_parameters.
  } else {
    __ lhz(Rsize_of_locals /* number of locals */, in_bytes(ConstMethod::size_of_locals_offset()), Rconst_method);
    __ sldi(Rsize_of_parameters, Rsize_of_parameters, Interpreter::logStackElementSize);
    __ sldi(Rsize_of_locals, Rsize_of_locals, Interpreter::logStackElementSize);
    __ lhz(top_frame_size, in_bytes(ConstMethod::max_stack_offset()), Rconst_method);
    __ sub(R11_scratch1, Rsize_of_locals, Rsize_of_parameters); // >=0
    __ sub(parent_frame_resize, R1_SP, R15_esp); // <0, off by Interpreter::stackElementSize!
    __ sldi(top_frame_size, top_frame_size, Interpreter::logStackElementSize);
    __ add(parent_frame_resize, parent_frame_resize, R11_scratch1);
  }

  // Compute top frame size.
  __ addi(top_frame_size, top_frame_size, frame::abi_reg_args_size + frame::ijava_state_size);

  // Cut back area between esp and max_stack.
  __ addi(parent_frame_resize, parent_frame_resize, frame::abi_minframe_size - Interpreter::stackElementSize);

  __ round_to(top_frame_size, frame::alignment_in_bytes);
  __ round_to(parent_frame_resize, frame::alignment_in_bytes);
  // parent_frame_resize = (locals-parameters) - (ESP-SP-ABI48) Rounded to frame alignment size.
  // Enlarge by locals-parameters (not in case of native_call), shrink by ESP-SP-ABI48.

  if (!native_call) {
    // Stack overflow check.
    // Native calls don't need the stack size check since they have no
    // expression stack and the arguments are already on the stack and
    // we only add a handful of words to the stack.
    __ add(R11_scratch1, parent_frame_resize, top_frame_size);
    generate_stack_overflow_check(R11_scratch1, R12_scratch2);
  }

  // Set up interpreter state registers.

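  // R18_locals points at the first Java argument (java arg0 in the diagram above).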
  __ add(R18_locals, R15_esp, Rsize_of_parameters);
  __ ld(R27_constPoolCache, in_bytes(ConstMethod::constants_offset()), Rconst_method);
  __ ld(R27_constPoolCache, ConstantPool::cache_offset_in_bytes(), R27_constPoolCache);

  // Set method data pointer.
  if (ProfileInterpreter) {
    Label zero_continue;
    __ ld(R28_mdx, method_(method_data));
    __ cmpdi(CCR0, R28_mdx, 0);
    __ beq(CCR0, zero_continue);
    __ addi(R28_mdx, R28_mdx, in_bytes(MethodData::data_offset()));
    __ bind(zero_continue);
  }

  if (native_call) {
    __ li(R14_bcp, 0); // Must initialize.
  } else {
    __ add(R14_bcp, in_bytes(ConstMethod::codes_offset()), Rconst_method);
  }

  // Resize parent frame.
  __ mflr(R12_scratch2);
  __ neg(parent_frame_resize, parent_frame_resize);
  __ resize_frame(parent_frame_resize, R11_scratch1);
  __ std(R12_scratch2, _abi(lr), R1_SP);

  // Get mirror and store it in the frame as GC root for this Method*.
  __ load_mirror_from_const_method(R12_scratch2, Rconst_method);

  __ addi(R26_monitor, R1_SP, -frame::ijava_state_size);
  __ addi(R15_esp, R26_monitor, -Interpreter::stackElementSize);

  // Store values.
  // R15_esp, R14_bcp, R26_monitor, R28_mdx are saved at java calls
  // in InterpreterMacroAssembler::call_from_interpreter.
  __ std(R19_method, _ijava_state_neg(method), R1_SP);
  __ std(R12_scratch2, _ijava_state_neg(mirror), R1_SP);
  __ std(R21_sender_SP, _ijava_state_neg(sender_sp), R1_SP);
  __ std(R27_constPoolCache, _ijava_state_neg(cpoolCache), R1_SP);
  __ std(R18_locals, _ijava_state_neg(locals), R1_SP);

  // Note: esp, bcp, monitor, mdx live in registers. Hence, the correct version can only
  // be found in the frame after save_interpreter_state is done. This is always true
  // for non-top frames. But when a signal occurs, dumping the top frame can go wrong,
  // because e.g. frame::interpreter_frame_bcp() will not access the correct value
  // (Enhanced Stack Trace).
  // The signal handler does not save the interpreter state into the frame.
  __ li(R0, 0);
#ifdef ASSERT
  // Fill remaining slots with constants.
  __ load_const_optimized(R11_scratch1, 0x5afe);
  __ load_const_optimized(R12_scratch2, 0xdead);
#endif
  // We have to initialize some frame slots for native calls (accessed by GC).
  if (native_call) {
    __ std(R26_monitor, _ijava_state_neg(monitors), R1_SP);
    __ std(R14_bcp, _ijava_state_neg(bcp), R1_SP);
    if (ProfileInterpreter) { __ std(R28_mdx, _ijava_state_neg(mdx), R1_SP); }
  }
#ifdef ASSERT
  else {
    __ std(R12_scratch2, _ijava_state_neg(monitors), R1_SP);
    __ std(R12_scratch2, _ijava_state_neg(bcp), R1_SP);
    __ std(R12_scratch2, _ijava_state_neg(mdx), R1_SP);
  }
  __ std(R11_scratch1, _ijava_state_neg(ijava_reserved), R1_SP);
  __ std(R12_scratch2, _ijava_state_neg(esp), R1_SP);
  __ std(R12_scratch2, _ijava_state_neg(lresult), R1_SP);
  __ std(R12_scratch2, _ijava_state_neg(fresult), R1_SP);
#endif
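  // Record the SP of the full-size top frame in top_frame_sp;
  // generate_return_entry_for uses it to restore the frame after a call has resized it.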
  __ subf(R12_scratch2, top_frame_size, R1_SP);
  __ std(R0, _ijava_state_neg(oop_tmp), R1_SP);
  __ std(R12_scratch2, _ijava_state_neg(top_frame_sp), R1_SP);

  // Push top frame.
  __ push_frame(top_frame_size, R11_scratch1);
}

// End of helpers

address TemplateInterpreterGenerator::generate_math_entry(AbstractInterpreter::MethodKind kind) {

  // Decide what to do: Use same platform specific instructions and runtime calls as compilers.
  bool use_instruction = false;
  address runtime_entry = NULL;
  int num_args = 1;
  bool double_precision = true;

  // PPC64 specific:
  switch (kind) {
    case Interpreter::java_lang_math_sqrt: use_instruction = VM_Version::has_fsqrt(); break;
    case Interpreter::java_lang_math_abs:  use_instruction = true; break;
    case Interpreter::java_lang_math_fmaF:
    case Interpreter::java_lang_math_fmaD: use_instruction = UseFMA; break;
    default: break; // Fall back to runtime call.
  }

  switch (kind) {
    case Interpreter::java_lang_math_sin  : runtime_entry = CAST_FROM_FN_PTR(address, SharedRuntime::dsin);   break;
    case Interpreter::java_lang_math_cos  : runtime_entry = CAST_FROM_FN_PTR(address, SharedRuntime::dcos);   break;
    case Interpreter::java_lang_math_tan  : runtime_entry = CAST_FROM_FN_PTR(address, SharedRuntime::dtan);   break;
    case Interpreter::java_lang_math_abs  : /* run interpreted */ break;
    case Interpreter::java_lang_math_sqrt : runtime_entry = CAST_FROM_FN_PTR(address, SharedRuntime::dsqrt);  break;
    case Interpreter::java_lang_math_log  : runtime_entry = CAST_FROM_FN_PTR(address, SharedRuntime::dlog);   break;
    case Interpreter::java_lang_math_log10: runtime_entry = CAST_FROM_FN_PTR(address, SharedRuntime::dlog10); break;
    case Interpreter::java_lang_math_pow  : runtime_entry = CAST_FROM_FN_PTR(address, SharedRuntime::dpow); num_args = 2; break;
    case Interpreter::java_lang_math_exp  : runtime_entry = CAST_FROM_FN_PTR(address, SharedRuntime::dexp);   break;
    case Interpreter::java_lang_math_fmaF : /* run interpreted */ num_args = 3; double_precision = false; break;
    case Interpreter::java_lang_math_fmaD : /* run interpreted */ num_args = 3; break;
    default: ShouldNotReachHere();
  }

  // Use normal entry if neither instruction nor runtime call is used.
  if (!use_instruction && runtime_entry == NULL) return NULL;

  address entry = __ pc();

  // Load arguments
  assert(num_args <= 13, "passed in registers");
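  // The arguments are on the Java expression stack; a double occupies two
  // stack slots, a float one. Load them into F1 and following registers.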
  if (double_precision) {
    int offset = (2 * num_args - 1) * Interpreter::stackElementSize;
    for (int i = 0; i < num_args; ++i) {
      __ lfd(as_FloatRegister(F1_ARG1->encoding() + i), offset, R15_esp);
      offset -= 2 * Interpreter::stackElementSize;
    }
  } else {
    int offset = num_args * Interpreter::stackElementSize;
    for (int i = 0; i < num_args; ++i) {
      __ lfs(as_FloatRegister(F1_ARG1->encoding() + i), offset, R15_esp);
      offset -= Interpreter::stackElementSize;
    }
  }

  if (use_instruction) {
    switch (kind) {
      case Interpreter::java_lang_math_sqrt: __ fsqrt(F1_RET, F1);          break;
      case Interpreter::java_lang_math_abs:  __ fabs(F1_RET, F1);           break;
      case Interpreter::java_lang_math_fmaF: __ fmadds(F1_RET, F1, F2, F3); break;
      case Interpreter::java_lang_math_fmaD: __ fmadd(F1_RET, F1, F2, F3);  break;
      default: ShouldNotReachHere();
    }
  } else {
    // Comment: Can use tail call if the unextended frame is always C ABI compliant:
    //__ load_const_optimized(R12_scratch2, runtime_entry, R0);
    //__ call_c_and_return_to_caller(R12_scratch2);

    // Push a new C frame and save LR.
    __ save_LR_CR(R0);
    __ push_frame_reg_args(0, R11_scratch1);

    __ call_VM_leaf(runtime_entry);

    // Pop the C frame and restore LR.
    __ pop_frame();
    __ restore_LR_CR(R0);
  }

  // Restore caller sp for c2i case (from compiled) and for resized sender frame (from interpreted).
  __ resize_frame_absolute(R21_sender_SP, R11_scratch1, R0);
  __ blr();

  __ flush();

  return entry;
}

void TemplateInterpreterGenerator::bang_stack_shadow_pages(bool native_call) {
  // Quick & dirty stack overflow checking: bang the stack & handle trap.
  // Note that we do the banging after the frame is setup, since the exception
  // handling code expects to find a valid interpreter frame on the stack.
  // Doing the banging earlier fails if the caller frame is not an interpreter
  // frame.
  // (Also, the exception throwing code expects to unlock any synchronized
  // method receiver, so do the banging after locking the receiver.)

  // Bang each page in the shadow zone. We can't assume it's been done for
  // an interpreter frame with greater than a page of locals, so each page
  // needs to be checked. Only true for non-native.
  if (UseStackBanging) {
    const int page_size = os::vm_page_size();
    const int n_shadow_pages = ((int)JavaThread::stack_shadow_zone_size()) / page_size;
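    // For native calls, banging only the farthest shadow page suffices (no Java locals).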
    const int start_page = native_call ? n_shadow_pages : 1;
    BLOCK_COMMENT("bang_stack_shadow_pages:");
    for (int pages = start_page; pages <= n_shadow_pages; pages++) {
      __ bang_stack_with_offset(pages*page_size);
    }
  }
}

// Interpreter stub for calling a native method. (asm interpreter)
// This sets up a somewhat different looking stack for calling the
// native method than the typical interpreter frame setup.
//
// On entry:
//   R19_method - method
//   R16_thread - JavaThread*
//   R15_esp    - intptr_t* sender tos
//
//   abstract stack (grows up)
//     [  IJava (caller of JNI callee)  ]  <-- ASP
//        ...
address TemplateInterpreterGenerator::generate_native_entry(bool synchronized) {

  address entry = __ pc();

  const bool inc_counter = UseCompiler || CountCompiledCalls || LogTouchedMethods;

  // -----------------------------------------------------------------------------
  // Allocate a new frame that represents the native callee (i2n frame).
  // This is not a full-blown interpreter frame, but in particular, the
  // following registers are valid after this:
  // - R19_method
  // - R18_local (points to start of arguments to native function)
  //
  // abstract stack (grows up)
  //   [  IJava (caller of JNI callee)  ]  <-- ASP
  //      ...

  const Register signature_handler_fd = R11_scratch1;
  const Register pending_exception    = R0;
  const Register result_handler_addr  = R31;
  const Register native_method_fd     = R11_scratch1;
  const Register access_flags         = R22_tmp2;
  const Register active_handles       = R11_scratch1; // R26_monitor saved to state.
  const Register sync_state           = R12_scratch2;
  const Register sync_state_addr      = sync_state;   // Address is dead after use.
  const Register suspend_flags        = R11_scratch1;

  //=============================================================================
  // Allocate new frame and initialize interpreter state.

  Label exception_return;
  Label exception_return_sync_check;
  Label stack_overflow_return;

  // Generate new interpreter state and jump to stack_overflow_return in case of
  // a stack overflow.
  //generate_compute_interpreter_state(stack_overflow_return);

  Register size_of_parameters = R22_tmp2;

  generate_fixed_frame(true, size_of_parameters, noreg /* unused */);

  //=============================================================================
  // Increment invocation counter. On overflow, entry to JNI method
  // will be compiled.
  Label invocation_counter_overflow, continue_after_compile;
  if (inc_counter) {
    if (synchronized) {
      // Since at this point in the method invocation the exception handler
      // would try to exit the monitor of synchronized methods which hasn't
      // been entered yet, we set the thread local variable
      // _do_not_unlock_if_synchronized to true. If any exception was thrown by
      // the runtime, exception handling (i.e. unlock_if_synchronized_method) will
      // check this thread-local flag.
      // This flag has two effects, one is to force an unwind in the topmost
      // interpreter frame and not perform an unlock while doing so.
      __ li(R0, 1);
      __ stb(R0, in_bytes(JavaThread::do_not_unlock_if_synchronized_offset()), R16_thread);
    }
    generate_counter_incr(&invocation_counter_overflow, NULL, NULL);

    BIND(continue_after_compile);
  }

  bang_stack_shadow_pages(true);

  if (inc_counter) {
    // Reset the _do_not_unlock_if_synchronized flag.
    if (synchronized) {
      __ li(R0, 0);
      __ stb(R0, in_bytes(JavaThread::do_not_unlock_if_synchronized_offset()), R16_thread);
    }
  }

  // access_flags = method->access_flags();
  // Load access flags.
  assert(access_flags->is_nonvolatile(),
         "access_flags must be in a non-volatile register");
  // Type check.
  assert(4 == sizeof(AccessFlags), "unexpected field size");
  __ lwz(access_flags, method_(access_flags));

  // We don't want to reload R19_method and access_flags after calls
  // to some helper functions.
  assert(R19_method->is_nonvolatile(),
         "R19_method must be a non-volatile register");

  // Check for synchronized methods. Must happen AFTER invocation counter
  // check, so method is not locked if counter overflows.

  if (synchronized) {
    lock_method(access_flags, R11_scratch1, R12_scratch2, true);

    // Update monitor in state.
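    // The interpreter state lives in the parent frame; get its SP via the back link.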
    __ ld(R11_scratch1, 0, R1_SP);
    __ std(R26_monitor, _ijava_state_neg(monitors), R11_scratch1);
  }

  // jvmti/jvmpi support
  __ notify_method_entry();

  //=============================================================================
  // Get and call the signature handler.

  __ ld(signature_handler_fd, method_(signature_handler));
  Label call_signature_handler;

  __ cmpdi(CCR0, signature_handler_fd, 0);
  __ bne(CCR0, call_signature_handler);

  // Method has never been called. Either generate a specialized
  // handler or point to the slow one.
  //
  // Pass parameter 'false' to avoid exception check in call_VM.
  __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::prepare_native_call), R19_method, false);

  // Check for an exception while looking up the target method. If we
  // incurred one, bail.
  __ ld(pending_exception, thread_(pending_exception));
  __ cmpdi(CCR0, pending_exception, 0);
  __ bne(CCR0, exception_return_sync_check); // Has pending exception.

  // Reload signature handler, it may have been created/assigned in the meanwhile.
  __ ld(signature_handler_fd, method_(signature_handler));
  __ twi_0(signature_handler_fd); // Order wrt. load of klass mirror and entry point (isync is below).

  BIND(call_signature_handler);

  // Before we call the signature handler we push a new frame to
  // protect the interpreter frame volatile registers when we return
  // from jni but before we can get back to Java.

  // First set the frame anchor while the SP/FP registers are
  // convenient and the slow signature handler can use this same frame
  // anchor.

  // We have a TOP_IJAVA_FRAME here, which belongs to us.
  __ set_top_ijava_frame_at_SP_as_last_Java_frame(R1_SP, R12_scratch2/*tmp*/);

  // Now the interpreter frame (and its call chain) have been
  // invalidated and flushed. We are now protected against eager
  // being enabled in native code. Even if it goes eager the
  // registers will be reloaded as clean and we will invalidate after
  // the call so no spurious flush should be possible.

  // Call signature handler and pass locals address.
  //
  // Our signature handlers copy required arguments to the C stack
  // (outgoing C args), R3_ARG1 to R10_ARG8, and FARG1 to FARG13.
  __ mr(R3_ARG1, R18_locals);
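  // On ELFv1 the signature handler address is a function descriptor;
  // dereference it to get the actual entry point.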
#if !defined(ABI_ELFv2)
  __ ld(signature_handler_fd, 0, signature_handler_fd);
#endif

  __ call_stub(signature_handler_fd);

  // Remove the register parameter varargs slots we allocated in
  // compute_interpreter_state. SP+16 ends up pointing to the ABI
  // outgoing argument area.
  //
  // Not needed on PPC64.
  //__ add(SP, SP, Argument::n_register_parameters*BytesPerWord);

  assert(result_handler_addr->is_nonvolatile(), "result_handler_addr must be in a non-volatile register");
  // Save across call to native method.
  __ mr(result_handler_addr, R3_RET);

  __ isync(); // Acquire signature handler before trying to fetch the native entry point and klass mirror.

  // Set up fixed parameters and call the native method.
  // If the method is static, get mirror into R4_ARG2.
  {
    Label method_is_not_static;
    // Access_flags is non-volatile and still, no need to restore it.

    // Restore access flags.
    __ testbitdi(CCR0, R0, access_flags, JVM_ACC_STATIC_BIT);
    __ bfalse(CCR0, method_is_not_static);

    __ ld(R11_scratch1, _abi(callers_sp), R1_SP);
    // Load mirror from interpreter frame.
    __ ld(R12_scratch2, _ijava_state_neg(mirror), R11_scratch1);
    // R4_ARG2 = &state->_oop_temp;
    __ addi(R4_ARG2, R11_scratch1, _ijava_state_neg(oop_tmp));
    __ std(R12_scratch2/*mirror*/, _ijava_state_neg(oop_tmp), R11_scratch1);
    BIND(method_is_not_static);
  }

  // At this point, arguments have been copied off the stack into
  // their JNI positions. Oops are boxed in-place on the stack, with
  // handles copied to arguments. The result handler address is in a
  // register.

  // Pass JNIEnv address as first parameter.
  __ addir(R3_ARG1, thread_(jni_environment));

  // Load the native_method entry before we change the thread state.
  __ ld(native_method_fd, method_(native_function));

  //=============================================================================
  // Transition from _thread_in_Java to _thread_in_native. As soon as
  // we make this change the safepoint code needs to be certain that
  // the last Java frame we established is good. The pc in that frame
  // just needs to be near here not an actual return address.

  // We use release_store_fence to update values like the thread state, where
  // we don't want the current thread to continue until all our prior memory
  // accesses (including the new thread state) are visible to other threads.
  __ li(R0, _thread_in_native);
  __ release();

  // TODO PPC port assert(4 == JavaThread::sz_thread_state(), "unexpected field size");
  __ stw(R0, thread_(thread_state));

  //=============================================================================
  // Call the native method. Argument registers must not have been
  // overwritten since "__ call_stub(signature_handler);" (except for
  // ARG1 and ARG2 for static methods).
  __ call_c(native_method_fd);

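  // Save the native result in the parent frame's state; frame::interpreter_frame_result
  // reads it from there (see the note further below).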
  __ li(R0, 0);
  __ ld(R11_scratch1, 0, R1_SP);
  __ std(R3_RET, _ijava_state_neg(lresult), R11_scratch1);
  __ stfd(F1_RET, _ijava_state_neg(fresult), R11_scratch1);
  __ std(R0/*mirror*/, _ijava_state_neg(oop_tmp), R11_scratch1); // reset

  // Note: C++ interpreter needs the following here:
  // The frame_manager_lr field, which we use for setting the last
  // java frame, gets overwritten by the signature handler. Restore
  // it now.
  //__ get_PC_trash_LR(R11_scratch1);
  //__ std(R11_scratch1, _top_ijava_frame_abi(frame_manager_lr), R1_SP);

  // Because of GC R19_method may no longer be valid.

  // Block, if necessary, before resuming in _thread_in_Java state.
  // In order for GC to work, don't clear the last_Java_sp until after
  // blocking.

  //=============================================================================
  // Switch thread to "native transition" state before reading the
  // synchronization state. This additional state is necessary
  // because reading and testing the synchronization state is not
  // atomic w.r.t. GC, as this scenario demonstrates: Java thread A,
  // in _thread_in_native state, loads _not_synchronized and is
  // preempted. VM thread changes sync state to synchronizing and
  // suspends threads for GC. Thread A is resumed to finish this
  // native method, but doesn't block here since it didn't see any
  // synchronization in progress, and escapes.

  // We use release_store_fence to update values like the thread state, where
  // we don't want the current thread to continue until all our prior memory
  // accesses (including the new thread state) are visible to other threads.
  __ li(R0/*thread_state*/, _thread_in_native_trans);
  __ release();
  __ stw(R0/*thread_state*/, thread_(thread_state));
  __ fence();

  // Now before we return to java we must look for a current safepoint
  // (a new safepoint can not start since we entered native_trans).
  // We must check here because a current safepoint could be modifying
  // the callers registers right this moment.

  // Acquire isn't strictly necessary here because of the fence, but
  // sync_state is declared to be volatile, so we do it anyway
  // (cmp-br-isync on one path, release (same as acquire on PPC64) on the other path).

  Label do_safepoint, sync_check_done;
  // No synchronization in progress nor yet synchronized.
  __ safepoint_poll(do_safepoint, sync_state);

  // Not suspended.
  // TODO PPC port assert(4 == Thread::sz_suspend_flags(), "unexpected field size");
  __ lwz(suspend_flags, thread_(suspend_flags));
  __ cmpwi(CCR1, suspend_flags, 0);
  __ beq(CCR1, sync_check_done);

  __ bind(do_safepoint);
  __ isync();
  // Block. We do the call directly and leave the current
  // last_Java_frame setup undisturbed. We must save any possible
  // native result across the call. No oop is present.
1504   // Block. We do the call directly and leave the current
1505   // last_Java_frame setup undisturbed. We must save any possible
1506   // native result across the call. No oop is present.
1507
1508   __ mr(R3_ARG1, R16_thread);
1509 #if defined(ABI_ELFv2)
1510   __ call_c(CAST_FROM_FN_PTR(address, JavaThread::check_special_condition_for_native_trans),
1511             relocInfo::none);
1512 #else
1513   __ call_c(CAST_FROM_FN_PTR(FunctionDescriptor*, JavaThread::check_special_condition_for_native_trans),
1514             relocInfo::none);
1515 #endif
1516
1517   __ bind(sync_check_done);
1518
1519   //=============================================================================
1520   // <<<<<< Back in Interpreter Frame >>>>>
1521
1522   // We are in thread_in_native_trans here and back in the normal
1523   // interpreter frame. We don't have to do anything special about
1524   // safepoints and we can switch to Java mode anytime we are ready.
1525
1526   // Note: frame::interpreter_frame_result has a dependency on how the
1527   // method result is saved across the call to post_method_exit. For
1528   // native methods it assumes that the non-FPU/non-void result is
1529   // saved in _native_lresult and a FPU result in _native_fresult. If
1530   // this changes then the interpreter_frame_result implementation
1531   // will need to be updated too.
1532
1533   // On PPC64, we have stored the result directly after the native call.
1534
1535   //=============================================================================
1536   // Back in Java
1537
1538   // We use release_store_fence to update values like the thread state, where
1539   // we don't want the current thread to continue until all our prior memory
1540   // accesses (including the new thread state) are visible to other threads.
1541   __ li(R0/*thread_state*/, _thread_in_Java);
1542   __ lwsync(); // Acquire safepoint and suspend state, release thread state.
1543   __ stw(R0/*thread_state*/, thread_(thread_state));
1544
1545   if (CheckJNICalls) {
1546     // clear_pending_jni_exception_check
1547     __ load_const_optimized(R0, 0L);
1548     __ st_ptr(R0, JavaThread::pending_jni_exception_check_fn_offset(), R16_thread);
1549   }
1550
1551   __ reset_last_Java_frame();
1552
1553   // JVMTI support (formerly JVMDI/JVMPI). Whether we've got an exception
1554   // pending or not, and whether unlocking throws an exception or not, we
1555   // notify on native method exit. If we do have an exception, we'll end up
1556   // in the caller's context to handle it, so if we didn't notify here,
1557   // the notification would be lost.
1558   __ notify_method_exit(true/*native method*/,
1559                         ilgl /*illegal state (not used for native methods)*/,
1560                         InterpreterMacroAssembler::NotifyJVMTI,
1561                         false /*check_exceptions*/);
1562
1563   //=============================================================================
1564   // Handle exceptions
1565
1566   if (synchronized) {
1567     // Don't check for exceptions since we're still in the i2n frame. Do that
1568     // manually afterwards.
1569     __ unlock_object(R26_monitor, false); // Can also unlock methods.
1570   }
1571
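  // Editorial note: clearing the active JNIHandleBlock below ends the
  // lifetime of every local reference the native method created; native
  // code must not cache such handles across calls. A sketch of the C++
  // equivalent (the assembly stores 0 into the block's top field directly):
  //
  //   thread->active_handles()->clear(); // all JNI local refs are now dead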
1572   // Reset active handles after returning from native.
1573   // thread->active_handles()->clear();
1574   __ ld(active_handles, thread_(active_handles));
1575   // TODO PPC port assert(4 == JNIHandleBlock::top_size_in_bytes(), "unexpected field size");
1576   __ li(R0, 0);
1577   __ stw(R0, JNIHandleBlock::top_offset_in_bytes(), active_handles);
1578
1579   Label exception_return_sync_check_already_unlocked;
1580   __ ld(R0/*pending_exception*/, thread_(pending_exception));
1581   __ cmpdi(CCR0, R0/*pending_exception*/, 0);
1582   __ bne(CCR0, exception_return_sync_check_already_unlocked);
1583
1584   //-----------------------------------------------------------------------------
1585   // No exception pending.
1586
1587   // Move native method result back into proper registers and return.
1588   // Invoke result handler (may unbox/promote).
1589   __ ld(R11_scratch1, 0, R1_SP);
1590   __ ld(R3_RET, _ijava_state_neg(lresult), R11_scratch1);
1591   __ lfd(F1_RET, _ijava_state_neg(fresult), R11_scratch1);
1592   __ call_stub(result_handler_addr);
1593
1594   __ merge_frames(/*top_frame_sp*/ R21_sender_SP, /*return_pc*/ R0, R11_scratch1, R12_scratch2);
1595
1596   // Must use the return pc which was loaded from the caller's frame,
1597   // as the VM uses return-pc-patching for deoptimization.
1598   __ mtlr(R0);
1599   __ blr();
1600
1601   //-----------------------------------------------------------------------------
1602   // An exception is pending. We call into the runtime only if the
1603   // caller was not interpreted. If it was interpreted the
1604   // interpreter will do the correct thing. If it isn't interpreted
1605   // (call stub/compiled code) we will change our return pc and continue.
1606
1607   BIND(exception_return_sync_check);
1608
1609   if (synchronized) {
1610     // Don't check for exceptions since we're still in the i2n frame. Do that
1611     // manually afterwards.
1612     __ unlock_object(R26_monitor, false); // Can also unlock methods.
1613   }
1614   BIND(exception_return_sync_check_already_unlocked);
1615
1616   const Register return_pc = R31;
1617
1618   __ ld(return_pc, 0, R1_SP);
1619   __ ld(return_pc, _abi(lr), return_pc);
1620
1621   // Get the address of the exception handler.
1622   __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::exception_handler_for_return_address),
1623                   R16_thread,
1624                   return_pc /* return pc */);
1625   __ merge_frames(/*top_frame_sp*/ R21_sender_SP, noreg, R11_scratch1, R12_scratch2);
1626
1627   // Load the PC of the exception handler into LR.
1628   __ mtlr(R3_RET);
1629
1630   // Load exception into R3_ARG1 and clear pending exception in thread.
1631   __ ld(R3_ARG1/*exception*/, thread_(pending_exception));
1632   __ li(R4_ARG2, 0);
1633   __ std(R4_ARG2, thread_(pending_exception));
1634
1635   // Load the original return pc into R4_ARG2.
1636   __ mr(R4_ARG2/*issuing_pc*/, return_pc);
1637
1638   // Return to exception handler.
1639   __ blr();
1640
1641   //=============================================================================
1642   // Counter overflow.
1643
1644   if (inc_counter) {
1645     // Handle invocation counter overflow.
1646     __ bind(invocation_counter_overflow);
1647
1648     generate_counter_overflow(continue_after_compile);
1649   }
1650
1651   return entry;
1652 }
1653
1654 // Generic interpreted method entry to (asm) interpreter.
1655 //
1656 address TemplateInterpreterGenerator::generate_normal_entry(bool synchronized) {
1657   bool inc_counter = UseCompiler || CountCompiledCalls || LogTouchedMethods;
1658   address entry = __ pc();
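  // Editorial roadmap, not generated code: the generic entry emitted below
  // has this overall shape (helper names illustrative only):
  //
  //   allocate_fixed_frame();              // includes a stack check
  //   zero_non_parameter_locals();
  //   increment_invocation_counter();      // may trigger compilation
  //   bang_stack_shadow_pages();
  //   if (method->is_synchronized()) lock_method();
  //   notify_method_entry();               // JVMTI
  //   dispatch_first_bytecode();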
1659   // Generate the code to allocate the interpreter stack frame.
1660   Register Rsize_of_parameters = R4_ARG2, // Written by generate_fixed_frame.
1661            Rsize_of_locals     = R5_ARG3; // Written by generate_fixed_frame.
1662
1663   // Also performs a stack check to ensure this frame fits on the stack.
1664   generate_fixed_frame(false, Rsize_of_parameters, Rsize_of_locals);
1665
1666   // --------------------------------------------------------------------------
1667   // Zero out non-parameter locals.
1668   // Note: *Always* zero out non-parameter locals, as SPARC does. It's not
1669   // worth checking the flag; just do it.
1670   Register Rslot_addr = R6_ARG4,
1671            Rnum       = R7_ARG5;
1672   Label Lno_locals, Lzero_loop;
1673
1674   // Set up the zeroing loop.
1675   __ subf(Rnum, Rsize_of_parameters, Rsize_of_locals);
1676   __ subf(Rslot_addr, Rsize_of_parameters, R18_locals);
1677   __ srdi_(Rnum, Rnum, Interpreter::logStackElementSize);
1678   __ beq(CCR0, Lno_locals);
1679   __ li(R0, 0);
1680   __ mtctr(Rnum);
1681
1682   // The zero locals loop.
1683   __ bind(Lzero_loop);
1684   __ std(R0, 0, Rslot_addr);
1685   __ addi(Rslot_addr, Rslot_addr, -Interpreter::stackElementSize);
1686   __ bdnz(Lzero_loop);
1687
1688   __ bind(Lno_locals);
1689
1690   // --------------------------------------------------------------------------
1691   // Counter increment and overflow check.
1692   Label invocation_counter_overflow,
1693         profile_method,
1694         profile_method_continue;
1695   if (inc_counter || ProfileInterpreter) {
1696
1697     Register Rdo_not_unlock_if_synchronized_addr = R11_scratch1;
1698     if (synchronized) {
1699       // At this point in the method invocation the exception handler would
1700       // try to exit the monitor of a synchronized method which has not been
1701       // entered yet, so we set the thread-local variable
1702       // _do_not_unlock_if_synchronized to true. If any exception is thrown
1703       // by the runtime, the exception handling code (i.e.
1704       // unlock_if_synchronized_method) checks this thread-local flag.
1705       // The flag forces an unwind in the topmost interpreter frame and
1706       // prevents an unlock while doing so.
1707       __ li(R0, 1);
1708       __ stb(R0, in_bytes(JavaThread::do_not_unlock_if_synchronized_offset()), R16_thread);
1709     }
1710
1711     // Argument and return type profiling.
1712     __ profile_parameters_type(R3_ARG1, R4_ARG2, R5_ARG3, R6_ARG4);
1713
1714     // Increment invocation counter and check for overflow.
1715     if (inc_counter) {
1716       generate_counter_incr(&invocation_counter_overflow, &profile_method, &profile_method_continue);
1717     }
1718
1719     __ bind(profile_method_continue);
1720   }
1721
1722   bang_stack_shadow_pages(false);
1723
1724   if (inc_counter || ProfileInterpreter) {
1725     // Reset the _do_not_unlock_if_synchronized flag.
1726     if (synchronized) {
1727       __ li(R0, 0);
1728       __ stb(R0, in_bytes(JavaThread::do_not_unlock_if_synchronized_offset()), R16_thread);
1729     }
1730   }
1731
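  // Editorial sketch, not generated code: the lifecycle of the flag set and
  // cleared above, in C++ terms. The setter name is an assumption standing
  // in for the raw stb of 1/0 emitted above:
  //
  //   thread->set_do_not_unlock_if_synchronized(true);
  //   // ...profiling / counter-overflow calls that may throw...
  //   thread->set_do_not_unlock_if_synchronized(false);
  //   // only after this point is the monitor actually entered (lock_method).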
1732   // --------------------------------------------------------------------------
1733   // Locking of synchronized methods. Must happen AFTER the invocation counter
1734   // check and the stack overflow check, so the method is not locked on overflow.
1735   if (synchronized) {
1736     lock_method(R3_ARG1, R4_ARG2, R5_ARG3);
1737   }
1738 #ifdef ASSERT
1739   else {
1740     Label Lok;
1741     __ lwz(R0, in_bytes(Method::access_flags_offset()), R19_method);
1742     __ andi_(R0, R0, JVM_ACC_SYNCHRONIZED);
1743     __ asm_assert_eq("method needs synchronization", 0x8521);
1744     __ bind(Lok);
1745   }
1746 #endif // ASSERT
1747
1748   __ verify_thread();
1749
1750   // --------------------------------------------------------------------------
1751   // JVMTI support
1752   __ notify_method_entry();
1753
1754   // --------------------------------------------------------------------------
1755   // Start executing instructions.
1756   __ dispatch_next(vtos);
1757
1758   // --------------------------------------------------------------------------
1759   // Out of line counter overflow and MDO creation code.
1760   if (ProfileInterpreter) {
1761     // We have decided to profile this method in the interpreter.
1762     __ bind(profile_method);
1763     __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::profile_method));
1764     __ set_method_data_pointer_for_bcp();
1765     __ b(profile_method_continue);
1766   }
1767
1768   if (inc_counter) {
1769     // Handle invocation counter overflow.
1770     __ bind(invocation_counter_overflow);
1771     generate_counter_overflow(profile_method_continue);
1772   }
1773   return entry;
1774 }
1775
1776 // CRC32 Intrinsics.
1777 //
1778 // Contract on scratch and work registers.
1779 // =======================================
1780 //
1781 // On PPC, the register set {R2..R12} is available in the interpreter as scratch/work registers.
1782 // You should, however, keep in mind that {R3_ARG1..R10_ARG8} is the C-ABI argument register set.
1783 // You can't rely on these registers across calls.
1784 //
1785 // The generators for CRC32_update and for CRC32_updateBytes use the
1786 // scratch/work register set internally, passing the work registers
1787 // as arguments to the MacroAssembler emitters as required.
1788 //
1789 // R3_ARG1..R6_ARG4 are preset to hold the incoming java arguments.
1790 // Their contents are not constant but may change according to the requirements
1791 // of the emitted code.
1792 //
1793 // All other registers from the scratch/work register set are used "internally"
1794 // and contain garbage (i.e. unpredictable values) once blr() is reached.
1795 // Basically, only R3_RET contains a defined value, which is the function result.
1796 //
1797 /**
1798  * Method entry for static native methods:
1799  *   int java.util.zip.CRC32.update(int crc, int b)
1800  */
1801 address TemplateInterpreterGenerator::generate_CRC32_update_entry() {
1802   if (UseCRC32Intrinsics) {
1803     address start = __ pc();  // Remember stub start address (it is the return value).
1804     Label slow_path;
1805
1806     // Safepoint check
1807     const Register sync_state = R11_scratch1;
1808     __ safepoint_poll(slow_path, sync_state);
1809
1810     // We don't generate a local frame and don't align the stack, because
1811     // we do not even call stub code (we generate the code inline)
1812     // and there is no safepoint on this path.
1813
1814     // Load java parameters.
1815     // R15_esp is the caller's operand stack pointer, i.e. it points to the parameters.
1816     const Register argP  = R15_esp;
1817     const Register crc   = R3_ARG1;  // crc value
1818     const Register data  = R4_ARG2;
1819     const Register table = R5_ARG3;  // address of crc32 table
1820
1821     BLOCK_COMMENT("CRC32_update {");
1822
1823     // Arguments are reversed on the java expression stack.
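    // Editorial note on the layout assumed by the loads below: for
    // update(int crc, int b), each int occupies one stack slot and the
    // arguments are reversed, so
    //
    //   crc @ (R15_esp + 2*wordSize)   // pushed first
    //   b   @ (R15_esp + 1*wordSize)   // topmost
    //
    // and only a single byte of 'b' is consumed, at the byte offset the
    // following #ifdef selects for the platform's endianness.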
1824 #ifdef VM_LITTLE_ENDIAN
1825     int data_offs = 0+1*wordSize; // (stack) address of byte value. Emitter expects address, not value.
1826                                   // Being passed as an int, the single byte is at offset +0.
1827 #else
1828     int data_offs = 3+1*wordSize; // (stack) address of byte value. Emitter expects address, not value.
1829                                   // Being passed from java as an int, the single byte is at offset +3.
1830 #endif
1831     __ lwz(crc, 2*wordSize, argP);  // Current crc state, zero extend to 64 bit to have a clean register.
1832     __ lbz(data, data_offs, argP);  // Byte from buffer, zero-extended.
1833     __ load_const_optimized(table, StubRoutines::crc_table_addr(), R0);
1834     __ kernel_crc32_singleByteReg(crc, data, table, true);
1835
1836     // Restore caller sp for c2i case (from compiled) and for resized sender frame (from interpreted).
1837     __ resize_frame_absolute(R21_sender_SP, R11_scratch1, R0);
1838     __ blr();
1839
1840     // Generate a vanilla native entry as the slow path.
1841     BLOCK_COMMENT("} CRC32_update");
1842     BIND(slow_path);
1843     __ jump_to_entry(Interpreter::entry_for_kind(Interpreter::native), R11_scratch1);
1844     return start;
1845   }
1846
1847   return NULL;
1848 }
1849
1850 /**
1851  * Method entry for static native methods:
1852  *   int java.util.zip.CRC32.updateBytes(     int crc, byte[] b,  int off, int len)
1853  *   int java.util.zip.CRC32.updateByteBuffer(int crc, long* buf, int off, int len)
1854  */
1855 address TemplateInterpreterGenerator::generate_CRC32_updateBytes_entry(AbstractInterpreter::MethodKind kind) {
1856   if (UseCRC32Intrinsics) {
1857     address start = __ pc();  // Remember stub start address (it is the return value).
1858     Label slow_path;
1859
1860     // Safepoint check
1861     const Register sync_state = R11_scratch1;
1862     __ safepoint_poll(slow_path, sync_state);
1863
1864     // We don't generate a local frame and don't align the stack, because
1865     // we do not even call stub code (we generate the code inline)
1866     // and there is no safepoint on this path.
1867
1868     // Load parameters.
1869     // R15_esp is the caller's operand stack pointer, i.e. it points to the parameters.
1870     const Register argP    = R15_esp;
1871     const Register crc     = R3_ARG1;  // crc value
1872     const Register data    = R4_ARG2;  // address of java byte array
1873     const Register dataLen = R5_ARG3;  // source data len
1874     const Register tmp     = R11_scratch1;
1875
1876     // Arguments are reversed on the java expression stack.
1877     // Calculate address of start element.
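    // Editorial sketch, not generated code, of the address arithmetic the
    // two branches below perform (C-like; the jbyte* casts are illustrative):
    //
    //   data = (jbyte*)buf + off;                                            // updateByteBuffer
    //   data = (jbyte*)b + off + arrayOopDesc::base_offset_in_bytes(T_BYTE); // updateBytes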
1878     if (kind == Interpreter::java_util_zip_CRC32_updateByteBuffer) { // Used for "updateByteBuffer".
1879       BLOCK_COMMENT("CRC32_updateByteBuffer {");
1880       // crc     @ (SP + 5W) (32bit)
1881       // buf     @ (SP + 3W) (64bit ptr to long array)
1882       // off     @ (SP + 2W) (32bit)
1883       // dataLen @ (SP + 1W) (32bit)
1884       // data = buf + off
1885       __ ld(  data,    3*wordSize, argP);  // start of byte buffer
1886       __ lwa( tmp,     2*wordSize, argP);  // byte buffer offset
1887       __ lwa( dataLen, 1*wordSize, argP);  // #bytes to process
1888       __ lwz( crc,     5*wordSize, argP);  // current crc state
1889       __ add( data, data, tmp);            // Add byte buffer offset.
1890     } else {                               // Used for "updateBytes".
1891       BLOCK_COMMENT("CRC32_updateBytes {");
1892       // crc     @ (SP + 4W) (32bit)
1893       // buf     @ (SP + 3W) (64bit ptr to byte array)
1894       // off     @ (SP + 2W) (32bit)
1895       // dataLen @ (SP + 1W) (32bit)
1896       // data = buf + off + base_offset
1897       __ ld(  data,    3*wordSize, argP);  // start of byte buffer
1898       __ lwa( tmp,     2*wordSize, argP);  // byte buffer offset
1899       __ lwa( dataLen, 1*wordSize, argP);  // #bytes to process
1900       __ add( data, data, tmp);            // add byte buffer offset
1901       __ lwz( crc,     4*wordSize, argP);  // current crc state
1902       __ addi(data, data, arrayOopDesc::base_offset_in_bytes(T_BYTE));
1903     }
1904
1905     __ crc32(crc, data, dataLen, R2, R6, R7, R8, R9, R10, R11, R12, false);
1906
1907     // Restore caller sp for c2i case (from compiled) and for resized sender frame (from interpreted).
1908     __ resize_frame_absolute(R21_sender_SP, R11_scratch1, R0);
1909     __ blr();
1910
1911     // Generate a vanilla native entry as the slow path.
1912     BLOCK_COMMENT("} CRC32_updateBytes(Buffer)");
1913     BIND(slow_path);
1914     __ jump_to_entry(Interpreter::entry_for_kind(Interpreter::native), R11_scratch1);
1915     return start;
1916   }
1917
1918   return NULL;
1919 }
1920
1921
1922 /**
1923  * Method entry for intrinsic-candidate (non-native) methods:
1924  *   int java.util.zip.CRC32C.updateBytes(           int crc, byte[] b,  int off, int end)
1925  *   int java.util.zip.CRC32C.updateDirectByteBuffer(int crc, long* buf, int off, int end)
1926  * Unlike CRC32, CRC32C does not have any methods marked as native.
1927  * CRC32C also uses an "end" variable instead of the length variable CRC32 uses.
1928  **/
1929 address TemplateInterpreterGenerator::generate_CRC32C_updateBytes_entry(AbstractInterpreter::MethodKind kind) {
1930   if (UseCRC32CIntrinsics) {
1931     address start = __ pc();  // Remember stub start address (it is the return value).
1932
1933     // We don't generate a local frame and don't align the stack, because
1934     // we do not even call stub code (we generate the code inline)
1935     // and there is no safepoint on this path.
1936
1937     // Load parameters.
1938     // R15_esp is the caller's operand stack pointer, i.e. it points to the parameters.
1939     const Register argP    = R15_esp;
1940     const Register crc     = R3_ARG1;  // crc value
1941     const Register data    = R4_ARG2;  // address of java byte array
1942     const Register dataLen = R5_ARG3;  // source data len
1943     const Register tmp     = R11_scratch1;
1944
1945     // Arguments are reversed on the java expression stack.
1946     // Calculate address of start element.
1947     if (kind == Interpreter::java_util_zip_CRC32C_updateDirectByteBuffer) { // Used for "updateDirectByteBuffer".
1948       BLOCK_COMMENT("CRC32C_updateDirectByteBuffer {");
1949       // crc     @ (SP + 5W) (32bit)
1950       // buf     @ (SP + 3W) (64bit ptr to long array)
1951       // off     @ (SP + 2W) (32bit)
1952       // dataLen @ (SP + 1W) (32bit)
1953       // data = buf + off
1954       __ ld(  data,    3*wordSize, argP);  // start of byte buffer
1955       __ lwa( tmp,     2*wordSize, argP);  // byte buffer offset
1956       __ lwa( dataLen, 1*wordSize, argP);  // #bytes to process
1957       __ lwz( crc,     5*wordSize, argP);  // current crc state
1958       __ add( data, data, tmp);            // Add byte buffer offset.
1959       __ sub( dataLen, dataLen, tmp);      // (end_index - offset)
1960     } else {                               // Used for "updateBytes".
1961       BLOCK_COMMENT("CRC32C_updateBytes {");
1962       // crc     @ (SP + 4W) (32bit)
1963       // buf     @ (SP + 3W) (64bit ptr to byte array)
1964       // off     @ (SP + 2W) (32bit)
1965       // dataLen @ (SP + 1W) (32bit)
1966       // data = buf + off + base_offset
1967       __ ld(  data,    3*wordSize, argP);  // start of byte buffer
1968       __ lwa( tmp,     2*wordSize, argP);  // byte buffer offset
1969       __ lwa( dataLen, 1*wordSize, argP);  // #bytes to process
1970       __ add( data, data, tmp);            // add byte buffer offset
1971       __ sub( dataLen, dataLen, tmp);      // (end_index - offset)
1972       __ lwz( crc,     4*wordSize, argP);  // current crc state
1973       __ addi(data, data, arrayOopDesc::base_offset_in_bytes(T_BYTE));
1974     }
1975
1976     __ crc32(crc, data, dataLen, R2, R6, R7, R8, R9, R10, R11, R12, true);
1977
1978     // Restore caller sp for c2i case (from compiled) and for resized sender frame (from interpreted).
1979     __ resize_frame_absolute(R21_sender_SP, R11_scratch1, R0);
1980     __ blr();
1981
1982     BLOCK_COMMENT("} CRC32C_update{Bytes|DirectByteBuffer}");
1983     return start;
1984   }
1985
1986   return NULL;
1987 }
1988
1989 // =============================================================================
1990 // Exceptions
1991
1992 void TemplateInterpreterGenerator::generate_throw_exception() {
1993   Register Rexception    = R17_tos,
1994            Rcontinuation = R3_RET;
1995
1996   // --------------------------------------------------------------------------
1997   // Entry point if a method returns with a pending exception (rethrow).
1998   Interpreter::_rethrow_exception_entry = __ pc();
1999   {
2000     __ restore_interpreter_state(R11_scratch1); // Sets R11_scratch1 = fp.
2001     __ ld(R12_scratch2, _ijava_state_neg(top_frame_sp), R11_scratch1);
2002     __ resize_frame_absolute(R12_scratch2, R11_scratch1, R0);
2003
2004     // Compiled code destroys templateTableBase; reload it.
2005     __ load_const_optimized(R25_templateTableBase, (address)Interpreter::dispatch_table((TosState)0), R11_scratch1);
2006   }
2007
2008   // Entry point if an interpreted method throws an exception (throw).
2009   Interpreter::_throw_exception_entry = __ pc();
2010   {
2011     __ mr(Rexception, R3_RET);
2012
2013     __ verify_thread();
2014     __ verify_oop(Rexception);
2015
2016     // Expression stack must be empty before entering the VM in case of an exception.
2017     __ empty_expression_stack();
2018     // Find exception handler address and preserve exception oop.
2019     // Call C routine to find handler and jump to it.
2020     __ call_VM(Rexception, CAST_FROM_FN_PTR(address, InterpreterRuntime::exception_handler_for_exception), Rexception);
2021     __ mtctr(Rcontinuation);
2022     // Push exception for exception handler bytecodes.
2023     __ push_ptr(Rexception);
2024
2025     // Jump to exception handler (may be the remove-activation entry!).
2026     __ bctr();
2027   }
2028
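  // Editorial sketch, not generated code, of the dispatch emitted above in
  // pseudo-C++:
  //
  //   address continuation =
  //       InterpreterRuntime::exception_handler_for_exception(thread, exception);
  //   push(exception);    // handler bytecodes expect the exception oop on TOS
  //   goto continuation;  // bctr: either a handler in this method or the
  //                       // remove-activation entry if this frame can't handle it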
2029   // If the exception is not handled in the current frame, the frame is
2030   // removed and the exception is rethrown (i.e. the exception
2031   // continuation is _rethrow_exception).
2032   //
2033   // Note: At this point the bci is still the bci of the instruction
2034   // which caused the exception, and the expression stack is
2035   // empty. Thus, for any VM calls at this point, GC will find a legal
2036   // oop map (with empty expression stack).
2037
2038   // In current activation
2039   // tos: exception
2040   // bcp: exception bcp
2041
2042   // --------------------------------------------------------------------------
2043   // JVMTI PopFrame support
2044
2045   Interpreter::_remove_activation_preserving_args_entry = __ pc();
2046   {
2047     // Set the popframe_processing bit in popframe_condition to indicate that
2048     // we are currently handling popframe, so that call_VMs that may happen
2049     // later do not trigger new popframe handling cycles.
2050     __ lwz(R11_scratch1, in_bytes(JavaThread::popframe_condition_offset()), R16_thread);
2051     __ ori(R11_scratch1, R11_scratch1, JavaThread::popframe_processing_bit);
2052     __ stw(R11_scratch1, in_bytes(JavaThread::popframe_condition_offset()), R16_thread);
2053
2054     // Empty the expression stack, as in normal exception handling.
2055     __ empty_expression_stack();
2056     __ unlock_if_synchronized_method(vtos, /* throw_monitor_exception */ false, /* install_monitor_exception */ false);
2057
2058     // Check to see whether we are returning to a deoptimized frame.
2059     // (The PopFrame call ensures that the caller of the popped frame is
2060     // either interpreted or compiled and deoptimizes it if compiled.)
2061     // Note that we don't compare the return PC against the
2062     // deoptimization blob's unpack entry because of the presence of
2063     // adapter frames in C2.
2064     Label Lcaller_not_deoptimized;
2065     Register return_pc = R3_ARG1;
2066     __ ld(return_pc, 0, R1_SP);
2067     __ ld(return_pc, _abi(lr), return_pc);
2068     __ call_VM_leaf(CAST_FROM_FN_PTR(address, InterpreterRuntime::interpreter_contains), return_pc);
2069     __ cmpdi(CCR0, R3_RET, 0);
2070     __ bne(CCR0, Lcaller_not_deoptimized);
2071
2072     // The deoptimized case.
2073     // In this case, we can't call dispatch_next() after the frame is
2074     // popped, but instead must save the incoming arguments and restore
2075     // them after deoptimization has occurred.
2076     __ ld(R4_ARG2, in_bytes(Method::const_offset()), R19_method);
2077     __ lhz(R4_ARG2 /* number of params */, in_bytes(ConstMethod::size_of_parameters_offset()), R4_ARG2);
2078     __ slwi(R4_ARG2, R4_ARG2, Interpreter::logStackElementSize);
2079     __ addi(R5_ARG3, R18_locals, Interpreter::stackElementSize);
2080     __ subf(R5_ARG3, R4_ARG2, R5_ARG3);
2081     // Save these arguments.
2082     __ call_VM_leaf(CAST_FROM_FN_PTR(address, Deoptimization::popframe_preserve_args), R16_thread, R4_ARG2, R5_ARG3);
2083
2084     // Inform deoptimization that it is responsible for restoring these arguments.
2085     __ load_const_optimized(R11_scratch1, JavaThread::popframe_force_deopt_reexecution_bit);
2086     __ stw(R11_scratch1, in_bytes(JavaThread::popframe_condition_offset()), R16_thread);
2087
2088     // Return from the current method into the deoptimization blob. We will eventually
2089     // end up in the deopt interpreter entry; deoptimization has prepared everything
2090     // so that we re-execute the call that called us.
2091     __ merge_frames(/*top_frame_sp*/ R21_sender_SP, /*reload return_pc*/ return_pc, R11_scratch1, R12_scratch2);
2092     __ mtlr(return_pc);
2093     __ blr();
2094
2095     // The non-deoptimized case.
2096     __ bind(Lcaller_not_deoptimized);
2097
2098     // Clear the popframe condition flag.
2099     __ li(R0, 0);
2100     __ stw(R0, in_bytes(JavaThread::popframe_condition_offset()), R16_thread);
2101
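    // Editorial sketch, not generated code, recapping the argument
    // preservation in the deoptimized case above (C-like; variable names
    // are illustrative):
    //
    //   int param_bytes     = method->size_of_parameters() * Interpreter::stackElementSize;
    //   address param_start = (address)R18_locals + Interpreter::stackElementSize - param_bytes;
    //   // locals grow towards smaller addresses, so this is the lowest parameter slot
    //   Deoptimization::popframe_preserve_args(thread, param_bytes, param_start);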
2102     // Get out of the current method and re-execute the call that called us.
2103     __ merge_frames(/*top_frame_sp*/ R21_sender_SP, /*return_pc*/ noreg, R11_scratch1, R12_scratch2);
2104     __ restore_interpreter_state(R11_scratch1);
2105     __ ld(R12_scratch2, _ijava_state_neg(top_frame_sp), R11_scratch1);
2106     __ resize_frame_absolute(R12_scratch2, R11_scratch1, R0);
2107     if (ProfileInterpreter) {
2108       __ set_method_data_pointer_for_bcp();
2109       __ ld(R11_scratch1, 0, R1_SP);
2110       __ std(R28_mdx, _ijava_state_neg(mdx), R11_scratch1);
2111     }
2112 #if INCLUDE_JVMTI
2113     Label L_done;
2114
2115     __ lbz(R11_scratch1, 0, R14_bcp);
2116     __ cmpwi(CCR0, R11_scratch1, Bytecodes::_invokestatic);
2117     __ bne(CCR0, L_done);
2118
2119     // The member name argument must be restored if _invokestatic is re-executed after a PopFrame call.
2120     // Detect such a case in the InterpreterRuntime function and return the member name argument, or NULL.
2121     __ ld(R4_ARG2, 0, R18_locals);
2122     __ MacroAssembler::call_VM(R4_ARG2, CAST_FROM_FN_PTR(address, InterpreterRuntime::member_name_arg_or_null), R4_ARG2, R19_method, R14_bcp, false);
2123     __ restore_interpreter_state(R11_scratch1, /*bcp_and_mdx_only*/ true);
2124     __ cmpdi(CCR0, R4_ARG2, 0);
2125     __ beq(CCR0, L_done);
2126     __ std(R4_ARG2, wordSize, R15_esp);
2127     __ bind(L_done);
2128 #endif // INCLUDE_JVMTI
2129     __ dispatch_next(vtos);
2130   }
2131   // end of JVMTI PopFrame support
2132
2133   // --------------------------------------------------------------------------
2134   // Remove-activation exception entry.
2135   // This is jumped to if an interpreted method can't handle an exception itself
2136   // (we come from the throw/rethrow exception entry above). We're going to call
2137   // into the VM to find the exception handler in the caller, pop the current
2138   // frame, and return the handler we calculated.
2139   Interpreter::_remove_activation_entry = __ pc();
2140   {
2141     __ pop_ptr(Rexception);
2142     __ verify_thread();
2143     __ verify_oop(Rexception);
2144     __ std(Rexception, in_bytes(JavaThread::vm_result_offset()), R16_thread);
2145
2146     __ unlock_if_synchronized_method(vtos, /* throw_monitor_exception */ false, true);
2147     __ notify_method_exit(false, vtos, InterpreterMacroAssembler::SkipNotifyJVMTI, false);
2148
2149     __ get_vm_result(Rexception);
2150
2151     // We are done with this activation frame; find out where to go next.
2152     // The continuation point will be an exception handler, which expects
2153     // the following registers to be set up:
2154     //
2155     // RET:  exception oop
2156     // ARG2: issuing PC (see generate_exception_blob()), only used if the caller is compiled.
2157
2158     Register return_pc = R31; // Needs to survive the runtime call.
2159     __ ld(return_pc, 0, R1_SP);
2160     __ ld(return_pc, _abi(lr), return_pc);
2161     __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::exception_handler_for_return_address), R16_thread, return_pc);
2162
2163     // Remove the current activation.
2164     __ merge_frames(/*top_frame_sp*/ R21_sender_SP, /*return_pc*/ noreg, R11_scratch1, R12_scratch2);
2165
2166     __ mr(R4_ARG2, return_pc);
2167     __ mtlr(R3_RET);
2168     __ mr(R3_RET, Rexception);
2169     __ blr();
2170   }
2171 }
2172
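// Editorial sketch, not generated code: the unwind hand-off at the end of
// _remove_activation_entry above, in pseudo-C++:
//
//   address handler = SharedRuntime::exception_handler_for_return_address(thread, return_pc);
//   // after merge_frames the caller's frame is current again; branch with:
//   R3_RET  = exception;   // the handler expects the exception oop in RET
//   R4_ARG2 = return_pc;   // issuing PC, used if the caller is compiled
//   goto handler;          // blr with LR = handler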
2173 // JVMTI ForceEarlyReturn support.
2174 // Returns "in the middle" of a method with a "fake" return value.
2175 address TemplateInterpreterGenerator::generate_earlyret_entry_for(TosState state) {
2176
2177   Register Rscratch1 = R11_scratch1,
2178            Rscratch2 = R12_scratch2;
2179
2180   address entry = __ pc();
2181   __ empty_expression_stack();
2182
2183   __ load_earlyret_value(state, Rscratch1);
2184
2185   __ ld(Rscratch1, in_bytes(JavaThread::jvmti_thread_state_offset()), R16_thread);
2186   // Clear the earlyret state.
2187   __ li(R0, 0);
2188   __ stw(R0, in_bytes(JvmtiThreadState::earlyret_state_offset()), Rscratch1);
2189
2190   __ remove_activation(state, false, false);
2191   // Copied from TemplateTable::_return.
2192   // Restoration of LR is done by remove_activation.
2193   switch (state) {
2194     // Narrow result if state is itos but result type is smaller.
2195     case btos:
2196     case ztos:
2197     case ctos:
2198     case stos:
2199     case itos: __ narrow(R17_tos); /* fall through */
2200     case ltos:
2201     case atos: __ mr(R3_RET, R17_tos); break;
2202     case ftos:
2203     case dtos: __ fmr(F1_RET, F15_ftos); break;
2204     case vtos: // This might be a constructor. Final fields (and volatile fields on PPC64) need
2205                // to become visible before the reference to the object gets stored anywhere.
2206                __ membar(Assembler::StoreStore); break;
2207     default  : ShouldNotReachHere();
2208   }
2209   __ blr();
2210
2211   return entry;
2212 } // end of ForceEarlyReturn support
2213
2214 //-----------------------------------------------------------------------------
2215 // Helper for vtos entry point generation
2216
2217 void TemplateInterpreterGenerator::set_vtos_entry_points(Template* t,
2218                                                          address& bep,
2219                                                          address& cep,
2220                                                          address& sep,
2221                                                          address& aep,
2222                                                          address& iep,
2223                                                          address& lep,
2224                                                          address& fep,
2225                                                          address& dep,
2226                                                          address& vep) {
2227   assert(t->is_valid() && t->tos_in() == vtos, "illegal template");
2228   Label L;
2229
2230   aep = __ pc();  __ push_ptr();  __ b(L);
2231   fep = __ pc();  __ push_f();    __ b(L);
2232   dep = __ pc();  __ push_d();    __ b(L);
2233   lep = __ pc();  __ push_l();    __ b(L);
2234   __ align(32, 12, 24); // align L
2235   bep = cep = sep =
2236   iep = __ pc();  __ push_i();
2237   vep = __ pc();
2238   __ bind(L);
2239   generate_and_dispatch(t);
2240 }
2241
2242 //-----------------------------------------------------------------------------
2243
2244 // Non-product code
2245 #ifndef PRODUCT
2246 address TemplateInterpreterGenerator::generate_trace_code(TosState state) {
2247   //__ flush_bundle();
2248   address entry = __ pc();
2249
2250   const char *bname = NULL;
2251   uint tsize = 0;
2252   switch(state) {
2253     case ftos:
2254       bname = "trace_code_ftos {";
2255       tsize = 2;
2256       break;
2257     case btos:
2258       bname = "trace_code_btos {";
2259       tsize = 2;
2260       break;
2261     case ztos:
2262       bname = "trace_code_ztos {";
2263       tsize = 2;
2264       break;
2265     case ctos:
2266       bname = "trace_code_ctos {";
2267       tsize = 2;
2268       break;
2269     case stos:
2270       bname = "trace_code_stos {";
2271       tsize = 2;
2272       break;
2273     case itos:
2274       bname = "trace_code_itos {";
2275       tsize = 2;
2276       break;
2277     case ltos:
2278       bname = "trace_code_ltos {";
2279       tsize = 3;
2280       break;
2281     case atos:
2282       bname = "trace_code_atos {";
2283       tsize = 2;
2284       break;
2285     case vtos:
2286       // Note: In case of vtos, the topmost stack value could be an int or a double.
2287       // In case of a double (2 slots) we won't see the 2nd stack value.
2288       // Maybe we should simply print the topmost 3 stack slots to cope with the problem.
2289 bname = "trace_code_vtos {"; 2290 tsize = 2; 2291 2292 break; 2293 case dtos: 2294 bname = "trace_code_dtos {"; 2295 tsize = 3; 2296 break; 2297 default: 2298 ShouldNotReachHere(); 2299 } 2300 BLOCK_COMMENT(bname); 2301 2302 // Support short-cut for TraceBytecodesAt. 2303 // Don't call into the VM if we don't want to trace to speed up things. 2304 Label Lskip_vm_call; 2305 if (TraceBytecodesAt > 0 && TraceBytecodesAt < max_intx) { 2306 int offs1 = __ load_const_optimized(R11_scratch1, (address) &TraceBytecodesAt, R0, true); 2307 int offs2 = __ load_const_optimized(R12_scratch2, (address) &BytecodeCounter::_counter_value, R0, true); 2308 __ ld(R11_scratch1, offs1, R11_scratch1); 2309 __ lwa(R12_scratch2, offs2, R12_scratch2); 2310 __ cmpd(CCR0, R12_scratch2, R11_scratch1); 2311 __ blt(CCR0, Lskip_vm_call); 2312 } 2313 2314 __ push(state); 2315 // Load 2 topmost expression stack values. 2316 __ ld(R6_ARG4, tsize*Interpreter::stackElementSize, R15_esp); 2317 __ ld(R5_ARG3, Interpreter::stackElementSize, R15_esp); 2318 __ mflr(R31); 2319 __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::trace_bytecode), /* unused */ R4_ARG2, R5_ARG3, R6_ARG4, false); 2320 __ mtlr(R31); 2321 __ pop(state); 2322 2323 if (TraceBytecodesAt > 0 && TraceBytecodesAt < max_intx) { 2324 __ bind(Lskip_vm_call); 2325 } 2326 __ blr(); 2327 BLOCK_COMMENT("} trace_code"); 2328 return entry; 2329 } 2330 2331 void TemplateInterpreterGenerator::count_bytecode() { 2332 int offs = __ load_const_optimized(R11_scratch1, (address) &BytecodeCounter::_counter_value, R12_scratch2, true); 2333 __ lwz(R12_scratch2, offs, R11_scratch1); 2334 __ addi(R12_scratch2, R12_scratch2, 1); 2335 __ stw(R12_scratch2, offs, R11_scratch1); 2336 } 2337 2338 void TemplateInterpreterGenerator::histogram_bytecode(Template* t) { 2339 int offs = __ load_const_optimized(R11_scratch1, (address) &BytecodeHistogram::_counters[t->bytecode()], R12_scratch2, true); 2340 __ lwz(R12_scratch2, offs, R11_scratch1); 2341 __ addi(R12_scratch2, R12_scratch2, 1); 2342 __ stw(R12_scratch2, offs, R11_scratch1); 2343 } 2344 2345 void TemplateInterpreterGenerator::histogram_bytecode_pair(Template* t) { 2346 const Register addr = R11_scratch1, 2347 tmp = R12_scratch2; 2348 // Get index, shift out old bytecode, bring in new bytecode, and store it. 2349 // _index = (_index >> log2_number_of_codes) | 2350 // (bytecode << log2_number_of_codes); 2351 int offs1 = __ load_const_optimized(addr, (address)&BytecodePairHistogram::_index, tmp, true); 2352 __ lwz(tmp, offs1, addr); 2353 __ srwi(tmp, tmp, BytecodePairHistogram::log2_number_of_codes); 2354 __ ori(tmp, tmp, ((int) t->bytecode()) << BytecodePairHistogram::log2_number_of_codes); 2355 __ stw(tmp, offs1, addr); 2356 2357 // Bump bucket contents. 2358 // _counters[_index] ++; 2359 int offs2 = __ load_const_optimized(addr, (address)&BytecodePairHistogram::_counters, R0, true); 2360 __ sldi(tmp, tmp, LogBytesPerInt); 2361 __ add(addr, tmp, addr); 2362 __ lwz(tmp, offs2, addr); 2363 __ addi(tmp, tmp, 1); 2364 __ stw(tmp, offs2, addr); 2365 } 2366 2367 void TemplateInterpreterGenerator::trace_bytecode(Template* t) { 2368 // Call a little run-time stub to avoid blow-up for each bytecode. 2369 // The run-time runtime saves the right registers, depending on 2370 // the tosca in-state for the given template. 2371 2372 assert(Interpreter::trace_code(t->tos_in()) != NULL, 2373 "entry must have been generated"); 2374 2375 // Note: we destroy LR here. 
2376   __ bl(Interpreter::trace_code(t->tos_in()));
2377 }
2378
2379 void TemplateInterpreterGenerator::stop_interpreter_at() {
2380   Label L;
2381   int offs1 = __ load_const_optimized(R11_scratch1, (address) &StopInterpreterAt, R0, true);
2382   int offs2 = __ load_const_optimized(R12_scratch2, (address) &BytecodeCounter::_counter_value, R0, true);
2383   __ ld(R11_scratch1, offs1, R11_scratch1);
2384   __ lwa(R12_scratch2, offs2, R12_scratch2);
2385   __ cmpd(CCR0, R12_scratch2, R11_scratch1);
2386   __ bne(CCR0, L);
2387   __ illtrap();
2388   __ bind(L);
2389 }
2390
2391 #endif // !PRODUCT

--- EOF ---