/*
 * Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "interp_masm_sparc.hpp"
#include "interpreter/interpreter.hpp"
#include "interpreter/interpreterRuntime.hpp"
#include "oops/arrayOop.hpp"
#include "oops/markOop.hpp"
#include "oops/methodData.hpp"
#include "oops/method.hpp"
#include "oops/methodCounters.hpp"
#include "prims/jvmtiExport.hpp"
#include "prims/jvmtiRedefineClassesTrace.hpp"
#include "prims/jvmtiThreadState.hpp"
#include "runtime/basicLock.hpp"
#include "runtime/biasedLocking.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/thread.inline.hpp"

#ifndef CC_INTERP
#ifndef FAST_DISPATCH
#define FAST_DISPATCH 1
#endif
#undef FAST_DISPATCH

// Implementation of InterpreterMacroAssembler

// This file specializes the assembler with interpreter-specific macros

const Address InterpreterMacroAssembler::l_tmp(FP, (frame::interpreter_frame_l_scratch_fp_offset * wordSize) + STACK_BIAS);
const Address InterpreterMacroAssembler::d_tmp(FP, (frame::interpreter_frame_d_scratch_fp_offset * wordSize) + STACK_BIAS);

#else // CC_INTERP
#ifndef STATE
#define STATE(field_name) Lstate, in_bytes(byte_offset_of(BytecodeInterpreter, field_name))
#endif // STATE

#endif // CC_INTERP

void InterpreterMacroAssembler::jump_to_entry(address entry) {
  assert(entry, "Entry must have been generated by now");
  AddressLiteral al(entry);
  jump_to(al, G3_scratch);
  delayed()->nop();
}

void InterpreterMacroAssembler::compute_extra_locals_size_in_bytes(Register args_size, Register locals_size, Register delta) {
  // Note: this algorithm is also used by C1's OSR entry sequence.
  // Any changes should also be applied to CodeEmitter::emit_osr_entry().
  assert_different_registers(args_size, locals_size);
  // max_locals*2 for TAGS.  Assumes that args_size has already been adjusted.
  subcc(locals_size, args_size, delta); // extra space for non-arguments locals in words
  // Use br/mov combination because it works on both V8 and V9 and is
  // faster.
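  // Net effect (summary comment, not in the original source):
  //   delta = round_to(max(locals_size - args_size, 0), WordsPerLong) * wordSize,
  // i.e. the byte size of the non-argument locals, padded so that SP
  // stays two-word aligned.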
  Label skip_move;
  br(Assembler::negative, true, Assembler::pt, skip_move);
  delayed()->mov(G0, delta);
  bind(skip_move);
  round_to(delta, WordsPerLong);       // make multiple of 2 (SP must be 2-word aligned)
  sll(delta, LogBytesPerWord, delta);  // extra space for locals in bytes
}

#ifndef CC_INTERP

// Dispatch code executed in the prolog of a bytecode which does not do its
// own dispatch. The dispatch address is computed and placed in IdispatchAddress.
void InterpreterMacroAssembler::dispatch_prolog(TosState state, int bcp_incr) {
  assert_not_delayed();
#ifdef FAST_DISPATCH
  // FAST_DISPATCH and ProfileInterpreter are mutually exclusive since
  // they both use I2.
  assert(!ProfileInterpreter, "FAST_DISPATCH and +ProfileInterpreter are mutually exclusive");
  ldub(Lbcp, bcp_incr, Lbyte_code);                     // load next bytecode
  add(Lbyte_code, Interpreter::distance_from_dispatch_table(state), Lbyte_code);
                                                        // add offset to correct dispatch table
  sll(Lbyte_code, LogBytesPerWord, Lbyte_code);         // multiply by wordSize
  ld_ptr(IdispatchTables, Lbyte_code, IdispatchAddress);// get entry addr
#else
  ldub( Lbcp, bcp_incr, Lbyte_code);                    // load next bytecode
  // dispatch table to use
  AddressLiteral tbl(Interpreter::dispatch_table(state));
  sll(Lbyte_code, LogBytesPerWord, Lbyte_code);         // multiply by wordSize
  set(tbl, G3_scratch);                                 // compute addr of table
  ld_ptr(G3_scratch, Lbyte_code, IdispatchAddress);     // get entry addr
#endif
}


// Dispatch code executed in the epilog of a bytecode which does not do its
// own dispatch. The dispatch address in IdispatchAddress is used for the
// dispatch.
void InterpreterMacroAssembler::dispatch_epilog(TosState state, int bcp_incr) {
  assert_not_delayed();
  verify_FPU(1, state);
  interp_verify_oop(Otos_i, state, __FILE__, __LINE__);
  jmp( IdispatchAddress, 0 );
  if (bcp_incr != 0)  delayed()->inc(Lbcp, bcp_incr);
  else                delayed()->nop();
}


void InterpreterMacroAssembler::dispatch_next(TosState state, int bcp_incr) {
  // %%%% consider branching to a single shared dispatch stub (for each bcp_incr)
  assert_not_delayed();
  ldub( Lbcp, bcp_incr, Lbyte_code);               // load next bytecode
  dispatch_Lbyte_code(state, Interpreter::dispatch_table(state), bcp_incr);
}


void InterpreterMacroAssembler::dispatch_next_noverify_oop(TosState state, int bcp_incr) {
  // %%%% consider branching to a single shared dispatch stub (for each bcp_incr)
  assert_not_delayed();
  ldub( Lbcp, bcp_incr, Lbyte_code);               // load next bytecode
  dispatch_Lbyte_code(state, Interpreter::dispatch_table(state), bcp_incr, false);
}


void InterpreterMacroAssembler::dispatch_via(TosState state, address* table) {
  // load current bytecode
  assert_not_delayed();
  ldub( Lbcp, 0, Lbyte_code);                      // load next bytecode
  dispatch_base(state, table);
}


void InterpreterMacroAssembler::call_VM_leaf_base(
  Register java_thread,
  address  entry_point,
  int      number_of_arguments
) {
  if (!java_thread->is_valid())
    java_thread = L7_thread_cache;
  // super call
  MacroAssembler::call_VM_leaf_base(java_thread, entry_point, number_of_arguments);
}


void InterpreterMacroAssembler::call_VM_base(
  Register oop_result,
  Register java_thread,
  Register last_java_sp,
  address  entry_point,
  int      number_of_arguments,
  bool     check_exception
) {
  if (!java_thread->is_valid())
    java_thread = L7_thread_cache;
  // See class ThreadInVMfromInterpreter, which assumes that the interpreter
  // takes responsibility for setting its own thread-state on call-out.
  // However, ThreadInVMfromInterpreter resets the state to "in_Java".

  //save_bcp();                                  // save bcp
  MacroAssembler::call_VM_base(oop_result, java_thread, last_java_sp, entry_point, number_of_arguments, check_exception);
  //restore_bcp();                               // restore bcp
  //restore_locals();                            // restore locals pointer
}


void InterpreterMacroAssembler::check_and_handle_popframe(Register scratch_reg) {
  if (JvmtiExport::can_pop_frame()) {
    Label L;

    // Check the "pending popframe condition" flag in the current thread
    ld(G2_thread, JavaThread::popframe_condition_offset(), scratch_reg);

    // Initiate popframe handling only if it is not already being processed.  If the flag
    // has the popframe_processing bit set, it means that this code is called *during* popframe
    // handling - we don't want to reenter.
    btst(JavaThread::popframe_pending_bit, scratch_reg);
    br(zero, false, pt, L);
    delayed()->nop();
    btst(JavaThread::popframe_processing_bit, scratch_reg);
    br(notZero, false, pt, L);
    delayed()->nop();

    // Call Interpreter::remove_activation_preserving_args_entry() to get the
    // address of the same-named entrypoint in the generated interpreter code.
    call_VM_leaf(noreg, CAST_FROM_FN_PTR(address, Interpreter::remove_activation_preserving_args_entry));

    // Jump to Interpreter::_remove_activation_preserving_args_entry
    jmpl(O0, G0, G0);
    delayed()->nop();
    bind(L);
  }
}


void InterpreterMacroAssembler::load_earlyret_value(TosState state) {
  Register thr_state = G4_scratch;
  ld_ptr(G2_thread, JavaThread::jvmti_thread_state_offset(), thr_state);
  const Address tos_addr(thr_state, JvmtiThreadState::earlyret_tos_offset());
  const Address oop_addr(thr_state, JvmtiThreadState::earlyret_oop_offset());
  const Address val_addr(thr_state, JvmtiThreadState::earlyret_value_offset());
  switch (state) {
  case ltos: ld_long(val_addr, Otos_l);                   break;
  case atos: ld_ptr(oop_addr, Otos_l);
             st_ptr(G0, oop_addr);                        break;
  case btos:                                     // fall through
  case ctos:                                     // fall through
  case stos:                                     // fall through
  case itos: ld(val_addr, Otos_l1);                       break;
  case ftos: ldf(FloatRegisterImpl::S, val_addr, Ftos_f); break;
  case dtos: ldf(FloatRegisterImpl::D, val_addr, Ftos_d); break;
  case vtos: /* nothing to do */                          break;
  default  : ShouldNotReachHere();
  }
  // Clean up tos value in the jvmti thread state
  or3(G0, ilgl, G3_scratch);
  stw(G3_scratch, tos_addr);
  st_long(G0, val_addr);
  interp_verify_oop(Otos_i, state, __FILE__, __LINE__);
}


void InterpreterMacroAssembler::check_and_handle_earlyret(Register scratch_reg) {
  if (JvmtiExport::can_force_early_return()) {
    Label L;
    Register thr_state = G3_scratch;
    ld_ptr(G2_thread, JavaThread::jvmti_thread_state_offset(), thr_state);
    br_null_short(thr_state, pt, L); // if (thread->jvmti_thread_state() == NULL) exit;

    // Initiate earlyret handling only if it is not already being processed.
    // If the flag has the earlyret_processing bit set, it means that this code
    // is called *during* earlyret handling - we don't want to reenter.
    ld(thr_state, JvmtiThreadState::earlyret_state_offset(), G4_scratch);
    cmp_and_br_short(G4_scratch, JvmtiThreadState::earlyret_pending, Assembler::notEqual, pt, L);

    // Call Interpreter::remove_activation_early_entry() to get the address of the
    // same-named entrypoint in the generated interpreter code
    ld(thr_state, JvmtiThreadState::earlyret_tos_offset(), Otos_l1);
    call_VM_leaf(noreg, CAST_FROM_FN_PTR(address, Interpreter::remove_activation_early_entry), Otos_l1);

    // Jump to Interpreter::_remove_activation_early_entry
    jmpl(O0, G0, G0);
    delayed()->nop();
    bind(L);
  }
}


void InterpreterMacroAssembler::super_call_VM_leaf(Register thread_cache, address entry_point, Register arg_1, Register arg_2) {
  mov(arg_1, O0);
  mov(arg_2, O1);
  MacroAssembler::call_VM_leaf_base(thread_cache, entry_point, 2);
}
#endif /* CC_INTERP */


#ifndef CC_INTERP

void InterpreterMacroAssembler::dispatch_base(TosState state, address* table) {
  assert_not_delayed();
  dispatch_Lbyte_code(state, table);
}


void InterpreterMacroAssembler::dispatch_normal(TosState state) {
  dispatch_base(state, Interpreter::normal_table(state));
}


void InterpreterMacroAssembler::dispatch_only(TosState state) {
  dispatch_base(state, Interpreter::dispatch_table(state));
}


// common code to dispatch and dispatch_only
// dispatch value in Lbyte_code and increment Lbcp

void InterpreterMacroAssembler::dispatch_Lbyte_code(TosState state, address* table, int bcp_incr, bool verify) {
  verify_FPU(1, state);
  // %%%%% maybe implement +VerifyActivationFrameSize here
  //verify_thread(); //too slow; we will just verify on method entry & exit
  if (verify) interp_verify_oop(Otos_i, state, __FILE__, __LINE__);
#ifdef FAST_DISPATCH
  if (table == Interpreter::dispatch_table(state)) {
    // use IdispatchTables
    add(Lbyte_code, Interpreter::distance_from_dispatch_table(state), Lbyte_code);
                                                      // add offset to correct dispatch table
    sll(Lbyte_code, LogBytesPerWord, Lbyte_code);     // multiply by wordSize
    ld_ptr(IdispatchTables, Lbyte_code, G3_scratch);  // get entry addr
  } else {
#endif
    // dispatch table to use
    AddressLiteral tbl(table);
    sll(Lbyte_code, LogBytesPerWord, Lbyte_code);     // multiply by wordSize
    set(tbl, G3_scratch);                             // compute addr of table
    ld_ptr(G3_scratch, Lbyte_code, G3_scratch);       // get entry addr
#ifdef FAST_DISPATCH
  }
#endif
  jmp( G3_scratch, 0 );
  if (bcp_incr != 0)  delayed()->inc(Lbcp, bcp_incr);
  else                delayed()->nop();
}


// Helpers for expression stack

// Longs and doubles are Category 2 computational types in the
// JVM specification (section 3.11.1) and take 2 expression stack or
// local slots.
// Aligning them on 32 bit with tagged stacks is hard because the code generated
// for the dup* bytecodes depends on what types are already on the stack.
// If the types are split into the two stack/local slots, that is much easier
// (and we can use 0 for non-reference tags).
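
// Layout sketch for the helpers below (summary comment, not in the
// original source): each expression stack slot is
// Interpreter::stackElementSize bytes wide, and a category 2 value
// occupies two adjacent slots. On _LP64 a single 64-bit ldx/stx or
// double-precision ldf/stf suffices; on 32-bit platforms the value is
// moved as two 32-bit halves via the register's successor().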

// Known good alignment in _LP64 but unknown otherwise
void InterpreterMacroAssembler::load_unaligned_double(Register r1, int offset, FloatRegister d) {
  assert_not_delayed();

#ifdef _LP64
  ldf(FloatRegisterImpl::D, r1, offset, d);
#else
  ldf(FloatRegisterImpl::S, r1, offset, d);
  ldf(FloatRegisterImpl::S, r1, offset + Interpreter::stackElementSize, d->successor());
#endif
}

// Known good alignment in _LP64 but unknown otherwise
void InterpreterMacroAssembler::store_unaligned_double(FloatRegister d, Register r1, int offset) {
  assert_not_delayed();

#ifdef _LP64
  stf(FloatRegisterImpl::D, d, r1, offset);
  // store something more useful here
  debug_only(stx(G0, r1, offset+Interpreter::stackElementSize);)
#else
  stf(FloatRegisterImpl::S, d, r1, offset);
  stf(FloatRegisterImpl::S, d->successor(), r1, offset + Interpreter::stackElementSize);
#endif
}


// Known good alignment in _LP64 but unknown otherwise
void InterpreterMacroAssembler::load_unaligned_long(Register r1, int offset, Register rd) {
  assert_not_delayed();
#ifdef _LP64
  ldx(r1, offset, rd);
#else
  ld(r1, offset, rd);
  ld(r1, offset + Interpreter::stackElementSize, rd->successor());
#endif
}

// Known good alignment in _LP64 but unknown otherwise
void InterpreterMacroAssembler::store_unaligned_long(Register l, Register r1, int offset) {
  assert_not_delayed();

#ifdef _LP64
  stx(l, r1, offset);
  // store something more useful here
  debug_only(stx(G0, r1, offset+Interpreter::stackElementSize);)
#else
  st(l, r1, offset);
  st(l->successor(), r1, offset + Interpreter::stackElementSize);
#endif
}

void InterpreterMacroAssembler::pop_i(Register r) {
  assert_not_delayed();
  ld(Lesp, Interpreter::expr_offset_in_bytes(0), r);
  inc(Lesp, Interpreter::stackElementSize);
  debug_only(verify_esp(Lesp));
}

void InterpreterMacroAssembler::pop_ptr(Register r, Register scratch) {
  assert_not_delayed();
  ld_ptr(Lesp, Interpreter::expr_offset_in_bytes(0), r);
  inc(Lesp, Interpreter::stackElementSize);
  debug_only(verify_esp(Lesp));
}

void InterpreterMacroAssembler::pop_l(Register r) {
  assert_not_delayed();
  load_unaligned_long(Lesp, Interpreter::expr_offset_in_bytes(0), r);
  inc(Lesp, 2*Interpreter::stackElementSize);
  debug_only(verify_esp(Lesp));
}


void InterpreterMacroAssembler::pop_f(FloatRegister f, Register scratch) {
  assert_not_delayed();
  ldf(FloatRegisterImpl::S, Lesp, Interpreter::expr_offset_in_bytes(0), f);
  inc(Lesp, Interpreter::stackElementSize);
  debug_only(verify_esp(Lesp));
}


void InterpreterMacroAssembler::pop_d(FloatRegister f, Register scratch) {
  assert_not_delayed();
  load_unaligned_double(Lesp, Interpreter::expr_offset_in_bytes(0), f);
  inc(Lesp, 2*Interpreter::stackElementSize);
  debug_only(verify_esp(Lesp));
}


void InterpreterMacroAssembler::push_i(Register r) {
  assert_not_delayed();
  debug_only(verify_esp(Lesp));
  st(r, Lesp, 0);
  dec(Lesp, Interpreter::stackElementSize);
}

void InterpreterMacroAssembler::push_ptr(Register r) {
  assert_not_delayed();
  st_ptr(r, Lesp, 0);
  dec(Lesp, Interpreter::stackElementSize);
}

// remember: our convention for longs in SPARC is:
// O0 (Otos_l1) has high-order part in first word,
// O1 (Otos_l2) has low-order part in second word

void InterpreterMacroAssembler::push_l(Register r) {
  assert_not_delayed();
  debug_only(verify_esp(Lesp));
  // Longs are stored in memory-correct order, even if unaligned.
  int offset = -Interpreter::stackElementSize;
  store_unaligned_long(r, Lesp, offset);
  dec(Lesp, 2 * Interpreter::stackElementSize);
}


void InterpreterMacroAssembler::push_f(FloatRegister f) {
  assert_not_delayed();
  debug_only(verify_esp(Lesp));
  stf(FloatRegisterImpl::S, f, Lesp, 0);
  dec(Lesp, Interpreter::stackElementSize);
}


void InterpreterMacroAssembler::push_d(FloatRegister d) {
  assert_not_delayed();
  debug_only(verify_esp(Lesp));
  // Longs are stored in memory-correct order, even if unaligned.
  int offset = -Interpreter::stackElementSize;
  store_unaligned_double(d, Lesp, offset);
  dec(Lesp, 2 * Interpreter::stackElementSize);
}


void InterpreterMacroAssembler::push(TosState state) {
  interp_verify_oop(Otos_i, state, __FILE__, __LINE__);
  switch (state) {
  case atos: push_ptr();           break;
  case btos: push_i();             break;
  case ctos:
  case stos: push_i();             break;
  case itos: push_i();             break;
  case ltos: push_l();             break;
  case ftos: push_f();             break;
  case dtos: push_d();             break;
  case vtos: /* nothing to do */   break;
  default  : ShouldNotReachHere();
  }
}


void InterpreterMacroAssembler::pop(TosState state) {
  switch (state) {
  case atos: pop_ptr();            break;
  case btos: pop_i();              break;
  case ctos:
  case stos: pop_i();              break;
  case itos: pop_i();              break;
  case ltos: pop_l();              break;
  case ftos: pop_f();              break;
  case dtos: pop_d();              break;
  case vtos: /* nothing to do */   break;
  default  : ShouldNotReachHere();
  }
  interp_verify_oop(Otos_i, state, __FILE__, __LINE__);
}


// Helpers for swap and dup
void InterpreterMacroAssembler::load_ptr(int n, Register val) {
  ld_ptr(Lesp, Interpreter::expr_offset_in_bytes(n), val);
}
void InterpreterMacroAssembler::store_ptr(int n, Register val) {
  st_ptr(val, Lesp, Interpreter::expr_offset_in_bytes(n));
}


void InterpreterMacroAssembler::load_receiver(Register param_count,
                                              Register recv) {
  sll(param_count, Interpreter::logStackElementSize, param_count);
  ld_ptr(Lesp, param_count, recv);  // gets receiver oop
}

void InterpreterMacroAssembler::empty_expression_stack() {
  // Reset Lesp.
  sub( Lmonitors, wordSize, Lesp );

  // Reset SP by subtracting more space from Lesp.
  Label done;
  assert(G4_scratch != Gframe_size, "Only you can prevent register aliasing!");

  // A native does not need to do this, since its callee does not change SP.
  ld(Lmethod, Method::access_flags_offset(), Gframe_size);  // Load access flags.
  btst(JVM_ACC_NATIVE, Gframe_size);
  br(Assembler::notZero, false, Assembler::pt, done);
  delayed()->nop();

  // Compute max expression stack+register save area
  ld_ptr(Lmethod, in_bytes(Method::const_offset()), Gframe_size);
  lduh(Gframe_size, in_bytes(ConstMethod::max_stack_offset()), Gframe_size);  // Load max stack.
  add(Gframe_size, frame::memory_parameter_word_sp_offset+Method::extra_stack_entries(), Gframe_size );

  //
  // now set up a stack frame with the size computed above
  //
  //round_to( Gframe_size, WordsPerLong ); // -- moved down to the "and" below
  sll( Gframe_size, LogBytesPerWord, Gframe_size );
  sub( Lesp, Gframe_size, Gframe_size );
  and3( Gframe_size, -(2 * wordSize), Gframe_size ); // align SP (downwards) to an 8/16-byte boundary
  debug_only(verify_sp(Gframe_size, G4_scratch));
#ifdef _LP64
  sub(Gframe_size, STACK_BIAS, Gframe_size );
#endif
  mov(Gframe_size, SP);

  bind(done);
}


#ifdef ASSERT
void InterpreterMacroAssembler::verify_sp(Register Rsp, Register Rtemp) {
  Label Bad, OK;

  // Saved SP must be aligned.
#ifdef _LP64
  btst(2*BytesPerWord-1, Rsp);
#else
  btst(LongAlignmentMask, Rsp);
#endif
  br(Assembler::notZero, false, Assembler::pn, Bad);
  delayed()->nop();

  // Saved SP, plus register window size, must not be above FP.
  add(Rsp, frame::register_save_words * wordSize, Rtemp);
#ifdef _LP64
  sub(Rtemp, STACK_BIAS, Rtemp);  // Bias Rtemp before cmp to FP
#endif
  cmp_and_brx_short(Rtemp, FP, Assembler::greaterUnsigned, Assembler::pn, Bad);

  // Saved SP must not be ridiculously below current SP.
  size_t maxstack = MAX2(JavaThread::stack_size_at_create(), (size_t) 4*K*K);
  set(maxstack, Rtemp);
  sub(SP, Rtemp, Rtemp);
#ifdef _LP64
  add(Rtemp, STACK_BIAS, Rtemp);  // Unbias Rtemp before cmp to Rsp
#endif
  cmp_and_brx_short(Rsp, Rtemp, Assembler::lessUnsigned, Assembler::pn, Bad);

  ba_short(OK);

  bind(Bad);
  stop("on return to interpreted call, restored SP is corrupted");

  bind(OK);
}


void InterpreterMacroAssembler::verify_esp(Register Resp) {
  // about to read or write Resp[0]
  // make sure it is not in the monitors or the register save area
  Label OK1, OK2;

  cmp(Resp, Lmonitors);
  brx(Assembler::lessUnsigned, true, Assembler::pt, OK1);
  delayed()->sub(Resp, frame::memory_parameter_word_sp_offset * wordSize, Resp);
  stop("too many pops: Lesp points into monitor area");
  bind(OK1);
#ifdef _LP64
  sub(Resp, STACK_BIAS, Resp);
#endif
  cmp(Resp, SP);
  brx(Assembler::greaterEqualUnsigned, false, Assembler::pt, OK2);
  delayed()->add(Resp, STACK_BIAS + frame::memory_parameter_word_sp_offset * wordSize, Resp);
  stop("too many pushes: Lesp points into register window");
  bind(OK2);
}
#endif // ASSERT

// Load compiled (i2c) or interpreter entry when calling from interpreted and
// do the call. Centralized so that all interpreter calls will do the same actions.
// If jvmti single stepping is on for a thread we must not call compiled code.
void InterpreterMacroAssembler::call_from_interpreter(Register target, Register scratch, Register Rret) {

  // Assume we want to go compiled if available

  ld_ptr(G5_method, in_bytes(Method::from_interpreted_offset()), target);

  if (JvmtiExport::can_post_interpreter_events()) {
    // JVMTI events, such as single-stepping, are implemented partly by avoiding running
    // compiled code in threads for which the event is enabled.  Check here for
    // interp_only_mode if these events CAN be enabled.
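    // Summary (comment added for clarity, not in the original source): if
    // the thread's interp_only_mode flag is set, the delay slot of the
    // branch below swaps the call target to the Method's interpreter entry,
    // so a single-stepped thread never lands in compiled code here.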
    verify_thread();
    Label skip_compiled_code;

    const Address interp_only(G2_thread, JavaThread::interp_only_mode_offset());
    ld(interp_only, scratch);
    cmp_zero_and_br(Assembler::notZero, scratch, skip_compiled_code, true, Assembler::pn);
    delayed()->ld_ptr(G5_method, in_bytes(Method::interpreter_entry_offset()), target);
    bind(skip_compiled_code);
  }

  // the i2c_adapters need Method* in G5_method (right? %%%)
  // do the call
#ifdef ASSERT
  {
    Label ok;
    br_notnull_short(target, Assembler::pt, ok);
    stop("null entry point");
    bind(ok);
  }
#endif // ASSERT

  // Adjust Rret first so Llast_SP can be same as Rret
  add(Rret, -frame::pc_return_offset, O7);
  add(Lesp, BytesPerWord, Gargs); // setup parameter pointer
  // Record SP so we can remove any stack space allocated by adapter transition
  jmp(target, 0);
  delayed()->mov(SP, Llast_SP);
}

void InterpreterMacroAssembler::if_cmp(Condition cc, bool ptr_compare) {
  assert_not_delayed();

  Label not_taken;
  if (ptr_compare) brx(cc, false, Assembler::pn, not_taken);
  else             br (cc, false, Assembler::pn, not_taken);
  delayed()->nop();

  TemplateTable::branch(false,false);

  bind(not_taken);

  profile_not_taken_branch(G3_scratch);
}


void InterpreterMacroAssembler::get_2_byte_integer_at_bcp(
                                  int         bcp_offset,
                                  Register    Rtmp,
                                  Register    Rdst,
                                  signedOrNot is_signed,
                                  setCCOrNot  should_set_CC ) {
  assert(Rtmp != Rdst, "need separate temp register");
  assert_not_delayed();
  switch (is_signed) {
   default: ShouldNotReachHere();

   case   Signed:  ldsb( Lbcp, bcp_offset, Rdst );  break;  // high byte
   case Unsigned:  ldub( Lbcp, bcp_offset, Rdst );  break;  // high byte
  }
  ldub( Lbcp, bcp_offset + 1, Rtmp );  // low byte
  sll( Rdst, BitsPerByte, Rdst);
  switch (should_set_CC ) {
   default: ShouldNotReachHere();

   case      set_CC:  orcc( Rdst, Rtmp, Rdst ); break;
   case dont_set_CC:  or3(  Rdst, Rtmp, Rdst ); break;
  }
}


void InterpreterMacroAssembler::get_4_byte_integer_at_bcp(
                                  int        bcp_offset,
                                  Register   Rtmp,
                                  Register   Rdst,
                                  setCCOrNot should_set_CC ) {
  assert(Rtmp != Rdst, "need separate temp register");
  assert_not_delayed();
  add( Lbcp, bcp_offset, Rtmp);
  andcc( Rtmp, 3, G0);
  Label aligned;
  switch (should_set_CC ) {
   default: ShouldNotReachHere();

   case      set_CC: break;
   case dont_set_CC: break;
  }

  br(Assembler::zero, true, Assembler::pn, aligned);
#ifdef _LP64
  delayed()->ldsw(Rtmp, 0, Rdst);
#else
  delayed()->ld(Rtmp, 0, Rdst);
#endif

  ldub(Lbcp, bcp_offset + 3, Rdst);
  ldub(Lbcp, bcp_offset + 2, Rtmp);  sll(Rtmp,  8, Rtmp);  or3(Rtmp, Rdst, Rdst);
  ldub(Lbcp, bcp_offset + 1, Rtmp);  sll(Rtmp, 16, Rtmp);  or3(Rtmp, Rdst, Rdst);
#ifdef _LP64
  ldsb(Lbcp, bcp_offset + 0, Rtmp);  sll(Rtmp, 24, Rtmp);
#else
  // Unsigned load is faster than signed on some implementations
  ldub(Lbcp, bcp_offset + 0, Rtmp);  sll(Rtmp, 24, Rtmp);
#endif
  or3(Rtmp, Rdst, Rdst );

  bind(aligned);
  if (should_set_CC == set_CC) tst(Rdst);
}

void InterpreterMacroAssembler::get_cache_index_at_bcp(Register temp, Register index,
                                                       int bcp_offset, size_t index_size) {
  assert(bcp_offset > 0, "bcp is still pointing to start of bytecode");
  if (index_size == sizeof(u2)) {
    get_2_byte_integer_at_bcp(bcp_offset, temp, index, Unsigned);
  } else if (index_size == sizeof(u4)) {
    get_4_byte_integer_at_bcp(bcp_offset, temp, index);
    assert(ConstantPool::decode_invokedynamic_index(~123) == 123, "else change next line");
    xor3(index, -1, index);  // convert to plain index
  } else if (index_size == sizeof(u1)) {
    ldub(Lbcp, bcp_offset, index);
  } else {
    ShouldNotReachHere();
  }
}


void InterpreterMacroAssembler::get_cache_and_index_at_bcp(Register cache, Register tmp,
                                                           int bcp_offset, size_t index_size) {
  assert(bcp_offset > 0, "bcp is still pointing to start of bytecode");
  assert_different_registers(cache, tmp);
  assert_not_delayed();
  get_cache_index_at_bcp(cache, tmp, bcp_offset, index_size);
  // convert from field index to ConstantPoolCacheEntry index and from
  // word index to byte offset
  sll(tmp, exact_log2(in_words(ConstantPoolCacheEntry::size()) * BytesPerWord), tmp);
  add(LcpoolCache, tmp, cache);
}


void InterpreterMacroAssembler::get_cache_and_index_and_bytecode_at_bcp(Register cache,
                                                                        Register temp,
                                                                        Register bytecode,
                                                                        int byte_no,
                                                                        int bcp_offset,
                                                                        size_t index_size) {
  get_cache_and_index_at_bcp(cache, temp, bcp_offset, index_size);
  ld_ptr(cache, ConstantPoolCache::base_offset() + ConstantPoolCacheEntry::indices_offset(), bytecode);
  const int shift_count = (1 + byte_no) * BitsPerByte;
  assert((byte_no == TemplateTable::f1_byte && shift_count == ConstantPoolCacheEntry::bytecode_1_shift) ||
         (byte_no == TemplateTable::f2_byte && shift_count == ConstantPoolCacheEntry::bytecode_2_shift),
         "correct shift count");
  srl(bytecode, shift_count, bytecode);
  assert(ConstantPoolCacheEntry::bytecode_1_mask == ConstantPoolCacheEntry::bytecode_2_mask, "common mask");
  and3(bytecode, ConstantPoolCacheEntry::bytecode_1_mask, bytecode);
}


void InterpreterMacroAssembler::get_cache_entry_pointer_at_bcp(Register cache, Register tmp,
                                                               int bcp_offset, size_t index_size) {
  assert(bcp_offset > 0, "bcp is still pointing to start of bytecode");
  assert_different_registers(cache, tmp);
  assert_not_delayed();
  if (index_size == sizeof(u2)) {
    get_2_byte_integer_at_bcp(bcp_offset, cache, tmp, Unsigned);
  } else {
    ShouldNotReachHere();  // other sizes not supported here
  }
  // convert from field index to ConstantPoolCacheEntry index
  // and from word index to byte offset
  sll(tmp, exact_log2(in_words(ConstantPoolCacheEntry::size()) * BytesPerWord), tmp);
  // skip past the header
  add(tmp, in_bytes(ConstantPoolCache::base_offset()), tmp);
  // construct pointer to cache entry
  add(LcpoolCache, tmp, cache);
}


// Load object from cpool->resolved_references(index)
void InterpreterMacroAssembler::load_resolved_reference_at_index(
                                           Register result, Register index) {
  assert_different_registers(result, index);
  assert_not_delayed();
  // convert from field index to resolved_references() index and from
  // word index to byte offset. Since this is a java object, it can be compressed
  Register tmp = index;  // reuse
  sll(index, LogBytesPerHeapOop, tmp);
  get_constant_pool(result);
  // load pointer for resolved_references[] objArray
  ld_ptr(result, ConstantPool::resolved_references_offset_in_bytes(), result);
  // JNIHandles::resolve(result)
  ld_ptr(result, 0, result);
  // Add in the index
  add(result, tmp, result);
  load_heap_oop(result, arrayOopDesc::base_offset_in_bytes(T_OBJECT), result);
}


// Generate a subtype check: branch to ok_is_subtype if sub_klass is
// a subtype of super_klass.  Blows registers Rsuper_klass, Rsub_klass, tmp1, tmp2.
void InterpreterMacroAssembler::gen_subtype_check(Register Rsub_klass,
                                                  Register Rsuper_klass,
                                                  Register Rtmp1,
                                                  Register Rtmp2,
                                                  Register Rtmp3,
                                                  Label &ok_is_subtype ) {
  Label not_subtype;

  // Profile the not-null value's klass.
  profile_typecheck(Rsub_klass, Rtmp1);

  check_klass_subtype_fast_path(Rsub_klass, Rsuper_klass,
                                Rtmp1, Rtmp2,
                                &ok_is_subtype, &not_subtype, NULL);

  check_klass_subtype_slow_path(Rsub_klass, Rsuper_klass,
                                Rtmp1, Rtmp2, Rtmp3, /*hack:*/ noreg,
                                &ok_is_subtype, NULL);

  bind(not_subtype);
  profile_typecheck_failed(Rtmp1);
}

// Separate these two to allow for delay slot in middle
// These are used to do a test and full jump to exception-throwing code.

// %%%%% Could possibly reoptimize this by testing to see if we could use
// a single conditional branch (i.e. if the span is small enough).
// If you go that route, then get rid of the split and give up
// on the delay-slot hack.

void InterpreterMacroAssembler::throw_if_not_1_icc( Condition ok_condition,
                                                    Label&    ok ) {
  assert_not_delayed();
  br(ok_condition, true, pt, ok);
  // DELAY SLOT
}

void InterpreterMacroAssembler::throw_if_not_1_xcc( Condition ok_condition,
                                                    Label&    ok ) {
  assert_not_delayed();
  bp( ok_condition, true, Assembler::xcc, pt, ok);
  // DELAY SLOT
}

void InterpreterMacroAssembler::throw_if_not_1_x( Condition ok_condition,
                                                  Label&    ok ) {
  assert_not_delayed();
  brx(ok_condition, true, pt, ok);
  // DELAY SLOT
}

void InterpreterMacroAssembler::throw_if_not_2( address  throw_entry_point,
                                                Register Rscratch,
                                                Label&   ok ) {
  assert(throw_entry_point != NULL, "entry point must be generated by now");
  AddressLiteral dest(throw_entry_point);
  jump_to(dest, Rscratch);
  delayed()->nop();
  bind(ok);
}


// And if you cannot use the delay slot, here is a shorthand:

void InterpreterMacroAssembler::throw_if_not_icc( Condition ok_condition,
                                                  address   throw_entry_point,
                                                  Register  Rscratch ) {
  Label ok;
  if (ok_condition != never) {
    throw_if_not_1_icc( ok_condition, ok);
    delayed()->nop();
  }
  throw_if_not_2( throw_entry_point, Rscratch, ok);
}
void InterpreterMacroAssembler::throw_if_not_xcc( Condition ok_condition,
                                                  address   throw_entry_point,
                                                  Register  Rscratch ) {
  Label ok;
  if (ok_condition != never) {
    throw_if_not_1_xcc( ok_condition, ok);
    delayed()->nop();
  }
  throw_if_not_2( throw_entry_point, Rscratch, ok);
}
void InterpreterMacroAssembler::throw_if_not_x( Condition ok_condition,
                                                address   throw_entry_point,
                                                Register  Rscratch ) {
  Label ok;
  if (ok_condition != never) {
    throw_if_not_1_x( ok_condition, ok);
    delayed()->nop();
  }
  throw_if_not_2( throw_entry_point, Rscratch, ok);
}

// Check that index is in range for array, then shift index by index_shift, and put arrayOop + shifted_index into res
// Note: res is still shy of address by array offset into object.

void InterpreterMacroAssembler::index_check_without_pop(Register array, Register index, int index_shift, Register tmp, Register res) {
  assert_not_delayed();

  verify_oop(array);
#ifdef _LP64
  // sign extend since tos (index) can be a 32bit value
  sra(index, G0, index);
#endif // _LP64

  // check array
  Label ptr_ok;
  tst(array);
  throw_if_not_1_x( notZero, ptr_ok );
  delayed()->ld( array, arrayOopDesc::length_offset_in_bytes(), tmp ); // check index
  throw_if_not_2( Interpreter::_throw_NullPointerException_entry, G3_scratch, ptr_ok);

  Label index_ok;
  cmp(index, tmp);
  throw_if_not_1_icc( lessUnsigned, index_ok );
  if (index_shift > 0)  delayed()->sll(index, index_shift, index);
  else                  delayed()->add(array, index, res); // addr - const offset in index
  // convention: move aberrant index into G3_scratch for exception message
  mov(index, G3_scratch);
  throw_if_not_2( Interpreter::_throw_ArrayIndexOutOfBoundsException_entry, G4_scratch, index_ok);

  // add offset if didn't do it in delay slot
  if (index_shift > 0)  add(array, index, res); // addr - const offset in index
}


void InterpreterMacroAssembler::index_check(Register array, Register index, int index_shift, Register tmp, Register res) {
  assert_not_delayed();

  // pop array
  pop_ptr(array);

  // check array
  index_check_without_pop(array, index, index_shift, tmp, res);
}


void InterpreterMacroAssembler::get_const(Register Rdst) {
  ld_ptr(Lmethod, in_bytes(Method::const_offset()), Rdst);
}


void InterpreterMacroAssembler::get_constant_pool(Register Rdst) {
  get_const(Rdst);
  ld_ptr(Rdst, in_bytes(ConstMethod::constants_offset()), Rdst);
}


void InterpreterMacroAssembler::get_constant_pool_cache(Register Rdst) {
  get_constant_pool(Rdst);
  ld_ptr(Rdst, ConstantPool::cache_offset_in_bytes(), Rdst);
}


void InterpreterMacroAssembler::get_cpool_and_tags(Register Rcpool, Register Rtags) {
  get_constant_pool(Rcpool);
  ld_ptr(Rcpool, ConstantPool::tags_offset_in_bytes(), Rtags);
}


// unlock if synchronized method
//
// Unlock the receiver if this is a synchronized method.
// Unlock any Java monitors from synchronized blocks.
//
// If there are locked Java monitors
//    If throw_monitor_exception
//       throws IllegalMonitorStateException
//    Else if install_monitor_exception
//       installs IllegalMonitorStateException
//    Else
//       no error processing
void InterpreterMacroAssembler::unlock_if_synchronized_method(TosState state,
                                                              bool throw_monitor_exception,
                                                              bool install_monitor_exception) {
  Label unlocked, unlock, no_unlock;

  // get the value of _do_not_unlock_if_synchronized into G1_scratch
  const Address do_not_unlock_if_synchronized(G2_thread,
    JavaThread::do_not_unlock_if_synchronized_offset());
  ldbool(do_not_unlock_if_synchronized, G1_scratch);
  stbool(G0, do_not_unlock_if_synchronized); // reset the flag

  // check if synchronized method
  const Address access_flags(Lmethod, Method::access_flags_offset());
  interp_verify_oop(Otos_i, state, __FILE__, __LINE__);
  push(state); // save tos
  ld(access_flags, G3_scratch); // Load access flags.
  btst(JVM_ACC_SYNCHRONIZED, G3_scratch);
  br(zero, false, pt, unlocked);
  delayed()->nop();

  // Don't unlock anything if the _do_not_unlock_if_synchronized flag
  // is set.
  cmp_zero_and_br(Assembler::notZero, G1_scratch, no_unlock);
  delayed()->nop();

  // BasicObjectLock will be first in list, since this is a synchronized method. However, need
  // to check that the object has not been unlocked by an explicit monitorexit bytecode.

  //Intel: if (throw_monitor_exception) ... else ...
  // Entry already unlocked, need to throw exception
  //...

  // pass top-most monitor elem
  add( top_most_monitor(), O1 );

  ld_ptr(O1, BasicObjectLock::obj_offset_in_bytes(), G3_scratch);
  br_notnull_short(G3_scratch, pt, unlock);

  if (throw_monitor_exception) {
    // Entry already unlocked need to throw an exception
    MacroAssembler::call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_illegal_monitor_state_exception));
    should_not_reach_here();
  } else {
    // Monitor already unlocked during a stack unroll.
    // If requested, install an illegal_monitor_state_exception.
    // Continue with stack unrolling.
    if (install_monitor_exception) {
      MacroAssembler::call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::new_illegal_monitor_state_exception));
    }
    ba_short(unlocked);
  }

  bind(unlock);

  unlock_object(O1);

  bind(unlocked);

  // I0, I1: Might contain return value

  // Check that all monitors are unlocked
  { Label loop, exception, entry, restart;

    Register Rmptr  = O0;
    Register Rtemp  = O1;
    Register Rlimit = Lmonitors;
    const jint delta = frame::interpreter_frame_monitor_size() * wordSize;
    assert( (delta & LongAlignmentMask) == 0,
            "sizeof BasicObjectLock must be even number of doublewords");

#ifdef ASSERT
    add(top_most_monitor(), Rmptr, delta);
    { Label L;
      // ensure that Rmptr starts out above (or at) Rlimit
      cmp_and_brx_short(Rmptr, Rlimit, Assembler::greaterEqualUnsigned, pn, L);
      stop("monitor stack has negative size");
      bind(L);
    }
#endif
    bind(restart);
    ba(entry);
    delayed()->
    add(top_most_monitor(), Rmptr, delta);      // points to current entry, starting with bottom-most entry

    // Entry is still locked, need to throw exception
    bind(exception);
    if (throw_monitor_exception) {
      MacroAssembler::call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_illegal_monitor_state_exception));
      should_not_reach_here();
    } else {
      // Stack unrolling. Unlock object and if requested, install illegal_monitor_exception.
      // Unlock does not block, so don't have to worry about the frame
      unlock_object(Rmptr);
      if (install_monitor_exception) {
        MacroAssembler::call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::new_illegal_monitor_state_exception));
      }
      ba_short(restart);
    }

    bind(loop);
    cmp(Rtemp, G0);                             // check if current entry is used
    brx(Assembler::notEqual, false, pn, exception);
    delayed()->
    dec(Rmptr, delta);                          // otherwise advance to next entry
#ifdef ASSERT
    { Label L;
      // ensure that Rmptr has not somehow stepped below Rlimit
      cmp_and_brx_short(Rmptr, Rlimit, Assembler::greaterEqualUnsigned, pn, L);
      stop("ran off the end of the monitor stack");
      bind(L);
    }
#endif
    bind(entry);
    cmp(Rmptr, Rlimit);                         // check if bottom reached
    brx(Assembler::notEqual, true, pn, loop);   // if not at bottom then check this entry
    delayed()->
    ld_ptr(Rmptr, BasicObjectLock::obj_offset_in_bytes() - delta, Rtemp);
  }

  bind(no_unlock);
  pop(state);
  interp_verify_oop(Otos_i, state, __FILE__, __LINE__);
}


// remove activation
//
// Unlock the receiver if this is a synchronized method.
// Unlock any Java monitors from synchronized blocks.
// Remove the activation from the stack.
//
// If there are locked Java monitors
//    If throw_monitor_exception
//       throws IllegalMonitorStateException
//    Else if install_monitor_exception
//       installs IllegalMonitorStateException
//    Else
//       no error processing
void InterpreterMacroAssembler::remove_activation(TosState state,
                                                  bool throw_monitor_exception,
                                                  bool install_monitor_exception) {

  unlock_if_synchronized_method(state, throw_monitor_exception, install_monitor_exception);

  // save result (push state before jvmti call and pop it afterwards) and notify jvmti
  notify_method_exit(false, state, NotifyJVMTI);

  interp_verify_oop(Otos_i, state, __FILE__, __LINE__);
  verify_thread();

  // return tos
  assert(Otos_l1 == Otos_i, "adjust code below");
  switch (state) {
#ifdef _LP64
  case ltos: mov(Otos_l, Otos_l->after_save()); break; // O0 -> I0
#else
  case ltos: mov(Otos_l2, Otos_l2->after_save()); // fall through  // O1 -> I1
#endif
  case btos:                                      // fall through
  case ctos:
  case stos:                                      // fall through
  case atos:                                      // fall through
  case itos: mov(Otos_l1, Otos_l1->after_save()); break; // O0 -> I0
  case ftos:                                      // fall through
  case dtos:                                      // fall through
  case vtos: /* nothing to do */                  break;
  default  : ShouldNotReachHere();
  }

#if defined(COMPILER2) && !defined(_LP64)
  if (state == ltos) {
    // C2 expects long results in G1; we can't tell if we're returning to
    // interpreted or compiled code, so to be safe we use both G1 and O0/O1.

    // Shift bits into high (msb) of G1
    sllx(Otos_l1->after_save(), 32, G1);
    // Zero extend low bits
    srl (Otos_l2->after_save(), 0, Otos_l2->after_save());
    or3 (Otos_l2->after_save(), G1, G1);
  }
#endif /* COMPILER2 */

}
#endif /* CC_INTERP */


// Lock object
//
// Argument - lock_reg points to the BasicObjectLock to be used for locking,
//            it must be initialized with the object to lock
void InterpreterMacroAssembler::lock_object(Register lock_reg, Register Object) {
  if (UseHeavyMonitors) {
    call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::monitorenter), lock_reg);
  }
  else {
    Register obj_reg = Object;
    Register mark_reg = G4_scratch;
    Register temp_reg = G1_scratch;
    Address lock_addr(lock_reg, BasicObjectLock::lock_offset_in_bytes());
    Address mark_addr(obj_reg, oopDesc::mark_offset_in_bytes());
    Label done;

    Label slow_case;

    assert_different_registers(lock_reg, obj_reg, mark_reg, temp_reg);

    // load markOop from object into mark_reg
    ld_ptr(mark_addr, mark_reg);

    if (UseBiasedLocking) {
      biased_locking_enter(obj_reg, mark_reg, temp_reg, done, &slow_case);
    }

    // get the address of basicLock on stack that will be stored in the object
    // we need a temporary register here as we do not want to clobber lock_reg
    // (cas clobbers the destination register)
    mov(lock_reg, temp_reg);
    // set mark reg to be (markOop of object | UNLOCK_VALUE)
    or3(mark_reg, markOopDesc::unlocked_value, mark_reg);
    // initialize the box (Must happen before we update the object mark!)
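    // Fast-path sketch (summary comment, not in the original source):
    //   box->displaced_header = mark | unlocked_value;   // stored just below
    //   if (CAS(&obj->mark, mark | unlocked_value, box) succeeds) -> done
    //   else if the mark already points into our own stack -> recursive
    //     lock, store 0 in the displaced header and we are done
    //   else -> slow_case (InterpreterRuntime::monitorenter)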
    st_ptr(mark_reg, lock_addr, BasicLock::displaced_header_offset_in_bytes());
    // compare and exchange object_addr, markOop | 1, stack address of basicLock
    assert(mark_addr.disp() == 0, "cas must take a zero displacement");
    cas_ptr(mark_addr.base(), mark_reg, temp_reg);

    // if the compare and exchange succeeded we are done (we saw an unlocked object)
    cmp_and_brx_short(mark_reg, temp_reg, Assembler::equal, Assembler::pt, done);

    // We did not see an unlocked object so try the fast recursive case

    // Check if owner is self by comparing the value in the markOop of object
    // with the stack pointer
    sub(temp_reg, SP, temp_reg);
#ifdef _LP64
    sub(temp_reg, STACK_BIAS, temp_reg);
#endif
    assert(os::vm_page_size() > 0xfff, "page size too small - change the constant");

    // Composite "andcc" test:
    // (a) %sp -vs- markword proximity check, and,
    // (b) verify mark word LSBs == 0 (Stack-locked).
    //
    // FFFFF003/FFFFFFFFFFFF003 is (markOopDesc::lock_mask_in_place | -os::vm_page_size())
    // Note that the page size used for %sp proximity testing is arbitrary and is
    // unrelated to the actual MMU page size.  We use a 'logical' page size of
    // 4096 bytes.  F..FFF003 is designed to fit conveniently in the SIMM13 immediate
    // field of the andcc instruction.
    andcc (temp_reg, 0xFFFFF003, G0) ;

    // if condition is true we are done and hence we can store 0 in the displaced
    // header indicating it is a recursive lock and be done
    brx(Assembler::zero, true, Assembler::pt, done);
    delayed()->st_ptr(G0, lock_addr, BasicLock::displaced_header_offset_in_bytes());

    // none of the above fast optimizations worked so we have to get into the
    // slow case of monitor enter
    bind(slow_case);
    call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::monitorenter), lock_reg);

    bind(done);
  }
}

// Unlocks an object. Used in monitorexit bytecode and remove_activation.
//
// Argument - lock_reg points to the BasicObjectLock for lock
// Throw IllegalMonitorStateException if object is not locked by current thread
void InterpreterMacroAssembler::unlock_object(Register lock_reg) {
  if (UseHeavyMonitors) {
    call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::monitorexit), lock_reg);
  } else {
    Register obj_reg = G3_scratch;
    Register mark_reg = G4_scratch;
    Register displaced_header_reg = G1_scratch;
    Address lockobj_addr(lock_reg, BasicObjectLock::obj_offset_in_bytes());
    Address mark_addr(obj_reg, oopDesc::mark_offset_in_bytes());
    Label done;

    if (UseBiasedLocking) {
      // load the object out of the BasicObjectLock
      ld_ptr(lockobj_addr, obj_reg);
      biased_locking_exit(mark_addr, mark_reg, done, true);
      st_ptr(G0, lockobj_addr);  // free entry
    }

    // Test first if we are in the fast recursive case
    Address lock_addr(lock_reg, BasicObjectLock::lock_offset_in_bytes() + BasicLock::displaced_header_offset_in_bytes());
    ld_ptr(lock_addr, displaced_header_reg);
    br_null(displaced_header_reg, true, Assembler::pn, done);
    delayed()->st_ptr(G0, lockobj_addr);  // free entry

    // See if it is still a light weight lock, if so we just unlock
    // the object and we are done

    if (!UseBiasedLocking) {
      // load the object out of the BasicObjectLock
      ld_ptr(lockobj_addr, obj_reg);
    }

    // we have the displaced header in displaced_header_reg
    // we expect to see the stack address of the basicLock in case the
    // lock is still a light weight lock (lock_reg)
    assert(mark_addr.disp() == 0, "cas must take a zero displacement");
    cas_ptr(mark_addr.base(), lock_reg, displaced_header_reg);
    cmp(lock_reg, displaced_header_reg);
    brx(Assembler::equal, true, Assembler::pn, done);
    delayed()->st_ptr(G0, lockobj_addr);  // free entry

    // The lock has been converted into a heavy lock and hence
    // we need to get into the slow case

    call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::monitorexit), lock_reg);

    bind(done);
  }
}

#ifndef CC_INTERP

// Get the method data pointer from the Method* and set the
// specified register to its value.

void InterpreterMacroAssembler::set_method_data_pointer() {
  assert(ProfileInterpreter, "must be profiling interpreter");
  Label get_continue;

  ld_ptr(Lmethod, in_bytes(Method::method_data_offset()), ImethodDataPtr);
  test_method_data_pointer(get_continue);
  add(ImethodDataPtr, in_bytes(MethodData::data_offset()), ImethodDataPtr);
  bind(get_continue);
}

// Set the method data pointer for the current bcp.

void InterpreterMacroAssembler::set_method_data_pointer_for_bcp() {
  assert(ProfileInterpreter, "must be profiling interpreter");
  Label zero_continue;

  // Test MDO to avoid the call if it is NULL.
  ld_ptr(Lmethod, in_bytes(Method::method_data_offset()), ImethodDataPtr);
  test_method_data_pointer(zero_continue);
  call_VM_leaf(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::bcp_to_di), Lmethod, Lbcp);
  add(ImethodDataPtr, in_bytes(MethodData::data_offset()), ImethodDataPtr);
  add(ImethodDataPtr, O0, ImethodDataPtr);
  bind(zero_continue);
}

// Test ImethodDataPtr. If it is null, continue at the specified label

void InterpreterMacroAssembler::test_method_data_pointer(Label& zero_continue) {
  assert(ProfileInterpreter, "must be profiling interpreter");
  br_null_short(ImethodDataPtr, Assembler::pn, zero_continue);
}

void InterpreterMacroAssembler::verify_method_data_pointer() {
  assert(ProfileInterpreter, "must be profiling interpreter");
#ifdef ASSERT
  Label verify_continue;
  test_method_data_pointer(verify_continue);

  // If the mdp is valid, it will point to a DataLayout header which is
  // consistent with the bcp.  The converse is highly probable also.
  lduh(ImethodDataPtr, in_bytes(DataLayout::bci_offset()), G3_scratch);
  ld_ptr(Lmethod, Method::const_offset(), O5);
  add(G3_scratch, in_bytes(ConstMethod::codes_offset()), G3_scratch);
  add(G3_scratch, O5, G3_scratch);
  cmp(Lbcp, G3_scratch);
  brx(Assembler::equal, false, Assembler::pt, verify_continue);

  Register temp_reg = O5;
  delayed()->mov(ImethodDataPtr, temp_reg);
  // %%% should use call_VM_leaf here?
  //call_VM_leaf(noreg, ..., Lmethod, Lbcp, ImethodDataPtr);
  save_frame_and_mov(sizeof(jdouble) / wordSize, Lmethod, O0, Lbcp, O1);
  Address d_save(FP, -sizeof(jdouble) + STACK_BIAS);
  stf(FloatRegisterImpl::D, Ftos_d, d_save);
  mov(temp_reg->after_save(), O2);
  save_thread(L7_thread_cache);
  call(CAST_FROM_FN_PTR(address, InterpreterRuntime::verify_mdp), relocInfo::none);
  delayed()->nop();
  restore_thread(L7_thread_cache);
  ldf(FloatRegisterImpl::D, d_save, Ftos_d);
  restore();
  bind(verify_continue);
#endif // ASSERT
}

void InterpreterMacroAssembler::test_invocation_counter_for_mdp(Register invocation_count,
                                                                Register method_counters,
                                                                Register Rtmp,
                                                                Label &profile_continue) {
  assert(ProfileInterpreter, "must be profiling interpreter");
  // Control will flow to "profile_continue" if the counter is less than the
  // limit or if we call profile_method()

  Label done;

  // if no method data exists, and the counter is high enough, make one
  br_notnull_short(ImethodDataPtr, Assembler::pn, done);

  // Test to see if we should create a method data oop
  Address profile_limit(method_counters, MethodCounters::interpreter_profile_limit_offset());
  ld(profile_limit, Rtmp);
  cmp(invocation_count, Rtmp);
  // Use long branches because call_VM() code and following code generated by
  // test_backedge_count_for_osr() is large in debug VM.
  br(Assembler::lessUnsigned, false, Assembler::pn, profile_continue);
  delayed()->nop();

  // Build it now.
  call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::profile_method));
  set_method_data_pointer_for_bcp();
  ba(profile_continue);
  delayed()->nop();
  bind(done);
}

// Store a value at some constant offset from the method data pointer.

void InterpreterMacroAssembler::set_mdp_data_at(int constant, Register value) {
  assert(ProfileInterpreter, "must be profiling interpreter");
  st_ptr(value, ImethodDataPtr, constant);
}

void InterpreterMacroAssembler::increment_mdp_data_at(Address counter,
                                                      Register bumped_count,
                                                      bool decrement) {
  assert(ProfileInterpreter, "must be profiling interpreter");

  // Load the counter.
  ld_ptr(counter, bumped_count);

  if (decrement) {
    // Decrement the register.  Set condition codes.
    subcc(bumped_count, DataLayout::counter_increment, bumped_count);

    // If the decrement causes the counter to overflow, stay negative
    Label L;
    brx(Assembler::negative, true, Assembler::pn, L);

    // Store the decremented counter, if it is still negative.
    delayed()->st_ptr(bumped_count, counter);
    bind(L);
  } else {
    // Increment the register.  Set carry flag.
    addcc(bumped_count, DataLayout::counter_increment, bumped_count);

    // If the increment causes the counter to overflow, pull back by 1.
    assert(DataLayout::counter_increment == 1, "subc works");
    subc(bumped_count, G0, bumped_count);

    // Store the incremented counter.
    st_ptr(bumped_count, counter);
  }
}

// Increment the value at some constant offset from the method data pointer.

void InterpreterMacroAssembler::increment_mdp_data_at(int constant,
                                                      Register bumped_count,
                                                      bool decrement) {
  // Locate the counter at a fixed offset from the mdp:
  Address counter(ImethodDataPtr, constant);
  increment_mdp_data_at(counter, bumped_count, decrement);
}

// Increment the value at some non-fixed (reg + constant) offset from
// the method data pointer.

void InterpreterMacroAssembler::increment_mdp_data_at(Register reg,
                                                      int constant,
                                                      Register bumped_count,
                                                      Register scratch2,
                                                      bool decrement) {
  // Add the constant to reg to get the offset.
  add(ImethodDataPtr, reg, scratch2);
  Address counter(scratch2, constant);
  increment_mdp_data_at(counter, bumped_count, decrement);
}

// Set a flag value at the current method data pointer position.
// Updates a single byte of the header, to avoid races with other header bits.

void InterpreterMacroAssembler::set_mdp_flag_at(int flag_constant,
                                                Register scratch) {
  assert(ProfileInterpreter, "must be profiling interpreter");
  // Load the data header
  ldub(ImethodDataPtr, in_bytes(DataLayout::flags_offset()), scratch);

  // Set the flag
  or3(scratch, flag_constant, scratch);

  // Store the modified header.
  stb(scratch, ImethodDataPtr, in_bytes(DataLayout::flags_offset()));
}

// Test the location at some offset from the method data pointer.
// If it is not equal to value, branch to the not_equal_continue Label.
// Set condition codes to match the nullness of the loaded value.

void InterpreterMacroAssembler::test_mdp_data_at(int offset,
                                                 Register value,
                                                 Label& not_equal_continue,
                                                 Register scratch) {
  assert(ProfileInterpreter, "must be profiling interpreter");
  ld_ptr(ImethodDataPtr, offset, scratch);
  cmp(value, scratch);
  brx(Assembler::notEqual, false, Assembler::pn, not_equal_continue);
  delayed()->tst(scratch);
}

// Update the method data pointer by the displacement located at some fixed
// offset from the method data pointer.

void InterpreterMacroAssembler::update_mdp_by_offset(int offset_of_disp,
                                                     Register scratch) {
  assert(ProfileInterpreter, "must be profiling interpreter");
  ld_ptr(ImethodDataPtr, offset_of_disp, scratch);
  add(ImethodDataPtr, scratch, ImethodDataPtr);
}

// Update the method data pointer by the displacement located at the
// offset (reg + offset_of_disp).
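// In effect (summary comment, not in the original source):
//   ImethodDataPtr += *(intptr_t*)(ImethodDataPtr + reg + offset_of_disp);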

// Update the method data pointer by the displacement located at the
// offset (reg + offset_of_disp).

void InterpreterMacroAssembler::update_mdp_by_offset(Register reg,
                                                     int offset_of_disp,
                                                     Register scratch) {
  assert(ProfileInterpreter, "must be profiling interpreter");
  add(reg, offset_of_disp, scratch);
  ld_ptr(ImethodDataPtr, scratch, scratch);
  add(ImethodDataPtr, scratch, ImethodDataPtr);
}

// Update the method data pointer by a simple constant displacement.

void InterpreterMacroAssembler::update_mdp_by_constant(int constant) {
  assert(ProfileInterpreter, "must be profiling interpreter");
  add(ImethodDataPtr, constant, ImethodDataPtr);
}

// Update the method data pointer for a _ret bytecode whose target
// was not among our cached targets.

void InterpreterMacroAssembler::update_mdp_for_ret(TosState state,
                                                   Register return_bci) {
  assert(ProfileInterpreter, "must be profiling interpreter");
  push(state);
  st_ptr(return_bci, l_tmp);  // protect return_bci, in case it is volatile
  call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::update_mdp_for_ret), return_bci);
  ld_ptr(l_tmp, return_bci);
  pop(state);
}

// Count a taken branch in the bytecodes.

void InterpreterMacroAssembler::profile_taken_branch(Register scratch, Register bumped_count) {
  if (ProfileInterpreter) {
    Label profile_continue;

    // If no method data exists, go to profile_continue.
    test_method_data_pointer(profile_continue);

    // We are taking a branch.  Increment the taken count.
    increment_mdp_data_at(in_bytes(JumpData::taken_offset()), bumped_count);

    // The method data pointer needs to be updated to reflect the new target.
    update_mdp_by_offset(in_bytes(JumpData::displacement_offset()), scratch);
    bind(profile_continue);
  }
}


// Count a not-taken branch in the bytecodes.

void InterpreterMacroAssembler::profile_not_taken_branch(Register scratch) {
  if (ProfileInterpreter) {
    Label profile_continue;

    // If no method data exists, go to profile_continue.
    test_method_data_pointer(profile_continue);

    // We are not taking a branch.  Increment the not taken count.
    increment_mdp_data_at(in_bytes(BranchData::not_taken_offset()), scratch);

    // The method data pointer needs to be updated to correspond to the
    // next bytecode.
    update_mdp_by_constant(in_bytes(BranchData::branch_data_size()));
    bind(profile_continue);
  }
}
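
// Note the asymmetry above: a taken branch moves the mdp by a displacement
// recorded in the JumpData (so the profile mirrors control flow), while a
// not-taken branch simply steps over the BranchData to the record for the
// next bytecode in sequence.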

// Count a non-virtual call in the bytecodes.

void InterpreterMacroAssembler::profile_call(Register scratch) {
  if (ProfileInterpreter) {
    Label profile_continue;

    // If no method data exists, go to profile_continue.
    test_method_data_pointer(profile_continue);

    // We are making a call.  Increment the count.
    increment_mdp_data_at(in_bytes(CounterData::count_offset()), scratch);

    // The method data pointer needs to be updated to reflect the new target.
    update_mdp_by_constant(in_bytes(CounterData::counter_data_size()));
    bind(profile_continue);
  }
}


// Count a final call in the bytecodes.

void InterpreterMacroAssembler::profile_final_call(Register scratch) {
  if (ProfileInterpreter) {
    Label profile_continue;

    // If no method data exists, go to profile_continue.
    test_method_data_pointer(profile_continue);

    // We are making a call.  Increment the count.
    increment_mdp_data_at(in_bytes(CounterData::count_offset()), scratch);

    // The method data pointer needs to be updated to reflect the new target.
    update_mdp_by_constant(in_bytes(VirtualCallData::virtual_call_data_size()));
    bind(profile_continue);
  }
}


// Count a virtual call in the bytecodes.

void InterpreterMacroAssembler::profile_virtual_call(Register receiver,
                                                     Register scratch,
                                                     bool receiver_can_be_null) {
  if (ProfileInterpreter) {
    Label profile_continue;

    // If no method data exists, go to profile_continue.
    test_method_data_pointer(profile_continue);

    Label skip_receiver_profile;
    if (receiver_can_be_null) {
      Label not_null;
      br_notnull_short(receiver, Assembler::pt, not_null);
      // We are making a call.  Increment the count for the null receiver.
      increment_mdp_data_at(in_bytes(CounterData::count_offset()), scratch);
      ba_short(skip_receiver_profile);
      bind(not_null);
    }

    // Record the receiver type.
    record_klass_in_profile(receiver, scratch, true);
    bind(skip_receiver_profile);

    // The method data pointer needs to be updated to reflect the new target.
#if INCLUDE_JVMCI
    if (MethodProfileWidth == 0) {
      update_mdp_by_constant(in_bytes(VirtualCallData::virtual_call_data_size()));
    }
#else
    update_mdp_by_constant(in_bytes(VirtualCallData::virtual_call_data_size()));
#endif
    bind(profile_continue);
  }
}
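
// With JVMCI included and MethodProfileWidth > 0, the mdp is deliberately
// left in place here: profile_called_method() below records the callee and
// performs the final update past the VirtualCallData instead.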

#if INCLUDE_JVMCI
void InterpreterMacroAssembler::profile_called_method(Register method, Register scratch) {
  assert_different_registers(method, scratch);
  if (ProfileInterpreter && MethodProfileWidth > 0) {
    Label profile_continue;

    // If no method data exists, go to profile_continue.
    test_method_data_pointer(profile_continue);

    Label done;
    record_item_in_profile_helper(method, scratch, 0, done, MethodProfileWidth,
      &VirtualCallData::method_offset, &VirtualCallData::method_count_offset, in_bytes(VirtualCallData::nonprofiled_receiver_count_offset()));
    bind(done);

    update_mdp_by_constant(in_bytes(VirtualCallData::virtual_call_data_size()));
    bind(profile_continue);
  }
}
#endif // INCLUDE_JVMCI

void InterpreterMacroAssembler::record_klass_in_profile_helper(Register receiver, Register scratch,
                                                               Label& done, bool is_virtual_call) {
  if (TypeProfileWidth == 0) {
    if (is_virtual_call) {
      increment_mdp_data_at(in_bytes(CounterData::count_offset()), scratch);
    }
#if INCLUDE_JVMCI
    else if (EnableJVMCI) {
      increment_mdp_data_at(in_bytes(ReceiverTypeData::nonprofiled_receiver_count_offset()), scratch);
    }
#endif
  } else {
    int non_profiled_offset = -1;
    if (is_virtual_call) {
      non_profiled_offset = in_bytes(CounterData::count_offset());
    }
#if INCLUDE_JVMCI
    else if (EnableJVMCI) {
      non_profiled_offset = in_bytes(ReceiverTypeData::nonprofiled_receiver_count_offset());
    }
#endif

    record_item_in_profile_helper(receiver, scratch, 0, done, TypeProfileWidth,
      &VirtualCallData::receiver_offset, &VirtualCallData::receiver_count_offset, non_profiled_offset);
  }
}
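
// The helper below emits an unrolled search over the profile rows. Roughly
// (an illustrative sketch; the real code recurses so the rare null test is
// ordered last on each row):
//
//   for (row = 0; row < total_rows; row++) {
//     if (rows[row].item == item) { rows[row].count++; goto done; }  // case 1
//     if (rows[row].item == NULL) { rows[row].item = item;           // case 2
//                                   rows[row].count = 1; goto done; }
//   }
//   if (non_profiled_offset >= 0) non_profiled_count++;              // case 3: polymorphic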

void InterpreterMacroAssembler::record_item_in_profile_helper(Register item,
    Register scratch, int start_row, Label& done, int total_rows,
    OffsetFunction item_offset_fn, OffsetFunction item_count_offset_fn,
    int non_profiled_offset) {
  int last_row = total_rows - 1;
  assert(start_row <= last_row, "must be work left to do");
  // Test this row for both the item and for null.
  // Take any of three different outcomes:
  //   1. found item => increment count and goto done
  //   2. found null => keep looking for case 1, maybe allocate this cell
  //   3. found something else => keep looking for cases 1 and 2
  // Case 3 is handled by a recursive call.
  for (int row = start_row; row <= last_row; row++) {
    Label next_test;
    bool test_for_null_also = (row == start_row);

    // See if the item is item[n].
    int item_offset = in_bytes(item_offset_fn(row));
    test_mdp_data_at(item_offset, item, next_test, scratch);
    // delayed()->tst(scratch);

    // The receiver is item[n].  Increment count[n].
    int count_offset = in_bytes(item_count_offset_fn(row));
    increment_mdp_data_at(count_offset, scratch);
    ba_short(done);
    bind(next_test);

    if (test_for_null_also) {
      Label found_null;
      // Failed the equality check on item[n]...  Test for null.
      if (start_row == last_row) {
        // The only thing left to do is handle the null case.
        if (non_profiled_offset >= 0) {
          brx(Assembler::zero, false, Assembler::pn, found_null);
          delayed()->nop();
          // Item did not match any saved item and there is no empty row for it.
          // Increment total counter to indicate polymorphic case.
          increment_mdp_data_at(non_profiled_offset, scratch);
          ba_short(done);
          bind(found_null);
        } else {
          brx(Assembler::notZero, false, Assembler::pt, done);
          delayed()->nop();
        }
        break;
      }
      // Since null is rare, make it be the branch-taken case.
      brx(Assembler::zero, false, Assembler::pn, found_null);
      delayed()->nop();

      // Put all the "Case 3" tests here.
      record_item_in_profile_helper(item, scratch, start_row + 1, done, total_rows,
        item_offset_fn, item_count_offset_fn, non_profiled_offset);

      // Found a null.  Keep searching for a matching item,
      // but remember that this is an empty (unused) slot.
      bind(found_null);
    }
  }

  // In the fall-through case, we found no matching item, but we
  // observed that item[start_row] is NULL.

  // Fill in the item field and increment the count.
  int item_offset = in_bytes(item_offset_fn(start_row));
  set_mdp_data_at(item_offset, item);
  int count_offset = in_bytes(item_count_offset_fn(start_row));
  mov(DataLayout::counter_increment, scratch);
  set_mdp_data_at(count_offset, scratch);
  if (start_row > 0) {
    ba_short(done);
  }
}

void InterpreterMacroAssembler::record_klass_in_profile(Register receiver,
                                                        Register scratch, bool is_virtual_call) {
  assert(ProfileInterpreter, "must be profiling");
  Label done;

  record_klass_in_profile_helper(receiver, scratch, done, is_virtual_call);

  bind(done);
}


// Count a ret in the bytecodes.

void InterpreterMacroAssembler::profile_ret(TosState state,
                                            Register return_bci,
                                            Register scratch) {
  if (ProfileInterpreter) {
    Label profile_continue;
    uint row;

    // If no method data exists, go to profile_continue.
    test_method_data_pointer(profile_continue);

    // Update the total ret count.
    increment_mdp_data_at(in_bytes(CounterData::count_offset()), scratch);

    for (row = 0; row < RetData::row_limit(); row++) {
      Label next_test;

      // See if return_bci is equal to bci[n]:
      test_mdp_data_at(in_bytes(RetData::bci_offset(row)),
                       return_bci, next_test, scratch);

      // return_bci is equal to bci[n].  Increment the count.
      increment_mdp_data_at(in_bytes(RetData::bci_count_offset(row)), scratch);

      // The method data pointer needs to be updated to reflect the new target.
      update_mdp_by_offset(in_bytes(RetData::bci_displacement_offset(row)), scratch);
      ba_short(profile_continue);
      bind(next_test);
    }

    update_mdp_for_ret(state, return_bci);

    bind(profile_continue);
  }
}
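
// The ret profile is a small direct-mapped table: each RetData row caches one
// return bci together with a count and a displacement to the matching target
// record. Only when return_bci misses every cached row does the code fall
// back to the update_mdp_for_ret() runtime call above.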

// Profile an unexpected null in the bytecodes.
void InterpreterMacroAssembler::profile_null_seen(Register scratch) {
  if (ProfileInterpreter) {
    Label profile_continue;

    // If no method data exists, go to profile_continue.
    test_method_data_pointer(profile_continue);

    set_mdp_flag_at(BitData::null_seen_byte_constant(), scratch);

    // The method data pointer needs to be updated.
    int mdp_delta = in_bytes(BitData::bit_data_size());
    if (TypeProfileCasts) {
      mdp_delta = in_bytes(ReceiverTypeData::receiver_type_data_size());
    }
    update_mdp_by_constant(mdp_delta);

    bind(profile_continue);
  }
}

void InterpreterMacroAssembler::profile_typecheck(Register klass,
                                                  Register scratch) {
  if (ProfileInterpreter) {
    Label profile_continue;

    // If no method data exists, go to profile_continue.
    test_method_data_pointer(profile_continue);

    int mdp_delta = in_bytes(BitData::bit_data_size());
    if (TypeProfileCasts) {
      mdp_delta = in_bytes(ReceiverTypeData::receiver_type_data_size());

      // Record the object type.
      record_klass_in_profile(klass, scratch, false);
    }

    // The method data pointer needs to be updated.
    update_mdp_by_constant(mdp_delta);

    bind(profile_continue);
  }
}

void InterpreterMacroAssembler::profile_typecheck_failed(Register scratch) {
  if (ProfileInterpreter && TypeProfileCasts) {
    Label profile_continue;

    // If no method data exists, go to profile_continue.
    test_method_data_pointer(profile_continue);

    int count_offset = in_bytes(CounterData::count_offset());
    // Back up the address, since we have already bumped the mdp.
    count_offset -= in_bytes(ReceiverTypeData::receiver_type_data_size());

    // *Decrement* the counter.  We expect to see zero or small negatives.
    increment_mdp_data_at(count_offset, scratch, true);

    bind(profile_continue);
  }
}

// Count the default case of a switch construct.

void InterpreterMacroAssembler::profile_switch_default(Register scratch) {
  if (ProfileInterpreter) {
    Label profile_continue;

    // If no method data exists, go to profile_continue.
    test_method_data_pointer(profile_continue);

    // Update the default case count.
    increment_mdp_data_at(in_bytes(MultiBranchData::default_count_offset()),
                          scratch);

    // The method data pointer needs to be updated.
    update_mdp_by_offset(
        in_bytes(MultiBranchData::default_displacement_offset()),
        scratch);

    bind(profile_continue);
  }
}

// Count the index'th case of a switch construct.

void InterpreterMacroAssembler::profile_switch_case(Register index,
                                                    Register scratch,
                                                    Register scratch2,
                                                    Register scratch3) {
  if (ProfileInterpreter) {
    Label profile_continue;

    // If no method data exists, go to profile_continue.
    test_method_data_pointer(profile_continue);

    // Build the base (index * per_case_size_in_bytes()) + case_array_offset_in_bytes().
    set(in_bytes(MultiBranchData::per_case_size()), scratch);
    smul(index, scratch, scratch);
    add(scratch, in_bytes(MultiBranchData::case_array_offset()), scratch);

    // Update the case count.
    increment_mdp_data_at(scratch,
                          in_bytes(MultiBranchData::relative_count_offset()),
                          scratch2,
                          scratch3);

    // The method data pointer needs to be updated.
    update_mdp_by_offset(scratch,
                         in_bytes(MultiBranchData::relative_displacement_offset()),
                         scratch2);

    bind(profile_continue);
  }
}
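
// For reference, the per-case addressing computed above (an illustrative
// sketch of the layout, not emitted code):
//
//   case_base = case_array_offset + index * per_case_size
//   count cell        at  mdp + case_base + relative_count_offset
//   displacement cell at  mdp + case_base + relative_displacement_offset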

void InterpreterMacroAssembler::profile_obj_type(Register obj, const Address& mdo_addr, Register tmp) {
  Label not_null, do_nothing, do_update;

  assert_different_registers(obj, mdo_addr.base(), tmp);

  verify_oop(obj);

  ld_ptr(mdo_addr, tmp);

  br_notnull_short(obj, pt, not_null);
  or3(tmp, TypeEntries::null_seen, tmp);
  ba_short(do_update);

  bind(not_null);
  load_klass(obj, obj);

  xor3(obj, tmp, obj);
  btst(TypeEntries::type_klass_mask, obj);
  // klass seen before, nothing to do. The unknown bit may have been
  // set already but no need to check.
  brx(zero, false, pt, do_nothing);
  delayed()->

  btst(TypeEntries::type_unknown, obj);
  // already unknown. Nothing to do anymore.
  brx(notZero, false, pt, do_nothing);
  delayed()->

  btst(TypeEntries::type_mask, tmp);
  brx(zero, true, pt, do_update);
  // first time here. Set profile type.
  delayed()->or3(tmp, obj, tmp);

  // different than before. Cannot keep accurate profile.
  or3(tmp, TypeEntries::type_unknown, tmp);

  bind(do_update);
  // update profile
  st_ptr(tmp, mdo_addr);

  bind(do_nothing);
}
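
// Each type cell written above packs a klass pointer and flag bits into one
// pointer-sized word. An illustrative sketch of the update rule:
//
//   if (obj == NULL)                                cell |= null_seen;
//   else if (((cell ^ klass) & type_klass_mask) == 0)  /* same klass: done */;
//   else if (cell & type_unknown)                   /* already polymorphic */;
//   else if ((cell & type_mask) == 0)               cell |= (uintptr_t)klass;  // first type seen
//   else                                            cell |= type_unknown;      // conflicting type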

void InterpreterMacroAssembler::profile_arguments_type(Register callee, Register tmp1, Register tmp2, bool is_virtual) {
  if (!ProfileInterpreter) {
    return;
  }

  assert_different_registers(callee, tmp1, tmp2, ImethodDataPtr);

  if (MethodData::profile_arguments() || MethodData::profile_return()) {
    Label profile_continue;

    test_method_data_pointer(profile_continue);

    int off_to_start = is_virtual ? in_bytes(VirtualCallData::virtual_call_data_size()) : in_bytes(CounterData::counter_data_size());

    ldub(ImethodDataPtr, in_bytes(DataLayout::tag_offset()) - off_to_start, tmp1);
    cmp_and_br_short(tmp1, is_virtual ? DataLayout::virtual_call_type_data_tag : DataLayout::call_type_data_tag, notEqual, pn, profile_continue);

    if (MethodData::profile_arguments()) {
      Label done;
      int off_to_args = in_bytes(TypeEntriesAtCall::args_data_offset());
      add(ImethodDataPtr, off_to_args, ImethodDataPtr);

      for (int i = 0; i < TypeProfileArgsLimit; i++) {
        if (i > 0 || MethodData::profile_return()) {
          // If return value type is profiled we may have no argument to profile.
          ld_ptr(ImethodDataPtr, in_bytes(TypeEntriesAtCall::cell_count_offset())-off_to_args, tmp1);
          sub(tmp1, i*TypeStackSlotEntries::per_arg_count(), tmp1);
          cmp_and_br_short(tmp1, TypeStackSlotEntries::per_arg_count(), less, pn, done);
        }
        ld_ptr(Address(callee, Method::const_offset()), tmp1);
        lduh(Address(tmp1, ConstMethod::size_of_parameters_offset()), tmp1);
        // stack offset o (zero based) from the start of the argument
        // list, for n arguments translates into offset n - o - 1 from
        // the end of the argument list. But there's an extra slot at
        // the top of the stack. So the offset is n - o from Lesp.
        ld_ptr(ImethodDataPtr, in_bytes(TypeEntriesAtCall::stack_slot_offset(i))-off_to_args, tmp2);
        sub(tmp1, tmp2, tmp1);

        // Can't use MacroAssembler::argument_address() which needs Gargs to be set up.
        sll(tmp1, Interpreter::logStackElementSize, tmp1);
        ld_ptr(Lesp, tmp1, tmp1);

        Address mdo_arg_addr(ImethodDataPtr, in_bytes(TypeEntriesAtCall::argument_type_offset(i))-off_to_args);
        profile_obj_type(tmp1, mdo_arg_addr, tmp2);

        int to_add = in_bytes(TypeStackSlotEntries::per_arg_size());
        add(ImethodDataPtr, to_add, ImethodDataPtr);
        off_to_args += to_add;
      }

      if (MethodData::profile_return()) {
        ld_ptr(ImethodDataPtr, in_bytes(TypeEntriesAtCall::cell_count_offset())-off_to_args, tmp1);
        sub(tmp1, TypeProfileArgsLimit*TypeStackSlotEntries::per_arg_count(), tmp1);
      }

      bind(done);

      if (MethodData::profile_return()) {
        // We're right after the type profile for the last
        // argument. tmp1 is the number of cells left in the
        // CallTypeData/VirtualCallTypeData to reach its end. Non-null
        // if there's a return to profile.
        assert(ReturnTypeEntry::static_cell_count() < TypeStackSlotEntries::per_arg_count(), "can't move past ret type");
        sll(tmp1, exact_log2(DataLayout::cell_size), tmp1);
        add(ImethodDataPtr, tmp1, ImethodDataPtr);
      }
    } else {
      assert(MethodData::profile_return(), "either profile call args or call ret");
      update_mdp_by_constant(in_bytes(TypeEntriesAtCall::return_only_size()));
    }

    // mdp points right after the end of the
    // CallTypeData/VirtualCallTypeData, right after the cells for the
    // return value type if there's one.

    bind(profile_continue);
  }
}

void InterpreterMacroAssembler::profile_return_type(Register ret, Register tmp1, Register tmp2) {
  assert_different_registers(ret, tmp1, tmp2);
  if (ProfileInterpreter && MethodData::profile_return()) {
    Label profile_continue, done;

    test_method_data_pointer(profile_continue);

    if (MethodData::profile_return_jsr292_only()) {
      assert(Method::intrinsic_id_size_in_bytes() == 2, "assuming Method::_intrinsic_id is u2");

      // If we don't profile all invoke bytecodes we must make sure
      // it's a bytecode we indeed profile. We can't go back to the
      // beginning of the ProfileData we intend to update to check its
      // type because we're right after it and we don't know its
      // length.
      Label do_profile;
      ldub(Lbcp, 0, tmp1);
      cmp_and_br_short(tmp1, Bytecodes::_invokedynamic, equal, pn, do_profile);
      cmp(tmp1, Bytecodes::_invokehandle);
      br(equal, false, pn, do_profile);
      delayed()->lduh(Lmethod, Method::intrinsic_id_offset_in_bytes(), tmp1);
      cmp_and_br_short(tmp1, vmIntrinsics::_compiledLambdaForm, notEqual, pt, profile_continue);

      bind(do_profile);
    }

    Address mdo_ret_addr(ImethodDataPtr, -in_bytes(ReturnTypeEntry::size()));
    mov(ret, tmp1);
    profile_obj_type(tmp1, mdo_ret_addr, tmp2);

    bind(profile_continue);
  }
}
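
// profile_parameters_type() below walks the ParametersTypeData array in the
// MDO backwards. An illustrative sketch of the loop it emits (each entry is
// a stack-slot/type pair, offsets measured in cells):
//
//   for (entry = last_parameter_entry; entry >= first_entry; entry--) {
//     obj = local(entry->stack_slot);       // read parameter from the locals area
//     profile_obj_type(obj, &entry->type);
//   }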

void InterpreterMacroAssembler::profile_parameters_type(Register tmp1, Register tmp2, Register tmp3, Register tmp4) {
  if (ProfileInterpreter && MethodData::profile_parameters()) {
    Label profile_continue, done;

    test_method_data_pointer(profile_continue);

    // Load the offset of the area within the MDO used for
    // parameters. If it's negative we're not profiling any parameters.
    lduw(ImethodDataPtr, in_bytes(MethodData::parameters_type_data_di_offset()) - in_bytes(MethodData::data_offset()), tmp1);
    cmp_and_br_short(tmp1, 0, less, pn, profile_continue);

    // Compute a pointer to the area for parameters from the offset
    // and move the pointer to the slot for the last
    // parameter. Collect profiling from last parameter down:
    // mdo start + parameters offset + array length - 1.

    // Pointer to the parameter area in the MDO.
    Register mdp = tmp1;
    add(ImethodDataPtr, tmp1, mdp);

    // Offset of the current profile entry to update.
    Register entry_offset = tmp2;
    // entry_offset = array len in number of cells
    ld_ptr(mdp, ArrayData::array_len_offset(), entry_offset);

    int off_base = in_bytes(ParametersTypeData::stack_slot_offset(0));
    assert(off_base % DataLayout::cell_size == 0, "should be a number of cells");

    // entry_offset (number of cells) = array len - size of 1 entry + offset of the stack slot field
    sub(entry_offset, TypeStackSlotEntries::per_arg_count() - (off_base / DataLayout::cell_size), entry_offset);
    // entry_offset in bytes
    sll(entry_offset, exact_log2(DataLayout::cell_size), entry_offset);

    Label loop;
    bind(loop);

    // Load the offset on the stack from the slot for this parameter.
    ld_ptr(mdp, entry_offset, tmp3);
    sll(tmp3, Interpreter::logStackElementSize, tmp3);
    neg(tmp3);
    // Read the parameter from the local area.
    ld_ptr(Llocals, tmp3, tmp3);

    // Make entry_offset now point to the type field for this parameter.
    int type_base = in_bytes(ParametersTypeData::type_offset(0));
    assert(type_base > off_base, "unexpected");
    add(entry_offset, type_base - off_base, entry_offset);

    // Profile the parameter.
    Address arg_type(mdp, entry_offset);
    profile_obj_type(tmp3, arg_type, tmp4);

    // Go to the next parameter.
    sub(entry_offset, TypeStackSlotEntries::per_arg_count() * DataLayout::cell_size + (type_base - off_base), entry_offset);
    cmp_and_br_short(entry_offset, off_base, greaterEqual, pt, loop);

    bind(profile_continue);
  }
}
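
// add_monitor_to_stack() below grows the monitor area downwards. A sketch of
// the effect (all three pointers drop by one monitor size, and any existing
// expression-stack words are copied down to make room):
//
//   SP -= delta;  Lesp -= delta;  Lmonitors -= delta;
//   if (!stack_is_empty)
//     for (p = new_stack_base; p != Lmonitors; p += wordSize)
//       *p = *(p + delta);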

// Add an InterpMonitorElem to the stack (see frame_sparc.hpp).

void InterpreterMacroAssembler::add_monitor_to_stack( bool stack_is_empty,
                                                      Register Rtemp,
                                                      Register Rtemp2 ) {

  Register Rlimit = Lmonitors;
  const jint delta = frame::interpreter_frame_monitor_size() * wordSize;
  assert( (delta & LongAlignmentMask) == 0,
          "sizeof BasicObjectLock must be even number of doublewords");

  sub( SP,        delta, SP);
  sub( Lesp,      delta, Lesp);
  sub( Lmonitors, delta, Lmonitors);

  if (!stack_is_empty) {

    // must copy stack contents down

    Label start_copying, next;

    // untested("monitor stack expansion");
    compute_stack_base(Rtemp);
    ba(start_copying);
    delayed()->cmp(Rtemp, Rlimit); // done? duplicated below

    // note: must copy from low memory upwards
    // On entry to loop,
    // Rtemp points to new base of stack, Lesp points to new end of stack (1 past TOS)
    // Loop mutates Rtemp

    bind( next);

    st_ptr(Rtemp2, Rtemp, 0);
    inc(Rtemp, wordSize);
    cmp(Rtemp, Rlimit); // are we done? (duplicated above)

    bind( start_copying );

    brx( notEqual, true, pn, next );
    delayed()->ld_ptr( Rtemp, delta, Rtemp2 );

    // done copying stack
  }
}

// Locals
void InterpreterMacroAssembler::access_local_ptr( Register index, Register dst ) {
  assert_not_delayed();
  sll(index, Interpreter::logStackElementSize, index);
  sub(Llocals, index, index);
  ld_ptr(index, 0, dst);
  // Note: index must hold the effective address--the iinc template uses it
}

// Just like access_local_ptr but the tag is a returnAddress
void InterpreterMacroAssembler::access_local_returnAddress(Register index,
                                                           Register dst ) {
  assert_not_delayed();
  sll(index, Interpreter::logStackElementSize, index);
  sub(Llocals, index, index);
  ld_ptr(index, 0, dst);
}

void InterpreterMacroAssembler::access_local_int( Register index, Register dst ) {
  assert_not_delayed();
  sll(index, Interpreter::logStackElementSize, index);
  sub(Llocals, index, index);
  ld(index, 0, dst);
  // Note: index must hold the effective address--the iinc template uses it
}


void InterpreterMacroAssembler::access_local_long( Register index, Register dst ) {
  assert_not_delayed();
  sll(index, Interpreter::logStackElementSize, index);
  sub(Llocals, index, index);
  // First half stored at index n+1 (which grows down from Llocals[n])
  load_unaligned_long(index, Interpreter::local_offset_in_bytes(1), dst);
}


void InterpreterMacroAssembler::access_local_float( Register index, FloatRegister dst ) {
  assert_not_delayed();
  sll(index, Interpreter::logStackElementSize, index);
  sub(Llocals, index, index);
  ldf(FloatRegisterImpl::S, index, 0, dst);
}


void InterpreterMacroAssembler::access_local_double( Register index, FloatRegister dst ) {
  assert_not_delayed();
  sll(index, Interpreter::logStackElementSize, index);
  sub(Llocals, index, index);
  load_unaligned_double(index, Interpreter::local_offset_in_bytes(1), dst);
}
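
// All of the access_local_* variants above share one addressing scheme:
// locals grow down from Llocals, so for local number 'index' the effective
// address is, roughly,
//
//   addr = Llocals - (index << Interpreter::logStackElementSize)
//
// The scaled address is intentionally left in 'index' for reuse (for
// example, the iinc template depends on it).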

#ifdef ASSERT
void InterpreterMacroAssembler::check_for_regarea_stomp(Register Rindex, int offset, Register Rlimit, Register Rscratch, Register Rscratch1) {
  Label L;

  assert(Rindex    != Rscratch,  "Registers cannot be same");
  assert(Rindex    != Rscratch1, "Registers cannot be same");
  assert(Rlimit    != Rscratch,  "Registers cannot be same");
  assert(Rlimit    != Rscratch1, "Registers cannot be same");
  assert(Rscratch1 != Rscratch,  "Registers cannot be same");

  // untested("reg area corruption");
  add(Rindex, offset, Rscratch);
  add(Rlimit, 64 + STACK_BIAS, Rscratch1);
  cmp_and_brx_short(Rscratch, Rscratch1, Assembler::greaterEqualUnsigned, pn, L);
  stop("regsave area is being clobbered");
  bind(L);
}
#endif // ASSERT


void InterpreterMacroAssembler::store_local_int( Register index, Register src ) {
  assert_not_delayed();
  sll(index, Interpreter::logStackElementSize, index);
  sub(Llocals, index, index);
  debug_only(check_for_regarea_stomp(index, 0, FP, G1_scratch, G4_scratch);)
  st(src, index, 0);
}

void InterpreterMacroAssembler::store_local_ptr( Register index, Register src ) {
  assert_not_delayed();
  sll(index, Interpreter::logStackElementSize, index);
  sub(Llocals, index, index);
#ifdef ASSERT
  check_for_regarea_stomp(index, 0, FP, G1_scratch, G4_scratch);
#endif
  st_ptr(src, index, 0);
}

void InterpreterMacroAssembler::store_local_ptr( int n, Register src ) {
  st_ptr(src, Llocals, Interpreter::local_offset_in_bytes(n));
}

void InterpreterMacroAssembler::store_local_long( Register index, Register src ) {
  assert_not_delayed();
  sll(index, Interpreter::logStackElementSize, index);
  sub(Llocals, index, index);
#ifdef ASSERT
  check_for_regarea_stomp(index, Interpreter::local_offset_in_bytes(1), FP, G1_scratch, G4_scratch);
#endif
  store_unaligned_long(src, index, Interpreter::local_offset_in_bytes(1)); // which is n+1
}


void InterpreterMacroAssembler::store_local_float( Register index, FloatRegister src ) {
  assert_not_delayed();
  sll(index, Interpreter::logStackElementSize, index);
  sub(Llocals, index, index);
#ifdef ASSERT
  check_for_regarea_stomp(index, 0, FP, G1_scratch, G4_scratch);
#endif
  stf(FloatRegisterImpl::S, src, index, 0);
}


void InterpreterMacroAssembler::store_local_double( Register index, FloatRegister src ) {
  assert_not_delayed();
  sll(index, Interpreter::logStackElementSize, index);
  sub(Llocals, index, index);
#ifdef ASSERT
  check_for_regarea_stomp(index, Interpreter::local_offset_in_bytes(1), FP, G1_scratch, G4_scratch);
#endif
  store_unaligned_double(src, index, Interpreter::local_offset_in_bytes(1));
}


int InterpreterMacroAssembler::top_most_monitor_byte_offset() {
  const jint delta = frame::interpreter_frame_monitor_size() * wordSize;
  int rounded_vm_local_words = ::round_to(frame::interpreter_frame_vm_local_words, WordsPerLong);
  return ((-rounded_vm_local_words * wordSize) - delta) + STACK_BIAS;
}


Address InterpreterMacroAssembler::top_most_monitor() {
  return Address(FP, top_most_monitor_byte_offset());
}


void InterpreterMacroAssembler::compute_stack_base( Register Rdest ) {
  add( Lesp, wordSize, Rdest );
}

#endif /* CC_INTERP */

void InterpreterMacroAssembler::get_method_counters(Register method,
                                                    Register Rcounters,
                                                    Label& skip) {
  Label has_counters;
  Address method_counters(method, in_bytes(Method::method_counters_offset()));
  ld_ptr(method_counters, Rcounters);
  br_notnull_short(Rcounters, Assembler::pt, has_counters);
  call_VM(noreg, CAST_FROM_FN_PTR(address,
          InterpreterRuntime::build_method_counters), method);
  ld_ptr(method_counters, Rcounters);
  br_null(Rcounters, false, Assembler::pn, skip); // No MethodCounters, OutOfMemory
  delayed()->nop();
  bind(has_counters);
}
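
// The MethodCounters* is allocated lazily: the first load above may see NULL,
// in which case build_method_counters() is called and the field is reloaded.
// If it is still NULL afterwards the allocation failed (OutOfMemory), and the
// caller's 'skip' path is taken rather than counting anything.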

void InterpreterMacroAssembler::increment_invocation_counter( Register Rcounters, Register Rtmp, Register Rtmp2 ) {
  assert(UseCompiler || LogTouchedMethods, "incrementing must be useful");
  assert_different_registers(Rcounters, Rtmp, Rtmp2);

  Address inv_counter(Rcounters, MethodCounters::invocation_counter_offset() +
                                 InvocationCounter::counter_offset());
  Address be_counter (Rcounters, MethodCounters::backedge_counter_offset() +
                                 InvocationCounter::counter_offset());
  int delta = InvocationCounter::count_increment;

  // Load each counter in a register.
  ld( inv_counter, Rtmp  );
  ld( be_counter,  Rtmp2 );

  assert( is_simm13( delta ), "delta too large");

  // Add the delta to the invocation counter and store the result.
  add( Rtmp, delta, Rtmp );

  // Mask the backedge counter.
  and3( Rtmp2, InvocationCounter::count_mask_value, Rtmp2 );

  // Store value.
  st( Rtmp, inv_counter);

  // Add invocation counter + backedge counter.
  add( Rtmp, Rtmp2, Rtmp);

  // Note that this macro must leave the backedge_count + invocation_count in Rtmp!
}

void InterpreterMacroAssembler::increment_backedge_counter( Register Rcounters, Register Rtmp, Register Rtmp2 ) {
  assert(UseCompiler, "incrementing must be useful");
  assert_different_registers(Rcounters, Rtmp, Rtmp2);

  Address be_counter (Rcounters, MethodCounters::backedge_counter_offset() +
                                 InvocationCounter::counter_offset());
  Address inv_counter(Rcounters, MethodCounters::invocation_counter_offset() +
                                 InvocationCounter::counter_offset());

  int delta = InvocationCounter::count_increment;
  // Load each counter in a register.
  ld( be_counter,  Rtmp  );
  ld( inv_counter, Rtmp2 );

  // Add the delta to the backedge counter.
  add( Rtmp, delta, Rtmp );

  // Mask the invocation counter, add to backedge counter.
  and3( Rtmp2, InvocationCounter::count_mask_value, Rtmp2 );

  // And store the result to memory.
  st( Rtmp, be_counter );

  // Add backedge + invocation counter.
  add( Rtmp, Rtmp2, Rtmp );

  // Note that this macro must leave backedge_count + invocation_count in Rtmp!
}
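
// Both increments follow the same pattern (illustrative sketch):
//
//   own   = *own_counter + count_increment;     // bump the counter we own
//   *own_counter = own;
//   other = *other_counter & count_mask_value;  // mask off the other counter's status bits
//   Rtmp  = own + other;                        // combined count, consumed by the
//                                               // threshold checks that follow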

#ifndef CC_INTERP
void InterpreterMacroAssembler::test_backedge_count_for_osr( Register backedge_count,
                                                             Register method_counters,
                                                             Register branch_bcp,
                                                             Register Rtmp ) {
  Label did_not_overflow;
  Label overflow_with_error;
  assert_different_registers(backedge_count, Rtmp, branch_bcp);
  assert(UseOnStackReplacement, "Must UseOnStackReplacement to test_backedge_count_for_osr");

  Address limit(method_counters, in_bytes(MethodCounters::interpreter_backward_branch_limit_offset()));
  ld(limit, Rtmp);
  cmp_and_br_short(backedge_count, Rtmp, Assembler::lessUnsigned, Assembler::pt, did_not_overflow);

  // When ProfileInterpreter is on, the backedge_count comes from the
  // MethodData*, whose value does not get reset on the call to
  // frequency_counter_overflow(). To avoid excessive calls to the overflow
  // routine while the method is being compiled, add a second test to make sure
  // the overflow function is called only once every overflow_frequency.
  if (ProfileInterpreter) {
    const int overflow_frequency = 1024;
    andcc(backedge_count, overflow_frequency-1, Rtmp);
    brx(Assembler::notZero, false, Assembler::pt, did_not_overflow);
    delayed()->nop();
  }

  // Overflow in loop, pass branch bytecode.
  set(6, Rtmp);
  call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::frequency_counter_overflow), branch_bcp, Rtmp);

  // Was an OSR adapter generated?
  // O0 = osr nmethod
  br_null_short(O0, Assembler::pn, overflow_with_error);

  // Has the nmethod been invalidated already?
  ldub(O0, nmethod::state_offset(), O2);
  cmp_and_br_short(O2, nmethod::in_use, Assembler::notEqual, Assembler::pn, overflow_with_error);

  // Migrate the interpreter frame off of the stack.

  mov(G2_thread, L7);
  // save nmethod
  mov(O0, L6);
  set_last_Java_frame(SP, noreg);
  call_VM_leaf(noreg, CAST_FROM_FN_PTR(address, SharedRuntime::OSR_migration_begin), L7);
  reset_last_Java_frame();
  mov(L7, G2_thread);

  // Move OSR nmethod to I1.
  mov(L6, I1);

  // OSR buffer to I0.
  mov(O0, I0);

  // Remove the interpreter frame.
  restore(I5_savedSP, 0, SP);

  // Jump to the osr code.
  ld_ptr(O1, nmethod::osr_entry_point_offset(), O2);
  jmp(O2, G0);
  delayed()->nop();

  bind(overflow_with_error);

  bind(did_not_overflow);
}



void InterpreterMacroAssembler::interp_verify_oop(Register reg, TosState state, const char * file, int line) {
  if (state == atos) { MacroAssembler::_verify_oop(reg, "broken oop ", file, line); }
}


// local helper function for the verify_oop_or_return_address macro
static bool verify_return_address(Method* m, int bci) {
#ifndef PRODUCT
  address pc = (address)(m->constMethod())
             + in_bytes(ConstMethod::codes_offset()) + bci;
  // assume it is a valid return address if it is inside m and is preceded by a jsr
  if (!m->contains(pc))                                           return false;
  address jsr_pc;
  jsr_pc = pc - Bytecodes::length_for(Bytecodes::_jsr);
  if (*jsr_pc == Bytecodes::_jsr   && jsr_pc >= m->code_base())   return true;
  jsr_pc = pc - Bytecodes::length_for(Bytecodes::_jsr_w);
  if (*jsr_pc == Bytecodes::_jsr_w && jsr_pc >= m->code_base())   return true;
#endif // PRODUCT
  return false;
}


void InterpreterMacroAssembler::verify_oop_or_return_address(Register reg, Register Rtmp) {
  if (!VerifyOops)  return;
  // The VM documentation for the astore[_wide] bytecode allows
  // the TOS to be not only an oop but also a return address.
  Label test;
  Label skip;
  // See if it is an address (in the current method):

  mov(reg, Rtmp);
  const int log2_bytecode_size_limit = 16;
  srl(Rtmp, log2_bytecode_size_limit, Rtmp);
  br_notnull_short( Rtmp, pt, test );

  // %%% should use call_VM_leaf here?
  save_frame_and_mov(0, Lmethod, O0, reg, O1);
  save_thread(L7_thread_cache);
  call(CAST_FROM_FN_PTR(address, verify_return_address), relocInfo::none);
  delayed()->nop();
  restore_thread(L7_thread_cache);
  br_notnull( O0, false, pt, skip );
  delayed()->restore();

  // Perform a more elaborate out-of-line call.
  // Not an address; verify it:
  bind(test);
  verify_oop(reg);
  bind(skip);
}


void InterpreterMacroAssembler::verify_FPU(int stack_depth, TosState state) {
  if (state == ftos || state == dtos)  MacroAssembler::verify_FPU(stack_depth);
}


// Jump if ((*counter_addr += increment) & mask) satisfies the condition.
void InterpreterMacroAssembler::increment_mask_and_jump(Address counter_addr,
                                                        int increment, Address mask_addr,
                                                        Register scratch1, Register scratch2,
                                                        Condition cond, Label *where) {
  ld(counter_addr, scratch1);
  add(scratch1, increment, scratch1);
  ld(mask_addr, scratch2);
  andcc(scratch1, scratch2, G0);
  br(cond, false, Assembler::pn, *where);
  delayed()->st(scratch1, counter_addr);
}
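
// Equivalent logic as a rough C sketch (the store executes in the branch
// delay slot, so the bumped counter is written whether or not we jump):
//
//   int c = *counter_addr + increment;
//   *counter_addr = c;                 // done in the delay slot
//   if ((c & *mask_addr) 'cond' 0)     // e.g. cond == Assembler::zero
//     goto where;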

#endif /* CC_INTERP */

// Inline assembly for:
//
// if (thread is in interp_only_mode) {
//   InterpreterRuntime::post_method_entry();
// }
// if (DTraceMethodProbes) {
//   SharedRuntime::dtrace_method_entry(method, receiver);
// }
// if (RC_TRACE_IN_RANGE(0x00001000, 0x00002000)) {
//   SharedRuntime::rc_trace_method_entry(method, receiver);
// }

void InterpreterMacroAssembler::notify_method_entry() {

  // C++ interpreter only uses this for native methods.

  // Whenever JVMTI puts a thread in interp_only_mode, method
  // entry/exit events are sent for that thread to track stack
  // depth.  If it is possible to enter interp_only_mode we add
  // the code to check if the event should be sent.
  if (JvmtiExport::can_post_interpreter_events()) {
    Label L;
    Register temp_reg = O5;
    const Address interp_only(G2_thread, JavaThread::interp_only_mode_offset());
    ld(interp_only, temp_reg);
    cmp_and_br_short(temp_reg, 0, equal, pt, L);
    call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::post_method_entry));
    bind(L);
  }

  {
    Register temp_reg = O5;
    SkipIfEqual skip_if(this, temp_reg, &DTraceMethodProbes, zero);
    call_VM_leaf(noreg,
                 CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_entry),
                 G2_thread, Lmethod);
  }

  // RedefineClasses() tracing support for obsolete method entry
  if (RC_TRACE_IN_RANGE(0x00001000, 0x00002000)) {
    call_VM_leaf(noreg,
                 CAST_FROM_FN_PTR(address, SharedRuntime::rc_trace_method_entry),
                 G2_thread, Lmethod);
  }
}

// Inline assembly for:
//
// if (thread is in interp_only_mode) {
//   // save result
//   InterpreterRuntime::post_method_exit();
//   // restore result
// }
// if (DTraceMethodProbes) {
//   SharedRuntime::dtrace_method_exit(thread, method);
// }
//
// Native methods have their result stored in d_tmp and l_tmp.
// Java methods have their result stored in the expression stack.

void InterpreterMacroAssembler::notify_method_exit(bool is_native_method,
                                                   TosState state,
                                                   NotifyMethodExitMode mode) {
  // C++ interpreter only uses this for native methods.

  // Whenever JVMTI puts a thread in interp_only_mode, method
  // entry/exit events are sent for that thread to track stack
  // depth.  If it is possible to enter interp_only_mode we add
  // the code to check if the event should be sent.
  if (mode == NotifyJVMTI && JvmtiExport::can_post_interpreter_events()) {
    Label L;
    Register temp_reg = O5;
    const Address interp_only(G2_thread, JavaThread::interp_only_mode_offset());
    ld(interp_only, temp_reg);
    cmp_and_br_short(temp_reg, 0, equal, pt, L);

    // Note: frame::interpreter_frame_result has a dependency on how the
    // method result is saved across the call to post_method_exit.  For
    // native methods it assumes the result registers are saved to
    // l_scratch and d_scratch.  If this changes then the
    // interpreter_frame_result implementation will need to be updated too.

    save_return_value(state, is_native_method);
    call_VM(noreg,
            CAST_FROM_FN_PTR(address, InterpreterRuntime::post_method_exit));
    restore_return_value(state, is_native_method);
    bind(L);
  }

  {
    Register temp_reg = O5;
    // DTrace notification
    SkipIfEqual skip_if(this, temp_reg, &DTraceMethodProbes, zero);
    save_return_value(state, is_native_method);
    call_VM_leaf(
      noreg,
      CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_exit),
      G2_thread, Lmethod);
    restore_return_value(state, is_native_method);
  }
}

void InterpreterMacroAssembler::save_return_value(TosState state, bool is_native_call) {
#ifdef CC_INTERP
  // result potentially in O0/O1: save it across calls
  stf(FloatRegisterImpl::D, F0, STATE(_native_fresult));
#ifdef _LP64
  stx(O0, STATE(_native_lresult));
#else
  std(O0, STATE(_native_lresult));
#endif
#else // CC_INTERP
  if (is_native_call) {
    stf(FloatRegisterImpl::D, F0, d_tmp);
#ifdef _LP64
    stx(O0, l_tmp);
#else
    std(O0, l_tmp);
#endif
  } else {
    push(state);
  }
#endif // CC_INTERP
}

void InterpreterMacroAssembler::restore_return_value( TosState state, bool is_native_call) {
#ifdef CC_INTERP
  ldf(FloatRegisterImpl::D, STATE(_native_fresult), F0);
#ifdef _LP64
  ldx(STATE(_native_lresult), O0);
#else
  ldd(STATE(_native_lresult), O0);
#endif
#else // CC_INTERP
  if (is_native_call) {
    ldf(FloatRegisterImpl::D, d_tmp, F0);
#ifdef _LP64
    ldx(l_tmp, O0);
#else
    ldd(l_tmp, O0);
#endif
  } else {
    pop(state);
  }
#endif // CC_INTERP
}