rev 56556: 8232151: Minimal VM build broken after JDK-8232050
Reviewed-by: dholmes, clanger, redestad
1 /* 2 * Copyright (c) 2014, 2018, Oracle and/or its affiliates. All rights reserved. 3 * Copyright (c) 2013, 2017 SAP SE. All rights reserved. 4 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 5 * 6 * This code is free software; you can redistribute it and/or modify it 7 * under the terms of the GNU General Public License version 2 only, as 8 * published by the Free Software Foundation. 9 * 10 * This code is distributed in the hope that it will be useful, but WITHOUT 11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License 13 * version 2 for more details (a copy is included in the LICENSE file that 14 * accompanied this code). 15 * 16 * You should have received a copy of the GNU General Public License version 17 * 2 along with this work; if not, write to the Free Software Foundation, 18 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 19 * 20 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA 21 * or visit www.oracle.com if you need additional information or have any 22 * questions. 23 * 24 */ 25 26 #include "precompiled.hpp" 27 #include "asm/macroAssembler.inline.hpp" 28 #include "gc/shared/barrierSetAssembler.hpp" 29 #include "interpreter/interpreter.hpp" 30 #include "interpreter/interpreterRuntime.hpp" 31 #include "interpreter/interp_masm.hpp" 32 #include "interpreter/templateInterpreter.hpp" 33 #include "interpreter/templateTable.hpp" 34 #include "memory/universe.hpp" 35 #include "oops/objArrayKlass.hpp" 36 #include "oops/oop.inline.hpp" 37 #include "prims/methodHandles.hpp" 38 #include "runtime/frame.inline.hpp" 39 #include "runtime/safepointMechanism.hpp" 40 #include "runtime/sharedRuntime.hpp" 41 #include "runtime/stubRoutines.hpp" 42 #include "runtime/synchronizer.hpp" 43 #include "utilities/macros.hpp" 44 45 #undef __ 46 #define __ _masm-> 47 48 // ============================================================================ 49 // Misc helpers 50 51 // Do an oop store like *(base + index) = val OR *(base + offset) = val 52 // (only one of both variants is possible at the same time). 53 // Index can be noreg. 54 // Kills: 55 // Rbase, Rtmp 56 static void do_oop_store(InterpreterMacroAssembler* _masm, 57 Register base, 58 RegisterOrConstant offset, 59 Register val, // Noreg means always null. 60 Register tmp1, 61 Register tmp2, 62 Register tmp3, 63 DecoratorSet decorators) { 64 assert_different_registers(tmp1, tmp2, tmp3, val, base); 65 __ store_heap_oop(val, offset, base, tmp1, tmp2, tmp3, false, decorators); 66 } 67 68 static void do_oop_load(InterpreterMacroAssembler* _masm, 69 Register base, 70 RegisterOrConstant offset, 71 Register dst, 72 Register tmp1, 73 Register tmp2, 74 DecoratorSet decorators) { 75 assert_different_registers(base, tmp1, tmp2); 76 assert_different_registers(dst, tmp1, tmp2); 77 __ load_heap_oop(dst, offset, base, tmp1, tmp2, false, decorators); 78 } 79 80 // ============================================================================ 81 // Platform-dependent initialization 82 83 void TemplateTable::pd_initialize() { 84 // No ppc64 specific initialization. 85 } 86 87 Address TemplateTable::at_bcp(int offset) { 88 // Not used on ppc. 89 ShouldNotReachHere(); 90 return Address(); 91 } 92 93 // Patches the current bytecode (ptr to it located in bcp) 94 // in the bytecode stream with a new one. 
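// (Illustrative note, not part of the generated interpreter: for the fast-putfield
//  cases handled below, the quickening check roughly corresponds to
//      u1 put_code = (cache_entry_indices >> ((1 + byte_no) * 8)) & 0xFF;
//      if (put_code == 0) return;   // not yet resolved; keep the slow bytecode
//      *bcp = new_bc;               // otherwise patch in the fast variant
//  where "cache_entry_indices" is a placeholder name for the ConstantPoolCacheEntry
//  indices word and the byte is fetched directly with lbz at an endian-dependent offset.)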
95 void TemplateTable::patch_bytecode(Bytecodes::Code new_bc, Register Rnew_bc, Register Rtemp, bool load_bc_into_bc_reg /*=true*/, int byte_no) { 96 // With sharing on, may need to test method flag. 97 if (!RewriteBytecodes) return; 98 Label L_patch_done; 99 100 switch (new_bc) { 101 case Bytecodes::_fast_aputfield: 102 case Bytecodes::_fast_bputfield: 103 case Bytecodes::_fast_zputfield: 104 case Bytecodes::_fast_cputfield: 105 case Bytecodes::_fast_dputfield: 106 case Bytecodes::_fast_fputfield: 107 case Bytecodes::_fast_iputfield: 108 case Bytecodes::_fast_lputfield: 109 case Bytecodes::_fast_sputfield: 110 { 111 // We skip bytecode quickening for putfield instructions when 112 // the put_code written to the constant pool cache is zero. 113 // This is required so that every execution of this instruction 114 // calls out to InterpreterRuntime::resolve_get_put to do 115 // additional, required work. 116 assert(byte_no == f1_byte || byte_no == f2_byte, "byte_no out of range"); 117 assert(load_bc_into_bc_reg, "we use bc_reg as temp"); 118 __ get_cache_and_index_at_bcp(Rtemp /* dst = cache */, 1); 119 // ((*(cache+indices))>>((1+byte_no)*8))&0xFF: 120 #if defined(VM_LITTLE_ENDIAN) 121 __ lbz(Rnew_bc, in_bytes(ConstantPoolCache::base_offset() + ConstantPoolCacheEntry::indices_offset()) + 1 + byte_no, Rtemp); 122 #else 123 __ lbz(Rnew_bc, in_bytes(ConstantPoolCache::base_offset() + ConstantPoolCacheEntry::indices_offset()) + 7 - (1 + byte_no), Rtemp); 124 #endif 125 __ cmpwi(CCR0, Rnew_bc, 0); 126 __ li(Rnew_bc, (unsigned int)(unsigned char)new_bc); 127 __ beq(CCR0, L_patch_done); 128 // __ isync(); // acquire not needed 129 break; 130 } 131 132 default: 133 assert(byte_no == -1, "sanity"); 134 if (load_bc_into_bc_reg) { 135 __ li(Rnew_bc, (unsigned int)(unsigned char)new_bc); 136 } 137 } 138 139 if (JvmtiExport::can_post_breakpoint()) { 140 Label L_fast_patch; 141 __ lbz(Rtemp, 0, R14_bcp); 142 __ cmpwi(CCR0, Rtemp, (unsigned int)(unsigned char)Bytecodes::_breakpoint); 143 __ bne(CCR0, L_fast_patch); 144 // Perform the quickening, slowly, in the bowels of the breakpoint table. 145 __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::set_original_bytecode_at), R19_method, R14_bcp, Rnew_bc); 146 __ b(L_patch_done); 147 __ bind(L_fast_patch); 148 } 149 150 // Patch bytecode. 151 __ stb(Rnew_bc, 0, R14_bcp); 152 153 __ bind(L_patch_done); 154 } 155 156 // ============================================================================ 157 // Individual instructions 158 159 void TemplateTable::nop() { 160 transition(vtos, vtos); 161 // Nothing to do. 
162 } 163 164 void TemplateTable::shouldnotreachhere() { 165 transition(vtos, vtos); 166 __ stop("shouldnotreachhere bytecode"); 167 } 168 169 void TemplateTable::aconst_null() { 170 transition(vtos, atos); 171 __ li(R17_tos, 0); 172 } 173 174 void TemplateTable::iconst(int value) { 175 transition(vtos, itos); 176 assert(value >= -1 && value <= 5, ""); 177 __ li(R17_tos, value); 178 } 179 180 void TemplateTable::lconst(int value) { 181 transition(vtos, ltos); 182 assert(value >= -1 && value <= 5, ""); 183 __ li(R17_tos, value); 184 } 185 186 void TemplateTable::fconst(int value) { 187 transition(vtos, ftos); 188 static float zero = 0.0; 189 static float one = 1.0; 190 static float two = 2.0; 191 switch (value) { 192 default: ShouldNotReachHere(); 193 case 0: { 194 int simm16_offset = __ load_const_optimized(R11_scratch1, (address*)&zero, R0, true); 195 __ lfs(F15_ftos, simm16_offset, R11_scratch1); 196 break; 197 } 198 case 1: { 199 int simm16_offset = __ load_const_optimized(R11_scratch1, (address*)&one, R0, true); 200 __ lfs(F15_ftos, simm16_offset, R11_scratch1); 201 break; 202 } 203 case 2: { 204 int simm16_offset = __ load_const_optimized(R11_scratch1, (address*)&two, R0, true); 205 __ lfs(F15_ftos, simm16_offset, R11_scratch1); 206 break; 207 } 208 } 209 } 210 211 void TemplateTable::dconst(int value) { 212 transition(vtos, dtos); 213 static double zero = 0.0; 214 static double one = 1.0; 215 switch (value) { 216 case 0: { 217 int simm16_offset = __ load_const_optimized(R11_scratch1, (address*)&zero, R0, true); 218 __ lfd(F15_ftos, simm16_offset, R11_scratch1); 219 break; 220 } 221 case 1: { 222 int simm16_offset = __ load_const_optimized(R11_scratch1, (address*)&one, R0, true); 223 __ lfd(F15_ftos, simm16_offset, R11_scratch1); 224 break; 225 } 226 default: ShouldNotReachHere(); 227 } 228 } 229 230 void TemplateTable::bipush() { 231 transition(vtos, itos); 232 __ lbz(R17_tos, 1, R14_bcp); 233 __ extsb(R17_tos, R17_tos); 234 } 235 236 void TemplateTable::sipush() { 237 transition(vtos, itos); 238 __ get_2_byte_integer_at_bcp(1, R17_tos, InterpreterMacroAssembler::Signed); 239 } 240 241 void TemplateTable::ldc(bool wide) { 242 Register Rscratch1 = R11_scratch1, 243 Rscratch2 = R12_scratch2, 244 Rcpool = R3_ARG1; 245 246 transition(vtos, vtos); 247 Label notInt, notFloat, notClass, exit; 248 249 __ get_cpool_and_tags(Rcpool, Rscratch2); // Set Rscratch2 = &tags. 250 if (wide) { // Read index. 251 __ get_2_byte_integer_at_bcp(1, Rscratch1, InterpreterMacroAssembler::Unsigned); 252 } else { 253 __ lbz(Rscratch1, 1, R14_bcp); 254 } 255 256 const int base_offset = ConstantPool::header_size() * wordSize; 257 const int tags_offset = Array<u1>::base_offset_in_bytes(); 258 259 // Get type from tags. 260 __ addi(Rscratch2, Rscratch2, tags_offset); 261 __ lbzx(Rscratch2, Rscratch2, Rscratch1); 262 263 __ cmpwi(CCR0, Rscratch2, JVM_CONSTANT_UnresolvedClass); // Unresolved class? 264 __ cmpwi(CCR1, Rscratch2, JVM_CONSTANT_UnresolvedClassInError); // Unresolved class in error state? 265 __ cror(CCR0, Assembler::equal, CCR1, Assembler::equal); 266 267 // Resolved class - need to call vm to get java mirror of the class. 268 __ cmpwi(CCR1, Rscratch2, JVM_CONSTANT_Class); 269 __ crnor(CCR0, Assembler::equal, CCR1, Assembler::equal); // Neither resolved class nor unresolved case from above? 270 __ beq(CCR0, notClass); 271 272 __ li(R4, wide ? 
1 : 0); 273 call_VM(R17_tos, CAST_FROM_FN_PTR(address, InterpreterRuntime::ldc), R4); 274 __ push(atos); 275 __ b(exit); 276 277 __ align(32, 12); 278 __ bind(notClass); 279 __ addi(Rcpool, Rcpool, base_offset); 280 __ sldi(Rscratch1, Rscratch1, LogBytesPerWord); 281 __ cmpdi(CCR0, Rscratch2, JVM_CONSTANT_Integer); 282 __ bne(CCR0, notInt); 283 __ lwax(R17_tos, Rcpool, Rscratch1); 284 __ push(itos); 285 __ b(exit); 286 287 __ align(32, 12); 288 __ bind(notInt); 289 __ cmpdi(CCR0, Rscratch2, JVM_CONSTANT_Float); 290 __ bne(CCR0, notFloat); 291 __ lfsx(F15_ftos, Rcpool, Rscratch1); 292 __ push(ftos); 293 __ b(exit); 294 295 __ align(32, 12); 296 // assume the tag is for condy; if not, the VM runtime will tell us 297 __ bind(notFloat); 298 condy_helper(exit); 299 300 __ align(32, 12); 301 __ bind(exit); 302 } 303 304 // Fast path for caching oop constants. 305 void TemplateTable::fast_aldc(bool wide) { 306 transition(vtos, atos); 307 308 int index_size = wide ? sizeof(u2) : sizeof(u1); 309 const Register Rscratch = R11_scratch1; 310 Label is_null; 311 312 // We are resolved if the resolved reference cache entry contains a 313 // non-null object (CallSite, etc.) 314 __ get_cache_index_at_bcp(Rscratch, 1, index_size); // Load index. 315 __ load_resolved_reference_at_index(R17_tos, Rscratch, &is_null); 316 317 // Convert null sentinel to NULL. 318 int simm16_rest = __ load_const_optimized(Rscratch, Universe::the_null_sentinel_addr(), R0, true); 319 __ ld(Rscratch, simm16_rest, Rscratch); 320 __ cmpld(CCR0, R17_tos, Rscratch); 321 if (VM_Version::has_isel()) { 322 __ isel_0(R17_tos, CCR0, Assembler::equal); 323 } else { 324 Label not_sentinel; 325 __ bne(CCR0, not_sentinel); 326 __ li(R17_tos, 0); 327 __ bind(not_sentinel); 328 } 329 __ verify_oop(R17_tos); 330 __ dispatch_epilog(atos, Bytecodes::length_for(bytecode())); 331 332 __ bind(is_null); 333 __ load_const_optimized(R3_ARG1, (int)bytecode()); 334 335 address entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_ldc); 336 337 // First time invocation - must resolve first. 338 __ call_VM(R17_tos, entry, R3_ARG1); 339 __ verify_oop(R17_tos); 340 } 341 342 void TemplateTable::ldc2_w() { 343 transition(vtos, vtos); 344 Label not_double, not_long, exit; 345 346 Register Rindex = R11_scratch1, 347 Rcpool = R12_scratch2, 348 Rtag = R3_ARG1; 349 __ get_cpool_and_tags(Rcpool, Rtag); 350 __ get_2_byte_integer_at_bcp(1, Rindex, InterpreterMacroAssembler::Unsigned); 351 352 const int base_offset = ConstantPool::header_size() * wordSize; 353 const int tags_offset = Array<u1>::base_offset_in_bytes(); 354 // Get type from tags. 
355 __ addi(Rcpool, Rcpool, base_offset); 356 __ addi(Rtag, Rtag, tags_offset); 357 358 __ lbzx(Rtag, Rtag, Rindex); 359 __ sldi(Rindex, Rindex, LogBytesPerWord); 360 361 __ cmpdi(CCR0, Rtag, JVM_CONSTANT_Double); 362 __ bne(CCR0, not_double); 363 __ lfdx(F15_ftos, Rcpool, Rindex); 364 __ push(dtos); 365 __ b(exit); 366 367 __ bind(not_double); 368 __ cmpdi(CCR0, Rtag, JVM_CONSTANT_Long); 369 __ bne(CCR0, not_long); 370 __ ldx(R17_tos, Rcpool, Rindex); 371 __ push(ltos); 372 __ b(exit); 373 374 __ bind(not_long); 375 condy_helper(exit); 376 377 __ align(32, 12); 378 __ bind(exit); 379 } 380 381 void TemplateTable::condy_helper(Label& Done) { 382 const Register obj = R31; 383 const Register off = R11_scratch1; 384 const Register flags = R12_scratch2; 385 const Register rarg = R4_ARG2; 386 __ li(rarg, (int)bytecode()); 387 call_VM(obj, CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_ldc), rarg); 388 __ get_vm_result_2(flags); 389 390 // VMr = obj = base address to find primitive value to push 391 // VMr2 = flags = (tos, off) using format of CPCE::_flags 392 __ andi(off, flags, ConstantPoolCacheEntry::field_index_mask); 393 394 // What sort of thing are we loading? 395 __ rldicl(flags, flags, 64-ConstantPoolCacheEntry::tos_state_shift, 64-ConstantPoolCacheEntry::tos_state_bits); 396 397 switch (bytecode()) { 398 case Bytecodes::_ldc: 399 case Bytecodes::_ldc_w: 400 { 401 // tos in (itos, ftos, stos, btos, ctos, ztos) 402 Label notInt, notFloat, notShort, notByte, notChar, notBool; 403 __ cmplwi(CCR0, flags, itos); 404 __ bne(CCR0, notInt); 405 // itos 406 __ lwax(R17_tos, obj, off); 407 __ push(itos); 408 __ b(Done); 409 410 __ bind(notInt); 411 __ cmplwi(CCR0, flags, ftos); 412 __ bne(CCR0, notFloat); 413 // ftos 414 __ lfsx(F15_ftos, obj, off); 415 __ push(ftos); 416 __ b(Done); 417 418 __ bind(notFloat); 419 __ cmplwi(CCR0, flags, stos); 420 __ bne(CCR0, notShort); 421 // stos 422 __ lhax(R17_tos, obj, off); 423 __ push(stos); 424 __ b(Done); 425 426 __ bind(notShort); 427 __ cmplwi(CCR0, flags, btos); 428 __ bne(CCR0, notByte); 429 // btos 430 __ lbzx(R17_tos, obj, off); 431 __ extsb(R17_tos, R17_tos); 432 __ push(btos); 433 __ b(Done); 434 435 __ bind(notByte); 436 __ cmplwi(CCR0, flags, ctos); 437 __ bne(CCR0, notChar); 438 // ctos 439 __ lhzx(R17_tos, obj, off); 440 __ push(ctos); 441 __ b(Done); 442 443 __ bind(notChar); 444 __ cmplwi(CCR0, flags, ztos); 445 __ bne(CCR0, notBool); 446 // ztos 447 __ lbzx(R17_tos, obj, off); 448 __ push(ztos); 449 __ b(Done); 450 451 __ bind(notBool); 452 break; 453 } 454 455 case Bytecodes::_ldc2_w: 456 { 457 Label notLong, notDouble; 458 __ cmplwi(CCR0, flags, ltos); 459 __ bne(CCR0, notLong); 460 // ltos 461 __ ldx(R17_tos, obj, off); 462 __ push(ltos); 463 __ b(Done); 464 465 __ bind(notLong); 466 __ cmplwi(CCR0, flags, dtos); 467 __ bne(CCR0, notDouble); 468 // dtos 469 __ lfdx(F15_ftos, obj, off); 470 __ push(dtos); 471 __ b(Done); 472 473 __ bind(notDouble); 474 break; 475 } 476 477 default: 478 ShouldNotReachHere(); 479 } 480 481 __ stop("bad ldc/condy"); 482 } 483 484 // Get the locals index located in the bytecode stream at bcp + offset. 
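// (Illustrative note: for a plain bytecode such as iload the local index is the
//  unsigned byte following the opcode, i.e. roughly "u1 idx = bcp[offset]" with the
//  default offset of 1 used by the callers here; the wide variants instead read a
//  2-byte index at bcp + 2 via locals_index_wide.)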
485 void TemplateTable::locals_index(Register Rdst, int offset) { 486 __ lbz(Rdst, offset, R14_bcp); 487 } 488 489 void TemplateTable::iload() { 490 iload_internal(); 491 } 492 493 void TemplateTable::nofast_iload() { 494 iload_internal(may_not_rewrite); 495 } 496 497 void TemplateTable::iload_internal(RewriteControl rc) { 498 transition(vtos, itos); 499 500 // Get the local value into tos 501 const Register Rindex = R22_tmp2; 502 locals_index(Rindex); 503 504 // Rewrite iload,iload pair into fast_iload2 505 // iload,caload pair into fast_icaload 506 if (RewriteFrequentPairs && rc == may_rewrite) { 507 Label Lrewrite, Ldone; 508 Register Rnext_byte = R3_ARG1, 509 Rrewrite_to = R6_ARG4, 510 Rscratch = R11_scratch1; 511 512 // get next byte 513 __ lbz(Rnext_byte, Bytecodes::length_for(Bytecodes::_iload), R14_bcp); 514 515 // if _iload, wait to rewrite to iload2. We only want to rewrite the 516 // last two iloads in a pair. Comparing against fast_iload means that 517 // the next bytecode is neither an iload or a caload, and therefore 518 // an iload pair. 519 __ cmpwi(CCR0, Rnext_byte, (unsigned int)(unsigned char)Bytecodes::_iload); 520 __ beq(CCR0, Ldone); 521 522 __ cmpwi(CCR1, Rnext_byte, (unsigned int)(unsigned char)Bytecodes::_fast_iload); 523 __ li(Rrewrite_to, (unsigned int)(unsigned char)Bytecodes::_fast_iload2); 524 __ beq(CCR1, Lrewrite); 525 526 __ cmpwi(CCR0, Rnext_byte, (unsigned int)(unsigned char)Bytecodes::_caload); 527 __ li(Rrewrite_to, (unsigned int)(unsigned char)Bytecodes::_fast_icaload); 528 __ beq(CCR0, Lrewrite); 529 530 __ li(Rrewrite_to, (unsigned int)(unsigned char)Bytecodes::_fast_iload); 531 532 __ bind(Lrewrite); 533 patch_bytecode(Bytecodes::_iload, Rrewrite_to, Rscratch, false); 534 __ bind(Ldone); 535 } 536 537 __ load_local_int(R17_tos, Rindex, Rindex); 538 } 539 540 // Load 2 integers in a row without dispatching 541 void TemplateTable::fast_iload2() { 542 transition(vtos, itos); 543 544 __ lbz(R3_ARG1, 1, R14_bcp); 545 __ lbz(R17_tos, Bytecodes::length_for(Bytecodes::_iload) + 1, R14_bcp); 546 547 __ load_local_int(R3_ARG1, R11_scratch1, R3_ARG1); 548 __ load_local_int(R17_tos, R12_scratch2, R17_tos); 549 __ push_i(R3_ARG1); 550 } 551 552 void TemplateTable::fast_iload() { 553 transition(vtos, itos); 554 // Get the local value into tos 555 556 const Register Rindex = R11_scratch1; 557 locals_index(Rindex); 558 __ load_local_int(R17_tos, Rindex, Rindex); 559 } 560 561 // Load a local variable type long from locals area to TOS cache register. 562 // Local index resides in bytecodestream. 563 void TemplateTable::lload() { 564 transition(vtos, ltos); 565 566 const Register Rindex = R11_scratch1; 567 locals_index(Rindex); 568 __ load_local_long(R17_tos, Rindex, Rindex); 569 } 570 571 void TemplateTable::fload() { 572 transition(vtos, ftos); 573 574 const Register Rindex = R11_scratch1; 575 locals_index(Rindex); 576 __ load_local_float(F15_ftos, Rindex, Rindex); 577 } 578 579 void TemplateTable::dload() { 580 transition(vtos, dtos); 581 582 const Register Rindex = R11_scratch1; 583 locals_index(Rindex); 584 __ load_local_double(F15_ftos, Rindex, Rindex); 585 } 586 587 void TemplateTable::aload() { 588 transition(vtos, atos); 589 590 const Register Rindex = R11_scratch1; 591 locals_index(Rindex); 592 __ load_local_ptr(R17_tos, Rindex, Rindex); 593 } 594 595 void TemplateTable::locals_index_wide(Register Rdst) { 596 // Offset is 2, not 1, because Lbcp points to wide prefix code. 
597 __ get_2_byte_integer_at_bcp(2, Rdst, InterpreterMacroAssembler::Unsigned); 598 } 599 600 void TemplateTable::wide_iload() { 601 // Get the local value into tos. 602 603 const Register Rindex = R11_scratch1; 604 locals_index_wide(Rindex); 605 __ load_local_int(R17_tos, Rindex, Rindex); 606 } 607 608 void TemplateTable::wide_lload() { 609 transition(vtos, ltos); 610 611 const Register Rindex = R11_scratch1; 612 locals_index_wide(Rindex); 613 __ load_local_long(R17_tos, Rindex, Rindex); 614 } 615 616 void TemplateTable::wide_fload() { 617 transition(vtos, ftos); 618 619 const Register Rindex = R11_scratch1; 620 locals_index_wide(Rindex); 621 __ load_local_float(F15_ftos, Rindex, Rindex); 622 } 623 624 void TemplateTable::wide_dload() { 625 transition(vtos, dtos); 626 627 const Register Rindex = R11_scratch1; 628 locals_index_wide(Rindex); 629 __ load_local_double(F15_ftos, Rindex, Rindex); 630 } 631 632 void TemplateTable::wide_aload() { 633 transition(vtos, atos); 634 635 const Register Rindex = R11_scratch1; 636 locals_index_wide(Rindex); 637 __ load_local_ptr(R17_tos, Rindex, Rindex); 638 } 639 640 void TemplateTable::iaload() { 641 transition(itos, itos); 642 643 const Register Rload_addr = R3_ARG1, 644 Rarray = R4_ARG2, 645 Rtemp = R5_ARG3; 646 __ index_check(Rarray, R17_tos /* index */, LogBytesPerInt, Rtemp, Rload_addr); 647 __ lwa(R17_tos, arrayOopDesc::base_offset_in_bytes(T_INT), Rload_addr); 648 } 649 650 void TemplateTable::laload() { 651 transition(itos, ltos); 652 653 const Register Rload_addr = R3_ARG1, 654 Rarray = R4_ARG2, 655 Rtemp = R5_ARG3; 656 __ index_check(Rarray, R17_tos /* index */, LogBytesPerLong, Rtemp, Rload_addr); 657 __ ld(R17_tos, arrayOopDesc::base_offset_in_bytes(T_LONG), Rload_addr); 658 } 659 660 void TemplateTable::faload() { 661 transition(itos, ftos); 662 663 const Register Rload_addr = R3_ARG1, 664 Rarray = R4_ARG2, 665 Rtemp = R5_ARG3; 666 __ index_check(Rarray, R17_tos /* index */, LogBytesPerInt, Rtemp, Rload_addr); 667 __ lfs(F15_ftos, arrayOopDesc::base_offset_in_bytes(T_FLOAT), Rload_addr); 668 } 669 670 void TemplateTable::daload() { 671 transition(itos, dtos); 672 673 const Register Rload_addr = R3_ARG1, 674 Rarray = R4_ARG2, 675 Rtemp = R5_ARG3; 676 __ index_check(Rarray, R17_tos /* index */, LogBytesPerLong, Rtemp, Rload_addr); 677 __ lfd(F15_ftos, arrayOopDesc::base_offset_in_bytes(T_DOUBLE), Rload_addr); 678 } 679 680 void TemplateTable::aaload() { 681 transition(itos, atos); 682 683 // tos: index 684 // result tos: array 685 const Register Rload_addr = R3_ARG1, 686 Rarray = R4_ARG2, 687 Rtemp = R5_ARG3, 688 Rtemp2 = R31; 689 __ index_check(Rarray, R17_tos /* index */, UseCompressedOops ? 
2 : LogBytesPerWord, Rtemp, Rload_addr); 690 do_oop_load(_masm, Rload_addr, arrayOopDesc::base_offset_in_bytes(T_OBJECT), R17_tos, Rtemp, Rtemp2, 691 IS_ARRAY); 692 __ verify_oop(R17_tos); 693 //__ dcbt(R17_tos); // prefetch 694 } 695 696 void TemplateTable::baload() { 697 transition(itos, itos); 698 699 const Register Rload_addr = R3_ARG1, 700 Rarray = R4_ARG2, 701 Rtemp = R5_ARG3; 702 __ index_check(Rarray, R17_tos /* index */, 0, Rtemp, Rload_addr); 703 __ lbz(R17_tos, arrayOopDesc::base_offset_in_bytes(T_BYTE), Rload_addr); 704 __ extsb(R17_tos, R17_tos); 705 } 706 707 void TemplateTable::caload() { 708 transition(itos, itos); 709 710 const Register Rload_addr = R3_ARG1, 711 Rarray = R4_ARG2, 712 Rtemp = R5_ARG3; 713 __ index_check(Rarray, R17_tos /* index */, LogBytesPerShort, Rtemp, Rload_addr); 714 __ lhz(R17_tos, arrayOopDesc::base_offset_in_bytes(T_CHAR), Rload_addr); 715 } 716 717 // Iload followed by caload frequent pair. 718 void TemplateTable::fast_icaload() { 719 transition(vtos, itos); 720 721 const Register Rload_addr = R3_ARG1, 722 Rarray = R4_ARG2, 723 Rtemp = R11_scratch1; 724 725 locals_index(R17_tos); 726 __ load_local_int(R17_tos, Rtemp, R17_tos); 727 __ index_check(Rarray, R17_tos /* index */, LogBytesPerShort, Rtemp, Rload_addr); 728 __ lhz(R17_tos, arrayOopDesc::base_offset_in_bytes(T_CHAR), Rload_addr); 729 } 730 731 void TemplateTable::saload() { 732 transition(itos, itos); 733 734 const Register Rload_addr = R11_scratch1, 735 Rarray = R12_scratch2, 736 Rtemp = R3_ARG1; 737 __ index_check(Rarray, R17_tos /* index */, LogBytesPerShort, Rtemp, Rload_addr); 738 __ lha(R17_tos, arrayOopDesc::base_offset_in_bytes(T_SHORT), Rload_addr); 739 } 740 741 void TemplateTable::iload(int n) { 742 transition(vtos, itos); 743 744 __ lwz(R17_tos, Interpreter::local_offset_in_bytes(n), R18_locals); 745 } 746 747 void TemplateTable::lload(int n) { 748 transition(vtos, ltos); 749 750 __ ld(R17_tos, Interpreter::local_offset_in_bytes(n + 1), R18_locals); 751 } 752 753 void TemplateTable::fload(int n) { 754 transition(vtos, ftos); 755 756 __ lfs(F15_ftos, Interpreter::local_offset_in_bytes(n), R18_locals); 757 } 758 759 void TemplateTable::dload(int n) { 760 transition(vtos, dtos); 761 762 __ lfd(F15_ftos, Interpreter::local_offset_in_bytes(n + 1), R18_locals); 763 } 764 765 void TemplateTable::aload(int n) { 766 transition(vtos, atos); 767 768 __ ld(R17_tos, Interpreter::local_offset_in_bytes(n), R18_locals); 769 } 770 771 void TemplateTable::aload_0() { 772 aload_0_internal(); 773 } 774 775 void TemplateTable::nofast_aload_0() { 776 aload_0_internal(may_not_rewrite); 777 } 778 779 void TemplateTable::aload_0_internal(RewriteControl rc) { 780 transition(vtos, atos); 781 // According to bytecode histograms, the pairs: 782 // 783 // _aload_0, _fast_igetfield 784 // _aload_0, _fast_agetfield 785 // _aload_0, _fast_fgetfield 786 // 787 // occur frequently. If RewriteFrequentPairs is set, the (slow) 788 // _aload_0 bytecode checks if the next bytecode is either 789 // _fast_igetfield, _fast_agetfield or _fast_fgetfield and then 790 // rewrites the current bytecode into a pair bytecode; otherwise it 791 // rewrites the current bytecode into _0 that doesn't do 792 // the pair check anymore. 793 // 794 // Note: If the next bytecode is _getfield, the rewrite must be 795 // delayed, otherwise we may miss an opportunity for a pair. 
796 // 797 // Also rewrite frequent pairs 798 // aload_0, aload_1 799 // aload_0, iload_1 800 // These bytecodes with a small amount of code are most profitable 801 // to rewrite. 802 803 if (RewriteFrequentPairs && rc == may_rewrite) { 804 805 Label Lrewrite, Ldont_rewrite; 806 Register Rnext_byte = R3_ARG1, 807 Rrewrite_to = R6_ARG4, 808 Rscratch = R11_scratch1; 809 810 // Get next byte. 811 __ lbz(Rnext_byte, Bytecodes::length_for(Bytecodes::_aload_0), R14_bcp); 812 813 // If _getfield, wait to rewrite. We only want to rewrite the last two bytecodes in a pair. 814 __ cmpwi(CCR0, Rnext_byte, (unsigned int)(unsigned char)Bytecodes::_getfield); 815 __ beq(CCR0, Ldont_rewrite); 816 817 __ cmpwi(CCR1, Rnext_byte, (unsigned int)(unsigned char)Bytecodes::_fast_igetfield); 818 __ li(Rrewrite_to, (unsigned int)(unsigned char)Bytecodes::_fast_iaccess_0); 819 __ beq(CCR1, Lrewrite); 820 821 __ cmpwi(CCR0, Rnext_byte, (unsigned int)(unsigned char)Bytecodes::_fast_agetfield); 822 __ li(Rrewrite_to, (unsigned int)(unsigned char)Bytecodes::_fast_aaccess_0); 823 __ beq(CCR0, Lrewrite); 824 825 __ cmpwi(CCR1, Rnext_byte, (unsigned int)(unsigned char)Bytecodes::_fast_fgetfield); 826 __ li(Rrewrite_to, (unsigned int)(unsigned char)Bytecodes::_fast_faccess_0); 827 __ beq(CCR1, Lrewrite); 828 829 __ li(Rrewrite_to, (unsigned int)(unsigned char)Bytecodes::_fast_aload_0); 830 831 __ bind(Lrewrite); 832 patch_bytecode(Bytecodes::_aload_0, Rrewrite_to, Rscratch, false); 833 __ bind(Ldont_rewrite); 834 } 835 836 // Do actual aload_0 (must do this after patch_bytecode which might call VM and GC might change oop). 837 aload(0); 838 } 839 840 void TemplateTable::istore() { 841 transition(itos, vtos); 842 843 const Register Rindex = R11_scratch1; 844 locals_index(Rindex); 845 __ store_local_int(R17_tos, Rindex); 846 } 847 848 void TemplateTable::lstore() { 849 transition(ltos, vtos); 850 const Register Rindex = R11_scratch1; 851 locals_index(Rindex); 852 __ store_local_long(R17_tos, Rindex); 853 } 854 855 void TemplateTable::fstore() { 856 transition(ftos, vtos); 857 858 const Register Rindex = R11_scratch1; 859 locals_index(Rindex); 860 __ store_local_float(F15_ftos, Rindex); 861 } 862 863 void TemplateTable::dstore() { 864 transition(dtos, vtos); 865 866 const Register Rindex = R11_scratch1; 867 locals_index(Rindex); 868 __ store_local_double(F15_ftos, Rindex); 869 } 870 871 void TemplateTable::astore() { 872 transition(vtos, vtos); 873 874 const Register Rindex = R11_scratch1; 875 __ pop_ptr(); 876 __ verify_oop_or_return_address(R17_tos, Rindex); 877 locals_index(Rindex); 878 __ store_local_ptr(R17_tos, Rindex); 879 } 880 881 void TemplateTable::wide_istore() { 882 transition(vtos, vtos); 883 884 const Register Rindex = R11_scratch1; 885 __ pop_i(); 886 locals_index_wide(Rindex); 887 __ store_local_int(R17_tos, Rindex); 888 } 889 890 void TemplateTable::wide_lstore() { 891 transition(vtos, vtos); 892 893 const Register Rindex = R11_scratch1; 894 __ pop_l(); 895 locals_index_wide(Rindex); 896 __ store_local_long(R17_tos, Rindex); 897 } 898 899 void TemplateTable::wide_fstore() { 900 transition(vtos, vtos); 901 902 const Register Rindex = R11_scratch1; 903 __ pop_f(); 904 locals_index_wide(Rindex); 905 __ store_local_float(F15_ftos, Rindex); 906 } 907 908 void TemplateTable::wide_dstore() { 909 transition(vtos, vtos); 910 911 const Register Rindex = R11_scratch1; 912 __ pop_d(); 913 locals_index_wide(Rindex); 914 __ store_local_double(F15_ftos, Rindex); 915 } 916 917 void TemplateTable::wide_astore() { 918 
transition(vtos, vtos); 919 920 const Register Rindex = R11_scratch1; 921 __ pop_ptr(); 922 __ verify_oop_or_return_address(R17_tos, Rindex); 923 locals_index_wide(Rindex); 924 __ store_local_ptr(R17_tos, Rindex); 925 } 926 927 void TemplateTable::iastore() { 928 transition(itos, vtos); 929 930 const Register Rindex = R3_ARG1, 931 Rstore_addr = R4_ARG2, 932 Rarray = R5_ARG3, 933 Rtemp = R6_ARG4; 934 __ pop_i(Rindex); 935 __ index_check(Rarray, Rindex, LogBytesPerInt, Rtemp, Rstore_addr); 936 __ stw(R17_tos, arrayOopDesc::base_offset_in_bytes(T_INT), Rstore_addr); 937 } 938 939 void TemplateTable::lastore() { 940 transition(ltos, vtos); 941 942 const Register Rindex = R3_ARG1, 943 Rstore_addr = R4_ARG2, 944 Rarray = R5_ARG3, 945 Rtemp = R6_ARG4; 946 __ pop_i(Rindex); 947 __ index_check(Rarray, Rindex, LogBytesPerLong, Rtemp, Rstore_addr); 948 __ std(R17_tos, arrayOopDesc::base_offset_in_bytes(T_LONG), Rstore_addr); 949 } 950 951 void TemplateTable::fastore() { 952 transition(ftos, vtos); 953 954 const Register Rindex = R3_ARG1, 955 Rstore_addr = R4_ARG2, 956 Rarray = R5_ARG3, 957 Rtemp = R6_ARG4; 958 __ pop_i(Rindex); 959 __ index_check(Rarray, Rindex, LogBytesPerInt, Rtemp, Rstore_addr); 960 __ stfs(F15_ftos, arrayOopDesc::base_offset_in_bytes(T_FLOAT), Rstore_addr); 961 } 962 963 void TemplateTable::dastore() { 964 transition(dtos, vtos); 965 966 const Register Rindex = R3_ARG1, 967 Rstore_addr = R4_ARG2, 968 Rarray = R5_ARG3, 969 Rtemp = R6_ARG4; 970 __ pop_i(Rindex); 971 __ index_check(Rarray, Rindex, LogBytesPerLong, Rtemp, Rstore_addr); 972 __ stfd(F15_ftos, arrayOopDesc::base_offset_in_bytes(T_DOUBLE), Rstore_addr); 973 } 974 975 // Pop 3 values from the stack and... 976 void TemplateTable::aastore() { 977 transition(vtos, vtos); 978 979 Label Lstore_ok, Lis_null, Ldone; 980 const Register Rindex = R3_ARG1, 981 Rarray = R4_ARG2, 982 Rscratch = R11_scratch1, 983 Rscratch2 = R12_scratch2, 984 Rarray_klass = R5_ARG3, 985 Rarray_element_klass = Rarray_klass, 986 Rvalue_klass = R6_ARG4, 987 Rstore_addr = R31; // Use register which survives VM call. 988 989 __ ld(R17_tos, Interpreter::expr_offset_in_bytes(0), R15_esp); // Get value to store. 990 __ lwz(Rindex, Interpreter::expr_offset_in_bytes(1), R15_esp); // Get index. 991 __ ld(Rarray, Interpreter::expr_offset_in_bytes(2), R15_esp); // Get array. 992 993 __ verify_oop(R17_tos); 994 __ index_check_without_pop(Rarray, Rindex, UseCompressedOops ? 2 : LogBytesPerWord, Rscratch, Rstore_addr); 995 // Rindex is dead! 996 Register Rscratch3 = Rindex; 997 998 // Do array store check - check for NULL value first. 999 __ cmpdi(CCR0, R17_tos, 0); 1000 __ beq(CCR0, Lis_null); 1001 1002 __ load_klass(Rarray_klass, Rarray); 1003 __ load_klass(Rvalue_klass, R17_tos); 1004 1005 // Do fast instanceof cache test. 1006 __ ld(Rarray_element_klass, in_bytes(ObjArrayKlass::element_klass_offset()), Rarray_klass); 1007 1008 // Generate a fast subtype check. Branch to store_ok if no failure. Throw if failure. 1009 __ gen_subtype_check(Rvalue_klass /*subklass*/, Rarray_element_klass /*superklass*/, Rscratch, Rscratch2, Rscratch3, Lstore_ok); 1010 1011 // Fell through: subtype check failed => throw an exception. 
1012 __ load_dispatch_table(R11_scratch1, (address*)Interpreter::_throw_ArrayStoreException_entry); 1013 __ mtctr(R11_scratch1); 1014 __ bctr(); 1015 1016 __ bind(Lis_null); 1017 do_oop_store(_masm, Rstore_addr, arrayOopDesc::base_offset_in_bytes(T_OBJECT), noreg /* 0 */, 1018 Rscratch, Rscratch2, Rscratch3, IS_ARRAY); 1019 __ profile_null_seen(Rscratch, Rscratch2); 1020 __ b(Ldone); 1021 1022 // Store is OK. 1023 __ bind(Lstore_ok); 1024 do_oop_store(_masm, Rstore_addr, arrayOopDesc::base_offset_in_bytes(T_OBJECT), R17_tos /* value */, 1025 Rscratch, Rscratch2, Rscratch3, IS_ARRAY | IS_NOT_NULL); 1026 1027 __ bind(Ldone); 1028 // Adjust sp (pops array, index and value). 1029 __ addi(R15_esp, R15_esp, 3 * Interpreter::stackElementSize); 1030 } 1031 1032 void TemplateTable::bastore() { 1033 transition(itos, vtos); 1034 1035 const Register Rindex = R11_scratch1, 1036 Rarray = R12_scratch2, 1037 Rscratch = R3_ARG1; 1038 __ pop_i(Rindex); 1039 __ pop_ptr(Rarray); 1040 // tos: val 1041 1042 // Need to check whether array is boolean or byte 1043 // since both types share the bastore bytecode. 1044 __ load_klass(Rscratch, Rarray); 1045 __ lwz(Rscratch, in_bytes(Klass::layout_helper_offset()), Rscratch); 1046 int diffbit = exact_log2(Klass::layout_helper_boolean_diffbit()); 1047 __ testbitdi(CCR0, R0, Rscratch, diffbit); 1048 Label L_skip; 1049 __ bfalse(CCR0, L_skip); 1050 __ andi(R17_tos, R17_tos, 1); // if it is a T_BOOLEAN array, mask the stored value to 0/1 1051 __ bind(L_skip); 1052 1053 __ index_check_without_pop(Rarray, Rindex, 0, Rscratch, Rarray); 1054 __ stb(R17_tos, arrayOopDesc::base_offset_in_bytes(T_BYTE), Rarray); 1055 } 1056 1057 void TemplateTable::castore() { 1058 transition(itos, vtos); 1059 1060 const Register Rindex = R11_scratch1, 1061 Rarray = R12_scratch2, 1062 Rscratch = R3_ARG1; 1063 __ pop_i(Rindex); 1064 // tos: val 1065 // Rarray: array ptr (popped by index_check) 1066 __ index_check(Rarray, Rindex, LogBytesPerShort, Rscratch, Rarray); 1067 __ sth(R17_tos, arrayOopDesc::base_offset_in_bytes(T_CHAR), Rarray); 1068 } 1069 1070 void TemplateTable::sastore() { 1071 castore(); 1072 } 1073 1074 void TemplateTable::istore(int n) { 1075 transition(itos, vtos); 1076 __ stw(R17_tos, Interpreter::local_offset_in_bytes(n), R18_locals); 1077 } 1078 1079 void TemplateTable::lstore(int n) { 1080 transition(ltos, vtos); 1081 __ std(R17_tos, Interpreter::local_offset_in_bytes(n + 1), R18_locals); 1082 } 1083 1084 void TemplateTable::fstore(int n) { 1085 transition(ftos, vtos); 1086 __ stfs(F15_ftos, Interpreter::local_offset_in_bytes(n), R18_locals); 1087 } 1088 1089 void TemplateTable::dstore(int n) { 1090 transition(dtos, vtos); 1091 __ stfd(F15_ftos, Interpreter::local_offset_in_bytes(n + 1), R18_locals); 1092 } 1093 1094 void TemplateTable::astore(int n) { 1095 transition(vtos, vtos); 1096 1097 __ pop_ptr(); 1098 __ verify_oop_or_return_address(R17_tos, R11_scratch1); 1099 __ std(R17_tos, Interpreter::local_offset_in_bytes(n), R18_locals); 1100 } 1101 1102 void TemplateTable::pop() { 1103 transition(vtos, vtos); 1104 1105 __ addi(R15_esp, R15_esp, Interpreter::stackElementSize); 1106 } 1107 1108 void TemplateTable::pop2() { 1109 transition(vtos, vtos); 1110 1111 __ addi(R15_esp, R15_esp, Interpreter::stackElementSize * 2); 1112 } 1113 1114 void TemplateTable::dup() { 1115 transition(vtos, vtos); 1116 1117 __ ld(R11_scratch1, Interpreter::stackElementSize, R15_esp); 1118 __ push_ptr(R11_scratch1); 1119 } 1120 1121 void TemplateTable::dup_x1() { 1122 transition(vtos, vtos); 1123 
1124 Register Ra = R11_scratch1, 1125 Rb = R12_scratch2; 1126 // stack: ..., a, b 1127 __ ld(Rb, Interpreter::stackElementSize, R15_esp); 1128 __ ld(Ra, Interpreter::stackElementSize * 2, R15_esp); 1129 __ std(Rb, Interpreter::stackElementSize * 2, R15_esp); 1130 __ std(Ra, Interpreter::stackElementSize, R15_esp); 1131 __ push_ptr(Rb); 1132 // stack: ..., b, a, b 1133 } 1134 1135 void TemplateTable::dup_x2() { 1136 transition(vtos, vtos); 1137 1138 Register Ra = R11_scratch1, 1139 Rb = R12_scratch2, 1140 Rc = R3_ARG1; 1141 1142 // stack: ..., a, b, c 1143 __ ld(Rc, Interpreter::stackElementSize, R15_esp); // load c 1144 __ ld(Ra, Interpreter::stackElementSize * 3, R15_esp); // load a 1145 __ std(Rc, Interpreter::stackElementSize * 3, R15_esp); // store c in a 1146 __ ld(Rb, Interpreter::stackElementSize * 2, R15_esp); // load b 1147 // stack: ..., c, b, c 1148 __ std(Ra, Interpreter::stackElementSize * 2, R15_esp); // store a in b 1149 // stack: ..., c, a, c 1150 __ std(Rb, Interpreter::stackElementSize, R15_esp); // store b in c 1151 __ push_ptr(Rc); // push c 1152 // stack: ..., c, a, b, c 1153 } 1154 1155 void TemplateTable::dup2() { 1156 transition(vtos, vtos); 1157 1158 Register Ra = R11_scratch1, 1159 Rb = R12_scratch2; 1160 // stack: ..., a, b 1161 __ ld(Rb, Interpreter::stackElementSize, R15_esp); 1162 __ ld(Ra, Interpreter::stackElementSize * 2, R15_esp); 1163 __ push_2ptrs(Ra, Rb); 1164 // stack: ..., a, b, a, b 1165 } 1166 1167 void TemplateTable::dup2_x1() { 1168 transition(vtos, vtos); 1169 1170 Register Ra = R11_scratch1, 1171 Rb = R12_scratch2, 1172 Rc = R3_ARG1; 1173 // stack: ..., a, b, c 1174 __ ld(Rc, Interpreter::stackElementSize, R15_esp); 1175 __ ld(Rb, Interpreter::stackElementSize * 2, R15_esp); 1176 __ std(Rc, Interpreter::stackElementSize * 2, R15_esp); 1177 __ ld(Ra, Interpreter::stackElementSize * 3, R15_esp); 1178 __ std(Ra, Interpreter::stackElementSize, R15_esp); 1179 __ std(Rb, Interpreter::stackElementSize * 3, R15_esp); 1180 // stack: ..., b, c, a 1181 __ push_2ptrs(Rb, Rc); 1182 // stack: ..., b, c, a, b, c 1183 } 1184 1185 void TemplateTable::dup2_x2() { 1186 transition(vtos, vtos); 1187 1188 Register Ra = R11_scratch1, 1189 Rb = R12_scratch2, 1190 Rc = R3_ARG1, 1191 Rd = R4_ARG2; 1192 // stack: ..., a, b, c, d 1193 __ ld(Rb, Interpreter::stackElementSize * 3, R15_esp); 1194 __ ld(Rd, Interpreter::stackElementSize, R15_esp); 1195 __ std(Rb, Interpreter::stackElementSize, R15_esp); // store b in d 1196 __ std(Rd, Interpreter::stackElementSize * 3, R15_esp); // store d in b 1197 __ ld(Ra, Interpreter::stackElementSize * 4, R15_esp); 1198 __ ld(Rc, Interpreter::stackElementSize * 2, R15_esp); 1199 __ std(Ra, Interpreter::stackElementSize * 2, R15_esp); // store a in c 1200 __ std(Rc, Interpreter::stackElementSize * 4, R15_esp); // store c in a 1201 // stack: ..., c, d, a, b 1202 __ push_2ptrs(Rc, Rd); 1203 // stack: ..., c, d, a, b, c, d 1204 } 1205 1206 void TemplateTable::swap() { 1207 transition(vtos, vtos); 1208 // stack: ..., a, b 1209 1210 Register Ra = R11_scratch1, 1211 Rb = R12_scratch2; 1212 // stack: ..., a, b 1213 __ ld(Rb, Interpreter::stackElementSize, R15_esp); 1214 __ ld(Ra, Interpreter::stackElementSize * 2, R15_esp); 1215 __ std(Rb, Interpreter::stackElementSize * 2, R15_esp); 1216 __ std(Ra, Interpreter::stackElementSize, R15_esp); 1217 // stack: ..., b, a 1218 } 1219 1220 void TemplateTable::iop2(Operation op) { 1221 transition(itos, itos); 1222 1223 Register Rscratch = R11_scratch1; 1224 1225 __ pop_i(Rscratch); 1226 // tos = number of 
bits to shift 1227 // Rscratch = value to shift 1228 switch (op) { 1229 case add: __ add(R17_tos, Rscratch, R17_tos); break; 1230 case sub: __ sub(R17_tos, Rscratch, R17_tos); break; 1231 case mul: __ mullw(R17_tos, Rscratch, R17_tos); break; 1232 case _and: __ andr(R17_tos, Rscratch, R17_tos); break; 1233 case _or: __ orr(R17_tos, Rscratch, R17_tos); break; 1234 case _xor: __ xorr(R17_tos, Rscratch, R17_tos); break; 1235 case shl: __ rldicl(R17_tos, R17_tos, 0, 64-5); __ slw(R17_tos, Rscratch, R17_tos); break; 1236 case shr: __ rldicl(R17_tos, R17_tos, 0, 64-5); __ sraw(R17_tos, Rscratch, R17_tos); break; 1237 case ushr: __ rldicl(R17_tos, R17_tos, 0, 64-5); __ srw(R17_tos, Rscratch, R17_tos); break; 1238 default: ShouldNotReachHere(); 1239 } 1240 } 1241 1242 void TemplateTable::lop2(Operation op) { 1243 transition(ltos, ltos); 1244 1245 Register Rscratch = R11_scratch1; 1246 __ pop_l(Rscratch); 1247 switch (op) { 1248 case add: __ add(R17_tos, Rscratch, R17_tos); break; 1249 case sub: __ sub(R17_tos, Rscratch, R17_tos); break; 1250 case _and: __ andr(R17_tos, Rscratch, R17_tos); break; 1251 case _or: __ orr(R17_tos, Rscratch, R17_tos); break; 1252 case _xor: __ xorr(R17_tos, Rscratch, R17_tos); break; 1253 default: ShouldNotReachHere(); 1254 } 1255 } 1256 1257 void TemplateTable::idiv() { 1258 transition(itos, itos); 1259 1260 Label Lnormal, Lexception, Ldone; 1261 Register Rdividend = R11_scratch1; // Used by irem. 1262 1263 __ addi(R0, R17_tos, 1); 1264 __ cmplwi(CCR0, R0, 2); 1265 __ bgt(CCR0, Lnormal); // divisor <-1 or >1 1266 1267 __ cmpwi(CCR1, R17_tos, 0); 1268 __ beq(CCR1, Lexception); // divisor == 0 1269 1270 __ pop_i(Rdividend); 1271 __ mullw(R17_tos, Rdividend, R17_tos); // div by +/-1 1272 __ b(Ldone); 1273 1274 __ bind(Lexception); 1275 __ load_dispatch_table(R11_scratch1, (address*)Interpreter::_throw_ArithmeticException_entry); 1276 __ mtctr(R11_scratch1); 1277 __ bctr(); 1278 1279 __ align(32, 12); 1280 __ bind(Lnormal); 1281 __ pop_i(Rdividend); 1282 __ divw(R17_tos, Rdividend, R17_tos); // Can't divide minint/-1. 1283 __ bind(Ldone); 1284 } 1285 1286 void TemplateTable::irem() { 1287 transition(itos, itos); 1288 1289 __ mr(R12_scratch2, R17_tos); 1290 idiv(); 1291 __ mullw(R17_tos, R17_tos, R12_scratch2); 1292 __ subf(R17_tos, R17_tos, R11_scratch1); // Dividend set by idiv. 1293 } 1294 1295 void TemplateTable::lmul() { 1296 transition(ltos, ltos); 1297 1298 __ pop_l(R11_scratch1); 1299 __ mulld(R17_tos, R11_scratch1, R17_tos); 1300 } 1301 1302 void TemplateTable::ldiv() { 1303 transition(ltos, ltos); 1304 1305 Label Lnormal, Lexception, Ldone; 1306 Register Rdividend = R11_scratch1; // Used by lrem. 1307 1308 __ addi(R0, R17_tos, 1); 1309 __ cmpldi(CCR0, R0, 2); 1310 __ bgt(CCR0, Lnormal); // divisor <-1 or >1 1311 1312 __ cmpdi(CCR1, R17_tos, 0); 1313 __ beq(CCR1, Lexception); // divisor == 0 1314 1315 __ pop_l(Rdividend); 1316 __ mulld(R17_tos, Rdividend, R17_tos); // div by +/-1 1317 __ b(Ldone); 1318 1319 __ bind(Lexception); 1320 __ load_dispatch_table(R11_scratch1, (address*)Interpreter::_throw_ArithmeticException_entry); 1321 __ mtctr(R11_scratch1); 1322 __ bctr(); 1323 1324 __ align(32, 12); 1325 __ bind(Lnormal); 1326 __ pop_l(Rdividend); 1327 __ divd(R17_tos, Rdividend, R17_tos); // Can't divide minint/-1. 
1328 __ bind(Ldone); 1329 } 1330 1331 void TemplateTable::lrem() { 1332 transition(ltos, ltos); 1333 1334 __ mr(R12_scratch2, R17_tos); 1335 ldiv(); 1336 __ mulld(R17_tos, R17_tos, R12_scratch2); 1337 __ subf(R17_tos, R17_tos, R11_scratch1); // Dividend set by ldiv. 1338 } 1339 1340 void TemplateTable::lshl() { 1341 transition(itos, ltos); 1342 1343 __ rldicl(R17_tos, R17_tos, 0, 64-6); // Extract least significant bits. 1344 __ pop_l(R11_scratch1); 1345 __ sld(R17_tos, R11_scratch1, R17_tos); 1346 } 1347 1348 void TemplateTable::lshr() { 1349 transition(itos, ltos); 1350 1351 __ rldicl(R17_tos, R17_tos, 0, 64-6); // Extract least significant bits. 1352 __ pop_l(R11_scratch1); 1353 __ srad(R17_tos, R11_scratch1, R17_tos); 1354 } 1355 1356 void TemplateTable::lushr() { 1357 transition(itos, ltos); 1358 1359 __ rldicl(R17_tos, R17_tos, 0, 64-6); // Extract least significant bits. 1360 __ pop_l(R11_scratch1); 1361 __ srd(R17_tos, R11_scratch1, R17_tos); 1362 } 1363 1364 void TemplateTable::fop2(Operation op) { 1365 transition(ftos, ftos); 1366 1367 switch (op) { 1368 case add: __ pop_f(F0_SCRATCH); __ fadds(F15_ftos, F0_SCRATCH, F15_ftos); break; 1369 case sub: __ pop_f(F0_SCRATCH); __ fsubs(F15_ftos, F0_SCRATCH, F15_ftos); break; 1370 case mul: __ pop_f(F0_SCRATCH); __ fmuls(F15_ftos, F0_SCRATCH, F15_ftos); break; 1371 case div: __ pop_f(F0_SCRATCH); __ fdivs(F15_ftos, F0_SCRATCH, F15_ftos); break; 1372 case rem: 1373 __ pop_f(F1_ARG1); 1374 __ fmr(F2_ARG2, F15_ftos); 1375 __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::frem)); 1376 __ fmr(F15_ftos, F1_RET); 1377 break; 1378 1379 default: ShouldNotReachHere(); 1380 } 1381 } 1382 1383 void TemplateTable::dop2(Operation op) { 1384 transition(dtos, dtos); 1385 1386 switch (op) { 1387 case add: __ pop_d(F0_SCRATCH); __ fadd(F15_ftos, F0_SCRATCH, F15_ftos); break; 1388 case sub: __ pop_d(F0_SCRATCH); __ fsub(F15_ftos, F0_SCRATCH, F15_ftos); break; 1389 case mul: __ pop_d(F0_SCRATCH); __ fmul(F15_ftos, F0_SCRATCH, F15_ftos); break; 1390 case div: __ pop_d(F0_SCRATCH); __ fdiv(F15_ftos, F0_SCRATCH, F15_ftos); break; 1391 case rem: 1392 __ pop_d(F1_ARG1); 1393 __ fmr(F2_ARG2, F15_ftos); 1394 __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::drem)); 1395 __ fmr(F15_ftos, F1_RET); 1396 break; 1397 1398 default: ShouldNotReachHere(); 1399 } 1400 } 1401 1402 // Negate the value in the TOS cache. 1403 void TemplateTable::ineg() { 1404 transition(itos, itos); 1405 1406 __ neg(R17_tos, R17_tos); 1407 } 1408 1409 // Negate the value in the TOS cache. 1410 void TemplateTable::lneg() { 1411 transition(ltos, ltos); 1412 1413 __ neg(R17_tos, R17_tos); 1414 } 1415 1416 void TemplateTable::fneg() { 1417 transition(ftos, ftos); 1418 1419 __ fneg(F15_ftos, F15_ftos); 1420 } 1421 1422 void TemplateTable::dneg() { 1423 transition(dtos, dtos); 1424 1425 __ fneg(F15_ftos, F15_ftos); 1426 } 1427 1428 // Increments a local variable in place. 1429 void TemplateTable::iinc() { 1430 transition(vtos, vtos); 1431 1432 const Register Rindex = R11_scratch1, 1433 Rincrement = R0, 1434 Rvalue = R12_scratch2; 1435 1436 locals_index(Rindex); // Load locals index from bytecode stream. 1437 __ lbz(Rincrement, 2, R14_bcp); // Load increment from the bytecode stream. 1438 __ extsb(Rincrement, Rincrement); 1439 1440 __ load_local_int(Rvalue, Rindex, Rindex); // Puts address of local into Rindex. 
1441 1442 __ add(Rvalue, Rincrement, Rvalue); 1443 __ stw(Rvalue, 0, Rindex); 1444 } 1445 1446 void TemplateTable::wide_iinc() { 1447 transition(vtos, vtos); 1448 1449 Register Rindex = R11_scratch1, 1450 Rlocals_addr = Rindex, 1451 Rincr = R12_scratch2; 1452 locals_index_wide(Rindex); 1453 __ get_2_byte_integer_at_bcp(4, Rincr, InterpreterMacroAssembler::Signed); 1454 __ load_local_int(R17_tos, Rlocals_addr, Rindex); 1455 __ add(R17_tos, Rincr, R17_tos); 1456 __ stw(R17_tos, 0, Rlocals_addr); 1457 } 1458 1459 void TemplateTable::convert() { 1460 // %%%%% Factor this first part accross platforms 1461 #ifdef ASSERT 1462 TosState tos_in = ilgl; 1463 TosState tos_out = ilgl; 1464 switch (bytecode()) { 1465 case Bytecodes::_i2l: // fall through 1466 case Bytecodes::_i2f: // fall through 1467 case Bytecodes::_i2d: // fall through 1468 case Bytecodes::_i2b: // fall through 1469 case Bytecodes::_i2c: // fall through 1470 case Bytecodes::_i2s: tos_in = itos; break; 1471 case Bytecodes::_l2i: // fall through 1472 case Bytecodes::_l2f: // fall through 1473 case Bytecodes::_l2d: tos_in = ltos; break; 1474 case Bytecodes::_f2i: // fall through 1475 case Bytecodes::_f2l: // fall through 1476 case Bytecodes::_f2d: tos_in = ftos; break; 1477 case Bytecodes::_d2i: // fall through 1478 case Bytecodes::_d2l: // fall through 1479 case Bytecodes::_d2f: tos_in = dtos; break; 1480 default : ShouldNotReachHere(); 1481 } 1482 switch (bytecode()) { 1483 case Bytecodes::_l2i: // fall through 1484 case Bytecodes::_f2i: // fall through 1485 case Bytecodes::_d2i: // fall through 1486 case Bytecodes::_i2b: // fall through 1487 case Bytecodes::_i2c: // fall through 1488 case Bytecodes::_i2s: tos_out = itos; break; 1489 case Bytecodes::_i2l: // fall through 1490 case Bytecodes::_f2l: // fall through 1491 case Bytecodes::_d2l: tos_out = ltos; break; 1492 case Bytecodes::_i2f: // fall through 1493 case Bytecodes::_l2f: // fall through 1494 case Bytecodes::_d2f: tos_out = ftos; break; 1495 case Bytecodes::_i2d: // fall through 1496 case Bytecodes::_l2d: // fall through 1497 case Bytecodes::_f2d: tos_out = dtos; break; 1498 default : ShouldNotReachHere(); 1499 } 1500 transition(tos_in, tos_out); 1501 #endif 1502 1503 // Conversion 1504 Label done; 1505 switch (bytecode()) { 1506 case Bytecodes::_i2l: 1507 __ extsw(R17_tos, R17_tos); 1508 break; 1509 1510 case Bytecodes::_l2i: 1511 // Nothing to do, we'll continue to work with the lower bits. 1512 break; 1513 1514 case Bytecodes::_i2b: 1515 __ extsb(R17_tos, R17_tos); 1516 break; 1517 1518 case Bytecodes::_i2c: 1519 __ rldicl(R17_tos, R17_tos, 0, 64-2*8); 1520 break; 1521 1522 case Bytecodes::_i2s: 1523 __ extsh(R17_tos, R17_tos); 1524 break; 1525 1526 case Bytecodes::_i2d: 1527 __ extsw(R17_tos, R17_tos); 1528 case Bytecodes::_l2d: 1529 __ move_l_to_d(); 1530 __ fcfid(F15_ftos, F15_ftos); 1531 break; 1532 1533 case Bytecodes::_i2f: 1534 __ extsw(R17_tos, R17_tos); 1535 __ move_l_to_d(); 1536 if (VM_Version::has_fcfids()) { // fcfids is >= Power7 only 1537 // Comment: alternatively, load with sign extend could be done by lfiwax. 1538 __ fcfids(F15_ftos, F15_ftos); 1539 } else { 1540 __ fcfid(F15_ftos, F15_ftos); 1541 __ frsp(F15_ftos, F15_ftos); 1542 } 1543 break; 1544 1545 case Bytecodes::_l2f: 1546 if (VM_Version::has_fcfids()) { // fcfids is >= Power7 only 1547 __ move_l_to_d(); 1548 __ fcfids(F15_ftos, F15_ftos); 1549 } else { 1550 // Avoid rounding problem when result should be 0x3f800001: need fixup code before fcfid+frsp. 
1551 __ mr(R3_ARG1, R17_tos); 1552 __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::l2f)); 1553 __ fmr(F15_ftos, F1_RET); 1554 } 1555 break; 1556 1557 case Bytecodes::_f2d: 1558 // empty 1559 break; 1560 1561 case Bytecodes::_d2f: 1562 __ frsp(F15_ftos, F15_ftos); 1563 break; 1564 1565 case Bytecodes::_d2i: 1566 case Bytecodes::_f2i: 1567 __ fcmpu(CCR0, F15_ftos, F15_ftos); 1568 __ li(R17_tos, 0); // 0 in case of NAN 1569 __ bso(CCR0, done); 1570 __ fctiwz(F15_ftos, F15_ftos); 1571 __ move_d_to_l(); 1572 break; 1573 1574 case Bytecodes::_d2l: 1575 case Bytecodes::_f2l: 1576 __ fcmpu(CCR0, F15_ftos, F15_ftos); 1577 __ li(R17_tos, 0); // 0 in case of NAN 1578 __ bso(CCR0, done); 1579 __ fctidz(F15_ftos, F15_ftos); 1580 __ move_d_to_l(); 1581 break; 1582 1583 default: ShouldNotReachHere(); 1584 } 1585 __ bind(done); 1586 } 1587 1588 // Long compare 1589 void TemplateTable::lcmp() { 1590 transition(ltos, itos); 1591 1592 const Register Rscratch = R11_scratch1; 1593 __ pop_l(Rscratch); // first operand, deeper in stack 1594 1595 __ cmpd(CCR0, Rscratch, R17_tos); // compare 1596 __ mfcr(R17_tos); // set bit 32..33 as follows: <: 0b10, =: 0b00, >: 0b01 1597 __ srwi(Rscratch, R17_tos, 30); 1598 __ srawi(R17_tos, R17_tos, 31); 1599 __ orr(R17_tos, Rscratch, R17_tos); // set result as follows: <: -1, =: 0, >: 1 1600 } 1601 1602 // fcmpl/fcmpg and dcmpl/dcmpg bytecodes 1603 // unordered_result == -1 => fcmpl or dcmpl 1604 // unordered_result == 1 => fcmpg or dcmpg 1605 void TemplateTable::float_cmp(bool is_float, int unordered_result) { 1606 const FloatRegister Rfirst = F0_SCRATCH, 1607 Rsecond = F15_ftos; 1608 const Register Rscratch = R11_scratch1; 1609 1610 if (is_float) { 1611 __ pop_f(Rfirst); 1612 } else { 1613 __ pop_d(Rfirst); 1614 } 1615 1616 Label Lunordered, Ldone; 1617 __ fcmpu(CCR0, Rfirst, Rsecond); // compare 1618 if (unordered_result) { 1619 __ bso(CCR0, Lunordered); 1620 } 1621 __ mfcr(R17_tos); // set bit 32..33 as follows: <: 0b10, =: 0b00, >: 0b01 1622 __ srwi(Rscratch, R17_tos, 30); 1623 __ srawi(R17_tos, R17_tos, 31); 1624 __ orr(R17_tos, Rscratch, R17_tos); // set result as follows: <: -1, =: 0, >: 1 1625 if (unordered_result) { 1626 __ b(Ldone); 1627 __ bind(Lunordered); 1628 __ load_const_optimized(R17_tos, unordered_result); 1629 } 1630 __ bind(Ldone); 1631 } 1632 1633 // Branch_conditional which takes TemplateTable::Condition. 1634 void TemplateTable::branch_conditional(ConditionRegister crx, TemplateTable::Condition cc, Label& L, bool invert) { 1635 bool positive = false; 1636 Assembler::Condition cond = Assembler::equal; 1637 switch (cc) { 1638 case TemplateTable::equal: positive = true ; cond = Assembler::equal ; break; 1639 case TemplateTable::not_equal: positive = false; cond = Assembler::equal ; break; 1640 case TemplateTable::less: positive = true ; cond = Assembler::less ; break; 1641 case TemplateTable::less_equal: positive = false; cond = Assembler::greater; break; 1642 case TemplateTable::greater: positive = true ; cond = Assembler::greater; break; 1643 case TemplateTable::greater_equal: positive = false; cond = Assembler::less ; break; 1644 default: ShouldNotReachHere(); 1645 } 1646 int bo = (positive != invert) ? Assembler::bcondCRbiIs1 : Assembler::bcondCRbiIs0; 1647 int bi = Assembler::bi0(crx, cond); 1648 __ bc(bo, bi, L); 1649 } 1650 1651 void TemplateTable::branch(bool is_jsr, bool is_wide) { 1652 1653 // Note: on SPARC, we use InterpreterMacroAssembler::if_cmp also. 
1654 __ verify_thread(); 1655 1656 const Register Rscratch1 = R11_scratch1, 1657 Rscratch2 = R12_scratch2, 1658 Rscratch3 = R3_ARG1, 1659 R4_counters = R4_ARG2, 1660 bumped_count = R31, 1661 Rdisp = R22_tmp2; 1662 1663 __ profile_taken_branch(Rscratch1, bumped_count); 1664 1665 // Get (wide) offset. 1666 if (is_wide) { 1667 __ get_4_byte_integer_at_bcp(1, Rdisp, InterpreterMacroAssembler::Signed); 1668 } else { 1669 __ get_2_byte_integer_at_bcp(1, Rdisp, InterpreterMacroAssembler::Signed); 1670 } 1671 1672 // -------------------------------------------------------------------------- 1673 // Handle all the JSR stuff here, then exit. 1674 // It's much shorter and cleaner than intermingling with the 1675 // non-JSR normal-branch stuff occurring below. 1676 if (is_jsr) { 1677 // Compute return address as bci in Otos_i. 1678 __ ld(Rscratch1, in_bytes(Method::const_offset()), R19_method); 1679 __ addi(Rscratch2, R14_bcp, -in_bytes(ConstMethod::codes_offset()) + (is_wide ? 5 : 3)); 1680 __ subf(R17_tos, Rscratch1, Rscratch2); 1681 1682 // Bump bcp to target of JSR. 1683 __ add(R14_bcp, Rdisp, R14_bcp); 1684 // Push returnAddress for "ret" on stack. 1685 __ push_ptr(R17_tos); 1686 // And away we go! 1687 __ dispatch_next(vtos, 0 ,true); 1688 return; 1689 } 1690 1691 // -------------------------------------------------------------------------- 1692 // Normal (non-jsr) branch handling 1693 1694 // Bump bytecode pointer by displacement (take the branch). 1695 __ add(R14_bcp, Rdisp, R14_bcp); // Add to bc addr. 1696 1697 const bool increment_invocation_counter_for_backward_branches = UseCompiler && UseLoopCounter; 1698 if (increment_invocation_counter_for_backward_branches) { 1699 Label Lforward; 1700 1701 // Check branch direction. 1702 __ cmpdi(CCR0, Rdisp, 0); 1703 __ bgt(CCR0, Lforward); 1704 1705 __ get_method_counters(R19_method, R4_counters, Lforward); 1706 1707 if (TieredCompilation) { 1708 Label Lno_mdo, Loverflow; 1709 const int increment = InvocationCounter::count_increment; 1710 if (ProfileInterpreter) { 1711 Register Rmdo = Rscratch1; 1712 1713 // If no method data exists, go to profile_continue. 1714 __ ld(Rmdo, in_bytes(Method::method_data_offset()), R19_method); 1715 __ cmpdi(CCR0, Rmdo, 0); 1716 __ beq(CCR0, Lno_mdo); 1717 1718 // Increment backedge counter in the MDO. 1719 const int mdo_bc_offs = in_bytes(MethodData::backedge_counter_offset()) + in_bytes(InvocationCounter::counter_offset()); 1720 __ lwz(Rscratch2, mdo_bc_offs, Rmdo); 1721 __ lwz(Rscratch3, in_bytes(MethodData::backedge_mask_offset()), Rmdo); 1722 __ addi(Rscratch2, Rscratch2, increment); 1723 __ stw(Rscratch2, mdo_bc_offs, Rmdo); 1724 if (UseOnStackReplacement) { 1725 __ and_(Rscratch3, Rscratch2, Rscratch3); 1726 __ bne(CCR0, Lforward); 1727 __ b(Loverflow); 1728 } else { 1729 __ b(Lforward); 1730 } 1731 } 1732 1733 // If there's no MDO, increment counter in method. 1734 const int mo_bc_offs = in_bytes(MethodCounters::backedge_counter_offset()) + in_bytes(InvocationCounter::counter_offset()); 1735 __ bind(Lno_mdo); 1736 __ lwz(Rscratch2, mo_bc_offs, R4_counters); 1737 __ lwz(Rscratch3, in_bytes(MethodCounters::backedge_mask_offset()), R4_counters); 1738 __ addi(Rscratch2, Rscratch2, increment); 1739 __ stw(Rscratch2, mo_bc_offs, R4_counters); 1740 if (UseOnStackReplacement) { 1741 __ and_(Rscratch3, Rscratch2, Rscratch3); 1742 __ bne(CCR0, Lforward); 1743 } else { 1744 __ b(Lforward); 1745 } 1746 __ bind(Loverflow); 1747 1748 // Notify point for loop, pass branch bytecode. 
1749 __ subf(R4_ARG2, Rdisp, R14_bcp); // Compute branch bytecode (previous bcp). 1750 __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::frequency_counter_overflow), R4_ARG2, true); 1751 1752 // Was an OSR adapter generated? 1753 __ cmpdi(CCR0, R3_RET, 0); 1754 __ beq(CCR0, Lforward); 1755 1756 // Has the nmethod been invalidated already? 1757 __ lbz(R0, nmethod::state_offset(), R3_RET); 1758 __ cmpwi(CCR0, R0, nmethod::in_use); 1759 __ bne(CCR0, Lforward); 1760 1761 // Migrate the interpreter frame off of the stack. 1762 // We can use all registers because we will not return to interpreter from this point. 1763 1764 // Save nmethod. 1765 const Register osr_nmethod = R31; 1766 __ mr(osr_nmethod, R3_RET); 1767 __ set_top_ijava_frame_at_SP_as_last_Java_frame(R1_SP, R11_scratch1); 1768 __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::OSR_migration_begin), R16_thread); 1769 __ reset_last_Java_frame(); 1770 // OSR buffer is in ARG1. 1771 1772 // Remove the interpreter frame. 1773 __ merge_frames(/*top_frame_sp*/ R21_sender_SP, /*return_pc*/ R0, R11_scratch1, R12_scratch2); 1774 1775 // Jump to the osr code. 1776 __ ld(R11_scratch1, nmethod::osr_entry_point_offset(), osr_nmethod); 1777 __ mtlr(R0); 1778 __ mtctr(R11_scratch1); 1779 __ bctr(); 1780 1781 } else { 1782 1783 const Register invoke_ctr = Rscratch1; 1784 // Update Backedge branch separately from invocations. 1785 __ increment_backedge_counter(R4_counters, invoke_ctr, Rscratch2, Rscratch3); 1786 1787 if (ProfileInterpreter) { 1788 __ test_invocation_counter_for_mdp(invoke_ctr, R4_counters, Rscratch2, Lforward); 1789 if (UseOnStackReplacement) { 1790 __ test_backedge_count_for_osr(bumped_count, R4_counters, R14_bcp, Rdisp, Rscratch2); 1791 } 1792 } else { 1793 if (UseOnStackReplacement) { 1794 __ test_backedge_count_for_osr(invoke_ctr, R4_counters, R14_bcp, Rdisp, Rscratch2); 1795 } 1796 } 1797 } 1798 1799 __ bind(Lforward); 1800 } 1801 __ dispatch_next(vtos, 0, true); 1802 } 1803 1804 // Helper function for if_cmp* methods below. 1805 // Factored out common compare and branch code. 1806 void TemplateTable::if_cmp_common(Register Rfirst, Register Rsecond, Register Rscratch1, Register Rscratch2, Condition cc, bool is_jint, bool cmp0) { 1807 Label Lnot_taken; 1808 // Note: The condition code we get is the condition under which we 1809 // *fall through*! So we have to inverse the CC here. 1810 1811 if (is_jint) { 1812 if (cmp0) { 1813 __ cmpwi(CCR0, Rfirst, 0); 1814 } else { 1815 __ cmpw(CCR0, Rfirst, Rsecond); 1816 } 1817 } else { 1818 if (cmp0) { 1819 __ cmpdi(CCR0, Rfirst, 0); 1820 } else { 1821 __ cmpd(CCR0, Rfirst, Rsecond); 1822 } 1823 } 1824 branch_conditional(CCR0, cc, Lnot_taken, /*invert*/ true); 1825 1826 // Conition is false => Jump! 1827 branch(false, false); 1828 1829 // Condition is not true => Continue. 1830 __ align(32, 12); 1831 __ bind(Lnot_taken); 1832 __ profile_not_taken_branch(Rscratch1, Rscratch2); 1833 } 1834 1835 // Compare integer values with zero and fall through if CC holds, branch away otherwise. 1836 void TemplateTable::if_0cmp(Condition cc) { 1837 transition(itos, vtos); 1838 1839 if_cmp_common(R17_tos, noreg, R11_scratch1, R12_scratch2, cc, true, true); 1840 } 1841 1842 // Compare integer values and fall through if CC holds, branch away otherwise. 
1843 // 1844 // Interface: 1845 // - Rfirst: First operand (older stack value) 1846 // - tos: Second operand (younger stack value) 1847 void TemplateTable::if_icmp(Condition cc) { 1848 transition(itos, vtos); 1849 1850 const Register Rfirst = R0, 1851 Rsecond = R17_tos; 1852 1853 __ pop_i(Rfirst); 1854 if_cmp_common(Rfirst, Rsecond, R11_scratch1, R12_scratch2, cc, true, false); 1855 } 1856 1857 void TemplateTable::if_nullcmp(Condition cc) { 1858 transition(atos, vtos); 1859 1860 if_cmp_common(R17_tos, noreg, R11_scratch1, R12_scratch2, cc, false, true); 1861 } 1862 1863 void TemplateTable::if_acmp(Condition cc) { 1864 transition(atos, vtos); 1865 1866 const Register Rfirst = R0, 1867 Rsecond = R17_tos; 1868 1869 __ pop_ptr(Rfirst); 1870 if_cmp_common(Rfirst, Rsecond, R11_scratch1, R12_scratch2, cc, false, false); 1871 } 1872 1873 void TemplateTable::ret() { 1874 locals_index(R11_scratch1); 1875 __ load_local_ptr(R17_tos, R11_scratch1, R11_scratch1); 1876 1877 __ profile_ret(vtos, R17_tos, R11_scratch1, R12_scratch2); 1878 1879 __ ld(R11_scratch1, in_bytes(Method::const_offset()), R19_method); 1880 __ add(R11_scratch1, R17_tos, R11_scratch1); 1881 __ addi(R14_bcp, R11_scratch1, in_bytes(ConstMethod::codes_offset())); 1882 __ dispatch_next(vtos, 0, true); 1883 } 1884 1885 void TemplateTable::wide_ret() { 1886 transition(vtos, vtos); 1887 1888 const Register Rindex = R3_ARG1, 1889 Rscratch1 = R11_scratch1, 1890 Rscratch2 = R12_scratch2; 1891 1892 locals_index_wide(Rindex); 1893 __ load_local_ptr(R17_tos, R17_tos, Rindex); 1894 __ profile_ret(vtos, R17_tos, Rscratch1, R12_scratch2); 1895 // Tos now contains the bci, compute the bcp from that. 1896 __ ld(Rscratch1, in_bytes(Method::const_offset()), R19_method); 1897 __ addi(Rscratch2, R17_tos, in_bytes(ConstMethod::codes_offset())); 1898 __ add(R14_bcp, Rscratch1, Rscratch2); 1899 __ dispatch_next(vtos, 0, true); 1900 } 1901 1902 void TemplateTable::tableswitch() { 1903 transition(itos, vtos); 1904 1905 Label Ldispatch, Ldefault_case; 1906 Register Rlow_byte = R3_ARG1, 1907 Rindex = Rlow_byte, 1908 Rhigh_byte = R4_ARG2, 1909 Rdef_offset_addr = R5_ARG3, // is going to contain address of default offset 1910 Rscratch1 = R11_scratch1, 1911 Rscratch2 = R12_scratch2, 1912 Roffset = R6_ARG4; 1913 1914 // Align bcp. 1915 __ addi(Rdef_offset_addr, R14_bcp, BytesPerInt); 1916 __ clrrdi(Rdef_offset_addr, Rdef_offset_addr, log2_long((jlong)BytesPerInt)); 1917 1918 // Load lo & hi. 1919 __ get_u4(Rlow_byte, Rdef_offset_addr, BytesPerInt, InterpreterMacroAssembler::Unsigned); 1920 __ get_u4(Rhigh_byte, Rdef_offset_addr, 2 *BytesPerInt, InterpreterMacroAssembler::Unsigned); 1921 1922 // Check for default case (=index outside [low,high]). 1923 __ cmpw(CCR0, R17_tos, Rlow_byte); 1924 __ cmpw(CCR1, R17_tos, Rhigh_byte); 1925 __ blt(CCR0, Ldefault_case); 1926 __ bgt(CCR1, Ldefault_case); 1927 1928 // Lookup dispatch offset. 
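// Entry i of the jump table lives at aligned_bcp + (3 + i) * BytesPerInt (after
// the default offset, low and high words); each entry is a signed 4-byte branch
// offset relative to this tableswitch bytecode.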
1929 __ sub(Rindex, R17_tos, Rlow_byte); 1930 __ extsw(Rindex, Rindex); 1931 __ profile_switch_case(Rindex, Rhigh_byte /* scratch */, Rscratch1, Rscratch2); 1932 __ sldi(Rindex, Rindex, LogBytesPerInt); 1933 __ addi(Rindex, Rindex, 3 * BytesPerInt); 1934 #if defined(VM_LITTLE_ENDIAN) 1935 __ lwbrx(Roffset, Rdef_offset_addr, Rindex); 1936 __ extsw(Roffset, Roffset); 1937 #else 1938 __ lwax(Roffset, Rdef_offset_addr, Rindex); 1939 #endif 1940 __ b(Ldispatch); 1941 1942 __ bind(Ldefault_case); 1943 __ profile_switch_default(Rhigh_byte, Rscratch1); 1944 __ get_u4(Roffset, Rdef_offset_addr, 0, InterpreterMacroAssembler::Signed); 1945 1946 __ bind(Ldispatch); 1947 1948 __ add(R14_bcp, Roffset, R14_bcp); 1949 __ dispatch_next(vtos, 0, true); 1950 } 1951 1952 void TemplateTable::lookupswitch() { 1953 transition(itos, itos); 1954 __ stop("lookupswitch bytecode should have been rewritten"); 1955 } 1956 1957 // Table switch using linear search through cases. 1958 // Bytecode stream format: 1959 // Bytecode (1) | 4-byte padding | default offset (4) | count (4) | value/offset pair1 (8) | value/offset pair2 (8) | ... 1960 // Note: Everything is big-endian format here. 1961 void TemplateTable::fast_linearswitch() { 1962 transition(itos, vtos); 1963 1964 Label Lloop_entry, Lsearch_loop, Lcontinue_execution, Ldefault_case; 1965 Register Rcount = R3_ARG1, 1966 Rcurrent_pair = R4_ARG2, 1967 Rdef_offset_addr = R5_ARG3, // Is going to contain address of default offset. 1968 Roffset = R31, // Might need to survive C call. 1969 Rvalue = R12_scratch2, 1970 Rscratch = R11_scratch1, 1971 Rcmp_value = R17_tos; 1972 1973 // Align bcp. 1974 __ addi(Rdef_offset_addr, R14_bcp, BytesPerInt); 1975 __ clrrdi(Rdef_offset_addr, Rdef_offset_addr, log2_long((jlong)BytesPerInt)); 1976 1977 // Setup loop counter and limit. 1978 __ get_u4(Rcount, Rdef_offset_addr, BytesPerInt, InterpreterMacroAssembler::Unsigned); 1979 __ addi(Rcurrent_pair, Rdef_offset_addr, 2 * BytesPerInt); // Rcurrent_pair now points to first pair. 1980 1981 __ mtctr(Rcount); 1982 __ cmpwi(CCR0, Rcount, 0); 1983 __ bne(CCR0, Lloop_entry); 1984 1985 // Default case 1986 __ bind(Ldefault_case); 1987 __ get_u4(Roffset, Rdef_offset_addr, 0, InterpreterMacroAssembler::Signed); 1988 if (ProfileInterpreter) { 1989 __ profile_switch_default(Rdef_offset_addr, Rcount/* scratch */); 1990 } 1991 __ b(Lcontinue_execution); 1992 1993 // Next iteration 1994 __ bind(Lsearch_loop); 1995 __ bdz(Ldefault_case); 1996 __ addi(Rcurrent_pair, Rcurrent_pair, 2 * BytesPerInt); 1997 __ bind(Lloop_entry); 1998 __ get_u4(Rvalue, Rcurrent_pair, 0, InterpreterMacroAssembler::Unsigned); 1999 __ cmpw(CCR0, Rvalue, Rcmp_value); 2000 __ bne(CCR0, Lsearch_loop); 2001 2002 // Found, load offset. 2003 __ get_u4(Roffset, Rcurrent_pair, BytesPerInt, InterpreterMacroAssembler::Signed); 2004 // Calculate case index and profile 2005 __ mfctr(Rcurrent_pair); 2006 if (ProfileInterpreter) { 2007 __ sub(Rcurrent_pair, Rcount, Rcurrent_pair); 2008 __ profile_switch_case(Rcurrent_pair, Rcount /*scratch*/, Rdef_offset_addr/*scratch*/, Rscratch); 2009 } 2010 2011 __ bind(Lcontinue_execution); 2012 __ add(R14_bcp, Roffset, R14_bcp); 2013 __ dispatch_next(vtos, 0, true); 2014 } 2015 2016 // Table switch using binary search (value/offset pairs are ordered). 2017 // Bytecode stream format: 2018 // Bytecode (1) | 4-byte padding | default offset (4) | count (4) | value/offset pair1 (8) | value/offset pair2 (8) | ... 2019 // Note: Everything is big-endian format here. 
So on little endian machines, we have to reverse the offset, the count, and the compare value.
2020 void TemplateTable::fast_binaryswitch() {
2021
2022 transition(itos, vtos);
2023 // Implementation using the following core algorithm: (copied from Intel)
2024 //
2025 // int binary_search(int key, LookupswitchPair* array, int n) {
2026 // // Binary search according to "Methodik des Programmierens" by
2027 // // Edsger W. Dijkstra and W.H.J. Feijen, Addison Wesley Germany 1985.
2028 // int i = 0;
2029 // int j = n;
2030 // while (i+1 < j) {
2031 // // invariant P: 0 <= i < j <= n and (a[i] <= key < a[j] or Q)
2032 // // with Q: for all i: 0 <= i < n: key < a[i]
2033 // // where a stands for the array and assuming that the (nonexistent)
2034 // // element a[n] is infinitely big.
2035 // int h = (i + j) >> 1;
2036 // // i < h < j
2037 // if (key < array[h].fast_match()) {
2038 // j = h;
2039 // } else {
2040 // i = h;
2041 // }
2042 // }
2043 // // R: a[i] <= key < a[i+1] or Q
2044 // // (i.e., if key is within array, i is the correct index)
2045 // return i;
2046 // }
2047
2048 // register allocation
2049 const Register Rkey = R17_tos; // already set (tosca)
2050 const Register Rarray = R3_ARG1;
2051 const Register Ri = R4_ARG2;
2052 const Register Rj = R5_ARG3;
2053 const Register Rh = R6_ARG4;
2054 const Register Rscratch = R11_scratch1;
2055
2056 const int log_entry_size = 3;
2057 const int entry_size = 1 << log_entry_size;
2058
2059 Label found;
2060
2061 // Find array start.
2062 __ addi(Rarray, R14_bcp, 3 * BytesPerInt);
2063 __ clrrdi(Rarray, Rarray, log2_long((jlong)BytesPerInt));
2064
2065 // initialize i & j
2066 __ li(Ri, 0);
2067 __ get_u4(Rj, Rarray, -BytesPerInt, InterpreterMacroAssembler::Unsigned);
2068
2069 // and start.
2070 Label entry;
2071 __ b(entry);
2072
2073 // binary search loop
2074 { Label loop;
2075 __ bind(loop);
2076 // int h = (i + j) >> 1;
2077 __ srdi(Rh, Rh, 1);
2078 // if (key < array[h].fast_match()) {
2079 // j = h;
2080 // } else {
2081 // i = h;
2082 // }
2083 __ sldi(Rscratch, Rh, log_entry_size);
2084 #if defined(VM_LITTLE_ENDIAN)
2085 __ lwbrx(Rscratch, Rscratch, Rarray);
2086 #else
2087 __ lwzx(Rscratch, Rscratch, Rarray);
2088 #endif
2089
2090 // if (key < current value)
2091 // Rj = Rh
2092 // else
2093 // Ri = Rh
2094 Label Lgreater;
2095 __ cmpw(CCR0, Rkey, Rscratch);
2096 __ bge(CCR0, Lgreater);
2097 __ mr(Rj, Rh);
2098 __ b(entry);
2099 __ bind(Lgreater);
2100 __ mr(Ri, Rh);
2101
2102 // while (i+1 < j)
2103 __ bind(entry);
2104 __ addi(Rscratch, Ri, 1);
2105 __ cmpw(CCR0, Rscratch, Rj);
2106 __ add(Rh, Ri, Rj); // start h = i + j >> 1;
2107
2108 __ blt(CCR0, loop);
2109 }
2110
2111 // End of binary search, result index is i (must check again!).
2112 Label default_case;
2113 Label continue_execution;
2114 if (ProfileInterpreter) {
2115 __ mr(Rh, Ri); // Save index in i for profiling.
2116 } 2117 // Ri = value offset 2118 __ sldi(Ri, Ri, log_entry_size); 2119 __ add(Ri, Ri, Rarray); 2120 __ get_u4(Rscratch, Ri, 0, InterpreterMacroAssembler::Unsigned); 2121 2122 Label not_found; 2123 // Ri = offset offset 2124 __ cmpw(CCR0, Rkey, Rscratch); 2125 __ beq(CCR0, not_found); 2126 // entry not found -> j = default offset 2127 __ get_u4(Rj, Rarray, -2 * BytesPerInt, InterpreterMacroAssembler::Unsigned); 2128 __ b(default_case); 2129 2130 __ bind(not_found); 2131 // entry found -> j = offset 2132 __ profile_switch_case(Rh, Rj, Rscratch, Rkey); 2133 __ get_u4(Rj, Ri, BytesPerInt, InterpreterMacroAssembler::Unsigned); 2134 2135 if (ProfileInterpreter) { 2136 __ b(continue_execution); 2137 } 2138 2139 __ bind(default_case); // fall through (if not profiling) 2140 __ profile_switch_default(Ri, Rscratch); 2141 2142 __ bind(continue_execution); 2143 2144 __ extsw(Rj, Rj); 2145 __ add(R14_bcp, Rj, R14_bcp); 2146 __ dispatch_next(vtos, 0 , true); 2147 } 2148 2149 void TemplateTable::_return(TosState state) { 2150 transition(state, state); 2151 assert(_desc->calls_vm(), 2152 "inconsistent calls_vm information"); // call in remove_activation 2153 2154 if (_desc->bytecode() == Bytecodes::_return_register_finalizer) { 2155 2156 Register Rscratch = R11_scratch1, 2157 Rklass = R12_scratch2, 2158 Rklass_flags = Rklass; 2159 Label Lskip_register_finalizer; 2160 2161 // Check if the method has the FINALIZER flag set and call into the VM to finalize in this case. 2162 assert(state == vtos, "only valid state"); 2163 __ ld(R17_tos, 0, R18_locals); 2164 2165 // Load klass of this obj. 2166 __ load_klass(Rklass, R17_tos); 2167 __ lwz(Rklass_flags, in_bytes(Klass::access_flags_offset()), Rklass); 2168 __ testbitdi(CCR0, R0, Rklass_flags, exact_log2(JVM_ACC_HAS_FINALIZER)); 2169 __ bfalse(CCR0, Lskip_register_finalizer); 2170 2171 __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::register_finalizer), R17_tos /* obj */); 2172 2173 __ align(32, 12); 2174 __ bind(Lskip_register_finalizer); 2175 } 2176 2177 if (SafepointMechanism::uses_thread_local_poll() && _desc->bytecode() != Bytecodes::_return_register_finalizer) { 2178 Label no_safepoint; 2179 __ ld(R11_scratch1, in_bytes(Thread::polling_page_offset()), R16_thread); 2180 __ andi_(R11_scratch1, R11_scratch1, SafepointMechanism::poll_bit()); 2181 __ beq(CCR0, no_safepoint); 2182 __ push(state); 2183 __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::at_safepoint)); 2184 __ pop(state); 2185 __ bind(no_safepoint); 2186 } 2187 2188 // Move the result value into the correct register and remove memory stack frame. 2189 __ remove_activation(state, /* throw_monitor_exception */ true); 2190 // Restoration of lr done by remove_activation. 2191 switch (state) { 2192 // Narrow result if state is itos but result type is smaller. 2193 // Need to narrow in the return bytecode rather than in generate_return_entry 2194 // since compiled code callers expect the result to already be narrowed. 2195 case itos: __ narrow(R17_tos); /* fall through */ 2196 case ltos: 2197 case atos: __ mr(R3_RET, R17_tos); break; 2198 case ftos: 2199 case dtos: __ fmr(F1_RET, F15_ftos); break; 2200 case vtos: // This might be a constructor. Final fields (and volatile fields on PPC64) need 2201 // to get visible before the reference to the object gets stored anywhere. 
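// A StoreStore barrier suffices here: it orders the field initialization
// stores before any subsequent store that publishes the object reference.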
2202 __ membar(Assembler::StoreStore); break;
2203 default: ShouldNotReachHere();
2204 }
2205 __ blr();
2206 }
2207
2208 // ============================================================================
2209 // Constant pool cache access
2210 //
2211 // Memory ordering:
2212 //
2213 // As in the C++ interpreter, we load the fields
2214 // - _indices
2215 // - _f12_oop
2216 // with acquire semantics, because they are checked to see whether the cache entry is already resolved. We don't
2217 // want loads to float above this check.
2218 // See also comments in ConstantPoolCacheEntry::bytecode_1(),
2219 // ConstantPoolCacheEntry::bytecode_2() and ConstantPoolCacheEntry::f1().
2220
2221 // Call into the VM if the call site is not yet resolved.
2222 //
2223 // Input regs:
2224 // - None, all passed regs are outputs.
2225 //
2226 // Returns:
2227 // - Rcache: The constant pool cache entry that contains the resolved result.
2228 // (There is no separate result register; f1/f2 are read from Rcache by the callers.)
2229 //
2230 // Kills:
2231 // - Rscratch
2232 void TemplateTable::resolve_cache_and_index(int byte_no, Register Rcache, Register Rscratch, size_t index_size) {
2233
2234 __ get_cache_and_index_at_bcp(Rcache, 1, index_size);
2235 Label Lresolved, Ldone, L_clinit_barrier_slow;
2236
2237 Bytecodes::Code code = bytecode();
2238 switch (code) {
2239 case Bytecodes::_nofast_getfield: code = Bytecodes::_getfield; break;
2240 case Bytecodes::_nofast_putfield: code = Bytecodes::_putfield; break;
2241 default:
2242 break;
2243 }
2244
2245 assert(byte_no == f1_byte || byte_no == f2_byte, "byte_no out of range");
2246 // We are resolved if the indices offset contains the current bytecode.
2247 #if defined(VM_LITTLE_ENDIAN)
2248 __ lbz(Rscratch, in_bytes(ConstantPoolCache::base_offset() + ConstantPoolCacheEntry::indices_offset()) + byte_no + 1, Rcache);
2249 #else
2250 __ lbz(Rscratch, in_bytes(ConstantPoolCache::base_offset() + ConstantPoolCacheEntry::indices_offset()) + 7 - (byte_no + 1), Rcache);
2251 #endif
2252 // Acquire by cmp-br-isync (see below).
2253 __ cmpdi(CCR0, Rscratch, (int)code);
2254 __ beq(CCR0, Lresolved);
2255
2256 // Class initialization barrier slow path lands here as well.
2257 __ bind(L_clinit_barrier_slow);
2258
2259 address entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_from_cache);
2260 __ li(R4_ARG2, code);
2261 __ call_VM(noreg, entry, R4_ARG2, true);
2262
2263 // Update registers with resolved info.
2264 __ get_cache_and_index_at_bcp(Rcache, 1, index_size);
2265 __ b(Ldone);
2266
2267 __ bind(Lresolved);
2268 __ isync(); // Order load wrt. succeeding loads.
2269
2270 // Class initialization barrier for static methods
2271 if (VM_Version::supports_fast_class_init_checks() && bytecode() == Bytecodes::_invokestatic) {
2272 const Register method = Rscratch;
2273 const Register klass = Rscratch;
2274
2275 __ load_resolved_method_at_index(byte_no, Rcache, method);
2276 __ load_method_holder(klass, method);
2277 __ clinit_barrier(klass, R16_thread, NULL /*L_fast_path*/, &L_clinit_barrier_slow);
2278 }
2279
2280 __ bind(Ldone);
2281 }
2282
2283 // Load the constant pool cache entry at field accesses into registers.
2284 // The Rcache and Rindex registers must be set before the call.
2285 // Input: 2286 // - Rcache, Rindex 2287 // Output: 2288 // - Robj, Roffset, Rflags 2289 void TemplateTable::load_field_cp_cache_entry(Register Robj, 2290 Register Rcache, 2291 Register Rindex /* unused on PPC64 */, 2292 Register Roffset, 2293 Register Rflags, 2294 bool is_static = false) { 2295 assert_different_registers(Rcache, Rflags, Roffset); 2296 // assert(Rindex == noreg, "parameter not used on PPC64"); 2297 2298 ByteSize cp_base_offset = ConstantPoolCache::base_offset(); 2299 __ ld(Rflags, in_bytes(cp_base_offset) + in_bytes(ConstantPoolCacheEntry::flags_offset()), Rcache); 2300 __ ld(Roffset, in_bytes(cp_base_offset) + in_bytes(ConstantPoolCacheEntry::f2_offset()), Rcache); 2301 if (is_static) { 2302 __ ld(Robj, in_bytes(cp_base_offset) + in_bytes(ConstantPoolCacheEntry::f1_offset()), Rcache); 2303 __ ld(Robj, in_bytes(Klass::java_mirror_offset()), Robj); 2304 __ resolve_oop_handle(Robj); 2305 // Acquire not needed here. Following access has an address dependency on this value. 2306 } 2307 } 2308 2309 // Load the constant pool cache entry at invokes into registers. 2310 // Resolve if necessary. 2311 2312 // Input Registers: 2313 // - None, bcp is used, though 2314 // 2315 // Return registers: 2316 // - Rmethod (f1 field or f2 if invokevirtual) 2317 // - Ritable_index (f2 field) 2318 // - Rflags (flags field) 2319 // 2320 // Kills: 2321 // - R21 2322 // 2323 void TemplateTable::load_invoke_cp_cache_entry(int byte_no, 2324 Register Rmethod, 2325 Register Ritable_index, 2326 Register Rflags, 2327 bool is_invokevirtual, 2328 bool is_invokevfinal, 2329 bool is_invokedynamic) { 2330 2331 ByteSize cp_base_offset = ConstantPoolCache::base_offset(); 2332 // Determine constant pool cache field offsets. 2333 assert(is_invokevirtual == (byte_no == f2_byte), "is_invokevirtual flag redundant"); 2334 const int method_offset = in_bytes(cp_base_offset + (is_invokevirtual ? ConstantPoolCacheEntry::f2_offset() : ConstantPoolCacheEntry::f1_offset())); 2335 const int flags_offset = in_bytes(cp_base_offset + ConstantPoolCacheEntry::flags_offset()); 2336 // Access constant pool cache fields. 2337 const int index_offset = in_bytes(cp_base_offset + ConstantPoolCacheEntry::f2_offset()); 2338 2339 Register Rcache = R21_tmp1; // Note: same register as R21_sender_SP. 2340 2341 if (is_invokevfinal) { 2342 assert(Ritable_index == noreg, "register not used"); 2343 // Already resolved. 2344 __ get_cache_and_index_at_bcp(Rcache, 1); 2345 } else { 2346 resolve_cache_and_index(byte_no, Rcache, /* temp */ Rmethod, is_invokedynamic ? sizeof(u4) : sizeof(u2)); 2347 } 2348 2349 __ ld(Rmethod, method_offset, Rcache); 2350 __ ld(Rflags, flags_offset, Rcache); 2351 2352 if (Ritable_index != noreg) { 2353 __ ld(Ritable_index, index_offset, Rcache); 2354 } 2355 } 2356 2357 // ============================================================================ 2358 // Field access 2359 2360 // Volatile variables demand their effects be made known to all CPU's 2361 // in order. Store buffers on most chips allow reads & writes to 2362 // reorder; the JMM's ReadAfterWrite.java test fails in -Xint mode 2363 // without some kind of memory barrier (i.e., it's not sufficient that 2364 // the interpreter does not reorder volatile references, the hardware 2365 // also must not reorder them). 2366 // 2367 // According to the new Java Memory Model (JMM): 2368 // (1) All volatiles are serialized wrt to each other. 
ALSO reads &
2369 // writes act as acquire & release, so:
2370 // (2) A read cannot let unrelated NON-volatile memory refs that
2371 // happen after the read float up to before the read. It's OK for
2372 // non-volatile memory refs that happen before the volatile read to
2373 // float down below it.
2374 // (3) Similarly, a volatile write cannot let unrelated NON-volatile
2375 // memory refs that happen BEFORE the write float down to after the
2376 // write. It's OK for non-volatile memory refs that happen after the
2377 // volatile write to float up before it.
2378 //
2379 // We only put in barriers around volatile refs (they are expensive),
2380 // not _between_ memory refs (that would require us to track the
2381 // flavor of the previous memory refs). Requirements (2) and (3)
2382 // require some barriers before volatile stores and after volatile
2383 // loads. These nearly cover requirement (1) but miss the
2384 // volatile-store-volatile-load case. This final case is placed after
2385 // volatile-stores although it could just as well go before
2386 // volatile-loads.
2387
2388 // The registers cache and index are expected to be set before the call.
2389 // Correct values of the cache and index registers are preserved.
2390 // Kills:
2391 // Rcache (if has_tos)
2392 // Rscratch
2393 void TemplateTable::jvmti_post_field_access(Register Rcache, Register Rscratch, bool is_static, bool has_tos) {
2394
2395 assert_different_registers(Rcache, Rscratch);
2396
2397 if (JvmtiExport::can_post_field_access()) {
2398 ByteSize cp_base_offset = ConstantPoolCache::base_offset();
2399 Label Lno_field_access_post;
2400
2401 // Check if post field access is enabled.
2402 int offs = __ load_const_optimized(Rscratch, JvmtiExport::get_field_access_count_addr(), R0, true);
2403 __ lwz(Rscratch, offs, Rscratch);
2404
2405 __ cmpwi(CCR0, Rscratch, 0);
2406 __ beq(CCR0, Lno_field_access_post);
2407
2408 // Post access enabled - do it!
2409 __ addi(Rcache, Rcache, in_bytes(cp_base_offset));
2410 if (is_static) {
2411 __ li(R17_tos, 0);
2412 } else {
2413 if (has_tos) {
2414 // The fast bytecode versions have obj ptr in register.
2415 // Thus, save object pointer before call_VM() clobbers it:
2416 // put object on tos where GC wants it.
2417 __ push_ptr(R17_tos);
2418 } else {
2419 // Load top of stack (do not pop the value off the stack).
2420 __ ld(R17_tos, Interpreter::expr_offset_in_bytes(0), R15_esp);
2421 }
2422 __ verify_oop(R17_tos);
2423 }
2424 // tos: object pointer or NULL if static
2425 // cache: cache entry pointer
2426 __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::post_field_access), R17_tos, Rcache);
2427 if (!is_static && has_tos) {
2428 // Restore object pointer.
2429 __ pop_ptr(R17_tos);
2430 __ verify_oop(R17_tos);
2431 } else {
2432 // Cache is still needed to get class or obj.
2433 __ get_cache_and_index_at_bcp(Rcache, 1);
2434 }
2435
2436 __ align(32, 12);
2437 __ bind(Lno_field_access_post);
2438 }
2439 }
2440
2441 // kills R11_scratch1
2442 void TemplateTable::pop_and_check_object(Register Roop) {
2443 Register Rtmp = R11_scratch1;
2444
2445 assert_different_registers(Rtmp, Roop);
2446 __ pop_ptr(Roop);
2447 // For field access, must check obj.
2448 __ null_check_throw(Roop, -1, Rtmp);
2449 __ verify_oop(Roop);
2450 }
2451
2452 // PPC64: implement volatile loads as fence-load-acquire.
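// Roughly: every volatile load is followed by an acquire sequence (twi_0/isync,
// or an fcmpu-based cmp-br-isync for FP values); on CPUs where
// support_IRIW_for_not_multiple_copy_atomic_cpu is set, the branch-table dispatch
// below additionally enters one instruction early so a leading fence() is executed.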
2453 void TemplateTable::getfield_or_static(int byte_no, bool is_static, RewriteControl rc) { 2454 transition(vtos, vtos); 2455 2456 Label Lacquire, Lisync; 2457 2458 const Register Rcache = R3_ARG1, 2459 Rclass_or_obj = R22_tmp2, 2460 Roffset = R23_tmp3, 2461 Rflags = R31, 2462 Rbtable = R5_ARG3, 2463 Rbc = R6_ARG4, 2464 Rscratch = R12_scratch2; 2465 2466 static address field_branch_table[number_of_states], 2467 static_branch_table[number_of_states]; 2468 2469 address* branch_table = (is_static || rc == may_not_rewrite) ? static_branch_table : field_branch_table; 2470 2471 // Get field offset. 2472 resolve_cache_and_index(byte_no, Rcache, Rscratch, sizeof(u2)); 2473 2474 // JVMTI support 2475 jvmti_post_field_access(Rcache, Rscratch, is_static, false); 2476 2477 // Load after possible GC. 2478 load_field_cp_cache_entry(Rclass_or_obj, Rcache, noreg, Roffset, Rflags, is_static); 2479 2480 // Load pointer to branch table. 2481 __ load_const_optimized(Rbtable, (address)branch_table, Rscratch); 2482 2483 // Get volatile flag. 2484 __ rldicl(Rscratch, Rflags, 64-ConstantPoolCacheEntry::is_volatile_shift, 63); // Extract volatile bit. 2485 // Note: sync is needed before volatile load on PPC64. 2486 2487 // Check field type. 2488 __ rldicl(Rflags, Rflags, 64-ConstantPoolCacheEntry::tos_state_shift, 64-ConstantPoolCacheEntry::tos_state_bits); 2489 2490 #ifdef ASSERT 2491 Label LFlagInvalid; 2492 __ cmpldi(CCR0, Rflags, number_of_states); 2493 __ bge(CCR0, LFlagInvalid); 2494 #endif 2495 2496 // Load from branch table and dispatch (volatile case: one instruction ahead). 2497 __ sldi(Rflags, Rflags, LogBytesPerWord); 2498 __ cmpwi(CCR6, Rscratch, 1); // Volatile? 2499 if (support_IRIW_for_not_multiple_copy_atomic_cpu) { 2500 __ sldi(Rscratch, Rscratch, exact_log2(BytesPerInstWord)); // Volatile ? size of 1 instruction : 0. 2501 } 2502 __ ldx(Rbtable, Rbtable, Rflags); 2503 2504 // Get the obj from stack. 2505 if (!is_static) { 2506 pop_and_check_object(Rclass_or_obj); // Kills R11_scratch1. 2507 } else { 2508 __ verify_oop(Rclass_or_obj); 2509 } 2510 2511 if (support_IRIW_for_not_multiple_copy_atomic_cpu) { 2512 __ subf(Rbtable, Rscratch, Rbtable); // Point to volatile/non-volatile entry point. 2513 } 2514 __ mtctr(Rbtable); 2515 __ bctr(); 2516 2517 #ifdef ASSERT 2518 __ bind(LFlagInvalid); 2519 __ stop("got invalid flag", 0x654); 2520 #endif 2521 2522 if (!is_static && rc == may_not_rewrite) { 2523 // We reuse the code from is_static. It's jumped to via the table above. 2524 return; 2525 } 2526 2527 #ifdef ASSERT 2528 // __ bind(Lvtos); 2529 address pc_before_fence = __ pc(); 2530 __ fence(); // Volatile entry point (one instruction before non-volatile_entry point). 2531 assert(__ pc() - pc_before_fence == (ptrdiff_t)BytesPerInstWord, "must be single instruction"); 2532 assert(branch_table[vtos] == 0, "can't compute twice"); 2533 branch_table[vtos] = __ pc(); // non-volatile_entry point 2534 __ stop("vtos unexpected", 0x655); 2535 #endif 2536 2537 __ align(32, 28, 28); // Align load. 2538 // __ bind(Ldtos); 2539 __ fence(); // Volatile entry point (one instruction before non-volatile_entry point). 2540 assert(branch_table[dtos] == 0, "can't compute twice"); 2541 branch_table[dtos] = __ pc(); // non-volatile_entry point 2542 __ lfdx(F15_ftos, Rclass_or_obj, Roffset); 2543 __ push(dtos); 2544 if (!is_static && rc == may_rewrite) { 2545 patch_bytecode(Bytecodes::_fast_dgetfield, Rbc, Rscratch); 2546 } 2547 { 2548 Label acquire_double; 2549 __ beq(CCR6, acquire_double); // Volatile? 
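// Not volatile: no acquire needed, dispatch to the next bytecode directly.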
2550 __ dispatch_epilog(vtos, Bytecodes::length_for(bytecode())); 2551 2552 __ bind(acquire_double); 2553 __ fcmpu(CCR0, F15_ftos, F15_ftos); // Acquire by cmp-br-isync. 2554 __ beq_predict_taken(CCR0, Lisync); 2555 __ b(Lisync); // In case of NAN. 2556 } 2557 2558 __ align(32, 28, 28); // Align load. 2559 // __ bind(Lftos); 2560 __ fence(); // Volatile entry point (one instruction before non-volatile_entry point). 2561 assert(branch_table[ftos] == 0, "can't compute twice"); 2562 branch_table[ftos] = __ pc(); // non-volatile_entry point 2563 __ lfsx(F15_ftos, Rclass_or_obj, Roffset); 2564 __ push(ftos); 2565 if (!is_static && rc == may_rewrite) { 2566 patch_bytecode(Bytecodes::_fast_fgetfield, Rbc, Rscratch); 2567 } 2568 { 2569 Label acquire_float; 2570 __ beq(CCR6, acquire_float); // Volatile? 2571 __ dispatch_epilog(vtos, Bytecodes::length_for(bytecode())); 2572 2573 __ bind(acquire_float); 2574 __ fcmpu(CCR0, F15_ftos, F15_ftos); // Acquire by cmp-br-isync. 2575 __ beq_predict_taken(CCR0, Lisync); 2576 __ b(Lisync); // In case of NAN. 2577 } 2578 2579 __ align(32, 28, 28); // Align load. 2580 // __ bind(Litos); 2581 __ fence(); // Volatile entry point (one instruction before non-volatile_entry point). 2582 assert(branch_table[itos] == 0, "can't compute twice"); 2583 branch_table[itos] = __ pc(); // non-volatile_entry point 2584 __ lwax(R17_tos, Rclass_or_obj, Roffset); 2585 __ push(itos); 2586 if (!is_static && rc == may_rewrite) { 2587 patch_bytecode(Bytecodes::_fast_igetfield, Rbc, Rscratch); 2588 } 2589 __ beq(CCR6, Lacquire); // Volatile? 2590 __ dispatch_epilog(vtos, Bytecodes::length_for(bytecode())); 2591 2592 __ align(32, 28, 28); // Align load. 2593 // __ bind(Lltos); 2594 __ fence(); // Volatile entry point (one instruction before non-volatile_entry point). 2595 assert(branch_table[ltos] == 0, "can't compute twice"); 2596 branch_table[ltos] = __ pc(); // non-volatile_entry point 2597 __ ldx(R17_tos, Rclass_or_obj, Roffset); 2598 __ push(ltos); 2599 if (!is_static && rc == may_rewrite) { 2600 patch_bytecode(Bytecodes::_fast_lgetfield, Rbc, Rscratch); 2601 } 2602 __ beq(CCR6, Lacquire); // Volatile? 2603 __ dispatch_epilog(vtos, Bytecodes::length_for(bytecode())); 2604 2605 __ align(32, 28, 28); // Align load. 2606 // __ bind(Lbtos); 2607 __ fence(); // Volatile entry point (one instruction before non-volatile_entry point). 2608 assert(branch_table[btos] == 0, "can't compute twice"); 2609 branch_table[btos] = __ pc(); // non-volatile_entry point 2610 __ lbzx(R17_tos, Rclass_or_obj, Roffset); 2611 __ extsb(R17_tos, R17_tos); 2612 __ push(btos); 2613 if (!is_static && rc == may_rewrite) { 2614 patch_bytecode(Bytecodes::_fast_bgetfield, Rbc, Rscratch); 2615 } 2616 __ beq(CCR6, Lacquire); // Volatile? 2617 __ dispatch_epilog(vtos, Bytecodes::length_for(bytecode())); 2618 2619 __ align(32, 28, 28); // Align load. 2620 // __ bind(Lztos); (same code as btos) 2621 __ fence(); // Volatile entry point (one instruction before non-volatile_entry point). 2622 assert(branch_table[ztos] == 0, "can't compute twice"); 2623 branch_table[ztos] = __ pc(); // non-volatile_entry point 2624 __ lbzx(R17_tos, Rclass_or_obj, Roffset); 2625 __ push(ztos); 2626 if (!is_static && rc == may_rewrite) { 2627 // use btos rewriting, no truncating to t/f bit is needed for getfield. 2628 patch_bytecode(Bytecodes::_fast_bgetfield, Rbc, Rscratch); 2629 } 2630 __ beq(CCR6, Lacquire); // Volatile? 2631 __ dispatch_epilog(vtos, Bytecodes::length_for(bytecode())); 2632 2633 __ align(32, 28, 28); // Align load. 
2634 // __ bind(Lctos);
2635 __ fence(); // Volatile entry point (one instruction before non-volatile_entry point).
2636 assert(branch_table[ctos] == 0, "can't compute twice");
2637 branch_table[ctos] = __ pc(); // non-volatile_entry point
2638 __ lhzx(R17_tos, Rclass_or_obj, Roffset);
2639 __ push(ctos);
2640 if (!is_static && rc == may_rewrite) {
2641 patch_bytecode(Bytecodes::_fast_cgetfield, Rbc, Rscratch);
2642 }
2643 __ beq(CCR6, Lacquire); // Volatile?
2644 __ dispatch_epilog(vtos, Bytecodes::length_for(bytecode()));
2645
2646 __ align(32, 28, 28); // Align load.
2647 // __ bind(Lstos);
2648 __ fence(); // Volatile entry point (one instruction before non-volatile_entry point).
2649 assert(branch_table[stos] == 0, "can't compute twice");
2650 branch_table[stos] = __ pc(); // non-volatile_entry point
2651 __ lhax(R17_tos, Rclass_or_obj, Roffset);
2652 __ push(stos);
2653 if (!is_static && rc == may_rewrite) {
2654 patch_bytecode(Bytecodes::_fast_sgetfield, Rbc, Rscratch);
2655 }
2656 __ beq(CCR6, Lacquire); // Volatile?
2657 __ dispatch_epilog(vtos, Bytecodes::length_for(bytecode()));
2658
2659 __ align(32, 28, 28); // Align load.
2660 // __ bind(Latos);
2661 __ fence(); // Volatile entry point (one instruction before non-volatile_entry point).
2662 assert(branch_table[atos] == 0, "can't compute twice");
2663 branch_table[atos] = __ pc(); // non-volatile_entry point
2664 do_oop_load(_masm, Rclass_or_obj, Roffset, R17_tos, Rscratch, /* nv temp */ Rflags, IN_HEAP);
2665 __ verify_oop(R17_tos);
2666 __ push(atos);
2667 //__ dcbt(R17_tos); // prefetch
2668 if (!is_static && rc == may_rewrite) {
2669 patch_bytecode(Bytecodes::_fast_agetfield, Rbc, Rscratch);
2670 }
2671 __ beq(CCR6, Lacquire); // Volatile?
2672 __ dispatch_epilog(vtos, Bytecodes::length_for(bytecode()));
2673
2674 __ align(32, 12);
2675 __ bind(Lacquire);
2676 __ twi_0(R17_tos);
2677 __ bind(Lisync);
2678 __ isync(); // acquire
2679
2680 #ifdef ASSERT
2681 for (int i = 0; i < number_of_states; ++i) {
2682 assert(branch_table[i], "get initialization");
2683 //tty->print_cr("get: %s_branch_table[%d] = 0x%llx (opcode 0x%llx)",
2684 // is_static ? "static" : "field", i, branch_table[i], *((unsigned int*)branch_table[i]));
2685 }
2686 #endif
2687 }
2688
2689 void TemplateTable::getfield(int byte_no) {
2690 getfield_or_static(byte_no, false);
2691 }
2692
2693 void TemplateTable::nofast_getfield(int byte_no) {
2694 getfield_or_static(byte_no, false, may_not_rewrite);
2695 }
2696
2697 void TemplateTable::getstatic(int byte_no) {
2698 getfield_or_static(byte_no, true);
2699 }
2700
2701 // The registers cache and index are expected to be set before the call.
2702 // The function may destroy various registers, just not the cache and index registers.
2703 void TemplateTable::jvmti_post_field_mod(Register Rcache, Register Rscratch, bool is_static) {
2704
2705 assert_different_registers(Rcache, Rscratch, R6_ARG4);
2706
2707 if (JvmtiExport::can_post_field_modification()) {
2708 Label Lno_field_mod_post;
2709
2710 // Check if post field modification is enabled.
2711 int offs = __ load_const_optimized(Rscratch, JvmtiExport::get_field_modification_count_addr(), R0, true);
2712 __ lwz(Rscratch, offs, Rscratch);
2713
2714 __ cmpwi(CCR0, Rscratch, 0);
2715 __ beq(CCR0, Lno_field_mod_post);
2716
2717 // Do the post
2718 ByteSize cp_base_offset = ConstantPoolCache::base_offset();
2719 const Register Robj = Rscratch;
2720
2721 __ addi(Rcache, Rcache, in_bytes(cp_base_offset));
2722 if (is_static) {
2723 // Life is simple. Null out the object pointer.
2724 __ li(Robj, 0); 2725 } else { 2726 // In case of the fast versions, value lives in registers => put it back on tos. 2727 int offs = Interpreter::expr_offset_in_bytes(0); 2728 Register base = R15_esp; 2729 switch(bytecode()) { 2730 case Bytecodes::_fast_aputfield: __ push_ptr(); offs+= Interpreter::stackElementSize; break; 2731 case Bytecodes::_fast_iputfield: // Fall through 2732 case Bytecodes::_fast_bputfield: // Fall through 2733 case Bytecodes::_fast_zputfield: // Fall through 2734 case Bytecodes::_fast_cputfield: // Fall through 2735 case Bytecodes::_fast_sputfield: __ push_i(); offs+= Interpreter::stackElementSize; break; 2736 case Bytecodes::_fast_lputfield: __ push_l(); offs+=2*Interpreter::stackElementSize; break; 2737 case Bytecodes::_fast_fputfield: __ push_f(); offs+= Interpreter::stackElementSize; break; 2738 case Bytecodes::_fast_dputfield: __ push_d(); offs+=2*Interpreter::stackElementSize; break; 2739 default: { 2740 offs = 0; 2741 base = Robj; 2742 const Register Rflags = Robj; 2743 Label is_one_slot; 2744 // Life is harder. The stack holds the value on top, followed by the 2745 // object. We don't know the size of the value, though; it could be 2746 // one or two words depending on its type. As a result, we must find 2747 // the type to determine where the object is. 2748 __ ld(Rflags, in_bytes(ConstantPoolCacheEntry::flags_offset()), Rcache); // Big Endian 2749 __ rldicl(Rflags, Rflags, 64-ConstantPoolCacheEntry::tos_state_shift, 64-ConstantPoolCacheEntry::tos_state_bits); 2750 2751 __ cmpwi(CCR0, Rflags, ltos); 2752 __ cmpwi(CCR1, Rflags, dtos); 2753 __ addi(base, R15_esp, Interpreter::expr_offset_in_bytes(1)); 2754 __ crnor(CCR0, Assembler::equal, CCR1, Assembler::equal); 2755 __ beq(CCR0, is_one_slot); 2756 __ addi(base, R15_esp, Interpreter::expr_offset_in_bytes(2)); 2757 __ bind(is_one_slot); 2758 break; 2759 } 2760 } 2761 __ ld(Robj, offs, base); 2762 __ verify_oop(Robj); 2763 } 2764 2765 __ addi(R6_ARG4, R15_esp, Interpreter::expr_offset_in_bytes(0)); 2766 __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::post_field_modification), Robj, Rcache, R6_ARG4); 2767 __ get_cache_and_index_at_bcp(Rcache, 1); 2768 2769 // In case of the fast versions, value lives in registers => put it back on tos. 2770 switch(bytecode()) { 2771 case Bytecodes::_fast_aputfield: __ pop_ptr(); break; 2772 case Bytecodes::_fast_iputfield: // Fall through 2773 case Bytecodes::_fast_bputfield: // Fall through 2774 case Bytecodes::_fast_zputfield: // Fall through 2775 case Bytecodes::_fast_cputfield: // Fall through 2776 case Bytecodes::_fast_sputfield: __ pop_i(); break; 2777 case Bytecodes::_fast_lputfield: __ pop_l(); break; 2778 case Bytecodes::_fast_fputfield: __ pop_f(); break; 2779 case Bytecodes::_fast_dputfield: __ pop_d(); break; 2780 default: break; // Nothin' to do. 2781 } 2782 2783 __ align(32, 12); 2784 __ bind(Lno_field_mod_post); 2785 } 2786 } 2787 2788 // PPC64: implement volatile stores as release-store (return bytecode contains an additional release). 2789 void TemplateTable::putfield_or_static(int byte_no, bool is_static, RewriteControl rc) { 2790 Label Lvolatile; 2791 2792 const Register Rcache = R5_ARG3, // Do not use ARG1/2 (causes trouble in jvmti_post_field_mod). 2793 Rclass_or_obj = R31, // Needs to survive C call. 2794 Roffset = R22_tmp2, // Needs to survive C call. 
2795 Rflags = R3_ARG1, 2796 Rbtable = R4_ARG2, 2797 Rscratch = R11_scratch1, 2798 Rscratch2 = R12_scratch2, 2799 Rscratch3 = R6_ARG4, 2800 Rbc = Rscratch3; 2801 const ConditionRegister CR_is_vol = CCR2; // Non-volatile condition register (survives runtime call in do_oop_store). 2802 2803 static address field_rw_branch_table[number_of_states], 2804 field_norw_branch_table[number_of_states], 2805 static_branch_table[number_of_states]; 2806 2807 address* branch_table = is_static ? static_branch_table : 2808 (rc == may_rewrite ? field_rw_branch_table : field_norw_branch_table); 2809 2810 // Stack (grows up): 2811 // value 2812 // obj 2813 2814 // Load the field offset. 2815 resolve_cache_and_index(byte_no, Rcache, Rscratch, sizeof(u2)); 2816 jvmti_post_field_mod(Rcache, Rscratch, is_static); 2817 load_field_cp_cache_entry(Rclass_or_obj, Rcache, noreg, Roffset, Rflags, is_static); 2818 2819 // Load pointer to branch table. 2820 __ load_const_optimized(Rbtable, (address)branch_table, Rscratch); 2821 2822 // Get volatile flag. 2823 __ rldicl(Rscratch, Rflags, 64-ConstantPoolCacheEntry::is_volatile_shift, 63); // Extract volatile bit. 2824 2825 // Check the field type. 2826 __ rldicl(Rflags, Rflags, 64-ConstantPoolCacheEntry::tos_state_shift, 64-ConstantPoolCacheEntry::tos_state_bits); 2827 2828 #ifdef ASSERT 2829 Label LFlagInvalid; 2830 __ cmpldi(CCR0, Rflags, number_of_states); 2831 __ bge(CCR0, LFlagInvalid); 2832 #endif 2833 2834 // Load from branch table and dispatch (volatile case: one instruction ahead). 2835 __ sldi(Rflags, Rflags, LogBytesPerWord); 2836 if (!support_IRIW_for_not_multiple_copy_atomic_cpu) { 2837 __ cmpwi(CR_is_vol, Rscratch, 1); // Volatile? 2838 } 2839 __ sldi(Rscratch, Rscratch, exact_log2(BytesPerInstWord)); // Volatile? size of instruction 1 : 0. 2840 __ ldx(Rbtable, Rbtable, Rflags); 2841 2842 __ subf(Rbtable, Rscratch, Rbtable); // Point to volatile/non-volatile entry point. 2843 __ mtctr(Rbtable); 2844 __ bctr(); 2845 2846 #ifdef ASSERT 2847 __ bind(LFlagInvalid); 2848 __ stop("got invalid flag", 0x656); 2849 2850 // __ bind(Lvtos); 2851 address pc_before_release = __ pc(); 2852 __ release(); // Volatile entry point (one instruction before non-volatile_entry point). 2853 assert(__ pc() - pc_before_release == (ptrdiff_t)BytesPerInstWord, "must be single instruction"); 2854 assert(branch_table[vtos] == 0, "can't compute twice"); 2855 branch_table[vtos] = __ pc(); // non-volatile_entry point 2856 __ stop("vtos unexpected", 0x657); 2857 #endif 2858 2859 __ align(32, 28, 28); // Align pop. 2860 // __ bind(Ldtos); 2861 __ release(); // Volatile entry point (one instruction before non-volatile_entry point). 2862 assert(branch_table[dtos] == 0, "can't compute twice"); 2863 branch_table[dtos] = __ pc(); // non-volatile_entry point 2864 __ pop(dtos); 2865 if (!is_static) { 2866 pop_and_check_object(Rclass_or_obj); // Kills R11_scratch1. 2867 } 2868 __ stfdx(F15_ftos, Rclass_or_obj, Roffset); 2869 if (!is_static && rc == may_rewrite) { 2870 patch_bytecode(Bytecodes::_fast_dputfield, Rbc, Rscratch, true, byte_no); 2871 } 2872 if (!support_IRIW_for_not_multiple_copy_atomic_cpu) { 2873 __ beq(CR_is_vol, Lvolatile); // Volatile? 2874 } 2875 __ dispatch_epilog(vtos, Bytecodes::length_for(bytecode())); 2876 2877 __ align(32, 28, 28); // Align pop. 2878 // __ bind(Lftos); 2879 __ release(); // Volatile entry point (one instruction before non-volatile_entry point). 
2880 assert(branch_table[ftos] == 0, "can't compute twice"); 2881 branch_table[ftos] = __ pc(); // non-volatile_entry point 2882 __ pop(ftos); 2883 if (!is_static) { pop_and_check_object(Rclass_or_obj); } // Kills R11_scratch1. 2884 __ stfsx(F15_ftos, Rclass_or_obj, Roffset); 2885 if (!is_static && rc == may_rewrite) { 2886 patch_bytecode(Bytecodes::_fast_fputfield, Rbc, Rscratch, true, byte_no); 2887 } 2888 if (!support_IRIW_for_not_multiple_copy_atomic_cpu) { 2889 __ beq(CR_is_vol, Lvolatile); // Volatile? 2890 } 2891 __ dispatch_epilog(vtos, Bytecodes::length_for(bytecode())); 2892 2893 __ align(32, 28, 28); // Align pop. 2894 // __ bind(Litos); 2895 __ release(); // Volatile entry point (one instruction before non-volatile_entry point). 2896 assert(branch_table[itos] == 0, "can't compute twice"); 2897 branch_table[itos] = __ pc(); // non-volatile_entry point 2898 __ pop(itos); 2899 if (!is_static) { pop_and_check_object(Rclass_or_obj); } // Kills R11_scratch1. 2900 __ stwx(R17_tos, Rclass_or_obj, Roffset); 2901 if (!is_static && rc == may_rewrite) { 2902 patch_bytecode(Bytecodes::_fast_iputfield, Rbc, Rscratch, true, byte_no); 2903 } 2904 if (!support_IRIW_for_not_multiple_copy_atomic_cpu) { 2905 __ beq(CR_is_vol, Lvolatile); // Volatile? 2906 } 2907 __ dispatch_epilog(vtos, Bytecodes::length_for(bytecode())); 2908 2909 __ align(32, 28, 28); // Align pop. 2910 // __ bind(Lltos); 2911 __ release(); // Volatile entry point (one instruction before non-volatile_entry point). 2912 assert(branch_table[ltos] == 0, "can't compute twice"); 2913 branch_table[ltos] = __ pc(); // non-volatile_entry point 2914 __ pop(ltos); 2915 if (!is_static) { pop_and_check_object(Rclass_or_obj); } // Kills R11_scratch1. 2916 __ stdx(R17_tos, Rclass_or_obj, Roffset); 2917 if (!is_static && rc == may_rewrite) { 2918 patch_bytecode(Bytecodes::_fast_lputfield, Rbc, Rscratch, true, byte_no); 2919 } 2920 if (!support_IRIW_for_not_multiple_copy_atomic_cpu) { 2921 __ beq(CR_is_vol, Lvolatile); // Volatile? 2922 } 2923 __ dispatch_epilog(vtos, Bytecodes::length_for(bytecode())); 2924 2925 __ align(32, 28, 28); // Align pop. 2926 // __ bind(Lbtos); 2927 __ release(); // Volatile entry point (one instruction before non-volatile_entry point). 2928 assert(branch_table[btos] == 0, "can't compute twice"); 2929 branch_table[btos] = __ pc(); // non-volatile_entry point 2930 __ pop(btos); 2931 if (!is_static) { pop_and_check_object(Rclass_or_obj); } // Kills R11_scratch1. 2932 __ stbx(R17_tos, Rclass_or_obj, Roffset); 2933 if (!is_static && rc == may_rewrite) { 2934 patch_bytecode(Bytecodes::_fast_bputfield, Rbc, Rscratch, true, byte_no); 2935 } 2936 if (!support_IRIW_for_not_multiple_copy_atomic_cpu) { 2937 __ beq(CR_is_vol, Lvolatile); // Volatile? 2938 } 2939 __ dispatch_epilog(vtos, Bytecodes::length_for(bytecode())); 2940 2941 __ align(32, 28, 28); // Align pop. 2942 // __ bind(Lztos); 2943 __ release(); // Volatile entry point (one instruction before non-volatile_entry point). 2944 assert(branch_table[ztos] == 0, "can't compute twice"); 2945 branch_table[ztos] = __ pc(); // non-volatile_entry point 2946 __ pop(ztos); 2947 if (!is_static) { pop_and_check_object(Rclass_or_obj); } // Kills R11_scratch1. 2948 __ andi(R17_tos, R17_tos, 0x1); 2949 __ stbx(R17_tos, Rclass_or_obj, Roffset); 2950 if (!is_static && rc == may_rewrite) { 2951 patch_bytecode(Bytecodes::_fast_zputfield, Rbc, Rscratch, true, byte_no); 2952 } 2953 if (!support_IRIW_for_not_multiple_copy_atomic_cpu) { 2954 __ beq(CR_is_vol, Lvolatile); // Volatile? 
2955 } 2956 __ dispatch_epilog(vtos, Bytecodes::length_for(bytecode())); 2957 2958 __ align(32, 28, 28); // Align pop. 2959 // __ bind(Lctos); 2960 __ release(); // Volatile entry point (one instruction before non-volatile_entry point). 2961 assert(branch_table[ctos] == 0, "can't compute twice"); 2962 branch_table[ctos] = __ pc(); // non-volatile_entry point 2963 __ pop(ctos); 2964 if (!is_static) { pop_and_check_object(Rclass_or_obj); } // Kills R11_scratch1.. 2965 __ sthx(R17_tos, Rclass_or_obj, Roffset); 2966 if (!is_static && rc == may_rewrite) { 2967 patch_bytecode(Bytecodes::_fast_cputfield, Rbc, Rscratch, true, byte_no); 2968 } 2969 if (!support_IRIW_for_not_multiple_copy_atomic_cpu) { 2970 __ beq(CR_is_vol, Lvolatile); // Volatile? 2971 } 2972 __ dispatch_epilog(vtos, Bytecodes::length_for(bytecode())); 2973 2974 __ align(32, 28, 28); // Align pop. 2975 // __ bind(Lstos); 2976 __ release(); // Volatile entry point (one instruction before non-volatile_entry point). 2977 assert(branch_table[stos] == 0, "can't compute twice"); 2978 branch_table[stos] = __ pc(); // non-volatile_entry point 2979 __ pop(stos); 2980 if (!is_static) { pop_and_check_object(Rclass_or_obj); } // Kills R11_scratch1. 2981 __ sthx(R17_tos, Rclass_or_obj, Roffset); 2982 if (!is_static && rc == may_rewrite) { 2983 patch_bytecode(Bytecodes::_fast_sputfield, Rbc, Rscratch, true, byte_no); 2984 } 2985 if (!support_IRIW_for_not_multiple_copy_atomic_cpu) { 2986 __ beq(CR_is_vol, Lvolatile); // Volatile? 2987 } 2988 __ dispatch_epilog(vtos, Bytecodes::length_for(bytecode())); 2989 2990 __ align(32, 28, 28); // Align pop. 2991 // __ bind(Latos); 2992 __ release(); // Volatile entry point (one instruction before non-volatile_entry point). 2993 assert(branch_table[atos] == 0, "can't compute twice"); 2994 branch_table[atos] = __ pc(); // non-volatile_entry point 2995 __ pop(atos); 2996 if (!is_static) { pop_and_check_object(Rclass_or_obj); } // kills R11_scratch1 2997 do_oop_store(_masm, Rclass_or_obj, Roffset, R17_tos, Rscratch, Rscratch2, Rscratch3, IN_HEAP); 2998 if (!is_static && rc == may_rewrite) { 2999 patch_bytecode(Bytecodes::_fast_aputfield, Rbc, Rscratch, true, byte_no); 3000 } 3001 if (!support_IRIW_for_not_multiple_copy_atomic_cpu) { 3002 __ beq(CR_is_vol, Lvolatile); // Volatile? 3003 __ dispatch_epilog(vtos, Bytecodes::length_for(bytecode())); 3004 3005 __ align(32, 12); 3006 __ bind(Lvolatile); 3007 __ fence(); 3008 } 3009 // fallthru: __ b(Lexit); 3010 3011 #ifdef ASSERT 3012 for (int i = 0; i<number_of_states; ++i) { 3013 assert(branch_table[i], "put initialization"); 3014 //tty->print_cr("put: %s_branch_table[%d] = 0x%llx (opcode 0x%llx)", 3015 // is_static ? "static" : "field", i, branch_table[i], *((unsigned int*)branch_table[i])); 3016 } 3017 #endif 3018 } 3019 3020 void TemplateTable::putfield(int byte_no) { 3021 putfield_or_static(byte_no, false); 3022 } 3023 3024 void TemplateTable::nofast_putfield(int byte_no) { 3025 putfield_or_static(byte_no, false, may_not_rewrite); 3026 } 3027 3028 void TemplateTable::putstatic(int byte_no) { 3029 putfield_or_static(byte_no, true); 3030 } 3031 3032 // See SPARC. On PPC64, we have a different jvmti_post_field_mod which does the job. 3033 void TemplateTable::jvmti_post_fast_field_mod() { 3034 __ should_not_reach_here(); 3035 } 3036 3037 void TemplateTable::fast_storefield(TosState state) { 3038 transition(state, vtos); 3039 3040 const Register Rcache = R5_ARG3, // Do not use ARG1/2 (causes trouble in jvmti_post_field_mod). 
3041 Rclass_or_obj = R31, // Needs to survive C call. 3042 Roffset = R22_tmp2, // Needs to survive C call. 3043 Rflags = R3_ARG1, 3044 Rscratch = R11_scratch1, 3045 Rscratch2 = R12_scratch2, 3046 Rscratch3 = R4_ARG2; 3047 const ConditionRegister CR_is_vol = CCR2; // Non-volatile condition register (survives runtime call in do_oop_store). 3048 3049 // Constant pool already resolved => Load flags and offset of field. 3050 __ get_cache_and_index_at_bcp(Rcache, 1); 3051 jvmti_post_field_mod(Rcache, Rscratch, false /* not static */); 3052 load_field_cp_cache_entry(noreg, Rcache, noreg, Roffset, Rflags, false); 3053 3054 // Get the obj and the final store addr. 3055 pop_and_check_object(Rclass_or_obj); // Kills R11_scratch1. 3056 3057 // Get volatile flag. 3058 __ rldicl_(Rscratch, Rflags, 64-ConstantPoolCacheEntry::is_volatile_shift, 63); // Extract volatile bit. 3059 if (!support_IRIW_for_not_multiple_copy_atomic_cpu) { __ cmpdi(CR_is_vol, Rscratch, 1); } 3060 { 3061 Label LnotVolatile; 3062 __ beq(CCR0, LnotVolatile); 3063 __ release(); 3064 __ align(32, 12); 3065 __ bind(LnotVolatile); 3066 } 3067 3068 // Do the store and fencing. 3069 switch(bytecode()) { 3070 case Bytecodes::_fast_aputfield: 3071 // Store into the field. 3072 do_oop_store(_masm, Rclass_or_obj, Roffset, R17_tos, Rscratch, Rscratch2, Rscratch3, IN_HEAP); 3073 break; 3074 3075 case Bytecodes::_fast_iputfield: 3076 __ stwx(R17_tos, Rclass_or_obj, Roffset); 3077 break; 3078 3079 case Bytecodes::_fast_lputfield: 3080 __ stdx(R17_tos, Rclass_or_obj, Roffset); 3081 break; 3082 3083 case Bytecodes::_fast_zputfield: 3084 __ andi(R17_tos, R17_tos, 0x1); // boolean is true if LSB is 1 3085 // fall through to bputfield 3086 case Bytecodes::_fast_bputfield: 3087 __ stbx(R17_tos, Rclass_or_obj, Roffset); 3088 break; 3089 3090 case Bytecodes::_fast_cputfield: 3091 case Bytecodes::_fast_sputfield: 3092 __ sthx(R17_tos, Rclass_or_obj, Roffset); 3093 break; 3094 3095 case Bytecodes::_fast_fputfield: 3096 __ stfsx(F15_ftos, Rclass_or_obj, Roffset); 3097 break; 3098 3099 case Bytecodes::_fast_dputfield: 3100 __ stfdx(F15_ftos, Rclass_or_obj, Roffset); 3101 break; 3102 3103 default: ShouldNotReachHere(); 3104 } 3105 3106 if (!support_IRIW_for_not_multiple_copy_atomic_cpu) { 3107 Label LVolatile; 3108 __ beq(CR_is_vol, LVolatile); 3109 __ dispatch_epilog(vtos, Bytecodes::length_for(bytecode())); 3110 3111 __ align(32, 12); 3112 __ bind(LVolatile); 3113 __ fence(); 3114 } 3115 } 3116 3117 void TemplateTable::fast_accessfield(TosState state) { 3118 transition(atos, state); 3119 3120 Label LisVolatile; 3121 ByteSize cp_base_offset = ConstantPoolCache::base_offset(); 3122 3123 const Register Rcache = R3_ARG1, 3124 Rclass_or_obj = R17_tos, 3125 Roffset = R22_tmp2, 3126 Rflags = R23_tmp3, 3127 Rscratch = R12_scratch2; 3128 3129 // Constant pool already resolved. Get the field offset. 3130 __ get_cache_and_index_at_bcp(Rcache, 1); 3131 load_field_cp_cache_entry(noreg, Rcache, noreg, Roffset, Rflags, false); 3132 3133 // JVMTI support 3134 jvmti_post_field_access(Rcache, Rscratch, false, true); 3135 3136 // Get the load address. 3137 __ null_check_throw(Rclass_or_obj, -1, Rscratch); 3138 3139 // Get volatile flag. 3140 __ rldicl_(Rscratch, Rflags, 64-ConstantPoolCacheEntry::is_volatile_shift, 63); // Extract volatile bit. 
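// Volatile accesses branch to LisVolatile (bound per case below), where the load
// is done with an optional leading fence (IRIW case) and an acquire sequence afterwards.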
3141 __ bne(CCR0, LisVolatile); 3142 3143 switch(bytecode()) { 3144 case Bytecodes::_fast_agetfield: 3145 { 3146 do_oop_load(_masm, Rclass_or_obj, Roffset, R17_tos, Rscratch, /* nv temp */ Rflags, IN_HEAP); 3147 __ verify_oop(R17_tos); 3148 __ dispatch_epilog(state, Bytecodes::length_for(bytecode())); 3149 3150 __ bind(LisVolatile); 3151 if (support_IRIW_for_not_multiple_copy_atomic_cpu) { __ fence(); } 3152 do_oop_load(_masm, Rclass_or_obj, Roffset, R17_tos, Rscratch, /* nv temp */ Rflags, IN_HEAP); 3153 __ verify_oop(R17_tos); 3154 __ twi_0(R17_tos); 3155 __ isync(); 3156 break; 3157 } 3158 case Bytecodes::_fast_igetfield: 3159 { 3160 __ lwax(R17_tos, Rclass_or_obj, Roffset); 3161 __ dispatch_epilog(state, Bytecodes::length_for(bytecode())); 3162 3163 __ bind(LisVolatile); 3164 if (support_IRIW_for_not_multiple_copy_atomic_cpu) { __ fence(); } 3165 __ lwax(R17_tos, Rclass_or_obj, Roffset); 3166 __ twi_0(R17_tos); 3167 __ isync(); 3168 break; 3169 } 3170 case Bytecodes::_fast_lgetfield: 3171 { 3172 __ ldx(R17_tos, Rclass_or_obj, Roffset); 3173 __ dispatch_epilog(state, Bytecodes::length_for(bytecode())); 3174 3175 __ bind(LisVolatile); 3176 if (support_IRIW_for_not_multiple_copy_atomic_cpu) { __ fence(); } 3177 __ ldx(R17_tos, Rclass_or_obj, Roffset); 3178 __ twi_0(R17_tos); 3179 __ isync(); 3180 break; 3181 } 3182 case Bytecodes::_fast_bgetfield: 3183 { 3184 __ lbzx(R17_tos, Rclass_or_obj, Roffset); 3185 __ extsb(R17_tos, R17_tos); 3186 __ dispatch_epilog(state, Bytecodes::length_for(bytecode())); 3187 3188 __ bind(LisVolatile); 3189 if (support_IRIW_for_not_multiple_copy_atomic_cpu) { __ fence(); } 3190 __ lbzx(R17_tos, Rclass_or_obj, Roffset); 3191 __ twi_0(R17_tos); 3192 __ extsb(R17_tos, R17_tos); 3193 __ isync(); 3194 break; 3195 } 3196 case Bytecodes::_fast_cgetfield: 3197 { 3198 __ lhzx(R17_tos, Rclass_or_obj, Roffset); 3199 __ dispatch_epilog(state, Bytecodes::length_for(bytecode())); 3200 3201 __ bind(LisVolatile); 3202 if (support_IRIW_for_not_multiple_copy_atomic_cpu) { __ fence(); } 3203 __ lhzx(R17_tos, Rclass_or_obj, Roffset); 3204 __ twi_0(R17_tos); 3205 __ isync(); 3206 break; 3207 } 3208 case Bytecodes::_fast_sgetfield: 3209 { 3210 __ lhax(R17_tos, Rclass_or_obj, Roffset); 3211 __ dispatch_epilog(state, Bytecodes::length_for(bytecode())); 3212 3213 __ bind(LisVolatile); 3214 if (support_IRIW_for_not_multiple_copy_atomic_cpu) { __ fence(); } 3215 __ lhax(R17_tos, Rclass_or_obj, Roffset); 3216 __ twi_0(R17_tos); 3217 __ isync(); 3218 break; 3219 } 3220 case Bytecodes::_fast_fgetfield: 3221 { 3222 __ lfsx(F15_ftos, Rclass_or_obj, Roffset); 3223 __ dispatch_epilog(state, Bytecodes::length_for(bytecode())); 3224 3225 __ bind(LisVolatile); 3226 Label Ldummy; 3227 if (support_IRIW_for_not_multiple_copy_atomic_cpu) { __ fence(); } 3228 __ lfsx(F15_ftos, Rclass_or_obj, Roffset); 3229 __ fcmpu(CCR0, F15_ftos, F15_ftos); // Acquire by cmp-br-isync. 3230 __ bne_predict_not_taken(CCR0, Ldummy); 3231 __ bind(Ldummy); 3232 __ isync(); 3233 break; 3234 } 3235 case Bytecodes::_fast_dgetfield: 3236 { 3237 __ lfdx(F15_ftos, Rclass_or_obj, Roffset); 3238 __ dispatch_epilog(state, Bytecodes::length_for(bytecode())); 3239 3240 __ bind(LisVolatile); 3241 Label Ldummy; 3242 if (support_IRIW_for_not_multiple_copy_atomic_cpu) { __ fence(); } 3243 __ lfdx(F15_ftos, Rclass_or_obj, Roffset); 3244 __ fcmpu(CCR0, F15_ftos, F15_ftos); // Acquire by cmp-br-isync. 
3245 __ bne_predict_not_taken(CCR0, Ldummy); 3246 __ bind(Ldummy); 3247 __ isync(); 3248 break; 3249 } 3250 default: ShouldNotReachHere(); 3251 } 3252 } 3253 3254 void TemplateTable::fast_xaccess(TosState state) { 3255 transition(vtos, state); 3256 3257 Label LisVolatile; 3258 ByteSize cp_base_offset = ConstantPoolCache::base_offset(); 3259 const Register Rcache = R3_ARG1, 3260 Rclass_or_obj = R17_tos, 3261 Roffset = R22_tmp2, 3262 Rflags = R23_tmp3, 3263 Rscratch = R12_scratch2; 3264 3265 __ ld(Rclass_or_obj, 0, R18_locals); 3266 3267 // Constant pool already resolved. Get the field offset. 3268 __ get_cache_and_index_at_bcp(Rcache, 2); 3269 load_field_cp_cache_entry(noreg, Rcache, noreg, Roffset, Rflags, false); 3270 3271 // JVMTI support not needed, since we switch back to single bytecode as soon as debugger attaches. 3272 3273 // Needed to report exception at the correct bcp. 3274 __ addi(R14_bcp, R14_bcp, 1); 3275 3276 // Get the load address. 3277 __ null_check_throw(Rclass_or_obj, -1, Rscratch); 3278 3279 // Get volatile flag. 3280 __ rldicl_(Rscratch, Rflags, 64-ConstantPoolCacheEntry::is_volatile_shift, 63); // Extract volatile bit. 3281 __ bne(CCR0, LisVolatile); 3282 3283 switch(state) { 3284 case atos: 3285 { 3286 do_oop_load(_masm, Rclass_or_obj, Roffset, R17_tos, Rscratch, /* nv temp */ Rflags, IN_HEAP); 3287 __ verify_oop(R17_tos); 3288 __ dispatch_epilog(state, Bytecodes::length_for(bytecode()) - 1); // Undo bcp increment. 3289 3290 __ bind(LisVolatile); 3291 if (support_IRIW_for_not_multiple_copy_atomic_cpu) { __ fence(); } 3292 do_oop_load(_masm, Rclass_or_obj, Roffset, R17_tos, Rscratch, /* nv temp */ Rflags, IN_HEAP); 3293 __ verify_oop(R17_tos); 3294 __ twi_0(R17_tos); 3295 __ isync(); 3296 break; 3297 } 3298 case itos: 3299 { 3300 __ lwax(R17_tos, Rclass_or_obj, Roffset); 3301 __ dispatch_epilog(state, Bytecodes::length_for(bytecode()) - 1); // Undo bcp increment. 3302 3303 __ bind(LisVolatile); 3304 if (support_IRIW_for_not_multiple_copy_atomic_cpu) { __ fence(); } 3305 __ lwax(R17_tos, Rclass_or_obj, Roffset); 3306 __ twi_0(R17_tos); 3307 __ isync(); 3308 break; 3309 } 3310 case ftos: 3311 { 3312 __ lfsx(F15_ftos, Rclass_or_obj, Roffset); 3313 __ dispatch_epilog(state, Bytecodes::length_for(bytecode()) - 1); // Undo bcp increment. 3314 3315 __ bind(LisVolatile); 3316 Label Ldummy; 3317 if (support_IRIW_for_not_multiple_copy_atomic_cpu) { __ fence(); } 3318 __ lfsx(F15_ftos, Rclass_or_obj, Roffset); 3319 __ fcmpu(CCR0, F15_ftos, F15_ftos); // Acquire by cmp-br-isync. 3320 __ bne_predict_not_taken(CCR0, Ldummy); 3321 __ bind(Ldummy); 3322 __ isync(); 3323 break; 3324 } 3325 default: ShouldNotReachHere(); 3326 } 3327 __ addi(R14_bcp, R14_bcp, -1); 3328 } 3329 3330 // ============================================================================ 3331 // Calls 3332 3333 // Common code for invoke 3334 // 3335 // Input: 3336 // - byte_no 3337 // 3338 // Output: 3339 // - Rmethod: The method to invoke next or i-klass (invokeinterface). 3340 // - Rret_addr: The return address to return to. 3341 // - Rindex: MethodType (invokehandle), CallSite obj (invokedynamic) or Method (invokeinterface) 3342 // - Rrecv: Cache for "this" pointer, might be noreg if static call. 3343 // - Rflags: Method flags from const pool cache. 3344 // 3345 // Kills: 3346 // - Rscratch1 3347 // 3348 void TemplateTable::prepare_invoke(int byte_no, 3349 Register Rmethod, // linked method (or i-klass) 3350 Register Rret_addr,// return address 3351 Register Rindex, // itable index, MethodType, Method, etc. 
3352 Register Rrecv, // If caller wants to see it. 3353 Register Rflags, // If caller wants to test it. 3354 Register Rscratch 3355 ) { 3356 // Determine flags. 3357 const Bytecodes::Code code = bytecode(); 3358 const bool is_invokeinterface = code == Bytecodes::_invokeinterface; 3359 const bool is_invokedynamic = code == Bytecodes::_invokedynamic; 3360 const bool is_invokehandle = code == Bytecodes::_invokehandle; 3361 const bool is_invokevirtual = code == Bytecodes::_invokevirtual; 3362 const bool is_invokespecial = code == Bytecodes::_invokespecial; 3363 const bool load_receiver = (Rrecv != noreg); 3364 assert(load_receiver == (code != Bytecodes::_invokestatic && code != Bytecodes::_invokedynamic), ""); 3365 3366 assert_different_registers(Rmethod, Rindex, Rflags, Rscratch); 3367 assert_different_registers(Rmethod, Rrecv, Rflags, Rscratch); 3368 assert_different_registers(Rret_addr, Rscratch); 3369 3370 load_invoke_cp_cache_entry(byte_no, Rmethod, Rindex, Rflags, is_invokevirtual, false, is_invokedynamic); 3371 3372 // Saving of SP done in call_from_interpreter. 3373 3374 // Maybe push "appendix" to arguments. 3375 if (is_invokedynamic || is_invokehandle) { 3376 Label Ldone; 3377 __ rldicl_(R0, Rflags, 64-ConstantPoolCacheEntry::has_appendix_shift, 63); 3378 __ beq(CCR0, Ldone); 3379 // Push "appendix" (MethodType, CallSite, etc.). 3380 // This must be done before we get the receiver, 3381 // since the parameter_size includes it. 3382 __ load_resolved_reference_at_index(Rscratch, Rindex); 3383 __ verify_oop(Rscratch); 3384 __ push_ptr(Rscratch); 3385 __ bind(Ldone); 3386 } 3387 3388 // Load receiver if needed (after appendix is pushed so parameter size is correct). 3389 if (load_receiver) { 3390 const Register Rparam_count = Rscratch; 3391 __ andi(Rparam_count, Rflags, ConstantPoolCacheEntry::parameter_size_mask); 3392 __ load_receiver(Rparam_count, Rrecv); 3393 __ verify_oop(Rrecv); 3394 } 3395 3396 // Get return address. 3397 { 3398 Register Rtable_addr = Rscratch; 3399 Register Rret_type = Rret_addr; 3400 address table_addr = (address) Interpreter::invoke_return_entry_table_for(code); 3401 3402 // Get return type. It's coded into the upper 4 bits of the lower half of the 64 bit value. 3403 __ rldicl(Rret_type, Rflags, 64-ConstantPoolCacheEntry::tos_state_shift, 64-ConstantPoolCacheEntry::tos_state_bits); 3404 __ load_dispatch_table(Rtable_addr, (address*)table_addr); 3405 __ sldi(Rret_type, Rret_type, LogBytesPerWord); 3406 // Get return address. 3407 __ ldx(Rret_addr, Rtable_addr, Rret_type); 3408 } 3409 } 3410 3411 // Helper for virtual calls. Load target out of vtable and jump off! 3412 // Kills all passed registers. 3413 void TemplateTable::generate_vtable_call(Register Rrecv_klass, Register Rindex, Register Rret, Register Rtemp) { 3414 3415 assert_different_registers(Rrecv_klass, Rtemp, Rret); 3416 const Register Rtarget_method = Rindex; 3417 3418 // Get target method & entry point. 3419 const int base = in_bytes(Klass::vtable_start_offset()); 3420 // Calc vtable addr scale the vtable index by 8. 3421 __ sldi(Rindex, Rindex, exact_log2(vtableEntry::size_in_bytes())); 3422 // Load target. 3423 __ addi(Rrecv_klass, Rrecv_klass, base + vtableEntry::method_offset_in_bytes()); 3424 __ ldx(Rtarget_method, Rindex, Rrecv_klass); 3425 // Argument and return type profiling. 
3426 __ profile_arguments_type(Rtarget_method, Rrecv_klass /* scratch1 */, Rtemp /* scratch2 */, true); 3427 __ call_from_interpreter(Rtarget_method, Rret, Rrecv_klass /* scratch1 */, Rtemp /* scratch2 */); 3428 } 3429 3430 // Virtual or final call. Final calls are rewritten on the fly to run through "fast_finalcall" next time. 3431 void TemplateTable::invokevirtual(int byte_no) { 3432 transition(vtos, vtos); 3433 3434 Register Rtable_addr = R11_scratch1, 3435 Rret_type = R12_scratch2, 3436 Rret_addr = R5_ARG3, 3437 Rflags = R22_tmp2, // Should survive C call. 3438 Rrecv = R3_ARG1, 3439 Rrecv_klass = Rrecv, 3440 Rvtableindex_or_method = R31, // Should survive C call. 3441 Rnum_params = R4_ARG2, 3442 Rnew_bc = R6_ARG4; 3443 3444 Label LnotFinal; 3445 3446 load_invoke_cp_cache_entry(byte_no, Rvtableindex_or_method, noreg, Rflags, /*virtual*/ true, false, false); 3447 3448 __ testbitdi(CCR0, R0, Rflags, ConstantPoolCacheEntry::is_vfinal_shift); 3449 __ bfalse(CCR0, LnotFinal); 3450 3451 if (RewriteBytecodes && !UseSharedSpaces && !DumpSharedSpaces) { 3452 patch_bytecode(Bytecodes::_fast_invokevfinal, Rnew_bc, R12_scratch2); 3453 } 3454 invokevfinal_helper(Rvtableindex_or_method, Rflags, R11_scratch1, R12_scratch2); 3455 3456 __ align(32, 12); 3457 __ bind(LnotFinal); 3458 // Load "this" pointer (receiver). 3459 __ rldicl(Rnum_params, Rflags, 64, 48); 3460 __ load_receiver(Rnum_params, Rrecv); 3461 __ verify_oop(Rrecv); 3462 3463 // Get return type. It's coded into the upper 4 bits of the lower half of the 64 bit value. 3464 __ rldicl(Rret_type, Rflags, 64-ConstantPoolCacheEntry::tos_state_shift, 64-ConstantPoolCacheEntry::tos_state_bits); 3465 __ load_dispatch_table(Rtable_addr, Interpreter::invoke_return_entry_table()); 3466 __ sldi(Rret_type, Rret_type, LogBytesPerWord); 3467 __ ldx(Rret_addr, Rret_type, Rtable_addr); 3468 __ null_check_throw(Rrecv, oopDesc::klass_offset_in_bytes(), R11_scratch1); 3469 __ load_klass(Rrecv_klass, Rrecv); 3470 __ verify_klass_ptr(Rrecv_klass); 3471 __ profile_virtual_call(Rrecv_klass, R11_scratch1, R12_scratch2, false); 3472 3473 generate_vtable_call(Rrecv_klass, Rvtableindex_or_method, Rret_addr, R11_scratch1); 3474 } 3475 3476 void TemplateTable::fast_invokevfinal(int byte_no) { 3477 transition(vtos, vtos); 3478 3479 assert(byte_no == f2_byte, "use this argument"); 3480 Register Rflags = R22_tmp2, 3481 Rmethod = R31; 3482 load_invoke_cp_cache_entry(byte_no, Rmethod, noreg, Rflags, /*virtual*/ true, /*is_invokevfinal*/ true, false); 3483 invokevfinal_helper(Rmethod, Rflags, R11_scratch1, R12_scratch2); 3484 } 3485 3486 void TemplateTable::invokevfinal_helper(Register Rmethod, Register Rflags, Register Rscratch1, Register Rscratch2) { 3487 3488 assert_different_registers(Rmethod, Rflags, Rscratch1, Rscratch2); 3489 3490 // Load receiver from stack slot. 3491 Register Rrecv = Rscratch2; 3492 Register Rnum_params = Rrecv; 3493 3494 __ ld(Rnum_params, in_bytes(Method::const_offset()), Rmethod); 3495 __ lhz(Rnum_params /* number of params */, in_bytes(ConstMethod::size_of_parameters_offset()), Rnum_params); 3496 3497 // Get return address. 3498 Register Rtable_addr = Rscratch1, 3499 Rret_addr = Rflags, 3500 Rret_type = Rret_addr; 3501 // Get return type. It's coded into the upper 4 bits of the lower half of the 64 bit value. 
3502   __ rldicl(Rret_type, Rflags, 64-ConstantPoolCacheEntry::tos_state_shift, 64-ConstantPoolCacheEntry::tos_state_bits);
3503   __ load_dispatch_table(Rtable_addr, Interpreter::invoke_return_entry_table());
3504   __ sldi(Rret_type, Rret_type, LogBytesPerWord);
3505   __ ldx(Rret_addr, Rret_type, Rtable_addr);
3506
3507   // Load receiver and receiver NULL check.
3508   __ load_receiver(Rnum_params, Rrecv);
3509   __ null_check_throw(Rrecv, -1, Rscratch1);
3510
3511   __ profile_final_call(Rrecv, Rscratch1);
3512   // Argument and return type profiling.
3513   __ profile_arguments_type(Rmethod, Rscratch1, Rscratch2, true);
3514
3515   // Do the call.
3516   __ call_from_interpreter(Rmethod, Rret_addr, Rscratch1, Rscratch2);
3517 }
3518
3519 void TemplateTable::invokespecial(int byte_no) {
3520   assert(byte_no == f1_byte, "use this argument");
3521   transition(vtos, vtos);
3522
3523   Register Rtable_addr = R3_ARG1,
3524            Rret_addr = R4_ARG2,
3525            Rflags = R5_ARG3,
3526            Rreceiver = R6_ARG4,
3527            Rmethod = R31;
3528
3529   prepare_invoke(byte_no, Rmethod, Rret_addr, noreg, Rreceiver, Rflags, R11_scratch1);
3530
3531   // Receiver NULL check.
3532   __ null_check_throw(Rreceiver, -1, R11_scratch1);
3533
3534   __ profile_call(R11_scratch1, R12_scratch2);
3535   // Argument and return type profiling.
3536   __ profile_arguments_type(Rmethod, R11_scratch1, R12_scratch2, false);
3537   __ call_from_interpreter(Rmethod, Rret_addr, R11_scratch1, R12_scratch2);
3538 }
3539
3540 void TemplateTable::invokestatic(int byte_no) {
3541   assert(byte_no == f1_byte, "use this argument");
3542   transition(vtos, vtos);
3543
3544   Register Rtable_addr = R3_ARG1,
3545            Rret_addr = R4_ARG2,
3546            Rflags = R5_ARG3;
3547
3548   prepare_invoke(byte_no, R19_method, Rret_addr, noreg, noreg, Rflags, R11_scratch1);
3549
3550   __ profile_call(R11_scratch1, R12_scratch2);
3551   // Argument and return type profiling.
3552   __ profile_arguments_type(R19_method, R11_scratch1, R12_scratch2, false);
3553   __ call_from_interpreter(R19_method, Rret_addr, R11_scratch1, R12_scratch2);
3554 }
3555
3556 void TemplateTable::invokeinterface_object_method(Register Rrecv_klass,
3557                                                   Register Rret,
3558                                                   Register Rflags,
3559                                                   Register Rmethod,
3560                                                   Register Rtemp1,
3561                                                   Register Rtemp2) {
3562
3563   assert_different_registers(Rmethod, Rret, Rrecv_klass, Rflags, Rtemp1, Rtemp2);
3564   Label LnotFinal;
3565
3566   // Check for vfinal.
3567   __ testbitdi(CCR0, R0, Rflags, ConstantPoolCacheEntry::is_vfinal_shift);
3568   __ bfalse(CCR0, LnotFinal);
3569
3570   Register Rscratch = Rflags; // Rflags is dead now.
3571
3572   // Final call case.
3573   __ profile_final_call(Rtemp1, Rscratch);
3574   // Argument and return type profiling.
3575   __ profile_arguments_type(Rmethod, Rscratch, Rrecv_klass /* scratch */, true);
3576   // Do the final call - the index (f2) contains the method.
3577   __ call_from_interpreter(Rmethod, Rret, Rscratch, Rrecv_klass /* scratch */);
3578
3579   // Non-final call case.
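  // Reached only via the bfalse(LnotFinal) branch above; the final call path
  // has already dispatched to the callee and does not fall through to here.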
3580   __ bind(LnotFinal);
3581   __ profile_virtual_call(Rrecv_klass, Rtemp1, Rscratch, false);
3582   generate_vtable_call(Rrecv_klass, Rmethod, Rret, Rscratch);
3583 }
3584
3585 void TemplateTable::invokeinterface(int byte_no) {
3586   assert(byte_no == f1_byte, "use this argument");
3587   transition(vtos, vtos);
3588
3589   const Register Rscratch1 = R11_scratch1,
3590                  Rscratch2 = R12_scratch2,
3591                  Rmethod = R6_ARG4,
3592                  Rmethod2 = R9_ARG7,
3593                  Rinterface_klass = R5_ARG3,
3594                  Rret_addr = R8_ARG6,
3595                  Rindex = R10_ARG8,
3596                  Rreceiver = R3_ARG1,
3597                  Rrecv_klass = R4_ARG2,
3598                  Rflags = R7_ARG5;
3599
3600   prepare_invoke(byte_no, Rinterface_klass, Rret_addr, Rmethod, Rreceiver, Rflags, Rscratch1);
3601
3602   // First check for Object case, then private interface method,
3603   // then regular interface method.
3604
3605   // Get receiver klass - this is also a null check.
3606   __ null_check_throw(Rreceiver, oopDesc::klass_offset_in_bytes(), Rscratch2);
3607   __ load_klass(Rrecv_klass, Rreceiver);
3608
3609   // Check corner case object method.
3610   // Special case of invokeinterface called for virtual method of
3611   // java.lang.Object. See ConstantPoolCacheEntry::set_method() for details:
3612   // The invokeinterface was rewritten to an invokevirtual, hence we have
3613   // to handle this corner case.
3614
3615   Label LnotObjectMethod, Lthrow_ame;
3616   __ testbitdi(CCR0, R0, Rflags, ConstantPoolCacheEntry::is_forced_virtual_shift);
3617   __ bfalse(CCR0, LnotObjectMethod);
3618   invokeinterface_object_method(Rrecv_klass, Rret_addr, Rflags, Rmethod, Rscratch1, Rscratch2);
3619   __ bind(LnotObjectMethod);
3620
3621   // Check for private method invocation - indicated by vfinal.
3622   Label LnotVFinal, L_no_such_interface, L_subtype;
3623
3624   __ testbitdi(CCR0, R0, Rflags, ConstantPoolCacheEntry::is_vfinal_shift);
3625   __ bfalse(CCR0, LnotVFinal);
3626
3627   __ check_klass_subtype(Rrecv_klass, Rinterface_klass, Rscratch1, Rscratch2, L_subtype);
3628   // If we get here, the typecheck failed.
3629   __ b(L_no_such_interface);
3630   __ bind(L_subtype);
3631
3632   // Do the call.
3633
3634   Register Rscratch = Rflags; // Rflags is dead now.
3635
3636   __ profile_final_call(Rscratch1, Rscratch);
3637   __ profile_arguments_type(Rmethod, Rscratch, Rrecv_klass /* scratch */, true);
3638
3639   __ call_from_interpreter(Rmethod, Rret_addr, Rscratch, Rrecv_klass /* scratch */);
3640
3641   __ bind(LnotVFinal);
3642
3643   __ lookup_interface_method(Rrecv_klass, Rinterface_klass, noreg, noreg, Rscratch1, Rscratch2,
3644                              L_no_such_interface, /*return_method=*/false);
3645
3646   __ profile_virtual_call(Rrecv_klass, Rscratch1, Rscratch2, false);
3647
3648   // Find entry point to call.
3649
3650   // Get declaring interface class from method.
3651   __ load_method_holder(Rinterface_klass, Rmethod);
3652
3653   // Get itable index from method.
3654   __ lwa(Rindex, in_bytes(Method::itable_index_offset()), Rmethod);
3655   __ subfic(Rindex, Rindex, Method::itable_index_max);
3656
3657   __ lookup_interface_method(Rrecv_klass, Rinterface_klass, Rindex, Rmethod2, Rscratch1, Rscratch2,
3658                              L_no_such_interface);
3659
3660   __ cmpdi(CCR0, Rmethod2, 0);
3661   __ beq(CCR0, Lthrow_ame);
3662   // Found entry. Jump off!
3663   // Argument and return type profiling.
3664   __ profile_arguments_type(Rmethod2, Rscratch1, Rscratch2, true);
3665   //__ profile_called_method(Rindex, Rscratch1);
3666   __ call_from_interpreter(Rmethod2, Rret_addr, Rscratch1, Rscratch2);
3667
3668   // Itable entry was NULL => Throw abstract method error.
3669 __ bind(Lthrow_ame); 3670 // Pass arguments for generating a verbose error message. 3671 call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_AbstractMethodErrorVerbose), 3672 Rrecv_klass, Rmethod); 3673 3674 // Interface was not found => Throw incompatible class change error. 3675 __ bind(L_no_such_interface); 3676 // Pass arguments for generating a verbose error message. 3677 call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_IncompatibleClassChangeErrorVerbose), 3678 Rrecv_klass, Rinterface_klass); 3679 DEBUG_ONLY( __ should_not_reach_here(); ) 3680 } 3681 3682 void TemplateTable::invokedynamic(int byte_no) { 3683 transition(vtos, vtos); 3684 3685 const Register Rret_addr = R3_ARG1, 3686 Rflags = R4_ARG2, 3687 Rmethod = R22_tmp2, 3688 Rscratch1 = R11_scratch1, 3689 Rscratch2 = R12_scratch2; 3690 3691 prepare_invoke(byte_no, Rmethod, Rret_addr, Rscratch1, noreg, Rflags, Rscratch2); 3692 3693 // Profile this call. 3694 __ profile_call(Rscratch1, Rscratch2); 3695 3696 // Off we go. With the new method handles, we don't jump to a method handle 3697 // entry any more. Instead, we pushed an "appendix" in prepare invoke, which happens 3698 // to be the callsite object the bootstrap method returned. This is passed to a 3699 // "link" method which does the dispatch (Most likely just grabs the MH stored 3700 // inside the callsite and does an invokehandle). 3701 // Argument and return type profiling. 3702 __ profile_arguments_type(Rmethod, Rscratch1, Rscratch2, false); 3703 __ call_from_interpreter(Rmethod, Rret_addr, Rscratch1 /* scratch1 */, Rscratch2 /* scratch2 */); 3704 } 3705 3706 void TemplateTable::invokehandle(int byte_no) { 3707 transition(vtos, vtos); 3708 3709 const Register Rret_addr = R3_ARG1, 3710 Rflags = R4_ARG2, 3711 Rrecv = R5_ARG3, 3712 Rmethod = R22_tmp2, 3713 Rscratch1 = R11_scratch1, 3714 Rscratch2 = R12_scratch2; 3715 3716 prepare_invoke(byte_no, Rmethod, Rret_addr, Rscratch1, Rrecv, Rflags, Rscratch2); 3717 __ verify_method_ptr(Rmethod); 3718 __ null_check_throw(Rrecv, -1, Rscratch2); 3719 3720 __ profile_final_call(Rrecv, Rscratch1); 3721 3722 // Still no call from handle => We call the method handle interpreter here. 3723 // Argument and return type profiling. 3724 __ profile_arguments_type(Rmethod, Rscratch1, Rscratch2, true); 3725 __ call_from_interpreter(Rmethod, Rret_addr, Rscratch1 /* scratch1 */, Rscratch2 /* scratch2 */); 3726 } 3727 3728 // ============================================================================= 3729 // Allocation 3730 3731 // Puts allocated obj ref onto the expression stack. 3732 void TemplateTable::_new() { 3733 transition(vtos, atos); 3734 3735 Label Lslow_case, 3736 Ldone; 3737 3738 const Register RallocatedObject = R17_tos, 3739 RinstanceKlass = R9_ARG7, 3740 Rscratch = R11_scratch1, 3741 Roffset = R8_ARG6, 3742 Rinstance_size = Roffset, 3743 Rcpool = R4_ARG2, 3744 Rtags = R3_ARG1, 3745 Rindex = R5_ARG3; 3746 3747 // -------------------------------------------------------------------------- 3748 // Check if fast case is possible. 3749 3750 // Load pointers to const pool and const pool's tags array. 3751 __ get_cpool_and_tags(Rcpool, Rtags); 3752 // Load index of constant pool entry. 3753 __ get_2_byte_integer_at_bcp(1, Rindex, InterpreterMacroAssembler::Unsigned); 3754 3755 // Note: compared to other architectures, PPC's implementation always goes 3756 // to the slow path if TLAB is used and fails. 
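  // The slow path (Lslow_case below) calls InterpreterRuntime::_new, which
  // performs the allocation in the VM and may set up a fresh TLAB.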
3757   if (UseTLAB) {
3758     // Make sure the class we're about to instantiate has been resolved.
3759     // This is done before loading the InstanceKlass to be consistent with the order
3760     // in which the constant pool is updated (see ConstantPoolCache::klass_at_put).
3761     __ addi(Rtags, Rtags, Array<u1>::base_offset_in_bytes());
3762     __ lbzx(Rtags, Rindex, Rtags);
3763
3764     __ cmpdi(CCR0, Rtags, JVM_CONSTANT_Class);
3765     __ bne(CCR0, Lslow_case);
3766
3767     // Get the InstanceKlass.
3768     __ sldi(Roffset, Rindex, LogBytesPerWord);
3769     __ load_resolved_klass_at_offset(Rcpool, Roffset, RinstanceKlass);
3770
3771     // Make sure klass is fully initialized and get instance_size.
3772     __ lbz(Rscratch, in_bytes(InstanceKlass::init_state_offset()), RinstanceKlass);
3773     __ lwz(Rinstance_size, in_bytes(Klass::layout_helper_offset()), RinstanceKlass);
3774
3775     __ cmpdi(CCR1, Rscratch, InstanceKlass::fully_initialized);
3776     // Make sure klass has no finalizer and is not abstract, an interface or java/lang/Class.
3777     __ andi_(R0, Rinstance_size, Klass::_lh_instance_slow_path_bit); // slow path bit equals 0?
3778
3779     __ crnand(CCR0, Assembler::equal, CCR1, Assembler::equal); // slow path bit set or not fully initialized?
3780     __ beq(CCR0, Lslow_case);
3781
3782     // --------------------------------------------------------------------------
3783     // Fast case:
3784     // Allocate the instance.
3785     // 1) Try to allocate in the TLAB.
3786     // 2) If the above fails (or is not applicable), go to a slow case (creates a new TLAB, etc.).
3787
3788     Register RoldTopValue = RallocatedObject; // Object will be allocated here if it fits.
3789     Register RnewTopValue = R6_ARG4;
3790     Register RendValue = R7_ARG5;
3791
3792     // Check if we can allocate in the TLAB.
3793     __ ld(RoldTopValue, in_bytes(JavaThread::tlab_top_offset()), R16_thread);
3794     __ ld(RendValue, in_bytes(JavaThread::tlab_end_offset()), R16_thread);
3795
3796     __ add(RnewTopValue, Rinstance_size, RoldTopValue);
3797
3798     // If there is enough space, we do not CAS and do not clear.
3799     __ cmpld(CCR0, RnewTopValue, RendValue);
3800     __ bgt(CCR0, Lslow_case);
3801
3802     __ std(RnewTopValue, in_bytes(JavaThread::tlab_top_offset()), R16_thread);
3803
3804     if (!ZeroTLAB) {
3805       // --------------------------------------------------------------------------
3806       // Init1: Zero out newly allocated memory.
3807       // Initialize remaining object fields.
3808       Register Rbase = Rtags;
3809       __ addi(Rinstance_size, Rinstance_size, 7 - (int)sizeof(oopDesc));
3810       __ addi(Rbase, RallocatedObject, sizeof(oopDesc));
3811       __ srdi(Rinstance_size, Rinstance_size, 3);
3812
3813       // Clear out the object, skipping the header. Also takes care of the zero-length case.
3814       __ clear_memory_doubleword(Rbase, Rinstance_size);
3815     }
3816
3817     // --------------------------------------------------------------------------
3818     // Init2: Initialize the header: mark, klass.
3819     // Init mark.
3820     if (UseBiasedLocking) {
3821       __ ld(Rscratch, in_bytes(Klass::prototype_header_offset()), RinstanceKlass);
3822     } else {
3823       __ load_const_optimized(Rscratch, markWord::prototype().value(), R0);
3824     }
3825     __ std(Rscratch, oopDesc::mark_offset_in_bytes(), RallocatedObject);
3826
3827     // Init klass.
3828     __ store_klass_gap(RallocatedObject);
3829     __ store_klass(RallocatedObject, RinstanceKlass, Rscratch); // klass (last for cms)
3830
3831     // Check and trigger dtrace event.
3832     SkipIfEqualZero::skip_to_label_if_equal_zero(_masm, Rscratch, &DTraceAllocProbes, Ldone);
3833     __ push(atos);
3834     __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_object_alloc));
3835     __ pop(atos);
3836
3837     __ b(Ldone);
3838   }
3839
3840   // --------------------------------------------------------------------------
3841   // slow case
3842   __ bind(Lslow_case);
3843   call_VM(R17_tos, CAST_FROM_FN_PTR(address, InterpreterRuntime::_new), Rcpool, Rindex);
3844
3845   // continue
3846   __ bind(Ldone);
3847
3848   // Must prevent reordering of stores for object initialization with stores that publish the new object.
3849   __ membar(Assembler::StoreStore);
3850 }
3851
3852 void TemplateTable::newarray() {
3853   transition(itos, atos);
3854
3855   __ lbz(R4, 1, R14_bcp);
3856   __ extsw(R5, R17_tos);
3857   call_VM(R17_tos, CAST_FROM_FN_PTR(address, InterpreterRuntime::newarray), R4, R5 /* size */);
3858
3859   // Must prevent reordering of stores for object initialization with stores that publish the new object.
3860   __ membar(Assembler::StoreStore);
3861 }
3862
3863 void TemplateTable::anewarray() {
3864   transition(itos, atos);
3865
3866   __ get_constant_pool(R4);
3867   __ get_2_byte_integer_at_bcp(1, R5, InterpreterMacroAssembler::Unsigned);
3868   __ extsw(R6, R17_tos); // size
3869   call_VM(R17_tos, CAST_FROM_FN_PTR(address, InterpreterRuntime::anewarray), R4 /* pool */, R5 /* index */, R6 /* size */);
3870
3871   // Must prevent reordering of stores for object initialization with stores that publish the new object.
3872   __ membar(Assembler::StoreStore);
3873 }
3874
3875 // Allocate a multi-dimensional array.
3876 void TemplateTable::multianewarray() {
3877   transition(vtos, atos);
3878
3879   Register Rptr = R31; // Needs to survive C call.
3880
3881   // Put ndims * wordSize into Rptr.
3882   __ lbz(Rptr, 3, R14_bcp);
3883   __ sldi(Rptr, Rptr, Interpreter::logStackElementSize);
3884   // Esp points past last_dim, so set R4 to the first_dim address.
3885   __ add(R4, Rptr, R15_esp);
3886   call_VM(R17_tos, CAST_FROM_FN_PTR(address, InterpreterRuntime::multianewarray), R4 /* first_size_address */);
3887   // Pop all dimensions off the stack.
3888   __ add(R15_esp, Rptr, R15_esp);
3889
3890   // Must prevent reordering of stores for object initialization with stores that publish the new object.
3891   __ membar(Assembler::StoreStore);
3892 }
3893
3894 void TemplateTable::arraylength() {
3895   transition(atos, itos);
3896
3897   __ verify_oop(R17_tos);
3898   __ null_check_throw(R17_tos, arrayOopDesc::length_offset_in_bytes(), R11_scratch1);
3899   __ lwa(R17_tos, arrayOopDesc::length_offset_in_bytes(), R17_tos);
3900 }
3901
3902 // ============================================================================
3903 // Typechecks
3904
3905 void TemplateTable::checkcast() {
3906   transition(atos, atos);
3907
3908   Label Ldone, Lis_null, Lquicked, Lresolved;
3909   Register Roffset = R6_ARG4,
3910            RobjKlass = R4_ARG2,
3911            RspecifiedKlass = R5_ARG3, // Generate_ClassCastException_verbose_handler will read value from this register.
3912            Rcpool = R11_scratch1,
3913            Rtags = R12_scratch2;
3914
3915   // Null does not pass.
3916   __ cmpdi(CCR0, R17_tos, 0);
3917   __ beq(CCR0, Lis_null);
3918
3919   // Get constant pool tag to find out if the bytecode has already been "quickened".
3920 __ get_cpool_and_tags(Rcpool, Rtags); 3921 3922 __ get_2_byte_integer_at_bcp(1, Roffset, InterpreterMacroAssembler::Unsigned); 3923 3924 __ addi(Rtags, Rtags, Array<u1>::base_offset_in_bytes()); 3925 __ lbzx(Rtags, Rtags, Roffset); 3926 3927 __ cmpdi(CCR0, Rtags, JVM_CONSTANT_Class); 3928 __ beq(CCR0, Lquicked); 3929 3930 // Call into the VM to "quicken" instanceof. 3931 __ push_ptr(); // for GC 3932 call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::quicken_io_cc)); 3933 __ get_vm_result_2(RspecifiedKlass); 3934 __ pop_ptr(); // Restore receiver. 3935 __ b(Lresolved); 3936 3937 // Extract target class from constant pool. 3938 __ bind(Lquicked); 3939 __ sldi(Roffset, Roffset, LogBytesPerWord); 3940 __ load_resolved_klass_at_offset(Rcpool, Roffset, RspecifiedKlass); 3941 3942 // Do the checkcast. 3943 __ bind(Lresolved); 3944 // Get value klass in RobjKlass. 3945 __ load_klass(RobjKlass, R17_tos); 3946 // Generate a fast subtype check. Branch to cast_ok if no failure. Return 0 if failure. 3947 __ gen_subtype_check(RobjKlass, RspecifiedKlass, /*3 temp regs*/ Roffset, Rcpool, Rtags, /*target if subtype*/ Ldone); 3948 3949 // Not a subtype; so must throw exception 3950 // Target class oop is in register R6_ARG4 == RspecifiedKlass by convention. 3951 __ load_dispatch_table(R11_scratch1, (address*)Interpreter::_throw_ClassCastException_entry); 3952 __ mtctr(R11_scratch1); 3953 __ bctr(); 3954 3955 // Profile the null case. 3956 __ align(32, 12); 3957 __ bind(Lis_null); 3958 __ profile_null_seen(R11_scratch1, Rtags); // Rtags used as scratch. 3959 3960 __ align(32, 12); 3961 __ bind(Ldone); 3962 } 3963 3964 // Output: 3965 // - tos == 0: Obj was null or not an instance of class. 3966 // - tos == 1: Obj was an instance of class. 3967 void TemplateTable::instanceof() { 3968 transition(atos, itos); 3969 3970 Label Ldone, Lis_null, Lquicked, Lresolved; 3971 Register Roffset = R6_ARG4, 3972 RobjKlass = R4_ARG2, 3973 RspecifiedKlass = R5_ARG3, 3974 Rcpool = R11_scratch1, 3975 Rtags = R12_scratch2; 3976 3977 // Null does not pass. 3978 __ cmpdi(CCR0, R17_tos, 0); 3979 __ beq(CCR0, Lis_null); 3980 3981 // Get constant pool tag to find out if the bytecode has already been "quickened". 3982 __ get_cpool_and_tags(Rcpool, Rtags); 3983 3984 __ get_2_byte_integer_at_bcp(1, Roffset, InterpreterMacroAssembler::Unsigned); 3985 3986 __ addi(Rtags, Rtags, Array<u1>::base_offset_in_bytes()); 3987 __ lbzx(Rtags, Rtags, Roffset); 3988 3989 __ cmpdi(CCR0, Rtags, JVM_CONSTANT_Class); 3990 __ beq(CCR0, Lquicked); 3991 3992 // Call into the VM to "quicken" instanceof. 3993 __ push_ptr(); // for GC 3994 call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::quicken_io_cc)); 3995 __ get_vm_result_2(RspecifiedKlass); 3996 __ pop_ptr(); // Restore receiver. 3997 __ b(Lresolved); 3998 3999 // Extract target class from constant pool. 4000 __ bind(Lquicked); 4001 __ sldi(Roffset, Roffset, LogBytesPerWord); 4002 __ load_resolved_klass_at_offset(Rcpool, Roffset, RspecifiedKlass); 4003 4004 // Do the checkcast. 4005 __ bind(Lresolved); 4006 // Get value klass in RobjKlass. 4007 __ load_klass(RobjKlass, R17_tos); 4008 // Generate a fast subtype check. Branch to cast_ok if no failure. Return 0 if failure. 4009 __ li(R17_tos, 1); 4010 __ gen_subtype_check(RobjKlass, RspecifiedKlass, /*3 temp regs*/ Roffset, Rcpool, Rtags, /*target if subtype*/ Ldone); 4011 __ li(R17_tos, 0); 4012 4013 if (ProfileInterpreter) { 4014 __ b(Ldone); 4015 } 4016 4017 // Profile the null case. 
4018   __ align(32, 12);
4019   __ bind(Lis_null);
4020   __ profile_null_seen(Rcpool, Rtags); // Rcpool and Rtags used as scratch.
4021
4022   __ align(32, 12);
4023   __ bind(Ldone);
4024 }
4025
4026 // =============================================================================
4027 // Breakpoints
4028
4029 void TemplateTable::_breakpoint() {
4030   transition(vtos, vtos);
4031
4032   // Get the unpatched byte code.
4033   __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::get_original_bytecode_at), R19_method, R14_bcp);
4034   __ mr(R31, R3_RET);
4035
4036   // Post the breakpoint event.
4037   __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::_breakpoint), R19_method, R14_bcp);
4038
4039   // Complete the execution of original bytecode.
4040   __ dispatch_Lbyte_code(vtos, R31, Interpreter::normal_table(vtos));
4041 }
4042
4043 // =============================================================================
4044 // Exceptions
4045
4046 void TemplateTable::athrow() {
4047   transition(atos, vtos);
4048
4049   // Exception oop is in tos.
4050   __ verify_oop(R17_tos);
4051
4052   __ null_check_throw(R17_tos, -1, R11_scratch1);
4053
4054   // The interpreter's throw_exception entry expects the exception oop to be in R3.
4055   __ mr(R3_RET, R17_tos);
4056   __ load_dispatch_table(R11_scratch1, (address*)Interpreter::throw_exception_entry());
4057   __ mtctr(R11_scratch1);
4058   __ bctr();
4059 }
4060
4061 // =============================================================================
4062 // Synchronization
4063 // Searches the basic object lock list on the stack for a free slot
4064 // and uses it to lock the object in tos.
4065 //
4066 // Recursive locking is enabled by exiting the search if the same
4067 // object is already found in the list. In that case, a new BasicObjectLock
4068 // is allocated "higher up" in the stack and is therefore found first
4069 // at the next monitorexit.
4070 void TemplateTable::monitorenter() {
4071   transition(atos, vtos);
4072
4073   __ verify_oop(R17_tos);
4074
4075   Register Rcurrent_monitor = R11_scratch1,
4076            Rcurrent_obj = R12_scratch2,
4077            Robj_to_lock = R17_tos,
4078            Rscratch1 = R3_ARG1,
4079            Rscratch2 = R4_ARG2,
4080            Rscratch3 = R5_ARG3,
4081            Rcurrent_obj_addr = R6_ARG4;
4082
4083   // ------------------------------------------------------------------------------
4084   // Null pointer exception.
4085   __ null_check_throw(Robj_to_lock, -1, R11_scratch1);
4086
4087   // Try to acquire a lock on the object.
4088   // Repeat until succeeded (i.e., until monitorenter returns true).
4089
4090   // ------------------------------------------------------------------------------
4091   // Find a free slot in the monitor block.
4092   Label Lfound, Lexit, Lallocate_new;
4093   ConditionRegister found_free_slot = CCR0,
4094                     found_same_obj = CCR1,
4095                     reached_limit = CCR6;
4096   {
4097     Label Lloop;
4098     Register Rlimit = Rcurrent_monitor;
4099
4100     // Set up search loop - start with topmost monitor.
4101     __ add(Rcurrent_obj_addr, BasicObjectLock::obj_offset_in_bytes(), R26_monitor);
4102
4103     __ ld(Rlimit, 0, R1_SP);
4104     __ addi(Rlimit, Rlimit, - (frame::ijava_state_size + frame::interpreter_frame_monitor_size_in_bytes() - BasicObjectLock::obj_offset_in_bytes())); // Monitor base
4105
4106     // Check if any slot is present => shortcut to allocation if not.
4107     __ cmpld(reached_limit, Rcurrent_obj_addr, Rlimit);
4108     __ bgt(reached_limit, Lallocate_new);
4109
4110     // Pre-load topmost slot.
4111     __ ld(Rcurrent_obj, 0, Rcurrent_obj_addr);
4112     __ addi(Rcurrent_obj_addr, Rcurrent_obj_addr, frame::interpreter_frame_monitor_size() * wordSize);
4113     // The search loop.
4114     __ bind(Lloop);
4115     // Found free slot?
4116     __ cmpdi(found_free_slot, Rcurrent_obj, 0);
4117     // Is this entry for the same obj? If so, stop the search and take the found
4118     // free slot or allocate a new one to enable recursive locking.
4119     __ cmpd(found_same_obj, Rcurrent_obj, Robj_to_lock);
4120     __ cmpld(reached_limit, Rcurrent_obj_addr, Rlimit);
4121     __ beq(found_free_slot, Lexit);
4122     __ beq(found_same_obj, Lallocate_new);
4123     __ bgt(reached_limit, Lallocate_new);
4124     // Check if the last allocated BasicObjectLock has been reached.
4125     __ ld(Rcurrent_obj, 0, Rcurrent_obj_addr);
4126     __ addi(Rcurrent_obj_addr, Rcurrent_obj_addr, frame::interpreter_frame_monitor_size() * wordSize);
4127     // Next iteration if unchecked BasicObjectLocks exist on the stack.
4128     __ b(Lloop);
4129   }
4130
4131   // ------------------------------------------------------------------------------
4132   // Check if we found a free slot.
4133   __ bind(Lexit);
4134
4135   __ addi(Rcurrent_monitor, Rcurrent_obj_addr, -(frame::interpreter_frame_monitor_size() * wordSize) - BasicObjectLock::obj_offset_in_bytes());
4136   __ addi(Rcurrent_obj_addr, Rcurrent_obj_addr, - frame::interpreter_frame_monitor_size() * wordSize);
4137   __ b(Lfound);
4138
4139   // We didn't find a free BasicObjectLock => allocate one.
4140   __ align(32, 12);
4141   __ bind(Lallocate_new);
4142   __ add_monitor_to_stack(false, Rscratch1, Rscratch2);
4143   __ mr(Rcurrent_monitor, R26_monitor);
4144   __ addi(Rcurrent_obj_addr, R26_monitor, BasicObjectLock::obj_offset_in_bytes());
4145
4146   // ------------------------------------------------------------------------------
4147   // We now have a slot to lock.
4148   __ bind(Lfound);
4149
4150   // Increment bcp to point to the next bytecode, so exception handling for async. exceptions works correctly.
4151   // The object has already been popped from the stack, so the expression stack looks correct.
4152   __ addi(R14_bcp, R14_bcp, 1);
4153
4154   __ std(Robj_to_lock, 0, Rcurrent_obj_addr);
4155   __ lock_object(Rcurrent_monitor, Robj_to_lock);
4156
4157   // Check if there's enough space on the stack for the monitors after locking.
4158   // This emits a single store.
4159   __ generate_stack_overflow_check(0);
4160
4161   // The bcp has already been incremented. Just need to dispatch to next instruction.
4162   __ dispatch_next(vtos);
4163 }
4164
4165 void TemplateTable::monitorexit() {
4166   transition(atos, vtos);
4167   __ verify_oop(R17_tos);
4168
4169   Register Rcurrent_monitor = R11_scratch1,
4170            Rcurrent_obj = R12_scratch2,
4171            Robj_to_lock = R17_tos,
4172            Rcurrent_obj_addr = R3_ARG1,
4173            Rlimit = R4_ARG2;
4174   Label Lfound, Lillegal_monitor_state;
4175
4176   // Check corner case: unbalanced monitorenter/monitorexit.
4177   __ ld(Rlimit, 0, R1_SP);
4178   __ addi(Rlimit, Rlimit, - (frame::ijava_state_size + frame::interpreter_frame_monitor_size_in_bytes())); // Monitor base
4179
4180   // Null pointer check.
4181   __ null_check_throw(Robj_to_lock, -1, R11_scratch1);
4182
4183   __ cmpld(CCR0, R26_monitor, Rlimit);
4184   __ bgt(CCR0, Lillegal_monitor_state);
4185
4186   // Find the corresponding slot in the monitors stack section.
4187   {
4188     Label Lloop;
4189
4190     // Start with topmost monitor.
4191 __ addi(Rcurrent_obj_addr, R26_monitor, BasicObjectLock::obj_offset_in_bytes()); 4192 __ addi(Rlimit, Rlimit, BasicObjectLock::obj_offset_in_bytes()); 4193 __ ld(Rcurrent_obj, 0, Rcurrent_obj_addr); 4194 __ addi(Rcurrent_obj_addr, Rcurrent_obj_addr, frame::interpreter_frame_monitor_size() * wordSize); 4195 4196 __ bind(Lloop); 4197 // Is this entry for same obj? 4198 __ cmpd(CCR0, Rcurrent_obj, Robj_to_lock); 4199 __ beq(CCR0, Lfound); 4200 4201 // Check if last allocated BasicLockObj reached. 4202 4203 __ ld(Rcurrent_obj, 0, Rcurrent_obj_addr); 4204 __ cmpld(CCR0, Rcurrent_obj_addr, Rlimit); 4205 __ addi(Rcurrent_obj_addr, Rcurrent_obj_addr, frame::interpreter_frame_monitor_size() * wordSize); 4206 4207 // Next iteration if unchecked BasicObjectLocks exist on the stack. 4208 __ ble(CCR0, Lloop); 4209 } 4210 4211 // Fell through without finding the basic obj lock => throw up! 4212 __ bind(Lillegal_monitor_state); 4213 call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_illegal_monitor_state_exception)); 4214 __ should_not_reach_here(); 4215 4216 __ align(32, 12); 4217 __ bind(Lfound); 4218 __ addi(Rcurrent_monitor, Rcurrent_obj_addr, 4219 -(frame::interpreter_frame_monitor_size() * wordSize) - BasicObjectLock::obj_offset_in_bytes()); 4220 __ unlock_object(Rcurrent_monitor); 4221 } 4222 4223 // ============================================================================ 4224 // Wide bytecodes 4225 4226 // Wide instructions. Simply redirects to the wide entry point for that instruction. 4227 void TemplateTable::wide() { 4228 transition(vtos, vtos); 4229 4230 const Register Rtable = R11_scratch1, 4231 Rindex = R12_scratch2, 4232 Rtmp = R0; 4233 4234 __ lbz(Rindex, 1, R14_bcp); 4235 4236 __ load_dispatch_table(Rtable, Interpreter::_wentry_point); 4237 4238 __ slwi(Rindex, Rindex, LogBytesPerWord); 4239 __ ldx(Rtmp, Rtable, Rindex); 4240 __ mtctr(Rtmp); 4241 __ bctr(); 4242 // Note: the bcp increment step is part of the individual wide bytecode implementations. 4243 } --- EOF ---