/*
 * Copyright (c) 2014, 2015, Oracle and/or its affiliates. All rights reserved.
 * Copyright 2013, 2015 SAP AG. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "asm/macroAssembler.inline.hpp"
#include "interpreter/interpreter.hpp"
#include "interpreter/interpreterRuntime.hpp"
#include "interpreter/interp_masm.hpp"
#include "interpreter/templateInterpreter.hpp"
#include "interpreter/templateTable.hpp"
#include "memory/universe.inline.hpp"
#include "oops/objArrayKlass.hpp"
#include "oops/oop.inline.hpp"
#include "prims/methodHandles.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubRoutines.hpp"
#include "runtime/synchronizer.hpp"
#include "utilities/macros.hpp"

#undef __
#define __ _masm->

// ============================================================================
// Misc helpers

// Do an oop store like *(base + index) = val OR *(base + offset) = val
// (only one of the two variants can be used at a time).
// Index can be noreg.
// Kills:
//   Rbase, Rtmp1, Rtmp2, Rtmp3
static void do_oop_store(InterpreterMacroAssembler* _masm,
                         Register           Rbase,
                         RegisterOrConstant offset,
                         Register           Rval,   // Noreg means always null.
                         Register           Rtmp1,
                         Register           Rtmp2,
                         Register           Rtmp3,
                         BarrierSet::Name   barrier,
                         bool               precise,
                         bool               check_null) {
  assert_different_registers(Rtmp1, Rtmp2, Rtmp3, Rval, Rbase);

  switch (barrier) {
#if INCLUDE_ALL_GCS
    case BarrierSet::G1SATBCTLogging:
      {
        // Load and record the previous value.
        __ g1_write_barrier_pre(Rbase, offset,
                                Rtmp3, /* holder of pre_val ? */
                                Rtmp1, Rtmp2, false /* frame */);

        Label Lnull, Ldone;
        if (Rval != noreg) {
          if (check_null) {
            __ cmpdi(CCR0, Rval, 0);
            __ beq(CCR0, Lnull);
          }
          __ store_heap_oop_not_null(Rval, offset, Rbase, /*Rval must stay uncompressed.*/ Rtmp1);
          // Mark the card.
          if (!(offset.is_constant() && offset.as_constant() == 0) && precise) {
            __ add(Rbase, offset, Rbase);
          }
          __ g1_write_barrier_post(Rbase, Rval, Rtmp1, Rtmp2, Rtmp3, /*filtered (fast path)*/ &Ldone);
          if (check_null) { __ b(Ldone); }
        }

        if (Rval == noreg || check_null) { // Store null oop.
          Register Rnull = Rval;
          __ bind(Lnull);
          if (Rval == noreg) {
            Rnull = Rtmp1;
            __ li(Rnull, 0);
          }
          if (UseCompressedOops) {
            __ stw(Rnull, offset, Rbase);
          } else {
            __ std(Rnull, offset, Rbase);
          }
        }
        __ bind(Ldone);
      }
      break;
#endif // INCLUDE_ALL_GCS
    case BarrierSet::CardTableForRS:
    case BarrierSet::CardTableExtension:
      {
        Label Lnull, Ldone;
        if (Rval != noreg) {
          if (check_null) {
            __ cmpdi(CCR0, Rval, 0);
            __ beq(CCR0, Lnull);
          }
          __ store_heap_oop_not_null(Rval, offset, Rbase, /*Rval must stay uncompressed.*/ Rtmp1);
          // Mark the card.
          if (!(offset.is_constant() && offset.as_constant() == 0) && precise) {
            __ add(Rbase, offset, Rbase);
          }
          __ card_write_barrier_post(Rbase, Rval, Rtmp1);
          if (check_null) {
            __ b(Ldone);
          }
        }

        if (Rval == noreg || check_null) { // Store null oop.
          Register Rnull = Rval;
          __ bind(Lnull);
          if (Rval == noreg) {
            Rnull = Rtmp1;
            __ li(Rnull, 0);
          }
          if (UseCompressedOops) {
            __ stw(Rnull, offset, Rbase);
          } else {
            __ std(Rnull, offset, Rbase);
          }
        }
        __ bind(Ldone);
      }
      break;
    case BarrierSet::ModRef:
      ShouldNotReachHere();
      break;
    default:
      ShouldNotReachHere();
  }
}

// ============================================================================
// Platform-dependent initialization

void TemplateTable::pd_initialize() {
  // No ppc64 specific initialization.
}

Address TemplateTable::at_bcp(int offset) {
  // Not used on ppc.
  ShouldNotReachHere();
  return Address();
}

// Patches the current bytecode (ptr to it located in bcp)
// in the bytecode stream with a new one.
void TemplateTable::patch_bytecode(Bytecodes::Code new_bc, Register Rnew_bc, Register Rtemp, bool load_bc_into_bc_reg /*=true*/, int byte_no) {
  // With sharing on, may need to test method flag.
  if (!RewriteBytecodes) return;
  Label L_patch_done;

  switch (new_bc) {
    case Bytecodes::_fast_aputfield:
    case Bytecodes::_fast_bputfield:
    case Bytecodes::_fast_cputfield:
    case Bytecodes::_fast_dputfield:
    case Bytecodes::_fast_fputfield:
    case Bytecodes::_fast_iputfield:
    case Bytecodes::_fast_lputfield:
    case Bytecodes::_fast_sputfield:
    {
      // We skip bytecode quickening for putfield instructions when
      // the put_code written to the constant pool cache is zero.
      // This is required so that every execution of this instruction
      // calls out to InterpreterRuntime::resolve_get_put to do
      // additional, required work.
      assert(byte_no == f1_byte || byte_no == f2_byte, "byte_no out of range");
      assert(load_bc_into_bc_reg, "we use bc_reg as temp");
      __ get_cache_and_index_at_bcp(Rtemp /* dst = cache */, 1);
      // ((*(cache+indices))>>((1+byte_no)*8))&0xFF:
#if defined(VM_LITTLE_ENDIAN)
      __ lbz(Rnew_bc, in_bytes(ConstantPoolCache::base_offset() + ConstantPoolCacheEntry::indices_offset()) + 1 + byte_no, Rtemp);
#else
      __ lbz(Rnew_bc, in_bytes(ConstantPoolCache::base_offset() + ConstantPoolCacheEntry::indices_offset()) + 7 - (1 + byte_no), Rtemp);
#endif
      __ cmpwi(CCR0, Rnew_bc, 0);
      __ li(Rnew_bc, (unsigned int)(unsigned char)new_bc);
      __ beq(CCR0, L_patch_done);
      // __ isync(); // acquire not needed
      break;
    }

    default:
      assert(byte_no == -1, "sanity");
      if (load_bc_into_bc_reg) {
        __ li(Rnew_bc, (unsigned int)(unsigned char)new_bc);
      }
  }

  if (JvmtiExport::can_post_breakpoint()) {
    Label L_fast_patch;
    __ lbz(Rtemp, 0, R14_bcp);
    __ cmpwi(CCR0, Rtemp, (unsigned int)(unsigned char)Bytecodes::_breakpoint);
    __ bne(CCR0, L_fast_patch);
    // Perform the quickening, slowly, in the bowels of the breakpoint table.
    __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::set_original_bytecode_at), R19_method, R14_bcp, Rnew_bc);
    __ b(L_patch_done);
    __ bind(L_fast_patch);
  }

  // Patch bytecode.
  __ stb(Rnew_bc, 0, R14_bcp);

  __ bind(L_patch_done);
}

// ============================================================================
// Individual instructions

void TemplateTable::nop() {
  transition(vtos, vtos);
  // Nothing to do.
}

void TemplateTable::shouldnotreachhere() {
  transition(vtos, vtos);
  __ stop("shouldnotreachhere bytecode");
}

void TemplateTable::aconst_null() {
  transition(vtos, atos);
  __ li(R17_tos, 0);
}

void TemplateTable::iconst(int value) {
  transition(vtos, itos);
  assert(value >= -1 && value <= 5, "");
  __ li(R17_tos, value);
}

void TemplateTable::lconst(int value) {
  transition(vtos, ltos);
  assert(value >= -1 && value <= 5, "");
  __ li(R17_tos, value);
}

void TemplateTable::fconst(int value) {
  transition(vtos, ftos);
  static float zero = 0.0;
  static float one  = 1.0;
  static float two  = 2.0;
  switch (value) {
    default: ShouldNotReachHere();
    case 0: {
      int simm16_offset = __ load_const_optimized(R11_scratch1, (address*)&zero, R0, true);
      __ lfs(F15_ftos, simm16_offset, R11_scratch1);
      break;
    }
    case 1: {
      int simm16_offset = __ load_const_optimized(R11_scratch1, (address*)&one, R0, true);
      __ lfs(F15_ftos, simm16_offset, R11_scratch1);
      break;
    }
    case 2: {
      int simm16_offset = __ load_const_optimized(R11_scratch1, (address*)&two, R0, true);
      __ lfs(F15_ftos, simm16_offset, R11_scratch1);
      break;
    }
  }
}

void TemplateTable::dconst(int value) {
  transition(vtos, dtos);
  static double zero = 0.0;
  static double one  = 1.0;
  switch (value) {
    case 0: {
      int simm16_offset = __ load_const_optimized(R11_scratch1, (address*)&zero, R0, true);
      __ lfd(F15_ftos, simm16_offset, R11_scratch1);
      break;
    }
    case 1: {
      int simm16_offset = __ load_const_optimized(R11_scratch1, (address*)&one, R0, true);
      __ lfd(F15_ftos, simm16_offset, R11_scratch1);
      break;
    }
    default: ShouldNotReachHere();
  }
}

void TemplateTable::bipush() {
  transition(vtos, itos);
  __ lbz(R17_tos, 1, R14_bcp);
  __ extsb(R17_tos, R17_tos);
}

void TemplateTable::sipush() {
  transition(vtos, itos);
  __ get_2_byte_integer_at_bcp(1, R17_tos, InterpreterMacroAssembler::Signed);
}

void TemplateTable::ldc(bool wide) {
  Register Rscratch1 = R11_scratch1,
           Rscratch2 = R12_scratch2,
           Rcpool    = R3_ARG1;

  transition(vtos, vtos);
  Label notInt, notClass, exit;

  __ get_cpool_and_tags(Rcpool, Rscratch2); // Set Rscratch2 = &tags.
  if (wide) { // Read index.
    __ get_2_byte_integer_at_bcp(1, Rscratch1, InterpreterMacroAssembler::Unsigned);
  } else {
    __ lbz(Rscratch1, 1, R14_bcp);
  }

  const int base_offset = ConstantPool::header_size() * wordSize;
  const int tags_offset = Array<u1>::base_offset_in_bytes();

  // Get type from tags.
  __ addi(Rscratch2, Rscratch2, tags_offset);
  __ lbzx(Rscratch2, Rscratch2, Rscratch1);

  __ cmpwi(CCR0, Rscratch2, JVM_CONSTANT_UnresolvedClass); // Unresolved class?
  __ cmpwi(CCR1, Rscratch2, JVM_CONSTANT_UnresolvedClassInError); // Unresolved class in error state?
  __ cror(CCR0, Assembler::equal, CCR1, Assembler::equal);

  // Resolved class - need to call vm to get java mirror of the class.
  __ cmpwi(CCR1, Rscratch2, JVM_CONSTANT_Class);
  __ crnor(CCR0, Assembler::equal, CCR1, Assembler::equal); // Neither resolved class nor unresolved case from above?
  __ beq(CCR0, notClass);

  __ li(R4, wide ? 1 : 0);
  __ call_VM(R17_tos, CAST_FROM_FN_PTR(address, InterpreterRuntime::ldc), R4);
  __ push(atos);
  __ b(exit);

  __ align(32, 12);
  __ bind(notClass);
  __ addi(Rcpool, Rcpool, base_offset);
  __ sldi(Rscratch1, Rscratch1, LogBytesPerWord);
  __ cmpdi(CCR0, Rscratch2, JVM_CONSTANT_Integer);
  __ bne(CCR0, notInt);
  __ lwax(R17_tos, Rcpool, Rscratch1);
  __ push(itos);
  __ b(exit);

  __ align(32, 12);
  __ bind(notInt);
#ifdef ASSERT
  // String and Object are rewritten to fast_aldc
  __ cmpdi(CCR0, Rscratch2, JVM_CONSTANT_Float);
  __ asm_assert_eq("unexpected type", 0x8765);
#endif
  __ lfsx(F15_ftos, Rcpool, Rscratch1);
  __ push(ftos);

  __ align(32, 12);
  __ bind(exit);
}

// Fast path for caching oop constants.
void TemplateTable::fast_aldc(bool wide) {
  transition(vtos, atos);

  int index_size = wide ? sizeof(u2) : sizeof(u1);
  const Register Rscratch = R11_scratch1;
  Label is_null;

  // We are resolved if the resolved reference cache entry contains a
  // non-null object (CallSite, etc.)
  __ get_cache_index_at_bcp(Rscratch, 1, index_size); // Load index.
  __ load_resolved_reference_at_index(R17_tos, Rscratch, &is_null);
  __ verify_oop(R17_tos);
  __ dispatch_epilog(atos, Bytecodes::length_for(bytecode()));

  __ bind(is_null);
  __ load_const_optimized(R3_ARG1, (int)bytecode());

  address entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_ldc);

  // First time invocation - must resolve first.
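  // call_VM places the resolved oop into the result register R17_tos.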
  __ call_VM(R17_tos, entry, R3_ARG1);
  __ verify_oop(R17_tos);
}

void TemplateTable::ldc2_w() {
  transition(vtos, vtos);
  Label Llong, Lexit;

  Register Rindex = R11_scratch1,
           Rcpool = R12_scratch2,
           Rtag   = R3_ARG1;
  __ get_cpool_and_tags(Rcpool, Rtag);
  __ get_2_byte_integer_at_bcp(1, Rindex, InterpreterMacroAssembler::Unsigned);

  const int base_offset = ConstantPool::header_size() * wordSize;
  const int tags_offset = Array<u1>::base_offset_in_bytes();
  // Get type from tags.
  __ addi(Rcpool, Rcpool, base_offset);
  __ addi(Rtag, Rtag, tags_offset);

  __ lbzx(Rtag, Rtag, Rindex);

  __ sldi(Rindex, Rindex, LogBytesPerWord);
  __ cmpdi(CCR0, Rtag, JVM_CONSTANT_Double);
  __ bne(CCR0, Llong);
  // A double can be placed at word-aligned locations in the constant pool.
  // Check out Conversions.java for an example.
  // Also ConstantPool::header_size() is 20, which makes it very difficult
  // to double-align double on the constant pool. SG, 11/7/97
  __ lfdx(F15_ftos, Rcpool, Rindex);
  __ push(dtos);
  __ b(Lexit);

  __ bind(Llong);
  __ ldx(R17_tos, Rcpool, Rindex);
  __ push(ltos);

  __ bind(Lexit);
}

// Get the locals index located in the bytecode stream at bcp + offset.
void TemplateTable::locals_index(Register Rdst, int offset) {
  __ lbz(Rdst, offset, R14_bcp);
}

void TemplateTable::iload() {
  iload_internal();
}

void TemplateTable::nofast_iload() {
  iload_internal(may_not_rewrite);
}

void TemplateTable::iload_internal(RewriteControl rc) {
  transition(vtos, itos);

  // Get the local value into tos
  const Register Rindex = R22_tmp2;
  locals_index(Rindex);

  // Rewrite iload,iload pair into fast_iload2,
  //         iload,caload pair into fast_icaload.
  if (RewriteFrequentPairs && rc == may_rewrite) {
    Label Lrewrite, Ldone;
    Register Rnext_byte  = R3_ARG1,
             Rrewrite_to = R6_ARG4,
             Rscratch    = R11_scratch1;

    // get next byte
    __ lbz(Rnext_byte, Bytecodes::length_for(Bytecodes::_iload), R14_bcp);

    // if _iload, wait to rewrite to iload2. We only want to rewrite the
    // last two iloads in a pair. Comparing against fast_iload means that
    // the next bytecode is neither an iload nor a caload, and therefore
    // an iload pair.
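    // The checks below implement the following rewrite decision:
    //   next == _iload      -> leave this iload alone for now (only the
    //                          last two iloads of a run form a pair),
    //   next == _fast_iload -> rewrite this iload to _fast_iload2,
    //   next == _caload     -> rewrite this iload to _fast_icaload,
    //   anything else       -> rewrite this iload to _fast_iload.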
    __ cmpwi(CCR0, Rnext_byte, (unsigned int)(unsigned char)Bytecodes::_iload);
    __ beq(CCR0, Ldone);

    __ cmpwi(CCR1, Rnext_byte, (unsigned int)(unsigned char)Bytecodes::_fast_iload);
    __ li(Rrewrite_to, (unsigned int)(unsigned char)Bytecodes::_fast_iload2);
    __ beq(CCR1, Lrewrite);

    __ cmpwi(CCR0, Rnext_byte, (unsigned int)(unsigned char)Bytecodes::_caload);
    __ li(Rrewrite_to, (unsigned int)(unsigned char)Bytecodes::_fast_icaload);
    __ beq(CCR0, Lrewrite);

    __ li(Rrewrite_to, (unsigned int)(unsigned char)Bytecodes::_fast_iload);

    __ bind(Lrewrite);
    patch_bytecode(Bytecodes::_iload, Rrewrite_to, Rscratch, false);
    __ bind(Ldone);
  }

  __ load_local_int(R17_tos, Rindex, Rindex);
}

// Load 2 integers in a row without dispatching
void TemplateTable::fast_iload2() {
  transition(vtos, itos);

  __ lbz(R3_ARG1, 1, R14_bcp);
  __ lbz(R17_tos, Bytecodes::length_for(Bytecodes::_iload) + 1, R14_bcp);

  __ load_local_int(R3_ARG1, R11_scratch1, R3_ARG1);
  __ load_local_int(R17_tos, R12_scratch2, R17_tos);
  __ push_i(R3_ARG1);
}

void TemplateTable::fast_iload() {
  transition(vtos, itos);
  // Get the local value into tos

  const Register Rindex = R11_scratch1;
  locals_index(Rindex);
  __ load_local_int(R17_tos, Rindex, Rindex);
}

// Load a local variable type long from locals area to TOS cache register.
// Local index resides in the bytecode stream.
void TemplateTable::lload() {
  transition(vtos, ltos);

  const Register Rindex = R11_scratch1;
  locals_index(Rindex);
  __ load_local_long(R17_tos, Rindex, Rindex);
}

void TemplateTable::fload() {
  transition(vtos, ftos);

  const Register Rindex = R11_scratch1;
  locals_index(Rindex);
  __ load_local_float(F15_ftos, Rindex, Rindex);
}

void TemplateTable::dload() {
  transition(vtos, dtos);

  const Register Rindex = R11_scratch1;
  locals_index(Rindex);
  __ load_local_double(F15_ftos, Rindex, Rindex);
}

void TemplateTable::aload() {
  transition(vtos, atos);

  const Register Rindex = R11_scratch1;
  locals_index(Rindex);
  __ load_local_ptr(R17_tos, Rindex, Rindex);
}

void TemplateTable::locals_index_wide(Register Rdst) {
  // Offset is 2, not 1, because R14_bcp points to the wide prefix code.
  __ get_2_byte_integer_at_bcp(2, Rdst, InterpreterMacroAssembler::Unsigned);
}

void TemplateTable::wide_iload() {
  // Get the local value into tos.

  const Register Rindex = R11_scratch1;
  locals_index_wide(Rindex);
  __ load_local_int(R17_tos, Rindex, Rindex);
}

void TemplateTable::wide_lload() {
  transition(vtos, ltos);

  const Register Rindex = R11_scratch1;
  locals_index_wide(Rindex);
  __ load_local_long(R17_tos, Rindex, Rindex);
}

void TemplateTable::wide_fload() {
  transition(vtos, ftos);

  const Register Rindex = R11_scratch1;
  locals_index_wide(Rindex);
  __ load_local_float(F15_ftos, Rindex, Rindex);
}

void TemplateTable::wide_dload() {
  transition(vtos, dtos);

  const Register Rindex = R11_scratch1;
  locals_index_wide(Rindex);
  __ load_local_double(F15_ftos, Rindex, Rindex);
}

void TemplateTable::wide_aload() {
  transition(vtos, atos);

  const Register Rindex = R11_scratch1;
  locals_index_wide(Rindex);
  __ load_local_ptr(R17_tos, Rindex, Rindex);
}

void TemplateTable::iaload() {
  transition(itos, itos);

  const Register Rload_addr = R3_ARG1,
                 Rarray     = R4_ARG2,
                 Rtemp      = R5_ARG3;
  __ index_check(Rarray, R17_tos /* index */, LogBytesPerInt, Rtemp, Rload_addr);
  __ lwa(R17_tos, arrayOopDesc::base_offset_in_bytes(T_INT), Rload_addr);
}

void TemplateTable::laload() {
  transition(itos, ltos);

  const Register Rload_addr = R3_ARG1,
                 Rarray     = R4_ARG2,
                 Rtemp      = R5_ARG3;
  __ index_check(Rarray, R17_tos /* index */, LogBytesPerLong, Rtemp, Rload_addr);
  __ ld(R17_tos, arrayOopDesc::base_offset_in_bytes(T_LONG), Rload_addr);
}

void TemplateTable::faload() {
  transition(itos, ftos);

  const Register Rload_addr = R3_ARG1,
                 Rarray     = R4_ARG2,
                 Rtemp      = R5_ARG3;
  __ index_check(Rarray, R17_tos /* index */, LogBytesPerInt, Rtemp, Rload_addr);
  __ lfs(F15_ftos, arrayOopDesc::base_offset_in_bytes(T_FLOAT), Rload_addr);
}

void TemplateTable::daload() {
  transition(itos, dtos);

  const Register Rload_addr = R3_ARG1,
                 Rarray     = R4_ARG2,
                 Rtemp      = R5_ARG3;
  __ index_check(Rarray, R17_tos /* index */, LogBytesPerLong, Rtemp, Rload_addr);
  __ lfd(F15_ftos, arrayOopDesc::base_offset_in_bytes(T_DOUBLE), Rload_addr);
}

void TemplateTable::aaload() {
  transition(itos, atos);

  // tos: index
  // result tos: array element
  const Register Rload_addr = R3_ARG1,
                 Rarray     = R4_ARG2,
                 Rtemp      = R5_ARG3;
  __ index_check(Rarray, R17_tos /* index */, UseCompressedOops ? 2 : LogBytesPerWord, Rtemp, Rload_addr);
  __ load_heap_oop(R17_tos, arrayOopDesc::base_offset_in_bytes(T_OBJECT), Rload_addr);
  __ verify_oop(R17_tos);
  //__ dcbt(R17_tos); // prefetch
}

void TemplateTable::baload() {
  transition(itos, itos);

  const Register Rload_addr = R3_ARG1,
                 Rarray     = R4_ARG2,
                 Rtemp      = R5_ARG3;
  __ index_check(Rarray, R17_tos /* index */, 0, Rtemp, Rload_addr);
  __ lbz(R17_tos, arrayOopDesc::base_offset_in_bytes(T_BYTE), Rload_addr);
  __ extsb(R17_tos, R17_tos);
}

void TemplateTable::caload() {
  transition(itos, itos);

  const Register Rload_addr = R3_ARG1,
                 Rarray     = R4_ARG2,
                 Rtemp      = R5_ARG3;
  __ index_check(Rarray, R17_tos /* index */, LogBytesPerShort, Rtemp, Rload_addr);
  __ lhz(R17_tos, arrayOopDesc::base_offset_in_bytes(T_CHAR), Rload_addr);
}

// Iload followed by caload frequent pair.
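// The rewritten _fast_icaload sits where the iload was, so the local's
// index is still the byte at bcp + 1; the loaded int then serves as the
// array index for the caload part.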
void TemplateTable::fast_icaload() {
  transition(vtos, itos);

  const Register Rload_addr = R3_ARG1,
                 Rarray     = R4_ARG2,
                 Rtemp      = R11_scratch1;

  locals_index(R17_tos);
  __ load_local_int(R17_tos, Rtemp, R17_tos);
  __ index_check(Rarray, R17_tos /* index */, LogBytesPerShort, Rtemp, Rload_addr);
  __ lhz(R17_tos, arrayOopDesc::base_offset_in_bytes(T_CHAR), Rload_addr);
}

void TemplateTable::saload() {
  transition(itos, itos);

  const Register Rload_addr = R11_scratch1,
                 Rarray     = R12_scratch2,
                 Rtemp      = R3_ARG1;
  __ index_check(Rarray, R17_tos /* index */, LogBytesPerShort, Rtemp, Rload_addr);
  __ lha(R17_tos, arrayOopDesc::base_offset_in_bytes(T_SHORT), Rload_addr);
}

void TemplateTable::iload(int n) {
  transition(vtos, itos);

  __ lwz(R17_tos, Interpreter::local_offset_in_bytes(n), R18_locals);
}

void TemplateTable::lload(int n) {
  transition(vtos, ltos);

  __ ld(R17_tos, Interpreter::local_offset_in_bytes(n + 1), R18_locals);
}

void TemplateTable::fload(int n) {
  transition(vtos, ftos);

  __ lfs(F15_ftos, Interpreter::local_offset_in_bytes(n), R18_locals);
}

void TemplateTable::dload(int n) {
  transition(vtos, dtos);

  __ lfd(F15_ftos, Interpreter::local_offset_in_bytes(n + 1), R18_locals);
}

void TemplateTable::aload(int n) {
  transition(vtos, atos);

  __ ld(R17_tos, Interpreter::local_offset_in_bytes(n), R18_locals);
}

void TemplateTable::aload_0() {
  aload_0_internal();
}

void TemplateTable::nofast_aload_0() {
  aload_0_internal(may_not_rewrite);
}

void TemplateTable::aload_0_internal(RewriteControl rc) {
  transition(vtos, atos);
  // According to bytecode histograms, the pairs:
  //
  // _aload_0, _fast_igetfield
  // _aload_0, _fast_agetfield
  // _aload_0, _fast_fgetfield
  //
  // occur frequently. If RewriteFrequentPairs is set, the (slow)
  // _aload_0 bytecode checks if the next bytecode is either
  // _fast_igetfield, _fast_agetfield or _fast_fgetfield and then
  // rewrites the current bytecode into a pair bytecode; otherwise it
  // rewrites the current bytecode into _fast_aload_0 that doesn't do
  // the pair check anymore.
  //
  // Note: If the next bytecode is _getfield, the rewrite must be
  // delayed, otherwise we may miss an opportunity for a pair.
  //
  // Also rewrite frequent pairs
  //   aload_0, aload_1
  //   aload_0, iload_1
  // These bytecodes with a small amount of code are most profitable
  // to rewrite.

  if (RewriteFrequentPairs && rc == may_rewrite) {

    Label Lrewrite, Ldont_rewrite;
    Register Rnext_byte  = R3_ARG1,
             Rrewrite_to = R6_ARG4,
             Rscratch    = R11_scratch1;

    // Get next byte.
    __ lbz(Rnext_byte, Bytecodes::length_for(Bytecodes::_aload_0), R14_bcp);

    // If _getfield, wait to rewrite. We only want to rewrite the last two bytecodes in a pair.
    __ cmpwi(CCR0, Rnext_byte, (unsigned int)(unsigned char)Bytecodes::_getfield);
    __ beq(CCR0, Ldont_rewrite);

    __ cmpwi(CCR1, Rnext_byte, (unsigned int)(unsigned char)Bytecodes::_fast_igetfield);
    __ li(Rrewrite_to, (unsigned int)(unsigned char)Bytecodes::_fast_iaccess_0);
    __ beq(CCR1, Lrewrite);

    __ cmpwi(CCR0, Rnext_byte, (unsigned int)(unsigned char)Bytecodes::_fast_agetfield);
    __ li(Rrewrite_to, (unsigned int)(unsigned char)Bytecodes::_fast_aaccess_0);
    __ beq(CCR0, Lrewrite);

    __ cmpwi(CCR1, Rnext_byte, (unsigned int)(unsigned char)Bytecodes::_fast_fgetfield);
    __ li(Rrewrite_to, (unsigned int)(unsigned char)Bytecodes::_fast_faccess_0);
    __ beq(CCR1, Lrewrite);

    __ li(Rrewrite_to, (unsigned int)(unsigned char)Bytecodes::_fast_aload_0);

    __ bind(Lrewrite);
    patch_bytecode(Bytecodes::_aload_0, Rrewrite_to, Rscratch, false);
    __ bind(Ldont_rewrite);
  }

  // Do actual aload_0 (must do this after patch_bytecode which might call VM and GC might change oop).
  aload(0);
}

void TemplateTable::istore() {
  transition(itos, vtos);

  const Register Rindex = R11_scratch1;
  locals_index(Rindex);
  __ store_local_int(R17_tos, Rindex);
}

void TemplateTable::lstore() {
  transition(ltos, vtos);
  const Register Rindex = R11_scratch1;
  locals_index(Rindex);
  __ store_local_long(R17_tos, Rindex);
}

void TemplateTable::fstore() {
  transition(ftos, vtos);

  const Register Rindex = R11_scratch1;
  locals_index(Rindex);
  __ store_local_float(F15_ftos, Rindex);
}

void TemplateTable::dstore() {
  transition(dtos, vtos);

  const Register Rindex = R11_scratch1;
  locals_index(Rindex);
  __ store_local_double(F15_ftos, Rindex);
}

void TemplateTable::astore() {
  transition(vtos, vtos);

  const Register Rindex = R11_scratch1;
  __ pop_ptr();
  __ verify_oop_or_return_address(R17_tos, Rindex);
  locals_index(Rindex);
  __ store_local_ptr(R17_tos, Rindex);
}

void TemplateTable::wide_istore() {
  transition(vtos, vtos);

  const Register Rindex = R11_scratch1;
  __ pop_i();
  locals_index_wide(Rindex);
  __ store_local_int(R17_tos, Rindex);
}

void TemplateTable::wide_lstore() {
  transition(vtos, vtos);

  const Register Rindex = R11_scratch1;
  __ pop_l();
  locals_index_wide(Rindex);
  __ store_local_long(R17_tos, Rindex);
}

void TemplateTable::wide_fstore() {
  transition(vtos, vtos);

  const Register Rindex = R11_scratch1;
  __ pop_f();
  locals_index_wide(Rindex);
  __ store_local_float(F15_ftos, Rindex);
}

void TemplateTable::wide_dstore() {
  transition(vtos, vtos);

  const Register Rindex = R11_scratch1;
  __ pop_d();
  locals_index_wide(Rindex);
  __ store_local_double(F15_ftos, Rindex);
}

void TemplateTable::wide_astore() {
  transition(vtos, vtos);

  const Register Rindex = R11_scratch1;
  __ pop_ptr();
  __ verify_oop_or_return_address(R17_tos, Rindex);
  locals_index_wide(Rindex);
  __ store_local_ptr(R17_tos, Rindex);
}

void TemplateTable::iastore() {
  transition(itos, vtos);

  const Register Rindex      = R3_ARG1,
                 Rstore_addr = R4_ARG2,
                 Rarray      = R5_ARG3,
                 Rtemp       = R6_ARG4;
  __ pop_i(Rindex);
  __ index_check(Rarray, Rindex, LogBytesPerInt, Rtemp, Rstore_addr);
  __ stw(R17_tos, arrayOopDesc::base_offset_in_bytes(T_INT), Rstore_addr);
}
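
// The primitive array store templates all follow the same pattern:
// pop the index, let index_check pop the array and perform the range
// check, then store the tos value at the computed element address.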

void TemplateTable::lastore() {
  transition(ltos, vtos);

  const Register Rindex      = R3_ARG1,
                 Rstore_addr = R4_ARG2,
                 Rarray      = R5_ARG3,
                 Rtemp       = R6_ARG4;
  __ pop_i(Rindex);
  __ index_check(Rarray, Rindex, LogBytesPerLong, Rtemp, Rstore_addr);
  __ std(R17_tos, arrayOopDesc::base_offset_in_bytes(T_LONG), Rstore_addr);
}

void TemplateTable::fastore() {
  transition(ftos, vtos);

  const Register Rindex      = R3_ARG1,
                 Rstore_addr = R4_ARG2,
                 Rarray      = R5_ARG3,
                 Rtemp       = R6_ARG4;
  __ pop_i(Rindex);
  __ index_check(Rarray, Rindex, LogBytesPerInt, Rtemp, Rstore_addr);
  __ stfs(F15_ftos, arrayOopDesc::base_offset_in_bytes(T_FLOAT), Rstore_addr);
}

void TemplateTable::dastore() {
  transition(dtos, vtos);

  const Register Rindex      = R3_ARG1,
                 Rstore_addr = R4_ARG2,
                 Rarray      = R5_ARG3,
                 Rtemp       = R6_ARG4;
  __ pop_i(Rindex);
  __ index_check(Rarray, Rindex, LogBytesPerLong, Rtemp, Rstore_addr);
  __ stfd(F15_ftos, arrayOopDesc::base_offset_in_bytes(T_DOUBLE), Rstore_addr);
}

// Pop 3 values from the stack (value, index, array) and store the value
// into the object array, performing the array store check.
void TemplateTable::aastore() {
  transition(vtos, vtos);

  Label Lstore_ok, Lis_null, Ldone;
  const Register Rindex    = R3_ARG1,
                 Rarray    = R4_ARG2,
                 Rscratch  = R11_scratch1,
                 Rscratch2 = R12_scratch2,
                 Rarray_klass = R5_ARG3,
                 Rarray_element_klass = Rarray_klass,
                 Rvalue_klass = R6_ARG4,
                 Rstore_addr = R31;  // Use register which survives VM call.

  __ ld(R17_tos, Interpreter::expr_offset_in_bytes(0), R15_esp); // Get value to store.
  __ lwz(Rindex, Interpreter::expr_offset_in_bytes(1), R15_esp); // Get index.
  __ ld(Rarray, Interpreter::expr_offset_in_bytes(2), R15_esp);  // Get array.

  __ verify_oop(R17_tos);
  __ index_check_without_pop(Rarray, Rindex, UseCompressedOops ? 2 : LogBytesPerWord, Rscratch, Rstore_addr);
  // Rindex is dead!
  Register Rscratch3 = Rindex;

  // Do array store check - check for NULL value first.
  __ cmpdi(CCR0, R17_tos, 0);
  __ beq(CCR0, Lis_null);

  __ load_klass(Rarray_klass, Rarray);
  __ load_klass(Rvalue_klass, R17_tos);

  // Do fast instanceof cache test.
  __ ld(Rarray_element_klass, in_bytes(ObjArrayKlass::element_klass_offset()), Rarray_klass);

  // Generate a fast subtype check. Branch to store_ok if no failure. Throw if failure.
  __ gen_subtype_check(Rvalue_klass /*subklass*/, Rarray_element_klass /*superklass*/, Rscratch, Rscratch2, Rscratch3, Lstore_ok);

  // Fell through: subtype check failed => throw an exception.
  __ load_dispatch_table(R11_scratch1, (address*)Interpreter::_throw_ArrayStoreException_entry);
  __ mtctr(R11_scratch1);
  __ bctr();

  __ bind(Lis_null);
  do_oop_store(_masm, Rstore_addr, arrayOopDesc::base_offset_in_bytes(T_OBJECT), noreg /* 0 */,
               Rscratch, Rscratch2, Rscratch3, _bs->kind(), true /* precise */, false /* check_null */);
  __ profile_null_seen(Rscratch, Rscratch2);
  __ b(Ldone);

  // Store is OK.
  __ bind(Lstore_ok);
  do_oop_store(_masm, Rstore_addr, arrayOopDesc::base_offset_in_bytes(T_OBJECT), R17_tos /* value */,
               Rscratch, Rscratch2, Rscratch3, _bs->kind(), true /* precise */, false /* check_null */);

  __ bind(Ldone);
  // Adjust sp (pops array, index and value).
  __ addi(R15_esp, R15_esp, 3 * Interpreter::stackElementSize);
}

void TemplateTable::bastore() {
  transition(itos, vtos);

  const Register Rindex   = R11_scratch1,
                 Rarray   = R12_scratch2,
                 Rscratch = R3_ARG1;
  __ pop_i(Rindex);
  // tos: val
  // Rarray: array ptr (popped by index_check)
  __ index_check(Rarray, Rindex, 0, Rscratch, Rarray);
  __ stb(R17_tos, arrayOopDesc::base_offset_in_bytes(T_BYTE), Rarray);
}

void TemplateTable::castore() {
  transition(itos, vtos);

  const Register Rindex   = R11_scratch1,
                 Rarray   = R12_scratch2,
                 Rscratch = R3_ARG1;
  __ pop_i(Rindex);
  // tos: val
  // Rarray: array ptr (popped by index_check)
  __ index_check(Rarray, Rindex, LogBytesPerShort, Rscratch, Rarray);
  __ sth(R17_tos, arrayOopDesc::base_offset_in_bytes(T_CHAR), Rarray);
}

void TemplateTable::sastore() {
  castore();
}

void TemplateTable::istore(int n) {
  transition(itos, vtos);
  __ stw(R17_tos, Interpreter::local_offset_in_bytes(n), R18_locals);
}

void TemplateTable::lstore(int n) {
  transition(ltos, vtos);
  __ std(R17_tos, Interpreter::local_offset_in_bytes(n + 1), R18_locals);
}

void TemplateTable::fstore(int n) {
  transition(ftos, vtos);
  __ stfs(F15_ftos, Interpreter::local_offset_in_bytes(n), R18_locals);
}

void TemplateTable::dstore(int n) {
  transition(dtos, vtos);
  __ stfd(F15_ftos, Interpreter::local_offset_in_bytes(n + 1), R18_locals);
}

void TemplateTable::astore(int n) {
  transition(vtos, vtos);

  __ pop_ptr();
  __ verify_oop_or_return_address(R17_tos, R11_scratch1);
  __ std(R17_tos, Interpreter::local_offset_in_bytes(n), R18_locals);
}

void TemplateTable::pop() {
  transition(vtos, vtos);

  __ addi(R15_esp, R15_esp, Interpreter::stackElementSize);
}

void TemplateTable::pop2() {
  transition(vtos, vtos);

  __ addi(R15_esp, R15_esp, Interpreter::stackElementSize * 2);
}

void TemplateTable::dup() {
  transition(vtos, vtos);

  __ ld(R11_scratch1, Interpreter::stackElementSize, R15_esp);
  __ push_ptr(R11_scratch1);
}

void TemplateTable::dup_x1() {
  transition(vtos, vtos);

  Register Ra = R11_scratch1,
           Rb = R12_scratch2;
  // stack: ..., a, b
  __ ld(Rb, Interpreter::stackElementSize, R15_esp);
  __ ld(Ra, Interpreter::stackElementSize * 2, R15_esp);
  __ std(Rb, Interpreter::stackElementSize * 2, R15_esp);
  __ std(Ra, Interpreter::stackElementSize, R15_esp);
  __ push_ptr(Rb);
  // stack: ..., b, a, b
}

void TemplateTable::dup_x2() {
  transition(vtos, vtos);

  Register Ra = R11_scratch1,
           Rb = R12_scratch2,
           Rc = R3_ARG1;

  // stack: ..., a, b, c
  __ ld(Rc, Interpreter::stackElementSize, R15_esp);      // load c
  __ ld(Ra, Interpreter::stackElementSize * 3, R15_esp);  // load a
  __ std(Rc, Interpreter::stackElementSize * 3, R15_esp); // store c in a
  __ ld(Rb, Interpreter::stackElementSize * 2, R15_esp);  // load b
  // stack: ..., c, b, c
  __ std(Ra, Interpreter::stackElementSize * 2, R15_esp); // store a in b
  // stack: ..., c, a, c
  __ std(Rb, Interpreter::stackElementSize, R15_esp);     // store b in c
  __ push_ptr(Rc);                                        // push c
  // stack: ..., c, a, b, c
}

void TemplateTable::dup2() {
  transition(vtos, vtos);

  Register Ra = R11_scratch1,
           Rb = R12_scratch2;
  // stack: ..., a, b
  __ ld(Rb, Interpreter::stackElementSize, R15_esp);
  __ ld(Ra, Interpreter::stackElementSize * 2, R15_esp);
  __ push_2ptrs(Ra, Rb);
  // stack: ..., a, b, a, b
}

void TemplateTable::dup2_x1() {
  transition(vtos, vtos);

  Register Ra = R11_scratch1,
           Rb = R12_scratch2,
           Rc = R3_ARG1;
  // stack: ..., a, b, c
  __ ld(Rc, Interpreter::stackElementSize, R15_esp);
  __ ld(Rb, Interpreter::stackElementSize * 2, R15_esp);
  __ std(Rc, Interpreter::stackElementSize * 2, R15_esp);
  __ ld(Ra, Interpreter::stackElementSize * 3, R15_esp);
  __ std(Ra, Interpreter::stackElementSize, R15_esp);
  __ std(Rb, Interpreter::stackElementSize * 3, R15_esp);
  // stack: ..., b, c, a
  __ push_2ptrs(Rb, Rc);
  // stack: ..., b, c, a, b, c
}

void TemplateTable::dup2_x2() {
  transition(vtos, vtos);

  Register Ra = R11_scratch1,
           Rb = R12_scratch2,
           Rc = R3_ARG1,
           Rd = R4_ARG2;
  // stack: ..., a, b, c, d
  __ ld(Rb, Interpreter::stackElementSize * 3, R15_esp);
  __ ld(Rd, Interpreter::stackElementSize, R15_esp);
  __ std(Rb, Interpreter::stackElementSize, R15_esp);     // store b in d
  __ std(Rd, Interpreter::stackElementSize * 3, R15_esp); // store d in b
  __ ld(Ra, Interpreter::stackElementSize * 4, R15_esp);
  __ ld(Rc, Interpreter::stackElementSize * 2, R15_esp);
  __ std(Ra, Interpreter::stackElementSize * 2, R15_esp); // store a in c
  __ std(Rc, Interpreter::stackElementSize * 4, R15_esp); // store c in a
  // stack: ..., c, d, a, b
  __ push_2ptrs(Rc, Rd);
  // stack: ..., c, d, a, b, c, d
}

void TemplateTable::swap() {
  transition(vtos, vtos);
  // stack: ..., a, b

  Register Ra = R11_scratch1,
           Rb = R12_scratch2;
  // stack: ..., a, b
  __ ld(Rb, Interpreter::stackElementSize, R15_esp);
  __ ld(Ra, Interpreter::stackElementSize * 2, R15_esp);
  __ std(Rb, Interpreter::stackElementSize * 2, R15_esp);
  __ std(Ra, Interpreter::stackElementSize, R15_esp);
  // stack: ..., b, a
}

void TemplateTable::iop2(Operation op) {
  transition(itos, itos);

  Register Rscratch = R11_scratch1;

  __ pop_i(Rscratch);
  // tos      = number of bits to shift
  // Rscratch = value to shift
  switch (op) {
    case  add:  __ add(R17_tos, Rscratch, R17_tos); break;
    case  sub:  __ sub(R17_tos, Rscratch, R17_tos); break;
    case  mul:  __ mullw(R17_tos, Rscratch, R17_tos); break;
    case  _and: __ andr(R17_tos, Rscratch, R17_tos); break;
    case  _or:  __ orr(R17_tos, Rscratch, R17_tos); break;
    case  _xor: __ xorr(R17_tos, Rscratch, R17_tos); break;
    case  shl:  __ rldicl(R17_tos, R17_tos, 0, 64-5); __ slw(R17_tos, Rscratch, R17_tos); break;
    case  shr:  __ rldicl(R17_tos, R17_tos, 0, 64-5); __ sraw(R17_tos, Rscratch, R17_tos); break;
    case  ushr: __ rldicl(R17_tos, R17_tos, 0, 64-5); __ srw(R17_tos, Rscratch, R17_tos); break;
    default: ShouldNotReachHere();
  }
}

void TemplateTable::lop2(Operation op) {
  transition(ltos, ltos);

  Register Rscratch = R11_scratch1;
  __ pop_l(Rscratch);
  switch (op) {
    case  add:  __ add(R17_tos, Rscratch, R17_tos); break;
    case  sub:  __ sub(R17_tos, Rscratch, R17_tos); break;
    case  _and: __ andr(R17_tos, Rscratch, R17_tos); break;
    case  _or:  __ orr(R17_tos, Rscratch, R17_tos); break;
    case  _xor: __ xorr(R17_tos, Rscratch, R17_tos); break;
    default: ShouldNotReachHere();
  }
}

void TemplateTable::idiv() {
  transition(itos, itos);

  Label Lnormal, Lexception, Ldone;
  Register Rdividend = R11_scratch1; // Used by irem.

  __ addi(R0, R17_tos, 1);
  __ cmplwi(CCR0, R0, 2);
  __ bgt(CCR0, Lnormal); // divisor <-1 or >1

  __ cmpwi(CCR1, R17_tos, 0);
  __ beq(CCR1, Lexception); // divisor == 0

  __ pop_i(Rdividend);
  __ mullw(R17_tos, Rdividend, R17_tos); // div by +/-1
  __ b(Ldone);

  __ bind(Lexception);
  __ load_dispatch_table(R11_scratch1, (address*)Interpreter::_throw_ArithmeticException_entry);
  __ mtctr(R11_scratch1);
  __ bctr();

  __ align(32, 12);
  __ bind(Lnormal);
  __ pop_i(Rdividend);
  __ divw(R17_tos, Rdividend, R17_tos); // Can't divide minint/-1.
  __ bind(Ldone);
}

void TemplateTable::irem() {
  transition(itos, itos);

  __ mr(R12_scratch2, R17_tos);
  idiv();
  __ mullw(R17_tos, R17_tos, R12_scratch2);
  __ subf(R17_tos, R17_tos, R11_scratch1); // Dividend set by idiv.
}

void TemplateTable::lmul() {
  transition(ltos, ltos);

  __ pop_l(R11_scratch1);
  __ mulld(R17_tos, R11_scratch1, R17_tos);
}

void TemplateTable::ldiv() {
  transition(ltos, ltos);

  Label Lnormal, Lexception, Ldone;
  Register Rdividend = R11_scratch1; // Used by lrem.

  __ addi(R0, R17_tos, 1);
  __ cmpldi(CCR0, R0, 2);
  __ bgt(CCR0, Lnormal); // divisor <-1 or >1

  __ cmpdi(CCR1, R17_tos, 0);
  __ beq(CCR1, Lexception); // divisor == 0

  __ pop_l(Rdividend);
  __ mulld(R17_tos, Rdividend, R17_tos); // div by +/-1
  __ b(Ldone);

  __ bind(Lexception);
  __ load_dispatch_table(R11_scratch1, (address*)Interpreter::_throw_ArithmeticException_entry);
  __ mtctr(R11_scratch1);
  __ bctr();

  __ align(32, 12);
  __ bind(Lnormal);
  __ pop_l(Rdividend);
  __ divd(R17_tos, Rdividend, R17_tos); // Can't divide minlong/-1.
  __ bind(Ldone);
}

void TemplateTable::lrem() {
  transition(ltos, ltos);

  __ mr(R12_scratch2, R17_tos);
  ldiv();
  __ mulld(R17_tos, R17_tos, R12_scratch2);
  __ subf(R17_tos, R17_tos, R11_scratch1); // Dividend set by ldiv.
}

void TemplateTable::lshl() {
  transition(itos, ltos);

  __ rldicl(R17_tos, R17_tos, 0, 64-6); // Extract least significant bits.
  __ pop_l(R11_scratch1);
  __ sld(R17_tos, R11_scratch1, R17_tos);
}

void TemplateTable::lshr() {
  transition(itos, ltos);

  __ rldicl(R17_tos, R17_tos, 0, 64-6); // Extract least significant bits.
  __ pop_l(R11_scratch1);
  __ srad(R17_tos, R11_scratch1, R17_tos);
}

void TemplateTable::lushr() {
  transition(itos, ltos);

  __ rldicl(R17_tos, R17_tos, 0, 64-6); // Extract least significant bits.
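  // (rldicl with mb = 64 - 6 keeps only the low 6 bits of the shift count,
  //  i.e. long shifts use the distance mod 64, as the JVM spec requires.)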
  __ pop_l(R11_scratch1);
  __ srd(R17_tos, R11_scratch1, R17_tos);
}

void TemplateTable::fop2(Operation op) {
  transition(ftos, ftos);

  switch (op) {
    case add: __ pop_f(F0_SCRATCH); __ fadds(F15_ftos, F0_SCRATCH, F15_ftos); break;
    case sub: __ pop_f(F0_SCRATCH); __ fsubs(F15_ftos, F0_SCRATCH, F15_ftos); break;
    case mul: __ pop_f(F0_SCRATCH); __ fmuls(F15_ftos, F0_SCRATCH, F15_ftos); break;
    case div: __ pop_f(F0_SCRATCH); __ fdivs(F15_ftos, F0_SCRATCH, F15_ftos); break;
    case rem:
      __ pop_f(F1_ARG1);
      __ fmr(F2_ARG2, F15_ftos);
      __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::frem));
      __ fmr(F15_ftos, F1_RET);
      break;

    default: ShouldNotReachHere();
  }
}

void TemplateTable::dop2(Operation op) {
  transition(dtos, dtos);

  switch (op) {
    case add: __ pop_d(F0_SCRATCH); __ fadd(F15_ftos, F0_SCRATCH, F15_ftos); break;
    case sub: __ pop_d(F0_SCRATCH); __ fsub(F15_ftos, F0_SCRATCH, F15_ftos); break;
    case mul: __ pop_d(F0_SCRATCH); __ fmul(F15_ftos, F0_SCRATCH, F15_ftos); break;
    case div: __ pop_d(F0_SCRATCH); __ fdiv(F15_ftos, F0_SCRATCH, F15_ftos); break;
    case rem:
      __ pop_d(F1_ARG1);
      __ fmr(F2_ARG2, F15_ftos);
      __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::drem));
      __ fmr(F15_ftos, F1_RET);
      break;

    default: ShouldNotReachHere();
  }
}

// Negate the value in the TOS cache.
void TemplateTable::ineg() {
  transition(itos, itos);

  __ neg(R17_tos, R17_tos);
}

// Negate the value in the TOS cache.
void TemplateTable::lneg() {
  transition(ltos, ltos);

  __ neg(R17_tos, R17_tos);
}

void TemplateTable::fneg() {
  transition(ftos, ftos);

  __ fneg(F15_ftos, F15_ftos);
}

void TemplateTable::dneg() {
  transition(dtos, dtos);

  __ fneg(F15_ftos, F15_ftos);
}

// Increments a local variable in place.
void TemplateTable::iinc() {
  transition(vtos, vtos);

  const Register Rindex     = R11_scratch1,
                 Rincrement = R0,
                 Rvalue     = R12_scratch2;

  locals_index(Rindex); // Load locals index from bytecode stream.
  __ lbz(Rincrement, 2, R14_bcp); // Load increment from the bytecode stream.
  __ extsb(Rincrement, Rincrement);

  __ load_local_int(Rvalue, Rindex, Rindex); // Puts address of local into Rindex.
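  // Rvalue now holds the local's value and Rindex its address, so the
  // incremented result can be stored back in place below.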

  __ add(Rvalue, Rincrement, Rvalue);
  __ stw(Rvalue, 0, Rindex);
}

void TemplateTable::wide_iinc() {
  transition(vtos, vtos);

  Register Rindex       = R11_scratch1,
           Rlocals_addr = Rindex,
           Rincr        = R12_scratch2;
  locals_index_wide(Rindex);
  __ get_2_byte_integer_at_bcp(4, Rincr, InterpreterMacroAssembler::Signed);
  __ load_local_int(R17_tos, Rlocals_addr, Rindex);
  __ add(R17_tos, Rincr, R17_tos);
  __ stw(R17_tos, 0, Rlocals_addr);
}

void TemplateTable::convert() {
  // %%%%% Factor this first part across platforms
#ifdef ASSERT
  TosState tos_in  = ilgl;
  TosState tos_out = ilgl;
  switch (bytecode()) {
    case Bytecodes::_i2l: // fall through
    case Bytecodes::_i2f: // fall through
    case Bytecodes::_i2d: // fall through
    case Bytecodes::_i2b: // fall through
    case Bytecodes::_i2c: // fall through
    case Bytecodes::_i2s: tos_in = itos; break;
    case Bytecodes::_l2i: // fall through
    case Bytecodes::_l2f: // fall through
    case Bytecodes::_l2d: tos_in = ltos; break;
    case Bytecodes::_f2i: // fall through
    case Bytecodes::_f2l: // fall through
    case Bytecodes::_f2d: tos_in = ftos; break;
    case Bytecodes::_d2i: // fall through
    case Bytecodes::_d2l: // fall through
    case Bytecodes::_d2f: tos_in = dtos; break;
    default : ShouldNotReachHere();
  }
  switch (bytecode()) {
    case Bytecodes::_l2i: // fall through
    case Bytecodes::_f2i: // fall through
    case Bytecodes::_d2i: // fall through
    case Bytecodes::_i2b: // fall through
    case Bytecodes::_i2c: // fall through
    case Bytecodes::_i2s: tos_out = itos; break;
    case Bytecodes::_i2l: // fall through
    case Bytecodes::_f2l: // fall through
    case Bytecodes::_d2l: tos_out = ltos; break;
    case Bytecodes::_i2f: // fall through
    case Bytecodes::_l2f: // fall through
    case Bytecodes::_d2f: tos_out = ftos; break;
    case Bytecodes::_i2d: // fall through
    case Bytecodes::_l2d: // fall through
    case Bytecodes::_f2d: tos_out = dtos; break;
    default : ShouldNotReachHere();
  }
  transition(tos_in, tos_out);
#endif

  // Conversion
  Label done;
  switch (bytecode()) {
    case Bytecodes::_i2l:
      __ extsw(R17_tos, R17_tos);
      break;

    case Bytecodes::_l2i:
      // Nothing to do, we'll continue to work with the lower bits.
      break;

    case Bytecodes::_i2b:
      __ extsb(R17_tos, R17_tos);
      break;

    case Bytecodes::_i2c:
      __ rldicl(R17_tos, R17_tos, 0, 64-2*8);
      break;

    case Bytecodes::_i2s:
      __ extsh(R17_tos, R17_tos);
      break;

    case Bytecodes::_i2d:
      __ extsw(R17_tos, R17_tos);
      // fall through to _l2d
    case Bytecodes::_l2d:
      __ push_l_pop_d();
      __ fcfid(F15_ftos, F15_ftos);
      break;

    case Bytecodes::_i2f:
      __ extsw(R17_tos, R17_tos);
      __ push_l_pop_d();
      if (VM_Version::has_fcfids()) { // fcfids is >= Power7 only
        // Comment: alternatively, load with sign extend could be done by lfiwax.
        __ fcfids(F15_ftos, F15_ftos);
      } else {
        __ fcfid(F15_ftos, F15_ftos);
        __ frsp(F15_ftos, F15_ftos);
      }
      break;

    case Bytecodes::_l2f:
      if (VM_Version::has_fcfids()) { // fcfids is >= Power7 only
        __ push_l_pop_d();
        __ fcfids(F15_ftos, F15_ftos);
      } else {
        // Avoid rounding problem when result should be 0x3f800001: need fixup code before fcfid+frsp.
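        // (fcfid would convert long->double and frsp round double->float;
        //  rounding twice this way does not always match a single correctly
        //  rounded long->float conversion, so call SharedRuntime::l2f.)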
        __ mr(R3_ARG1, R17_tos);
        __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::l2f));
        __ fmr(F15_ftos, F1_RET);
      }
      break;

    case Bytecodes::_f2d:
      // empty
      break;

    case Bytecodes::_d2f:
      __ frsp(F15_ftos, F15_ftos);
      break;

    case Bytecodes::_d2i:
    case Bytecodes::_f2i:
      __ fcmpu(CCR0, F15_ftos, F15_ftos);
      __ li(R17_tos, 0); // 0 in case of NAN
      __ bso(CCR0, done);
      __ fctiwz(F15_ftos, F15_ftos);
      __ push_d_pop_l();
      break;

    case Bytecodes::_d2l:
    case Bytecodes::_f2l:
      __ fcmpu(CCR0, F15_ftos, F15_ftos);
      __ li(R17_tos, 0); // 0 in case of NAN
      __ bso(CCR0, done);
      __ fctidz(F15_ftos, F15_ftos);
      __ push_d_pop_l();
      break;

    default: ShouldNotReachHere();
  }
  __ bind(done);
}

// Long compare
void TemplateTable::lcmp() {
  transition(ltos, itos);

  const Register Rscratch = R11_scratch1;
  __ pop_l(Rscratch); // first operand, deeper in stack

  __ cmpd(CCR0, Rscratch, R17_tos); // compare
  __ mfcr(R17_tos); // set bit 32..33 as follows: <: 0b10, =: 0b00, >: 0b01
  __ srwi(Rscratch, R17_tos, 30);
  __ srawi(R17_tos, R17_tos, 31);
  __ orr(R17_tos, Rscratch, R17_tos); // set result as follows: <: -1, =: 0, >: 1
}

// fcmpl/fcmpg and dcmpl/dcmpg bytecodes
// unordered_result == -1 => fcmpl or dcmpl
// unordered_result ==  1 => fcmpg or dcmpg
void TemplateTable::float_cmp(bool is_float, int unordered_result) {
  const FloatRegister Rfirst  = F0_SCRATCH,
                      Rsecond = F15_ftos;
  const Register Rscratch = R11_scratch1;

  if (is_float) {
    __ pop_f(Rfirst);
  } else {
    __ pop_d(Rfirst);
  }

  Label Lunordered, Ldone;
  __ fcmpu(CCR0, Rfirst, Rsecond); // compare
  if (unordered_result) {
    __ bso(CCR0, Lunordered);
  }
  __ mfcr(R17_tos); // set bit 32..33 as follows: <: 0b10, =: 0b00, >: 0b01
  __ srwi(Rscratch, R17_tos, 30);
  __ srawi(R17_tos, R17_tos, 31);
  __ orr(R17_tos, Rscratch, R17_tos); // set result as follows: <: -1, =: 0, >: 1
  if (unordered_result) {
    __ b(Ldone);
    __ bind(Lunordered);
    __ load_const_optimized(R17_tos, unordered_result);
  }
  __ bind(Ldone);
}

// Branch_conditional which takes TemplateTable::Condition.
void TemplateTable::branch_conditional(ConditionRegister crx, TemplateTable::Condition cc, Label& L, bool invert) {
  bool positive = false;
  Assembler::Condition cond = Assembler::equal;
  switch (cc) {
    case TemplateTable::equal:         positive = true ; cond = Assembler::equal  ; break;
    case TemplateTable::not_equal:     positive = false; cond = Assembler::equal  ; break;
    case TemplateTable::less:          positive = true ; cond = Assembler::less   ; break;
    case TemplateTable::less_equal:    positive = false; cond = Assembler::greater; break;
    case TemplateTable::greater:       positive = true ; cond = Assembler::greater; break;
    case TemplateTable::greater_equal: positive = false; cond = Assembler::less   ; break;
    default: ShouldNotReachHere();
  }
  int bo = (positive != invert) ? Assembler::bcondCRbiIs1 : Assembler::bcondCRbiIs0;
  int bi = Assembler::bi0(crx, cond);
  __ bc(bo, bi, L);
}

void TemplateTable::branch(bool is_jsr, bool is_wide) {

  // Note: on SPARC, we use InterpreterMacroAssembler::if_cmp also.
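  // Outline: profile the taken branch, read the signed 2- or 4-byte
  // displacement from the bytecode stream, handle jsr separately, and for
  // backward branches bump the backedge counter, which may trigger OSR
  // compilation.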
  __ verify_thread();

  const Register Rscratch1    = R11_scratch1,
                 Rscratch2    = R12_scratch2,
                 Rscratch3    = R3_ARG1,
                 R4_counters  = R4_ARG2,
                 bumped_count = R31,
                 Rdisp        = R22_tmp2;

  __ profile_taken_branch(Rscratch1, bumped_count);

  // Get (wide) offset.
  if (is_wide) {
    __ get_4_byte_integer_at_bcp(1, Rdisp, InterpreterMacroAssembler::Signed);
  } else {
    __ get_2_byte_integer_at_bcp(1, Rdisp, InterpreterMacroAssembler::Signed);
  }

  // --------------------------------------------------------------------------
  // Handle all the JSR stuff here, then exit.
  // It's much shorter and cleaner than intermingling with the
  // non-JSR normal-branch stuff occurring below.
  if (is_jsr) {
    // Compute return address as bci in R17_tos.
    __ ld(Rscratch1, in_bytes(Method::const_offset()), R19_method);
    __ addi(Rscratch2, R14_bcp, -in_bytes(ConstMethod::codes_offset()) + (is_wide ? 5 : 3));
    __ subf(R17_tos, Rscratch1, Rscratch2);

    // Bump bcp to target of JSR.
    __ add(R14_bcp, Rdisp, R14_bcp);
    // Push returnAddress for "ret" on stack.
    __ push_ptr(R17_tos);
    // And away we go!
    __ dispatch_next(vtos);
    return;
  }

  // --------------------------------------------------------------------------
  // Normal (non-jsr) branch handling

  const bool increment_invocation_counter_for_backward_branches = UseCompiler && UseLoopCounter;
  if (increment_invocation_counter_for_backward_branches) {
    //__ unimplemented("branch invocation counter");

    Label Lforward;
    __ add(R14_bcp, Rdisp, R14_bcp); // Add to bc addr.

    // Check branch direction.
    __ cmpdi(CCR0, Rdisp, 0);
    __ bgt(CCR0, Lforward);

    __ get_method_counters(R19_method, R4_counters, Lforward);

    if (TieredCompilation) {
      Label Lno_mdo, Loverflow;
      const int increment = InvocationCounter::count_increment;
      const int mask = ((1 << Tier0BackedgeNotifyFreqLog) - 1) << InvocationCounter::count_shift;
      if (ProfileInterpreter) {
        Register Rmdo = Rscratch1;

        // If no method data exists, go to profile_continue.
        __ ld(Rmdo, in_bytes(Method::method_data_offset()), R19_method);
        __ cmpdi(CCR0, Rmdo, 0);
        __ beq(CCR0, Lno_mdo);

        // Increment backedge counter in the MDO.
        const int mdo_bc_offs = in_bytes(MethodData::backedge_counter_offset()) + in_bytes(InvocationCounter::counter_offset());
        __ lwz(Rscratch2, mdo_bc_offs, Rmdo);
        __ load_const_optimized(Rscratch3, mask, R0);
        __ addi(Rscratch2, Rscratch2, increment);
        __ stw(Rscratch2, mdo_bc_offs, Rmdo);
        __ and_(Rscratch3, Rscratch2, Rscratch3);
        __ bne(CCR0, Lforward);
        __ b(Loverflow);
      }

      // If there's no MDO, increment counter in method.
      const int mo_bc_offs = in_bytes(MethodCounters::backedge_counter_offset()) + in_bytes(InvocationCounter::counter_offset());
      __ bind(Lno_mdo);
      __ lwz(Rscratch2, mo_bc_offs, R4_counters);
      __ load_const_optimized(Rscratch3, mask, R0);
      __ addi(Rscratch2, Rscratch2, increment);
      __ stw(Rscratch2, mo_bc_offs, R4_counters); // Store back to the MethodCounters the value was loaded from.
      __ and_(Rscratch3, Rscratch2, Rscratch3);
      __ bne(CCR0, Lforward);

      __ bind(Loverflow);

      // Notify point for loop, pass branch bytecode.
      __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::frequency_counter_overflow), R14_bcp, true);

      // Was an OSR adapter generated?
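      // (frequency_counter_overflow returns the osr nmethod if one was
      //  generated, NULL otherwise.)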
      // R3_RET = osr nmethod
      __ cmpdi(CCR0, R3_RET, 0);
      __ beq(CCR0, Lforward);

      // Has the nmethod been invalidated already?
      __ lbz(R0, nmethod::state_offset(), R3_RET);
      __ cmpwi(CCR0, R0, nmethod::in_use);
      __ bne(CCR0, Lforward);

      // Migrate the interpreter frame off of the stack.
      // We can use all registers because we will not return to interpreter from this point.

      // Save nmethod.
      const Register osr_nmethod = R31;
      __ mr(osr_nmethod, R3_RET);
      __ set_top_ijava_frame_at_SP_as_last_Java_frame(R1_SP, R11_scratch1);
      __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::OSR_migration_begin), R16_thread);
      __ reset_last_Java_frame();
      // OSR buffer is in ARG1.

      // Remove the interpreter frame.
      __ merge_frames(/*top_frame_sp*/ R21_sender_SP, /*return_pc*/ R0, R11_scratch1, R12_scratch2);

      // Jump to the osr code.
      __ ld(R11_scratch1, nmethod::osr_entry_point_offset(), osr_nmethod);
      __ mtlr(R0);
      __ mtctr(R11_scratch1);
      __ bctr();

    } else {

      const Register invoke_ctr = Rscratch1;
      // Update Backedge branch separately from invocations.
      __ increment_backedge_counter(R4_counters, invoke_ctr, Rscratch2, Rscratch3);

      if (ProfileInterpreter) {
        __ test_invocation_counter_for_mdp(invoke_ctr, Rscratch2, Lforward);
        if (UseOnStackReplacement) {
          __ test_backedge_count_for_osr(bumped_count, R14_bcp, Rscratch2);
        }
      } else {
        if (UseOnStackReplacement) {
          __ test_backedge_count_for_osr(invoke_ctr, R14_bcp, Rscratch2);
        }
      }
    }

    __ bind(Lforward);

  } else {
    // Bump bytecode pointer by displacement (take the branch).
    __ add(R14_bcp, Rdisp, R14_bcp); // Add to bc addr.
  }
  // Continue with bytecode @ target.
  // %%%%% Like Intel, could speed things up by moving bytecode fetch to code above,
  // %%%%% and changing dispatch_next to dispatch_only.
  __ dispatch_next(vtos);
}

// Helper function for if_cmp* methods below.
// Factored out common compare and branch code.
void TemplateTable::if_cmp_common(Register Rfirst, Register Rsecond, Register Rscratch1, Register Rscratch2, Condition cc, bool is_jint, bool cmp0) {
  Label Lnot_taken;
  // Note: The condition code we get is the condition under which we
  // *fall through* (to the code taking the branch)! So we have to invert
  // the CC here to jump to Lnot_taken.

  if (is_jint) {
    if (cmp0) {
      __ cmpwi(CCR0, Rfirst, 0);
    } else {
      __ cmpw(CCR0, Rfirst, Rsecond);
    }
  } else {
    if (cmp0) {
      __ cmpdi(CCR0, Rfirst, 0);
    } else {
      __ cmpd(CCR0, Rfirst, Rsecond);
    }
  }
  branch_conditional(CCR0, cc, Lnot_taken, /*invert*/ true);

  // Condition is true => take the branch.
  branch(false, false);

  // Condition is not true => continue with the next bytecode.
  __ align(32, 12);
  __ bind(Lnot_taken);
  __ profile_not_taken_branch(Rscratch1, Rscratch2);
}

// Compare integer values with zero and fall through if CC holds, branch away otherwise.
void TemplateTable::if_0cmp(Condition cc) {
  transition(itos, vtos);

  if_cmp_common(R17_tos, noreg, R11_scratch1, R12_scratch2, cc, true, true);
}

// Compare integer values and fall through if CC holds, branch away otherwise.
//
// Interface:
//   - Rfirst: First operand  (older stack value)
//   - tos:    Second operand (younger stack value)
void TemplateTable::if_icmp(Condition cc) {
  transition(itos, vtos);

  const Register Rfirst  = R0,
                 Rsecond = R17_tos;

  __ pop_i(Rfirst);
  if_cmp_common(Rfirst, Rsecond, R11_scratch1, R12_scratch2, cc, true, false);
}

void TemplateTable::if_nullcmp(Condition cc) {
  transition(atos, vtos);

  if_cmp_common(R17_tos, noreg, R11_scratch1, R12_scratch2, cc, false, true);
}

void TemplateTable::if_acmp(Condition cc) {
  transition(atos, vtos);

  const Register Rfirst  = R0,
                 Rsecond = R17_tos;

  __ pop_ptr(Rfirst);
  if_cmp_common(Rfirst, Rsecond, R11_scratch1, R12_scratch2, cc, false, false);
}

void TemplateTable::ret() {
  locals_index(R11_scratch1);
  __ load_local_ptr(R17_tos, R11_scratch1, R11_scratch1);

  __ profile_ret(vtos, R17_tos, R11_scratch1, R12_scratch2);

  __ ld(R11_scratch1, in_bytes(Method::const_offset()), R19_method);
  __ add(R11_scratch1, R17_tos, R11_scratch1);
  __ addi(R14_bcp, R11_scratch1, in_bytes(ConstMethod::codes_offset()));
  __ dispatch_next(vtos);
}

void TemplateTable::wide_ret() {
  transition(vtos, vtos);

  const Register Rindex    = R3_ARG1,
                 Rscratch1 = R11_scratch1,
                 Rscratch2 = R12_scratch2;

  locals_index_wide(Rindex);
  __ load_local_ptr(R17_tos, R17_tos, Rindex);
  __ profile_ret(vtos, R17_tos, Rscratch1, Rscratch2);
  // Tos now contains the bci, compute the bcp from that.
  __ ld(Rscratch1, in_bytes(Method::const_offset()), R19_method);
  __ addi(Rscratch2, R17_tos, in_bytes(ConstMethod::codes_offset()));
  __ add(R14_bcp, Rscratch1, Rscratch2);
  __ dispatch_next(vtos);
}

void TemplateTable::tableswitch() {
  transition(itos, vtos);

  Label Ldispatch, Ldefault_case;
  Register Rlow_byte        = R3_ARG1,
           Rindex           = Rlow_byte,
           Rhigh_byte       = R4_ARG2,
           Rdef_offset_addr = R5_ARG3, // Is going to contain address of default offset.
           Rscratch1        = R11_scratch1,
           Rscratch2        = R12_scratch2,
           Roffset          = R6_ARG4;

  // Align bcp.
  __ addi(Rdef_offset_addr, R14_bcp, BytesPerInt);
  __ clrrdi(Rdef_offset_addr, Rdef_offset_addr, log2_long((jlong)BytesPerInt));

  // Load lo & hi.
  __ get_u4(Rlow_byte, Rdef_offset_addr, BytesPerInt, InterpreterMacroAssembler::Unsigned);
  __ get_u4(Rhigh_byte, Rdef_offset_addr, 2 * BytesPerInt, InterpreterMacroAssembler::Unsigned);

  // Check for default case (=index outside [low,high]).
  __ cmpw(CCR0, R17_tos, Rlow_byte);
  __ cmpw(CCR1, R17_tos, Rhigh_byte);
  __ blt(CCR0, Ldefault_case);
  __ bgt(CCR1, Ldefault_case);

  // Lookup dispatch offset.
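  // (Sketch of the tableswitch operand layout assumed by the code below, with
  // Rdef_offset_addr = align_up(bcp + 1, BytesPerInt) and all values big-endian:
  //
  //   int32_t default_offset;  // at Rdef_offset_addr
  //   int32_t low, high;       // at +BytesPerInt and +2*BytesPerInt
  //   int32_t offsets[];       // high-low+1 entries, from +3*BytesPerInt
  //
  // so the taken offset is offsets[R17_tos - low], which explains the
  // "3 * BytesPerInt" bias added to the scaled index.)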
1863 __ sub(Rindex, R17_tos, Rlow_byte); 1864 __ extsw(Rindex, Rindex); 1865 __ profile_switch_case(Rindex, Rhigh_byte /* scratch */, Rscratch1, Rscratch2); 1866 __ sldi(Rindex, Rindex, LogBytesPerInt); 1867 __ addi(Rindex, Rindex, 3 * BytesPerInt); 1868 #if defined(VM_LITTLE_ENDIAN) 1869 __ lwbrx(Roffset, Rdef_offset_addr, Rindex); 1870 __ extsw(Roffset, Roffset); 1871 #else 1872 __ lwax(Roffset, Rdef_offset_addr, Rindex); 1873 #endif 1874 __ b(Ldispatch); 1875 1876 __ bind(Ldefault_case); 1877 __ profile_switch_default(Rhigh_byte, Rscratch1); 1878 __ get_u4(Roffset, Rdef_offset_addr, 0, InterpreterMacroAssembler::Signed); 1879 1880 __ bind(Ldispatch); 1881 1882 __ add(R14_bcp, Roffset, R14_bcp); 1883 __ dispatch_next(vtos); 1884 } 1885 1886 void TemplateTable::lookupswitch() { 1887 transition(itos, itos); 1888 __ stop("lookupswitch bytecode should have been rewritten"); 1889 } 1890 1891 // Table switch using linear search through cases. 1892 // Bytecode stream format: 1893 // Bytecode (1) | 4-byte padding | default offset (4) | count (4) | value/offset pair1 (8) | value/offset pair2 (8) | ... 1894 // Note: Everything is big-endian format here. 1895 void TemplateTable::fast_linearswitch() { 1896 transition(itos, vtos); 1897 1898 Label Lloop_entry, Lsearch_loop, Lcontinue_execution, Ldefault_case; 1899 Register Rcount = R3_ARG1, 1900 Rcurrent_pair = R4_ARG2, 1901 Rdef_offset_addr = R5_ARG3, // Is going to contain address of default offset. 1902 Roffset = R31, // Might need to survive C call. 1903 Rvalue = R12_scratch2, 1904 Rscratch = R11_scratch1, 1905 Rcmp_value = R17_tos; 1906 1907 // Align bcp. 1908 __ addi(Rdef_offset_addr, R14_bcp, BytesPerInt); 1909 __ clrrdi(Rdef_offset_addr, Rdef_offset_addr, log2_long((jlong)BytesPerInt)); 1910 1911 // Setup loop counter and limit. 1912 __ get_u4(Rcount, Rdef_offset_addr, BytesPerInt, InterpreterMacroAssembler::Unsigned); 1913 __ addi(Rcurrent_pair, Rdef_offset_addr, 2 * BytesPerInt); // Rcurrent_pair now points to first pair. 1914 1915 __ mtctr(Rcount); 1916 __ cmpwi(CCR0, Rcount, 0); 1917 __ bne(CCR0, Lloop_entry); 1918 1919 // Default case 1920 __ bind(Ldefault_case); 1921 __ get_u4(Roffset, Rdef_offset_addr, 0, InterpreterMacroAssembler::Signed); 1922 if (ProfileInterpreter) { 1923 __ profile_switch_default(Rdef_offset_addr, Rcount/* scratch */); 1924 } 1925 __ b(Lcontinue_execution); 1926 1927 // Next iteration 1928 __ bind(Lsearch_loop); 1929 __ bdz(Ldefault_case); 1930 __ addi(Rcurrent_pair, Rcurrent_pair, 2 * BytesPerInt); 1931 __ bind(Lloop_entry); 1932 __ get_u4(Rvalue, Rcurrent_pair, 0, InterpreterMacroAssembler::Unsigned); 1933 __ cmpw(CCR0, Rvalue, Rcmp_value); 1934 __ bne(CCR0, Lsearch_loop); 1935 1936 // Found, load offset. 1937 __ get_u4(Roffset, Rcurrent_pair, BytesPerInt, InterpreterMacroAssembler::Signed); 1938 // Calculate case index and profile 1939 __ mfctr(Rcurrent_pair); 1940 if (ProfileInterpreter) { 1941 __ sub(Rcurrent_pair, Rcount, Rcurrent_pair); 1942 __ profile_switch_case(Rcurrent_pair, Rcount /*scratch*/, Rdef_offset_addr/*scratch*/, Rscratch); 1943 } 1944 1945 __ bind(Lcontinue_execution); 1946 __ add(R14_bcp, Roffset, R14_bcp); 1947 __ dispatch_next(vtos); 1948 } 1949 1950 // Table switch using binary search (value/offset pairs are ordered). 1951 // Bytecode stream format: 1952 // Bytecode (1) | 4-byte padding | default offset (4) | count (4) | value/offset pair1 (8) | value/offset pair2 (8) | ... 1953 // Note: Everything is big-endian format here. 
//       So on little-endian machines we have to byte-swap the compare values, the count, and the offsets when loading them.
void TemplateTable::fast_binaryswitch() {

  transition(itos, vtos);
  // Implementation using the following core algorithm: (copied from Intel)
  //
  // int binary_search(int key, LookupswitchPair* array, int n) {
  //   // Binary search according to "Methodik des Programmierens" by
  //   // Edsger W. Dijkstra and W.H.J. Feijen, Addison Wesley Germany 1985.
  //   int i = 0;
  //   int j = n;
  //   while (i+1 < j) {
  //     // invariant P: 0 <= i < j <= n and (a[i] <= key < a[j] or Q)
  //     // with      Q: for all i: 0 <= i < n: key < a[i]
  //     // where a stands for the array and assuming that the (nonexistent)
  //     // element a[n] is infinitely big.
  //     int h = (i + j) >> 1;
  //     // i < h < j
  //     if (key < array[h].fast_match()) {
  //       j = h;
  //     } else {
  //       i = h;
  //     }
  //   }
  //   // R: a[i] <= key < a[i+1] or Q
  //   // (i.e., if key is within array, i is the correct index)
  //   return i;
  // }

  // Register allocation.
  const Register Rkey     = R17_tos; // Already set (tosca).
  const Register Rarray   = R3_ARG1;
  const Register Ri       = R4_ARG2;
  const Register Rj       = R5_ARG3;
  const Register Rh       = R6_ARG4;
  const Register Rscratch = R11_scratch1;

  const int log_entry_size = 3;
  const int entry_size = 1 << log_entry_size;

  Label found;

  // Find array start.
  __ addi(Rarray, R14_bcp, 3 * BytesPerInt);
  __ clrrdi(Rarray, Rarray, log2_long((jlong)BytesPerInt));

  // Initialize i & j.
  __ li(Ri, 0);
  __ get_u4(Rj, Rarray, -BytesPerInt, InterpreterMacroAssembler::Unsigned);

  // And start.
  Label entry;
  __ b(entry);

  // Binary search loop.
  { Label loop;
    __ bind(loop);
    // int h = (i + j) >> 1;
    __ srdi(Rh, Rh, 1);
    // if (key < array[h].fast_match()) {
    //   j = h;
    // } else {
    //   i = h;
    // }
    __ sldi(Rscratch, Rh, log_entry_size);
#if defined(VM_LITTLE_ENDIAN)
    __ lwbrx(Rscratch, Rscratch, Rarray);
#else
    __ lwzx(Rscratch, Rscratch, Rarray);
#endif

    // if (key < current value)
    //   Rh = Rj
    // else
    //   Rh = Ri
    Label Lgreater;
    __ cmpw(CCR0, Rkey, Rscratch);
    __ bge(CCR0, Lgreater);
    __ mr(Rj, Rh);
    __ b(entry);
    __ bind(Lgreater);
    __ mr(Ri, Rh);

    // while (i+1 < j)
    __ bind(entry);
    __ addi(Rscratch, Ri, 1);
    __ cmpw(CCR0, Rscratch, Rj);
    __ add(Rh, Ri, Rj); // Compute h = i + j; the ">> 1" happens at the loop head.

    __ blt(CCR0, loop);
  }

  // End of binary search, result index is i (must check again!).
  Label default_case;
  Label continue_execution;
  if (ProfileInterpreter) {
    __ mr(Rh, Ri); // Save index in i for profiling.
  }
  // Convert i into the pair's byte offset in the array.
  __ sldi(Ri, Ri, log_entry_size);
  __ add(Ri, Ri, Rarray);
  __ get_u4(Rscratch, Ri, 0, InterpreterMacroAssembler::Unsigned);

  __ cmpw(CCR0, Rkey, Rscratch);
  __ beq(CCR0, found);
  // Entry not found -> j = default offset.
  __ get_u4(Rj, Rarray, -2 * BytesPerInt, InterpreterMacroAssembler::Unsigned);
  __ b(default_case);

  __ bind(found);
  // Entry found -> j = offset.
  __ profile_switch_case(Rh, Rj, Rscratch, Rkey);
  __ get_u4(Rj, Ri, BytesPerInt, InterpreterMacroAssembler::Unsigned);

  if (ProfileInterpreter) {
    __ b(continue_execution);
  }

  __ bind(default_case); // Fall through (if not profiling).
  __ profile_switch_default(Ri, Rscratch);

  __ bind(continue_execution);

  __ extsw(Rj, Rj);
  __ add(R14_bcp, Rj, R14_bcp);
  __ dispatch_next(vtos);
}

void TemplateTable::_return(TosState state) {
  transition(state, state);
  assert(_desc->calls_vm(),
         "inconsistent calls_vm information"); // call in remove_activation

  if (_desc->bytecode() == Bytecodes::_return_register_finalizer) {

    Register Rscratch     = R11_scratch1,
             Rklass       = R12_scratch2,
             Rklass_flags = Rklass;
    Label Lskip_register_finalizer;

    // Check if the method has the FINALIZER flag set and call into the VM to finalize in this case.
    assert(state == vtos, "only valid state");
    __ ld(R17_tos, 0, R18_locals);

    // Load klass of this obj.
    __ load_klass(Rklass, R17_tos);
    __ lwz(Rklass_flags, in_bytes(Klass::access_flags_offset()), Rklass);
    __ testbitdi(CCR0, R0, Rklass_flags, exact_log2(JVM_ACC_HAS_FINALIZER));
    __ bfalse(CCR0, Lskip_register_finalizer);

    __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::register_finalizer), R17_tos /* obj */);

    __ align(32, 12);
    __ bind(Lskip_register_finalizer);
  }

  // Move the result value into the correct register and remove the memory stack frame.
  __ remove_activation(state, /* throw_monitor_exception */ true);
  // Restoration of lr done by remove_activation.
  switch (state) {
  case ltos:
  case btos:
  case ctos:
  case stos:
  case atos:
  case itos: __ mr(R3_RET, R17_tos); break;
  case ftos:
  case dtos: __ fmr(F1_RET, F15_ftos); break;
  case vtos: // This might be a constructor. Final fields (and volatile fields on PPC64) need
             // to become visible before the reference to the object gets stored anywhere.
             __ membar(Assembler::StoreStore); break;
  default  : ShouldNotReachHere();
  }
  __ blr();
}

// ============================================================================
// Constant pool cache access
//
// Memory ordering:
//
// As in the C++ interpreter, we load the fields
//   - _indices
//   - _f12_oop
// with acquire semantics, because they are queried to decide whether the cache
// entry is already resolved. We don't want those loads to float above this check.
// See also comments in ConstantPoolCacheEntry::bytecode_1(),
// ConstantPoolCacheEntry::bytecode_2() and ConstantPoolCacheEntry::f1();

// Call into the VM if the call site is not yet resolved.
//
// Input regs:
//   - None, all passed regs are outputs.
//
// Returns:
//   - Rcache: The const pool cache entry that contains the resolved result.
//
// Kills:
//   - Rscratch
void TemplateTable::resolve_cache_and_index(int byte_no, Register Rcache, Register Rscratch, size_t index_size) {

  __ get_cache_and_index_at_bcp(Rcache, 1, index_size);
  Label Lresolved, Ldone;

  Bytecodes::Code code = bytecode();
  switch (code) {
  case Bytecodes::_nofast_getfield: code = Bytecodes::_getfield; break;
  case Bytecodes::_nofast_putfield: code = Bytecodes::_putfield; break;
  default:
    break;
  }

  assert(byte_no == f1_byte || byte_no == f2_byte, "byte_no out of range");
  // We are resolved if the indices offset contains the current bytecode.
#if defined(VM_LITTLE_ENDIAN)
  __ lbz(Rscratch, in_bytes(ConstantPoolCache::base_offset() + ConstantPoolCacheEntry::indices_offset()) + byte_no + 1, Rcache);
#else
  __ lbz(Rscratch, in_bytes(ConstantPoolCache::base_offset() + ConstantPoolCacheEntry::indices_offset()) + 7 - (byte_no + 1), Rcache);
#endif
  // Acquire by cmp-br-isync (see below).
  __ cmpdi(CCR0, Rscratch, (int)code);
  __ beq(CCR0, Lresolved);

  address entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_from_cache);
  __ li(R4_ARG2, code);
  __ call_VM(noreg, entry, R4_ARG2, true);

  // Update registers with resolved info.
  __ get_cache_and_index_at_bcp(Rcache, 1, index_size);
  __ b(Ldone);

  __ bind(Lresolved);
  __ isync(); // Order load wrt. succeeding loads.
  __ bind(Ldone);
}

// Load the constant pool cache entry for field accesses into registers.
// The Rcache and Rindex registers must be set before call.
// Input:
//   - Rcache, Rindex
// Output:
//   - Robj, Roffset, Rflags
void TemplateTable::load_field_cp_cache_entry(Register Robj,
                                              Register Rcache,
                                              Register Rindex /* unused on PPC64 */,
                                              Register Roffset,
                                              Register Rflags,
                                              bool is_static = false) {
  assert_different_registers(Rcache, Rflags, Roffset);
  // assert(Rindex == noreg, "parameter not used on PPC64");

  ByteSize cp_base_offset = ConstantPoolCache::base_offset();
  __ ld(Rflags, in_bytes(cp_base_offset) + in_bytes(ConstantPoolCacheEntry::flags_offset()), Rcache);
  __ ld(Roffset, in_bytes(cp_base_offset) + in_bytes(ConstantPoolCacheEntry::f2_offset()), Rcache);
  if (is_static) {
    __ ld(Robj, in_bytes(cp_base_offset) + in_bytes(ConstantPoolCacheEntry::f1_offset()), Rcache);
    __ ld(Robj, in_bytes(Klass::java_mirror_offset()), Robj);
    // Acquire not needed here. Following access has an address dependency on this value.
  }
}

// Load the constant pool cache entry for invokes into registers.
// Resolve if necessary.
//
// Input Registers:
//   - None, bcp is used, though.
//
// Return registers:
//   - Rmethod       (f1 field, or f2 if invokevirtual)
//   - Ritable_index (f2 field)
//   - Rflags        (flags field)
//
// Kills:
//   - R21
//
void TemplateTable::load_invoke_cp_cache_entry(int byte_no,
                                               Register Rmethod,
                                               Register Ritable_index,
                                               Register Rflags,
                                               bool is_invokevirtual,
                                               bool is_invokevfinal,
                                               bool is_invokedynamic) {

  ByteSize cp_base_offset = ConstantPoolCache::base_offset();
  // Determine constant pool cache field offsets.
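  // (Summary of the ConstantPoolCacheEntry convention relied upon below; this
  // mirrors the description in cpCache.hpp rather than adding anything new:
  //
  //   f1:    Method* or Klass*, depending on the invoke bytecode
  //   f2:    vtable/itable index, or the Method* itself for vfinal calls
  //   flags: tos state, parameter size, bits such as is_vfinal/has_appendix
  // )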
  assert(is_invokevirtual == (byte_no == f2_byte), "is_invokevirtual flag redundant");
  const int method_offset = in_bytes(cp_base_offset + (is_invokevirtual ? ConstantPoolCacheEntry::f2_offset() : ConstantPoolCacheEntry::f1_offset()));
  const int flags_offset  = in_bytes(cp_base_offset + ConstantPoolCacheEntry::flags_offset());
  // Access constant pool cache fields.
  const int index_offset  = in_bytes(cp_base_offset + ConstantPoolCacheEntry::f2_offset());

  Register Rcache = R21_tmp1; // Note: same register as R21_sender_SP.

  if (is_invokevfinal) {
    assert(Ritable_index == noreg, "register not used");
    // Already resolved.
    __ get_cache_and_index_at_bcp(Rcache, 1);
  } else {
    resolve_cache_and_index(byte_no, Rcache, R0, is_invokedynamic ? sizeof(u4) : sizeof(u2));
  }

  __ ld(Rmethod, method_offset, Rcache);
  __ ld(Rflags, flags_offset, Rcache);

  if (Ritable_index != noreg) {
    __ ld(Ritable_index, index_offset, Rcache);
  }
}

// ============================================================================
// Field access

// Volatile variables demand their effects be made known to all CPUs
// in order. Store buffers on most chips allow reads & writes to
// reorder; the JMM's ReadAfterWrite.java test fails in -Xint mode
// without some kind of memory barrier (i.e., it's not sufficient that
// the interpreter does not reorder volatile references, the hardware
// also must not reorder them).
//
// According to the new Java Memory Model (JMM):
// (1) All volatiles are serialized wrt. to each other. ALSO reads &
//     writes act as acquire & release, so:
// (2) A read cannot let unrelated NON-volatile memory refs that
//     happen after the read float up to before the read. It's OK for
//     non-volatile memory refs that happen before the volatile read to
//     float down below it.
// (3) Similarly, a volatile write cannot let unrelated NON-volatile
//     memory refs that happen BEFORE the write float down to after the
//     write. It's OK for non-volatile memory refs that happen after the
//     volatile write to float up before it.
//
// We only put in barriers around volatile refs (they are expensive),
// not _between_ memory refs (that would require us to track the
// flavor of the previous memory refs). Requirements (2) and (3)
// require some barriers before volatile stores and after volatile
// loads. These nearly cover requirement (1) but miss the
// volatile-store-volatile-load case. This final case is placed after
// volatile-stores although it could just as well go before
// volatile-loads.

// The registers cache and index are expected to be set before the call.
// Correct values of the cache and index registers are preserved.
// Kills:
//   Rcache (if has_tos)
//   Rscratch
void TemplateTable::jvmti_post_field_access(Register Rcache, Register Rscratch, bool is_static, bool has_tos) {

  assert_different_registers(Rcache, Rscratch);

  if (JvmtiExport::can_post_field_access()) {
    ByteSize cp_base_offset = ConstantPoolCache::base_offset();
    Label Lno_field_access_post;

    // Check if post field access is enabled.
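    // (Equivalent C sketch of the fast-path test emitted below: JVMTI keeps a
    // global count of field-access watches, so the interpreter only pays for
    // the expensive call_VM when that count is non-zero:
    //
    //   if (*JvmtiExport::get_field_access_count_addr() != 0) {
    //     InterpreterRuntime::post_field_access(thread, obj_or_null, cache_entry);
    //   }
    // )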
    int offs = __ load_const_optimized(Rscratch, JvmtiExport::get_field_access_count_addr(), R0, true);
    __ lwz(Rscratch, offs, Rscratch);

    __ cmpwi(CCR0, Rscratch, 0);
    __ beq(CCR0, Lno_field_access_post);

    // Post access enabled - do it!
    __ addi(Rcache, Rcache, in_bytes(cp_base_offset));
    if (is_static) {
      __ li(R17_tos, 0);
    } else {
      if (has_tos) {
        // The fast bytecode versions have the obj ptr in a register.
        // Thus, save the object pointer before call_VM() clobbers it:
        // put the object on tos, where GC wants it.
        __ push_ptr(R17_tos);
      } else {
        // Load top of stack (do not pop the value off the stack).
        __ ld(R17_tos, Interpreter::expr_offset_in_bytes(0), R15_esp);
      }
      __ verify_oop(R17_tos);
    }
    // tos:   object pointer or NULL if static
    // cache: cache entry pointer
    __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::post_field_access), R17_tos, Rcache);
    if (!is_static && has_tos) {
      // Restore object pointer.
      __ pop_ptr(R17_tos);
      __ verify_oop(R17_tos);
    } else {
      // Cache is still needed to get class or obj.
      __ get_cache_and_index_at_bcp(Rcache, 1);
    }

    __ align(32, 12);
    __ bind(Lno_field_access_post);
  }
}

// Kills R11_scratch1.
void TemplateTable::pop_and_check_object(Register Roop) {
  Register Rtmp = R11_scratch1;

  assert_different_registers(Rtmp, Roop);
  __ pop_ptr(Roop);
  // For field access must check obj.
  __ null_check_throw(Roop, -1, Rtmp);
  __ verify_oop(Roop);
}

// PPC64: implement volatile loads as fence-load-acquire.
void TemplateTable::getfield_or_static(int byte_no, bool is_static, RewriteControl rc) {
  transition(vtos, vtos);

  Label Lacquire, Lisync;

  const Register Rcache        = R3_ARG1,
                 Rclass_or_obj = R22_tmp2,
                 Roffset       = R23_tmp3,
                 Rflags        = R31,
                 Rbtable       = R5_ARG3,
                 Rbc           = R6_ARG4,
                 Rscratch      = R12_scratch2;

  static address field_branch_table[number_of_states],
                 static_branch_table[number_of_states];

  address* branch_table = (is_static || rc == may_not_rewrite) ? static_branch_table : field_branch_table;

  // Get field offset.
  resolve_cache_and_index(byte_no, Rcache, Rscratch, sizeof(u2));

  // JVMTI support
  jvmti_post_field_access(Rcache, Rscratch, is_static, false);

  // Load after possible GC.
  load_field_cp_cache_entry(Rclass_or_obj, Rcache, noreg, Roffset, Rflags, is_static);

  // Load pointer to branch table.
  __ load_const_optimized(Rbtable, (address)branch_table, Rscratch);

  // Get volatile flag.
  __ rldicl(Rscratch, Rflags, 64-ConstantPoolCacheEntry::is_volatile_shift, 63); // Extract volatile bit.
  // Note: sync is needed before volatile load on PPC64.

  // Check field type.
  __ rldicl(Rflags, Rflags, 64-ConstantPoolCacheEntry::tos_state_shift, 64-ConstantPoolCacheEntry::tos_state_bits);

#ifdef ASSERT
  Label LFlagInvalid;
  __ cmpldi(CCR0, Rflags, number_of_states);
  __ bge(CCR0, LFlagInvalid);
#endif

  // Load from branch table and dispatch (volatile case: one instruction ahead).
  __ sldi(Rflags, Rflags, LogBytesPerWord);
  __ cmpwi(CCR6, Rscratch, 1); // Volatile?
  if (support_IRIW_for_not_multiple_copy_atomic_cpu) {
    __ sldi(Rscratch, Rscratch, exact_log2(BytesPerInstWord)); // Volatile ? size of 1 instruction : 0.
2407 } 2408 __ ldx(Rbtable, Rbtable, Rflags); 2409 2410 // Get the obj from stack. 2411 if (!is_static) { 2412 pop_and_check_object(Rclass_or_obj); // Kills R11_scratch1. 2413 } else { 2414 __ verify_oop(Rclass_or_obj); 2415 } 2416 2417 if (support_IRIW_for_not_multiple_copy_atomic_cpu) { 2418 __ subf(Rbtable, Rscratch, Rbtable); // Point to volatile/non-volatile entry point. 2419 } 2420 __ mtctr(Rbtable); 2421 __ bctr(); 2422 2423 #ifdef ASSERT 2424 __ bind(LFlagInvalid); 2425 __ stop("got invalid flag", 0x654); 2426 #endif 2427 2428 if (!is_static && rc == may_not_rewrite) { 2429 // We reuse the code from is_static. It's jumped to via the table above. 2430 return; 2431 } 2432 2433 #ifdef ASSERT 2434 // __ bind(Lvtos); 2435 address pc_before_fence = __ pc(); 2436 __ fence(); // Volatile entry point (one instruction before non-volatile_entry point). 2437 assert(__ pc() - pc_before_fence == (ptrdiff_t)BytesPerInstWord, "must be single instruction"); 2438 assert(branch_table[vtos] == 0, "can't compute twice"); 2439 branch_table[vtos] = __ pc(); // non-volatile_entry point 2440 __ stop("vtos unexpected", 0x655); 2441 #endif 2442 2443 __ align(32, 28, 28); // Align load. 2444 // __ bind(Ldtos); 2445 __ fence(); // Volatile entry point (one instruction before non-volatile_entry point). 2446 assert(branch_table[dtos] == 0, "can't compute twice"); 2447 branch_table[dtos] = __ pc(); // non-volatile_entry point 2448 __ lfdx(F15_ftos, Rclass_or_obj, Roffset); 2449 __ push(dtos); 2450 if (!is_static && rc == may_rewrite) { 2451 patch_bytecode(Bytecodes::_fast_dgetfield, Rbc, Rscratch); 2452 } 2453 { 2454 Label acquire_double; 2455 __ beq(CCR6, acquire_double); // Volatile? 2456 __ dispatch_epilog(vtos, Bytecodes::length_for(bytecode())); 2457 2458 __ bind(acquire_double); 2459 __ fcmpu(CCR0, F15_ftos, F15_ftos); // Acquire by cmp-br-isync. 2460 __ beq_predict_taken(CCR0, Lisync); 2461 __ b(Lisync); // In case of NAN. 2462 } 2463 2464 __ align(32, 28, 28); // Align load. 2465 // __ bind(Lftos); 2466 __ fence(); // Volatile entry point (one instruction before non-volatile_entry point). 2467 assert(branch_table[ftos] == 0, "can't compute twice"); 2468 branch_table[ftos] = __ pc(); // non-volatile_entry point 2469 __ lfsx(F15_ftos, Rclass_or_obj, Roffset); 2470 __ push(ftos); 2471 if (!is_static && rc == may_rewrite) { 2472 patch_bytecode(Bytecodes::_fast_fgetfield, Rbc, Rscratch); 2473 } 2474 { 2475 Label acquire_float; 2476 __ beq(CCR6, acquire_float); // Volatile? 2477 __ dispatch_epilog(vtos, Bytecodes::length_for(bytecode())); 2478 2479 __ bind(acquire_float); 2480 __ fcmpu(CCR0, F15_ftos, F15_ftos); // Acquire by cmp-br-isync. 2481 __ beq_predict_taken(CCR0, Lisync); 2482 __ b(Lisync); // In case of NAN. 2483 } 2484 2485 __ align(32, 28, 28); // Align load. 2486 // __ bind(Litos); 2487 __ fence(); // Volatile entry point (one instruction before non-volatile_entry point). 2488 assert(branch_table[itos] == 0, "can't compute twice"); 2489 branch_table[itos] = __ pc(); // non-volatile_entry point 2490 __ lwax(R17_tos, Rclass_or_obj, Roffset); 2491 __ push(itos); 2492 if (!is_static && rc == may_rewrite) { 2493 patch_bytecode(Bytecodes::_fast_igetfield, Rbc, Rscratch); 2494 } 2495 __ beq(CCR6, Lacquire); // Volatile? 2496 __ dispatch_epilog(vtos, Bytecodes::length_for(bytecode())); 2497 2498 __ align(32, 28, 28); // Align load. 2499 // __ bind(Lltos); 2500 __ fence(); // Volatile entry point (one instruction before non-volatile_entry point). 
2501 assert(branch_table[ltos] == 0, "can't compute twice"); 2502 branch_table[ltos] = __ pc(); // non-volatile_entry point 2503 __ ldx(R17_tos, Rclass_or_obj, Roffset); 2504 __ push(ltos); 2505 if (!is_static && rc == may_rewrite) { 2506 patch_bytecode(Bytecodes::_fast_lgetfield, Rbc, Rscratch); 2507 } 2508 __ beq(CCR6, Lacquire); // Volatile? 2509 __ dispatch_epilog(vtos, Bytecodes::length_for(bytecode())); 2510 2511 __ align(32, 28, 28); // Align load. 2512 // __ bind(Lbtos); 2513 __ fence(); // Volatile entry point (one instruction before non-volatile_entry point). 2514 assert(branch_table[btos] == 0, "can't compute twice"); 2515 branch_table[btos] = __ pc(); // non-volatile_entry point 2516 __ lbzx(R17_tos, Rclass_or_obj, Roffset); 2517 __ extsb(R17_tos, R17_tos); 2518 __ push(btos); 2519 if (!is_static && rc == may_rewrite) { 2520 patch_bytecode(Bytecodes::_fast_bgetfield, Rbc, Rscratch); 2521 } 2522 __ beq(CCR6, Lacquire); // Volatile? 2523 __ dispatch_epilog(vtos, Bytecodes::length_for(bytecode())); 2524 2525 __ align(32, 28, 28); // Align load. 2526 // __ bind(Lctos); 2527 __ fence(); // Volatile entry point (one instruction before non-volatile_entry point). 2528 assert(branch_table[ctos] == 0, "can't compute twice"); 2529 branch_table[ctos] = __ pc(); // non-volatile_entry point 2530 __ lhzx(R17_tos, Rclass_or_obj, Roffset); 2531 __ push(ctos); 2532 if (!is_static && rc == may_rewrite) { 2533 patch_bytecode(Bytecodes::_fast_cgetfield, Rbc, Rscratch); 2534 } 2535 __ beq(CCR6, Lacquire); // Volatile? 2536 __ dispatch_epilog(vtos, Bytecodes::length_for(bytecode())); 2537 2538 __ align(32, 28, 28); // Align load. 2539 // __ bind(Lstos); 2540 __ fence(); // Volatile entry point (one instruction before non-volatile_entry point). 2541 assert(branch_table[stos] == 0, "can't compute twice"); 2542 branch_table[stos] = __ pc(); // non-volatile_entry point 2543 __ lhax(R17_tos, Rclass_or_obj, Roffset); 2544 __ push(stos); 2545 if (!is_static && rc == may_rewrite) { 2546 patch_bytecode(Bytecodes::_fast_sgetfield, Rbc, Rscratch); 2547 } 2548 __ beq(CCR6, Lacquire); // Volatile? 2549 __ dispatch_epilog(vtos, Bytecodes::length_for(bytecode())); 2550 2551 __ align(32, 28, 28); // Align load. 2552 // __ bind(Latos); 2553 __ fence(); // Volatile entry point (one instruction before non-volatile_entry point). 2554 assert(branch_table[atos] == 0, "can't compute twice"); 2555 branch_table[atos] = __ pc(); // non-volatile_entry point 2556 __ load_heap_oop(R17_tos, (RegisterOrConstant)Roffset, Rclass_or_obj); 2557 __ verify_oop(R17_tos); 2558 __ push(atos); 2559 //__ dcbt(R17_tos); // prefetch 2560 if (!is_static && rc == may_rewrite) { 2561 patch_bytecode(Bytecodes::_fast_agetfield, Rbc, Rscratch); 2562 } 2563 __ beq(CCR6, Lacquire); // Volatile? 2564 __ dispatch_epilog(vtos, Bytecodes::length_for(bytecode())); 2565 2566 __ align(32, 12); 2567 __ bind(Lacquire); 2568 __ twi_0(R17_tos); 2569 __ bind(Lisync); 2570 __ isync(); // acquire 2571 2572 #ifdef ASSERT 2573 for (int i = 0; i<number_of_states; ++i) { 2574 assert(branch_table[i], "get initialization"); 2575 //tty->print_cr("get: %s_branch_table[%d] = 0x%llx (opcode 0x%llx)", 2576 // is_static ? 
"static" : "field", i, branch_table[i], *((unsigned int*)branch_table[i])); 2577 } 2578 #endif 2579 } 2580 2581 void TemplateTable::getfield(int byte_no) { 2582 getfield_or_static(byte_no, false); 2583 } 2584 2585 void TemplateTable::nofast_getfield(int byte_no) { 2586 getfield_or_static(byte_no, false, may_not_rewrite); 2587 } 2588 2589 void TemplateTable::getstatic(int byte_no) { 2590 getfield_or_static(byte_no, true); 2591 } 2592 2593 // The registers cache and index expected to be set before call. 2594 // The function may destroy various registers, just not the cache and index registers. 2595 void TemplateTable::jvmti_post_field_mod(Register Rcache, Register Rscratch, bool is_static) { 2596 2597 assert_different_registers(Rcache, Rscratch, R6_ARG4); 2598 2599 if (JvmtiExport::can_post_field_modification()) { 2600 Label Lno_field_mod_post; 2601 2602 // Check if post field access in enabled. 2603 int offs = __ load_const_optimized(Rscratch, JvmtiExport::get_field_modification_count_addr(), R0, true); 2604 __ lwz(Rscratch, offs, Rscratch); 2605 2606 __ cmpwi(CCR0, Rscratch, 0); 2607 __ beq(CCR0, Lno_field_mod_post); 2608 2609 // Do the post 2610 ByteSize cp_base_offset = ConstantPoolCache::base_offset(); 2611 const Register Robj = Rscratch; 2612 2613 __ addi(Rcache, Rcache, in_bytes(cp_base_offset)); 2614 if (is_static) { 2615 // Life is simple. Null out the object pointer. 2616 __ li(Robj, 0); 2617 } else { 2618 // In case of the fast versions, value lives in registers => put it back on tos. 2619 int offs = Interpreter::expr_offset_in_bytes(0); 2620 Register base = R15_esp; 2621 switch(bytecode()) { 2622 case Bytecodes::_fast_aputfield: __ push_ptr(); offs+= Interpreter::stackElementSize; break; 2623 case Bytecodes::_fast_iputfield: // Fall through 2624 case Bytecodes::_fast_bputfield: // Fall through 2625 case Bytecodes::_fast_cputfield: // Fall through 2626 case Bytecodes::_fast_sputfield: __ push_i(); offs+= Interpreter::stackElementSize; break; 2627 case Bytecodes::_fast_lputfield: __ push_l(); offs+=2*Interpreter::stackElementSize; break; 2628 case Bytecodes::_fast_fputfield: __ push_f(); offs+= Interpreter::stackElementSize; break; 2629 case Bytecodes::_fast_dputfield: __ push_d(); offs+=2*Interpreter::stackElementSize; break; 2630 default: { 2631 offs = 0; 2632 base = Robj; 2633 const Register Rflags = Robj; 2634 Label is_one_slot; 2635 // Life is harder. The stack holds the value on top, followed by the 2636 // object. We don't know the size of the value, though; it could be 2637 // one or two words depending on its type. As a result, we must find 2638 // the type to determine where the object is. 
2639 __ ld(Rflags, in_bytes(ConstantPoolCacheEntry::flags_offset()), Rcache); // Big Endian 2640 __ rldicl(Rflags, Rflags, 64-ConstantPoolCacheEntry::tos_state_shift, 64-ConstantPoolCacheEntry::tos_state_bits); 2641 2642 __ cmpwi(CCR0, Rflags, ltos); 2643 __ cmpwi(CCR1, Rflags, dtos); 2644 __ addi(base, R15_esp, Interpreter::expr_offset_in_bytes(1)); 2645 __ crnor(CCR0, Assembler::equal, CCR1, Assembler::equal); 2646 __ beq(CCR0, is_one_slot); 2647 __ addi(base, R15_esp, Interpreter::expr_offset_in_bytes(2)); 2648 __ bind(is_one_slot); 2649 break; 2650 } 2651 } 2652 __ ld(Robj, offs, base); 2653 __ verify_oop(Robj); 2654 } 2655 2656 __ addi(R6_ARG4, R15_esp, Interpreter::expr_offset_in_bytes(0)); 2657 __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::post_field_modification), Robj, Rcache, R6_ARG4); 2658 __ get_cache_and_index_at_bcp(Rcache, 1); 2659 2660 // In case of the fast versions, value lives in registers => put it back on tos. 2661 switch(bytecode()) { 2662 case Bytecodes::_fast_aputfield: __ pop_ptr(); break; 2663 case Bytecodes::_fast_iputfield: // Fall through 2664 case Bytecodes::_fast_bputfield: // Fall through 2665 case Bytecodes::_fast_cputfield: // Fall through 2666 case Bytecodes::_fast_sputfield: __ pop_i(); break; 2667 case Bytecodes::_fast_lputfield: __ pop_l(); break; 2668 case Bytecodes::_fast_fputfield: __ pop_f(); break; 2669 case Bytecodes::_fast_dputfield: __ pop_d(); break; 2670 default: break; // Nothin' to do. 2671 } 2672 2673 __ align(32, 12); 2674 __ bind(Lno_field_mod_post); 2675 } 2676 } 2677 2678 // PPC64: implement volatile stores as release-store (return bytecode contains an additional release). 2679 void TemplateTable::putfield_or_static(int byte_no, bool is_static, RewriteControl rc) { 2680 Label Lvolatile; 2681 2682 const Register Rcache = R5_ARG3, // Do not use ARG1/2 (causes trouble in jvmti_post_field_mod). 2683 Rclass_or_obj = R31, // Needs to survive C call. 2684 Roffset = R22_tmp2, // Needs to survive C call. 2685 Rflags = R3_ARG1, 2686 Rbtable = R4_ARG2, 2687 Rscratch = R11_scratch1, 2688 Rscratch2 = R12_scratch2, 2689 Rscratch3 = R6_ARG4, 2690 Rbc = Rscratch3; 2691 const ConditionRegister CR_is_vol = CCR2; // Non-volatile condition register (survives runtime call in do_oop_store). 2692 2693 static address field_rw_branch_table[number_of_states], 2694 field_norw_branch_table[number_of_states], 2695 static_branch_table[number_of_states]; 2696 2697 address* branch_table = is_static ? static_branch_table : 2698 (rc == may_rewrite ? field_rw_branch_table : field_norw_branch_table); 2699 2700 // Stack (grows up): 2701 // value 2702 // obj 2703 2704 // Load the field offset. 2705 resolve_cache_and_index(byte_no, Rcache, Rscratch, sizeof(u2)); 2706 jvmti_post_field_mod(Rcache, Rscratch, is_static); 2707 load_field_cp_cache_entry(Rclass_or_obj, Rcache, noreg, Roffset, Rflags, is_static); 2708 2709 // Load pointer to branch table. 2710 __ load_const_optimized(Rbtable, (address)branch_table, Rscratch); 2711 2712 // Get volatile flag. 2713 __ rldicl(Rscratch, Rflags, 64-ConstantPoolCacheEntry::is_volatile_shift, 63); // Extract volatile bit. 2714 2715 // Check the field type. 2716 __ rldicl(Rflags, Rflags, 64-ConstantPoolCacheEntry::tos_state_shift, 64-ConstantPoolCacheEntry::tos_state_bits); 2717 2718 #ifdef ASSERT 2719 Label LFlagInvalid; 2720 __ cmpldi(CCR0, Rflags, number_of_states); 2721 __ bge(CCR0, LFlagInvalid); 2722 #endif 2723 2724 // Load from branch table and dispatch (volatile case: one instruction ahead). 
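// (The same table trick as in getfield_or_static: every type handler below is
// laid out so that its volatile entry point is exactly one instruction - a
// release() - before the non-volatile entry point. Dispatch is then just:
//
//   entry = branch_table[tos_state];             // non-volatile entry
//   if (is_volatile) entry -= BytesPerInstWord;  // back up onto the release()
//   goto *entry;
// )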
2725 __ sldi(Rflags, Rflags, LogBytesPerWord); 2726 if (!support_IRIW_for_not_multiple_copy_atomic_cpu) { 2727 __ cmpwi(CR_is_vol, Rscratch, 1); // Volatile? 2728 } 2729 __ sldi(Rscratch, Rscratch, exact_log2(BytesPerInstWord)); // Volatile? size of instruction 1 : 0. 2730 __ ldx(Rbtable, Rbtable, Rflags); 2731 2732 __ subf(Rbtable, Rscratch, Rbtable); // Point to volatile/non-volatile entry point. 2733 __ mtctr(Rbtable); 2734 __ bctr(); 2735 2736 #ifdef ASSERT 2737 __ bind(LFlagInvalid); 2738 __ stop("got invalid flag", 0x656); 2739 2740 // __ bind(Lvtos); 2741 address pc_before_release = __ pc(); 2742 __ release(); // Volatile entry point (one instruction before non-volatile_entry point). 2743 assert(__ pc() - pc_before_release == (ptrdiff_t)BytesPerInstWord, "must be single instruction"); 2744 assert(branch_table[vtos] == 0, "can't compute twice"); 2745 branch_table[vtos] = __ pc(); // non-volatile_entry point 2746 __ stop("vtos unexpected", 0x657); 2747 #endif 2748 2749 __ align(32, 28, 28); // Align pop. 2750 // __ bind(Ldtos); 2751 __ release(); // Volatile entry point (one instruction before non-volatile_entry point). 2752 assert(branch_table[dtos] == 0, "can't compute twice"); 2753 branch_table[dtos] = __ pc(); // non-volatile_entry point 2754 __ pop(dtos); 2755 if (!is_static) { 2756 pop_and_check_object(Rclass_or_obj); // Kills R11_scratch1. 2757 } 2758 __ stfdx(F15_ftos, Rclass_or_obj, Roffset); 2759 if (!is_static && rc == may_rewrite) { 2760 patch_bytecode(Bytecodes::_fast_dputfield, Rbc, Rscratch, true, byte_no); 2761 } 2762 if (!support_IRIW_for_not_multiple_copy_atomic_cpu) { 2763 __ beq(CR_is_vol, Lvolatile); // Volatile? 2764 } 2765 __ dispatch_epilog(vtos, Bytecodes::length_for(bytecode())); 2766 2767 __ align(32, 28, 28); // Align pop. 2768 // __ bind(Lftos); 2769 __ release(); // Volatile entry point (one instruction before non-volatile_entry point). 2770 assert(branch_table[ftos] == 0, "can't compute twice"); 2771 branch_table[ftos] = __ pc(); // non-volatile_entry point 2772 __ pop(ftos); 2773 if (!is_static) { pop_and_check_object(Rclass_or_obj); } // Kills R11_scratch1. 2774 __ stfsx(F15_ftos, Rclass_or_obj, Roffset); 2775 if (!is_static && rc == may_rewrite) { 2776 patch_bytecode(Bytecodes::_fast_fputfield, Rbc, Rscratch, true, byte_no); 2777 } 2778 if (!support_IRIW_for_not_multiple_copy_atomic_cpu) { 2779 __ beq(CR_is_vol, Lvolatile); // Volatile? 2780 } 2781 __ dispatch_epilog(vtos, Bytecodes::length_for(bytecode())); 2782 2783 __ align(32, 28, 28); // Align pop. 2784 // __ bind(Litos); 2785 __ release(); // Volatile entry point (one instruction before non-volatile_entry point). 2786 assert(branch_table[itos] == 0, "can't compute twice"); 2787 branch_table[itos] = __ pc(); // non-volatile_entry point 2788 __ pop(itos); 2789 if (!is_static) { pop_and_check_object(Rclass_or_obj); } // Kills R11_scratch1. 2790 __ stwx(R17_tos, Rclass_or_obj, Roffset); 2791 if (!is_static && rc == may_rewrite) { 2792 patch_bytecode(Bytecodes::_fast_iputfield, Rbc, Rscratch, true, byte_no); 2793 } 2794 if (!support_IRIW_for_not_multiple_copy_atomic_cpu) { 2795 __ beq(CR_is_vol, Lvolatile); // Volatile? 2796 } 2797 __ dispatch_epilog(vtos, Bytecodes::length_for(bytecode())); 2798 2799 __ align(32, 28, 28); // Align pop. 2800 // __ bind(Lltos); 2801 __ release(); // Volatile entry point (one instruction before non-volatile_entry point). 
2802 assert(branch_table[ltos] == 0, "can't compute twice"); 2803 branch_table[ltos] = __ pc(); // non-volatile_entry point 2804 __ pop(ltos); 2805 if (!is_static) { pop_and_check_object(Rclass_or_obj); } // Kills R11_scratch1. 2806 __ stdx(R17_tos, Rclass_or_obj, Roffset); 2807 if (!is_static && rc == may_rewrite) { 2808 patch_bytecode(Bytecodes::_fast_lputfield, Rbc, Rscratch, true, byte_no); 2809 } 2810 if (!support_IRIW_for_not_multiple_copy_atomic_cpu) { 2811 __ beq(CR_is_vol, Lvolatile); // Volatile? 2812 } 2813 __ dispatch_epilog(vtos, Bytecodes::length_for(bytecode())); 2814 2815 __ align(32, 28, 28); // Align pop. 2816 // __ bind(Lbtos); 2817 __ release(); // Volatile entry point (one instruction before non-volatile_entry point). 2818 assert(branch_table[btos] == 0, "can't compute twice"); 2819 branch_table[btos] = __ pc(); // non-volatile_entry point 2820 __ pop(btos); 2821 if (!is_static) { pop_and_check_object(Rclass_or_obj); } // Kills R11_scratch1. 2822 __ stbx(R17_tos, Rclass_or_obj, Roffset); 2823 if (!is_static && rc == may_rewrite) { 2824 patch_bytecode(Bytecodes::_fast_bputfield, Rbc, Rscratch, true, byte_no); 2825 } 2826 if (!support_IRIW_for_not_multiple_copy_atomic_cpu) { 2827 __ beq(CR_is_vol, Lvolatile); // Volatile? 2828 } 2829 __ dispatch_epilog(vtos, Bytecodes::length_for(bytecode())); 2830 2831 __ align(32, 28, 28); // Align pop. 2832 // __ bind(Lctos); 2833 __ release(); // Volatile entry point (one instruction before non-volatile_entry point). 2834 assert(branch_table[ctos] == 0, "can't compute twice"); 2835 branch_table[ctos] = __ pc(); // non-volatile_entry point 2836 __ pop(ctos); 2837 if (!is_static) { pop_and_check_object(Rclass_or_obj); } // Kills R11_scratch1.. 2838 __ sthx(R17_tos, Rclass_or_obj, Roffset); 2839 if (!is_static && rc == may_rewrite) { 2840 patch_bytecode(Bytecodes::_fast_cputfield, Rbc, Rscratch, true, byte_no); 2841 } 2842 if (!support_IRIW_for_not_multiple_copy_atomic_cpu) { 2843 __ beq(CR_is_vol, Lvolatile); // Volatile? 2844 } 2845 __ dispatch_epilog(vtos, Bytecodes::length_for(bytecode())); 2846 2847 __ align(32, 28, 28); // Align pop. 2848 // __ bind(Lstos); 2849 __ release(); // Volatile entry point (one instruction before non-volatile_entry point). 2850 assert(branch_table[stos] == 0, "can't compute twice"); 2851 branch_table[stos] = __ pc(); // non-volatile_entry point 2852 __ pop(stos); 2853 if (!is_static) { pop_and_check_object(Rclass_or_obj); } // Kills R11_scratch1. 2854 __ sthx(R17_tos, Rclass_or_obj, Roffset); 2855 if (!is_static && rc == may_rewrite) { 2856 patch_bytecode(Bytecodes::_fast_sputfield, Rbc, Rscratch, true, byte_no); 2857 } 2858 if (!support_IRIW_for_not_multiple_copy_atomic_cpu) { 2859 __ beq(CR_is_vol, Lvolatile); // Volatile? 2860 } 2861 __ dispatch_epilog(vtos, Bytecodes::length_for(bytecode())); 2862 2863 __ align(32, 28, 28); // Align pop. 2864 // __ bind(Latos); 2865 __ release(); // Volatile entry point (one instruction before non-volatile_entry point). 
2866 assert(branch_table[atos] == 0, "can't compute twice"); 2867 branch_table[atos] = __ pc(); // non-volatile_entry point 2868 __ pop(atos); 2869 if (!is_static) { pop_and_check_object(Rclass_or_obj); } // kills R11_scratch1 2870 do_oop_store(_masm, Rclass_or_obj, Roffset, R17_tos, Rscratch, Rscratch2, Rscratch3, _bs->kind(), false /* precise */, true /* check null */); 2871 if (!is_static && rc == may_rewrite) { 2872 patch_bytecode(Bytecodes::_fast_aputfield, Rbc, Rscratch, true, byte_no); 2873 } 2874 if (!support_IRIW_for_not_multiple_copy_atomic_cpu) { 2875 __ beq(CR_is_vol, Lvolatile); // Volatile? 2876 __ dispatch_epilog(vtos, Bytecodes::length_for(bytecode())); 2877 2878 __ align(32, 12); 2879 __ bind(Lvolatile); 2880 __ fence(); 2881 } 2882 // fallthru: __ b(Lexit); 2883 2884 #ifdef ASSERT 2885 for (int i = 0; i<number_of_states; ++i) { 2886 assert(branch_table[i], "put initialization"); 2887 //tty->print_cr("put: %s_branch_table[%d] = 0x%llx (opcode 0x%llx)", 2888 // is_static ? "static" : "field", i, branch_table[i], *((unsigned int*)branch_table[i])); 2889 } 2890 #endif 2891 } 2892 2893 void TemplateTable::putfield(int byte_no) { 2894 putfield_or_static(byte_no, false); 2895 } 2896 2897 void TemplateTable::nofast_putfield(int byte_no) { 2898 putfield_or_static(byte_no, false, may_not_rewrite); 2899 } 2900 2901 void TemplateTable::putstatic(int byte_no) { 2902 putfield_or_static(byte_no, true); 2903 } 2904 2905 // See SPARC. On PPC64, we have a different jvmti_post_field_mod which does the job. 2906 void TemplateTable::jvmti_post_fast_field_mod() { 2907 __ should_not_reach_here(); 2908 } 2909 2910 void TemplateTable::fast_storefield(TosState state) { 2911 transition(state, vtos); 2912 2913 const Register Rcache = R5_ARG3, // Do not use ARG1/2 (causes trouble in jvmti_post_field_mod). 2914 Rclass_or_obj = R31, // Needs to survive C call. 2915 Roffset = R22_tmp2, // Needs to survive C call. 2916 Rflags = R3_ARG1, 2917 Rscratch = R11_scratch1, 2918 Rscratch2 = R12_scratch2, 2919 Rscratch3 = R4_ARG2; 2920 const ConditionRegister CR_is_vol = CCR2; // Non-volatile condition register (survives runtime call in do_oop_store). 2921 2922 // Constant pool already resolved => Load flags and offset of field. 2923 __ get_cache_and_index_at_bcp(Rcache, 1); 2924 jvmti_post_field_mod(Rcache, Rscratch, false /* not static */); 2925 load_field_cp_cache_entry(noreg, Rcache, noreg, Roffset, Rflags, false); 2926 2927 // Get the obj and the final store addr. 2928 pop_and_check_object(Rclass_or_obj); // Kills R11_scratch1. 2929 2930 // Get volatile flag. 2931 __ rldicl_(Rscratch, Rflags, 64-ConstantPoolCacheEntry::is_volatile_shift, 63); // Extract volatile bit. 2932 if (!support_IRIW_for_not_multiple_copy_atomic_cpu) { __ cmpdi(CR_is_vol, Rscratch, 1); } 2933 { 2934 Label LnotVolatile; 2935 __ beq(CCR0, LnotVolatile); 2936 __ release(); 2937 __ align(32, 12); 2938 __ bind(LnotVolatile); 2939 } 2940 2941 // Do the store and fencing. 2942 switch(bytecode()) { 2943 case Bytecodes::_fast_aputfield: 2944 // Store into the field. 
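// (Sketch of the volatile-store protocol this path follows: a release() was
// already issued above for volatile fields, the store itself comes next, and
// a trailing fence() orders a volatile store against a later volatile load
// when support_IRIW_for_not_multiple_copy_atomic_cpu is not set:
//
//   release();   // before the store (LoadStore|StoreStore)
//   store();     // plain store; oop stores go through the GC barrier helper
//   fence();     // volatile only, emitted at LVolatile below
// )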
2945 do_oop_store(_masm, Rclass_or_obj, Roffset, R17_tos, Rscratch, Rscratch2, Rscratch3, _bs->kind(), false /* precise */, true /* check null */); 2946 break; 2947 2948 case Bytecodes::_fast_iputfield: 2949 __ stwx(R17_tos, Rclass_or_obj, Roffset); 2950 break; 2951 2952 case Bytecodes::_fast_lputfield: 2953 __ stdx(R17_tos, Rclass_or_obj, Roffset); 2954 break; 2955 2956 case Bytecodes::_fast_bputfield: 2957 __ stbx(R17_tos, Rclass_or_obj, Roffset); 2958 break; 2959 2960 case Bytecodes::_fast_cputfield: 2961 case Bytecodes::_fast_sputfield: 2962 __ sthx(R17_tos, Rclass_or_obj, Roffset); 2963 break; 2964 2965 case Bytecodes::_fast_fputfield: 2966 __ stfsx(F15_ftos, Rclass_or_obj, Roffset); 2967 break; 2968 2969 case Bytecodes::_fast_dputfield: 2970 __ stfdx(F15_ftos, Rclass_or_obj, Roffset); 2971 break; 2972 2973 default: ShouldNotReachHere(); 2974 } 2975 2976 if (!support_IRIW_for_not_multiple_copy_atomic_cpu) { 2977 Label LVolatile; 2978 __ beq(CR_is_vol, LVolatile); 2979 __ dispatch_epilog(vtos, Bytecodes::length_for(bytecode())); 2980 2981 __ align(32, 12); 2982 __ bind(LVolatile); 2983 __ fence(); 2984 } 2985 } 2986 2987 void TemplateTable::fast_accessfield(TosState state) { 2988 transition(atos, state); 2989 2990 Label LisVolatile; 2991 ByteSize cp_base_offset = ConstantPoolCache::base_offset(); 2992 2993 const Register Rcache = R3_ARG1, 2994 Rclass_or_obj = R17_tos, 2995 Roffset = R22_tmp2, 2996 Rflags = R23_tmp3, 2997 Rscratch = R12_scratch2; 2998 2999 // Constant pool already resolved. Get the field offset. 3000 __ get_cache_and_index_at_bcp(Rcache, 1); 3001 load_field_cp_cache_entry(noreg, Rcache, noreg, Roffset, Rflags, false); 3002 3003 // JVMTI support 3004 jvmti_post_field_access(Rcache, Rscratch, false, true); 3005 3006 // Get the load address. 3007 __ null_check_throw(Rclass_or_obj, -1, Rscratch); 3008 3009 // Get volatile flag. 3010 __ rldicl_(Rscratch, Rflags, 64-ConstantPoolCacheEntry::is_volatile_shift, 63); // Extract volatile bit. 
3011 __ bne(CCR0, LisVolatile); 3012 3013 switch(bytecode()) { 3014 case Bytecodes::_fast_agetfield: 3015 { 3016 __ load_heap_oop(R17_tos, (RegisterOrConstant)Roffset, Rclass_or_obj); 3017 __ verify_oop(R17_tos); 3018 __ dispatch_epilog(state, Bytecodes::length_for(bytecode())); 3019 3020 __ bind(LisVolatile); 3021 if (support_IRIW_for_not_multiple_copy_atomic_cpu) { __ fence(); } 3022 __ load_heap_oop(R17_tos, (RegisterOrConstant)Roffset, Rclass_or_obj); 3023 __ verify_oop(R17_tos); 3024 __ twi_0(R17_tos); 3025 __ isync(); 3026 break; 3027 } 3028 case Bytecodes::_fast_igetfield: 3029 { 3030 __ lwax(R17_tos, Rclass_or_obj, Roffset); 3031 __ dispatch_epilog(state, Bytecodes::length_for(bytecode())); 3032 3033 __ bind(LisVolatile); 3034 if (support_IRIW_for_not_multiple_copy_atomic_cpu) { __ fence(); } 3035 __ lwax(R17_tos, Rclass_or_obj, Roffset); 3036 __ twi_0(R17_tos); 3037 __ isync(); 3038 break; 3039 } 3040 case Bytecodes::_fast_lgetfield: 3041 { 3042 __ ldx(R17_tos, Rclass_or_obj, Roffset); 3043 __ dispatch_epilog(state, Bytecodes::length_for(bytecode())); 3044 3045 __ bind(LisVolatile); 3046 if (support_IRIW_for_not_multiple_copy_atomic_cpu) { __ fence(); } 3047 __ ldx(R17_tos, Rclass_or_obj, Roffset); 3048 __ twi_0(R17_tos); 3049 __ isync(); 3050 break; 3051 } 3052 case Bytecodes::_fast_bgetfield: 3053 { 3054 __ lbzx(R17_tos, Rclass_or_obj, Roffset); 3055 __ extsb(R17_tos, R17_tos); 3056 __ dispatch_epilog(state, Bytecodes::length_for(bytecode())); 3057 3058 __ bind(LisVolatile); 3059 if (support_IRIW_for_not_multiple_copy_atomic_cpu) { __ fence(); } 3060 __ lbzx(R17_tos, Rclass_or_obj, Roffset); 3061 __ twi_0(R17_tos); 3062 __ extsb(R17_tos, R17_tos); 3063 __ isync(); 3064 break; 3065 } 3066 case Bytecodes::_fast_cgetfield: 3067 { 3068 __ lhzx(R17_tos, Rclass_or_obj, Roffset); 3069 __ dispatch_epilog(state, Bytecodes::length_for(bytecode())); 3070 3071 __ bind(LisVolatile); 3072 if (support_IRIW_for_not_multiple_copy_atomic_cpu) { __ fence(); } 3073 __ lhzx(R17_tos, Rclass_or_obj, Roffset); 3074 __ twi_0(R17_tos); 3075 __ isync(); 3076 break; 3077 } 3078 case Bytecodes::_fast_sgetfield: 3079 { 3080 __ lhax(R17_tos, Rclass_or_obj, Roffset); 3081 __ dispatch_epilog(state, Bytecodes::length_for(bytecode())); 3082 3083 __ bind(LisVolatile); 3084 if (support_IRIW_for_not_multiple_copy_atomic_cpu) { __ fence(); } 3085 __ lhax(R17_tos, Rclass_or_obj, Roffset); 3086 __ twi_0(R17_tos); 3087 __ isync(); 3088 break; 3089 } 3090 case Bytecodes::_fast_fgetfield: 3091 { 3092 __ lfsx(F15_ftos, Rclass_or_obj, Roffset); 3093 __ dispatch_epilog(state, Bytecodes::length_for(bytecode())); 3094 3095 __ bind(LisVolatile); 3096 Label Ldummy; 3097 if (support_IRIW_for_not_multiple_copy_atomic_cpu) { __ fence(); } 3098 __ lfsx(F15_ftos, Rclass_or_obj, Roffset); 3099 __ fcmpu(CCR0, F15_ftos, F15_ftos); // Acquire by cmp-br-isync. 3100 __ bne_predict_not_taken(CCR0, Ldummy); 3101 __ bind(Ldummy); 3102 __ isync(); 3103 break; 3104 } 3105 case Bytecodes::_fast_dgetfield: 3106 { 3107 __ lfdx(F15_ftos, Rclass_or_obj, Roffset); 3108 __ dispatch_epilog(state, Bytecodes::length_for(bytecode())); 3109 3110 __ bind(LisVolatile); 3111 Label Ldummy; 3112 if (support_IRIW_for_not_multiple_copy_atomic_cpu) { __ fence(); } 3113 __ lfdx(F15_ftos, Rclass_or_obj, Roffset); 3114 __ fcmpu(CCR0, F15_ftos, F15_ftos); // Acquire by cmp-br-isync. 
3115 __ bne_predict_not_taken(CCR0, Ldummy); 3116 __ bind(Ldummy); 3117 __ isync(); 3118 break; 3119 } 3120 default: ShouldNotReachHere(); 3121 } 3122 } 3123 3124 void TemplateTable::fast_xaccess(TosState state) { 3125 transition(vtos, state); 3126 3127 Label LisVolatile; 3128 ByteSize cp_base_offset = ConstantPoolCache::base_offset(); 3129 const Register Rcache = R3_ARG1, 3130 Rclass_or_obj = R17_tos, 3131 Roffset = R22_tmp2, 3132 Rflags = R23_tmp3, 3133 Rscratch = R12_scratch2; 3134 3135 __ ld(Rclass_or_obj, 0, R18_locals); 3136 3137 // Constant pool already resolved. Get the field offset. 3138 __ get_cache_and_index_at_bcp(Rcache, 2); 3139 load_field_cp_cache_entry(noreg, Rcache, noreg, Roffset, Rflags, false); 3140 3141 // JVMTI support not needed, since we switch back to single bytecode as soon as debugger attaches. 3142 3143 // Needed to report exception at the correct bcp. 3144 __ addi(R14_bcp, R14_bcp, 1); 3145 3146 // Get the load address. 3147 __ null_check_throw(Rclass_or_obj, -1, Rscratch); 3148 3149 // Get volatile flag. 3150 __ rldicl_(Rscratch, Rflags, 64-ConstantPoolCacheEntry::is_volatile_shift, 63); // Extract volatile bit. 3151 __ bne(CCR0, LisVolatile); 3152 3153 switch(state) { 3154 case atos: 3155 { 3156 __ load_heap_oop(R17_tos, (RegisterOrConstant)Roffset, Rclass_or_obj); 3157 __ verify_oop(R17_tos); 3158 __ dispatch_epilog(state, Bytecodes::length_for(bytecode()) - 1); // Undo bcp increment. 3159 3160 __ bind(LisVolatile); 3161 if (support_IRIW_for_not_multiple_copy_atomic_cpu) { __ fence(); } 3162 __ load_heap_oop(R17_tos, (RegisterOrConstant)Roffset, Rclass_or_obj); 3163 __ verify_oop(R17_tos); 3164 __ twi_0(R17_tos); 3165 __ isync(); 3166 break; 3167 } 3168 case itos: 3169 { 3170 __ lwax(R17_tos, Rclass_or_obj, Roffset); 3171 __ dispatch_epilog(state, Bytecodes::length_for(bytecode()) - 1); // Undo bcp increment. 3172 3173 __ bind(LisVolatile); 3174 if (support_IRIW_for_not_multiple_copy_atomic_cpu) { __ fence(); } 3175 __ lwax(R17_tos, Rclass_or_obj, Roffset); 3176 __ twi_0(R17_tos); 3177 __ isync(); 3178 break; 3179 } 3180 case ftos: 3181 { 3182 __ lfsx(F15_ftos, Rclass_or_obj, Roffset); 3183 __ dispatch_epilog(state, Bytecodes::length_for(bytecode()) - 1); // Undo bcp increment. 3184 3185 __ bind(LisVolatile); 3186 Label Ldummy; 3187 if (support_IRIW_for_not_multiple_copy_atomic_cpu) { __ fence(); } 3188 __ lfsx(F15_ftos, Rclass_or_obj, Roffset); 3189 __ fcmpu(CCR0, F15_ftos, F15_ftos); // Acquire by cmp-br-isync. 3190 __ bne_predict_not_taken(CCR0, Ldummy); 3191 __ bind(Ldummy); 3192 __ isync(); 3193 break; 3194 } 3195 default: ShouldNotReachHere(); 3196 } 3197 __ addi(R14_bcp, R14_bcp, -1); 3198 } 3199 3200 // ============================================================================ 3201 // Calls 3202 3203 // Common code for invoke 3204 // 3205 // Input: 3206 // - byte_no 3207 // 3208 // Output: 3209 // - Rmethod: The method to invoke next. 3210 // - Rret_addr: The return address to return to. 3211 // - Rindex: MethodType (invokehandle) or CallSite obj (invokedynamic) 3212 // - Rrecv: Cache for "this" pointer, might be noreg if static call. 3213 // - Rflags: Method flags from const pool cache. 3214 // 3215 // Kills: 3216 // - Rscratch1 3217 // 3218 void TemplateTable::prepare_invoke(int byte_no, 3219 Register Rmethod, // linked method (or i-klass) 3220 Register Rret_addr,// return address 3221 Register Rindex, // itable index, MethodType, etc. 3222 Register Rrecv, // If caller wants to see it. 3223 Register Rflags, // If caller wants to test it. 
3224 Register Rscratch 3225 ) { 3226 // Determine flags. 3227 const Bytecodes::Code code = bytecode(); 3228 const bool is_invokeinterface = code == Bytecodes::_invokeinterface; 3229 const bool is_invokedynamic = code == Bytecodes::_invokedynamic; 3230 const bool is_invokehandle = code == Bytecodes::_invokehandle; 3231 const bool is_invokevirtual = code == Bytecodes::_invokevirtual; 3232 const bool is_invokespecial = code == Bytecodes::_invokespecial; 3233 const bool load_receiver = (Rrecv != noreg); 3234 assert(load_receiver == (code != Bytecodes::_invokestatic && code != Bytecodes::_invokedynamic), ""); 3235 3236 assert_different_registers(Rmethod, Rindex, Rflags, Rscratch); 3237 assert_different_registers(Rmethod, Rrecv, Rflags, Rscratch); 3238 assert_different_registers(Rret_addr, Rscratch); 3239 3240 load_invoke_cp_cache_entry(byte_no, Rmethod, Rindex, Rflags, is_invokevirtual, false, is_invokedynamic); 3241 3242 // Saving of SP done in call_from_interpreter. 3243 3244 // Maybe push "appendix" to arguments. 3245 if (is_invokedynamic || is_invokehandle) { 3246 Label Ldone; 3247 __ rldicl_(R0, Rflags, 64-ConstantPoolCacheEntry::has_appendix_shift, 63); 3248 __ beq(CCR0, Ldone); 3249 // Push "appendix" (MethodType, CallSite, etc.). 3250 // This must be done before we get the receiver, 3251 // since the parameter_size includes it. 3252 __ load_resolved_reference_at_index(Rscratch, Rindex); 3253 __ verify_oop(Rscratch); 3254 __ push_ptr(Rscratch); 3255 __ bind(Ldone); 3256 } 3257 3258 // Load receiver if needed (after appendix is pushed so parameter size is correct). 3259 if (load_receiver) { 3260 const Register Rparam_count = Rscratch; 3261 __ andi(Rparam_count, Rflags, ConstantPoolCacheEntry::parameter_size_mask); 3262 __ load_receiver(Rparam_count, Rrecv); 3263 __ verify_oop(Rrecv); 3264 } 3265 3266 // Get return address. 3267 { 3268 Register Rtable_addr = Rscratch; 3269 Register Rret_type = Rret_addr; 3270 address table_addr = (address) Interpreter::invoke_return_entry_table_for(code); 3271 3272 // Get return type. It's coded into the upper 4 bits of the lower half of the 64 bit value. 3273 __ rldicl(Rret_type, Rflags, 64-ConstantPoolCacheEntry::tos_state_shift, 64-ConstantPoolCacheEntry::tos_state_bits); 3274 __ load_dispatch_table(Rtable_addr, (address*)table_addr); 3275 __ sldi(Rret_type, Rret_type, LogBytesPerWord); 3276 // Get return address. 3277 __ ldx(Rret_addr, Rtable_addr, Rret_type); 3278 } 3279 } 3280 3281 // Helper for virtual calls. Load target out of vtable and jump off! 3282 // Kills all passed registers. 3283 void TemplateTable::generate_vtable_call(Register Rrecv_klass, Register Rindex, Register Rret, Register Rtemp) { 3284 3285 assert_different_registers(Rrecv_klass, Rtemp, Rret); 3286 const Register Rtarget_method = Rindex; 3287 3288 // Get target method & entry point. 3289 const int base = InstanceKlass::vtable_start_offset() * wordSize; 3290 // Calc vtable addr scale the vtable index by 8. 3291 __ sldi(Rindex, Rindex, exact_log2(vtableEntry::size() * wordSize)); 3292 // Load target. 3293 __ addi(Rrecv_klass, Rrecv_klass, base + vtableEntry::method_offset_in_bytes()); 3294 __ ldx(Rtarget_method, Rindex, Rrecv_klass); 3295 // Argument and return type profiling. 3296 __ profile_arguments_type(Rtarget_method, Rrecv_klass /* scratch1 */, Rtemp /* scratch2 */, true); 3297 __ call_from_interpreter(Rtarget_method, Rret, Rrecv_klass /* scratch1 */, Rtemp /* scratch2 */); 3298 } 3299 3300 // Virtual or final call. 
Final calls are rewritten on the fly to run through "_fast_invokevfinal" next time.
3301 void TemplateTable::invokevirtual(int byte_no) {
3302 transition(vtos, vtos);
3303
3304 Register Rtable_addr = R11_scratch1,
3305 Rret_type = R12_scratch2,
3306 Rret_addr = R5_ARG3,
3307 Rflags = R22_tmp2, // Should survive C call.
3308 Rrecv = R3_ARG1,
3309 Rrecv_klass = Rrecv,
3310 Rvtableindex_or_method = R31, // Should survive C call.
3311 Rnum_params = R4_ARG2,
3312 Rnew_bc = R6_ARG4;
3313
3314 Label LnotFinal;
3315
3316 load_invoke_cp_cache_entry(byte_no, Rvtableindex_or_method, noreg, Rflags, /*virtual*/ true, false, false);
3317
3318 __ testbitdi(CCR0, R0, Rflags, ConstantPoolCacheEntry::is_vfinal_shift);
3319 __ bfalse(CCR0, LnotFinal);
3320
3321 if (RewriteBytecodes && !UseSharedSpaces) {
3322 patch_bytecode(Bytecodes::_fast_invokevfinal, Rnew_bc, R12_scratch2);
3323 }
3324 invokevfinal_helper(Rvtableindex_or_method, Rflags, R11_scratch1, R12_scratch2);
3325
3326 __ align(32, 12);
3327 __ bind(LnotFinal);
3328 // Load "this" pointer (receiver).
3329 __ rldicl(Rnum_params, Rflags, 64, 48);
3330 __ load_receiver(Rnum_params, Rrecv);
3331 __ verify_oop(Rrecv);
3332
3333 // Get return type. It's coded into the upper 4 bits of the lower half of the 64 bit value.
3334 __ rldicl(Rret_type, Rflags, 64-ConstantPoolCacheEntry::tos_state_shift, 64-ConstantPoolCacheEntry::tos_state_bits);
3335 __ load_dispatch_table(Rtable_addr, Interpreter::invoke_return_entry_table());
3336 __ sldi(Rret_type, Rret_type, LogBytesPerWord);
3337 __ ldx(Rret_addr, Rret_type, Rtable_addr);
3338 __ null_check_throw(Rrecv, oopDesc::klass_offset_in_bytes(), R11_scratch1);
3339 __ load_klass(Rrecv_klass, Rrecv);
3340 __ verify_klass_ptr(Rrecv_klass);
3341 __ profile_virtual_call(Rrecv_klass, R11_scratch1, R12_scratch2, false);
3342
3343 generate_vtable_call(Rrecv_klass, Rvtableindex_or_method, Rret_addr, R11_scratch1);
3344 }
3345
3346 void TemplateTable::fast_invokevfinal(int byte_no) {
3347 transition(vtos, vtos);
3348
3349 assert(byte_no == f2_byte, "use this argument");
3350 Register Rflags = R22_tmp2,
3351 Rmethod = R31;
3352 load_invoke_cp_cache_entry(byte_no, Rmethod, noreg, Rflags, /*virtual*/ true, /*is_invokevfinal*/ true, false);
3353 invokevfinal_helper(Rmethod, Rflags, R11_scratch1, R12_scratch2);
3354 }
3355
3356 void TemplateTable::invokevfinal_helper(Register Rmethod, Register Rflags, Register Rscratch1, Register Rscratch2) {
3357
3358 assert_different_registers(Rmethod, Rflags, Rscratch1, Rscratch2);
3359
3360 // Load receiver from stack slot.
3361 Register Rrecv = Rscratch2;
3362 Register Rnum_params = Rrecv;
3363
3364 __ ld(Rnum_params, in_bytes(Method::const_offset()), Rmethod);
3365 __ lhz(Rnum_params /* number of params */, in_bytes(ConstMethod::size_of_parameters_offset()), Rnum_params);
3366
3367 // Get return address.
3368 Register Rtable_addr = Rscratch1,
3369 Rret_addr = Rflags,
3370 Rret_type = Rret_addr;
3371 // Get return type. It's coded into the upper 4 bits of the lower half of the 64 bit value.
3372 __ rldicl(Rret_type, Rflags, 64-ConstantPoolCacheEntry::tos_state_shift, 64-ConstantPoolCacheEntry::tos_state_bits);
3373 __ load_dispatch_table(Rtable_addr, Interpreter::invoke_return_entry_table());
3374 __ sldi(Rret_type, Rret_type, LogBytesPerWord);
3375 __ ldx(Rret_addr, Rret_type, Rtable_addr);
3376
3377 // Load receiver and receiver NULL check.
3378 __ load_receiver(Rnum_params, Rrecv);
3379 __ null_check_throw(Rrecv, -1, Rscratch1);
3380
3381 __ profile_final_call(Rrecv, Rscratch1);
3382 // Argument and return type profiling.
3383 __ profile_arguments_type(Rmethod, Rscratch1, Rscratch2, true);
3384
3385 // Do the call.
3386 __ call_from_interpreter(Rmethod, Rret_addr, Rscratch1, Rscratch2);
3387 }
3388
3389 void TemplateTable::invokespecial(int byte_no) {
3390 assert(byte_no == f1_byte, "use this argument");
3391 transition(vtos, vtos);
3392
3393 Register Rtable_addr = R3_ARG1,
3394 Rret_addr = R4_ARG2,
3395 Rflags = R5_ARG3,
3396 Rreceiver = R6_ARG4,
3397 Rmethod = R31;
3398
3399 prepare_invoke(byte_no, Rmethod, Rret_addr, noreg, Rreceiver, Rflags, R11_scratch1);
3400
3401 // Receiver NULL check.
3402 __ null_check_throw(Rreceiver, -1, R11_scratch1);
3403
3404 __ profile_call(R11_scratch1, R12_scratch2);
3405 // Argument and return type profiling.
3406 __ profile_arguments_type(Rmethod, R11_scratch1, R12_scratch2, false);
3407 __ call_from_interpreter(Rmethod, Rret_addr, R11_scratch1, R12_scratch2);
3408 }
3409
3410 void TemplateTable::invokestatic(int byte_no) {
3411 assert(byte_no == f1_byte, "use this argument");
3412 transition(vtos, vtos);
3413
3414 Register Rtable_addr = R3_ARG1,
3415 Rret_addr = R4_ARG2,
3416 Rflags = R5_ARG3;
3417
3418 prepare_invoke(byte_no, R19_method, Rret_addr, noreg, noreg, Rflags, R11_scratch1);
3419
3420 __ profile_call(R11_scratch1, R12_scratch2);
3421 // Argument and return type profiling.
3422 __ profile_arguments_type(R19_method, R11_scratch1, R12_scratch2, false);
3423 __ call_from_interpreter(R19_method, Rret_addr, R11_scratch1, R12_scratch2);
3424 }
3425
3426 void TemplateTable::invokeinterface_object_method(Register Rrecv_klass,
3427 Register Rret,
3428 Register Rflags,
3429 Register Rindex,
3430 Register Rtemp1,
3431 Register Rtemp2) {
3432
3433 assert_different_registers(Rindex, Rret, Rrecv_klass, Rflags, Rtemp1, Rtemp2);
3434 Label LnotFinal;
3435
3436 // Check for vfinal.
3437 __ testbitdi(CCR0, R0, Rflags, ConstantPoolCacheEntry::is_vfinal_shift);
3438 __ bfalse(CCR0, LnotFinal);
3439
3440 Register Rscratch = Rflags; // Rflags is dead now.
3441
3442 // Final call case.
3443 __ profile_final_call(Rtemp1, Rscratch);
3444 // Argument and return type profiling.
3445 __ profile_arguments_type(Rindex, Rscratch, Rrecv_klass /* scratch */, true);
3446 // Do the final call - the index (f2) contains the method.
3447 __ call_from_interpreter(Rindex, Rret, Rscratch, Rrecv_klass /* scratch */);
3448
3449 // Non-final call case.
3450 __ bind(LnotFinal);
3451 __ profile_virtual_call(Rrecv_klass, Rtemp1, Rscratch, false);
3452 generate_vtable_call(Rrecv_klass, Rindex, Rret, Rscratch);
3453 }
3454
3455 void TemplateTable::invokeinterface(int byte_no) {
3456 assert(byte_no == f1_byte, "use this argument");
3457 transition(vtos, vtos);
3458
3459 const Register Rscratch1 = R11_scratch1,
3460 Rscratch2 = R12_scratch2,
3461 Rscratch3 = R9_ARG7,
3462 Rscratch4 = R10_ARG8,
3463 Rtable_addr = Rscratch2,
3464 Rinterface_klass = R5_ARG3,
3465 Rret_type = R8_ARG6,
3466 Rret_addr = Rret_type,
3467 Rindex = R6_ARG4,
3468 Rreceiver = R4_ARG2,
3469 Rrecv_klass = Rreceiver,
3470 Rflags = R7_ARG5;
3471
3472 prepare_invoke(byte_no, Rinterface_klass, Rret_addr, Rindex, Rreceiver, Rflags, Rscratch1);
3473
3474 // Get receiver klass.
3475 __ null_check_throw(Rreceiver, oopDesc::klass_offset_in_bytes(), Rscratch3);
3476 __ load_klass(Rrecv_klass, Rreceiver);
3477
3478 // Check corner case object method.
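// Background (sketch): invokeinterface may name a public method that is
// actually declared in java.lang.Object (e.g. toString() called through an
// interface reference). ConstantPoolCacheEntry::set_method() marks such
// entries as forced-virtual, and the bit test below chooses the dispatch
// path. Roughly, with hypothetical helper names:
//
//   if (cp_cache_entry->is_forced_virtual()) {
//     dispatch_via_vtable(recv_klass, f2);              // f2 = vtable index
//   } else {
//     dispatch_via_itable(recv_klass, iface_klass, f2); // normal case below
//   }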
3479 Label LobjectMethod;
3480
3481 __ testbitdi(CCR0, R0, Rflags, ConstantPoolCacheEntry::is_forced_virtual_shift);
3482 __ btrue(CCR0, LobjectMethod);
3483
3484 // Fallthrough: The normal invokeinterface case.
3485 __ profile_virtual_call(Rrecv_klass, Rscratch1, Rscratch2, false);
3486
3487 // Find entry point to call.
3488 Label Lthrow_icc, Lthrow_ame;
3489 // Result will be returned in Rindex.
3490 __ mr(Rscratch4, Rrecv_klass);
3491 __ mr(Rscratch3, Rindex);
3492 __ lookup_interface_method(Rrecv_klass, Rinterface_klass, Rindex, Rindex, Rscratch1, Rscratch2, Lthrow_icc);
3493
3494 __ cmpdi(CCR0, Rindex, 0);
3495 __ beq(CCR0, Lthrow_ame);
3496 // Found entry. Jump off!
3497 // Argument and return type profiling.
3498 __ profile_arguments_type(Rindex, Rscratch1, Rscratch2, true);
3499 __ call_from_interpreter(Rindex, Rret_addr, Rscratch1, Rscratch2);
3500
3501 // Vtable entry was NULL => Throw abstract method error.
3502 __ bind(Lthrow_ame);
3503 __ mr(Rrecv_klass, Rscratch4);
3504 __ mr(Rindex, Rscratch3);
3505 call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_AbstractMethodError));
3506
3507 // Interface was not found => Throw incompatible class change error.
3508 __ bind(Lthrow_icc);
3509 __ mr(Rrecv_klass, Rscratch4);
3510 call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_IncompatibleClassChangeError));
3511
3512 __ should_not_reach_here();
3513
3514 // Special case of invokeinterface called for virtual method of
3515 // java.lang.Object. See ConstantPoolCacheEntry::set_method() for details:
3516 // The invokeinterface was rewritten to an invokevirtual, hence we have
3517 // to handle this corner case. This code isn't produced by javac, but could
3518 // be produced by another compliant Java compiler.
3519 __ bind(LobjectMethod);
3520 invokeinterface_object_method(Rrecv_klass, Rret_addr, Rflags, Rindex, Rscratch1, Rscratch2);
3521 }
3522
3523 void TemplateTable::invokedynamic(int byte_no) {
3524 transition(vtos, vtos);
3525
3526 const Register Rret_addr = R3_ARG1,
3527 Rflags = R4_ARG2,
3528 Rmethod = R22_tmp2,
3529 Rscratch1 = R11_scratch1,
3530 Rscratch2 = R12_scratch2;
3531
3532 prepare_invoke(byte_no, Rmethod, Rret_addr, Rscratch1, noreg, Rflags, Rscratch2);
3533
3534 // Profile this call.
3535 __ profile_call(Rscratch1, Rscratch2);
3536
3537 // Off we go. With the new method handles, we don't jump to a method handle
3538 // entry any more. Instead, we pushed an "appendix" in prepare_invoke, which happens
3539 // to be the callsite object the bootstrap method returned. This is passed to a
3540 // "link" method which does the dispatch (most likely just grabs the MH stored
3541 // inside the callsite and does an invokehandle).
3542 // Argument and return type profiling.
3543 __ profile_arguments_type(Rmethod, Rscratch1, Rscratch2, false);
3544 __ call_from_interpreter(Rmethod, Rret_addr, Rscratch1 /* scratch1 */, Rscratch2 /* scratch2 */);
3545 }
3546
3547 void TemplateTable::invokehandle(int byte_no) {
3548 transition(vtos, vtos);
3549
3550 const Register Rret_addr = R3_ARG1,
3551 Rflags = R4_ARG2,
3552 Rrecv = R5_ARG3,
3553 Rmethod = R22_tmp2,
3554 Rscratch1 = R11_scratch1,
3555 Rscratch2 = R12_scratch2;
3556
3557 prepare_invoke(byte_no, Rmethod, Rret_addr, Rscratch1, Rrecv, Rflags, Rscratch2);
3558 __ verify_method_ptr(Rmethod);
3559 __ null_check_throw(Rrecv, -1, Rscratch2);
3560
3561 __ profile_final_call(Rrecv, Rscratch1);
3562
3563 // Still no call from handle => We call the method handle interpreter here.
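// The call below is an ordinary method call; resolution already installed a
// synthetic adapter method in the constant pool cache. A rough sketch of the
// flow (hypothetical accessor names):
//
//   Method* adapter = cp_cache_entry->f1_as_method(); // MH adapter from resolution
//   // the appendix (if any) was pushed in prepare_invoke above
//   call_from_interpreter(adapter, ret_addr);         // no MH-specific trampoline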
3564 // Argument and return type profiling.
3565 __ profile_arguments_type(Rmethod, Rscratch1, Rscratch2, true);
3566 __ call_from_interpreter(Rmethod, Rret_addr, Rscratch1 /* scratch1 */, Rscratch2 /* scratch2 */);
3567 }
3568
3569 // =============================================================================
3570 // Allocation
3571
3572 // Puts allocated obj ref onto the expression stack.
3573 void TemplateTable::_new() {
3574 transition(vtos, atos);
3575
3576 Label Lslow_case,
3577 Ldone,
3578 Linitialize_header,
3579 Lallocate_shared,
3580 Linitialize_object; // Including clearing the fields.
3581
3582 const Register RallocatedObject = R17_tos,
3583 RinstanceKlass = R9_ARG7,
3584 Rscratch = R11_scratch1,
3585 Roffset = R8_ARG6,
3586 Rinstance_size = Roffset,
3587 Rcpool = R4_ARG2,
3588 Rtags = R3_ARG1,
3589 Rindex = R5_ARG3;
3590
3591 const bool allow_shared_alloc = Universe::heap()->supports_inline_contig_alloc();
3592
3593 // --------------------------------------------------------------------------
3594 // Check if fast case is possible.
3595
3596 // Load pointers to const pool and const pool's tags array.
3597 __ get_cpool_and_tags(Rcpool, Rtags);
3598 // Load index of constant pool entry.
3599 __ get_2_byte_integer_at_bcp(1, Rindex, InterpreterMacroAssembler::Unsigned);
3600
3601 if (UseTLAB) {
3602 // Make sure the class we're about to instantiate has been resolved.
3603 // This is done before loading the InstanceKlass to be consistent with the order
3604 // in which the constant pool is updated (see ConstantPoolCache::klass_at_put).
3605 __ addi(Rtags, Rtags, Array<u1>::base_offset_in_bytes());
3606 __ lbzx(Rtags, Rindex, Rtags);
3607
3608 __ cmpdi(CCR0, Rtags, JVM_CONSTANT_Class);
3609 __ bne(CCR0, Lslow_case);
3610
3611 // Get instanceKlass (load from Rcpool + sizeof(ConstantPool) + Rindex*BytesPerWord).
3612 __ sldi(Roffset, Rindex, LogBytesPerWord);
3613 __ addi(Rscratch, Rcpool, sizeof(ConstantPool));
3614 __ isync(); // Order load of instance Klass wrt. tags.
3615 __ ldx(RinstanceKlass, Roffset, Rscratch);
3616
3617 // Make sure klass is fully initialized and get instance_size.
3618 __ lbz(Rscratch, in_bytes(InstanceKlass::init_state_offset()), RinstanceKlass);
3619 __ lwz(Rinstance_size, in_bytes(Klass::layout_helper_offset()), RinstanceKlass);
3620
3621 __ cmpdi(CCR1, Rscratch, InstanceKlass::fully_initialized);
3622 // Make sure klass doesn't have a finalizer and isn't abstract, an interface, or java/lang/Class.
3623 __ andi_(R0, Rinstance_size, Klass::_lh_instance_slow_path_bit); // slow path bit equals 0?
3624
3625 __ crnand(CCR0, Assembler::equal, CCR1, Assembler::equal); // slow path bit set or not fully initialized?
3626 __ beq(CCR0, Lslow_case);
3627
3628 // --------------------------------------------------------------------------
3629 // Fast case:
3630 // Allocate the instance.
3631 // 1) Try to allocate in the TLAB.
3632 // 2) If that fails, and the TLAB is not full enough to be discarded, allocate in the shared eden.
3633 // 3) If the above fails (or is not applicable), go to a slow case (creates a new TLAB, etc.).
3634
3635 Register RoldTopValue = RallocatedObject; // Object will be allocated here if it fits.
3636 Register RnewTopValue = R6_ARG4;
3637 Register RendValue = R7_ARG5;
3638
3639 // Check if we can allocate in the TLAB.
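// The TLAB fast path below is plain bump-pointer allocation; in C-like
// pseudocode (a sketch, field accesses abbreviated):
//
//   HeapWord* top     = thread->tlab_top();
//   HeapWord* end     = thread->tlab_end();
//   HeapWord* new_top = top + instance_size;
//   if (new_top <= end) {            // unsigned compare, cmpld below
//     thread->set_tlab_top(new_top);
//     obj = top;                     // thread-local buffer => no CAS needed
//   } else {
//     // try the shared eden or take the slow path
//   }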
3640 __ ld(RoldTopValue, in_bytes(JavaThread::tlab_top_offset()), R16_thread);
3641 __ ld(RendValue, in_bytes(JavaThread::tlab_end_offset()), R16_thread);
3642
3643 __ add(RnewTopValue, Rinstance_size, RoldTopValue);
3644
3645 // If there is enough space, we do not CAS and do not clear.
3646 __ cmpld(CCR0, RnewTopValue, RendValue);
3647 __ bgt(CCR0, allow_shared_alloc ? Lallocate_shared : Lslow_case);
3648
3649 __ std(RnewTopValue, in_bytes(JavaThread::tlab_top_offset()), R16_thread);
3650
3651 if (ZeroTLAB) {
3652 // The fields have already been cleared.
3653 __ b(Linitialize_header);
3654 } else {
3655 // Initialize both the header and fields.
3656 __ b(Linitialize_object);
3657 }
3658
3659 // Fall through: TLAB was too small.
3660 if (allow_shared_alloc) {
3661 Register RtlabWasteLimitValue = R10_ARG8;
3662 Register RfreeValue = RnewTopValue;
3663
3664 __ bind(Lallocate_shared);
3665 // Check if tlab should be discarded (refill_waste_limit >= free).
3666 __ ld(RtlabWasteLimitValue, in_bytes(JavaThread::tlab_refill_waste_limit_offset()), R16_thread);
3667 __ subf(RfreeValue, RoldTopValue, RendValue);
3668 __ srdi(RfreeValue, RfreeValue, LogHeapWordSize); // in dwords
3669 __ cmpld(CCR0, RtlabWasteLimitValue, RfreeValue);
3670 __ bge(CCR0, Lslow_case);
3671
3672 // Increment waste limit to prevent getting stuck on this slow path.
3673 __ addi(RtlabWasteLimitValue, RtlabWasteLimitValue, (int)ThreadLocalAllocBuffer::refill_waste_limit_increment());
3674 __ std(RtlabWasteLimitValue, in_bytes(JavaThread::tlab_refill_waste_limit_offset()), R16_thread);
3675 }
3676 // else: No allocation in the shared eden. // fallthru: __ b(Lslow_case);
3677 }
3678 // else: Always take the slow path.
3679
3680 // --------------------------------------------------------------------------
3681 // slow case
3682 __ bind(Lslow_case);
3683 call_VM(R17_tos, CAST_FROM_FN_PTR(address, InterpreterRuntime::_new), Rcpool, Rindex);
3684
3685 if (UseTLAB) {
3686 __ b(Ldone);
3687 // --------------------------------------------------------------------------
3688 // Init1: Zero out newly allocated memory.
3689
3690 if (!ZeroTLAB || allow_shared_alloc) {
3691 // Clear object fields.
3692 __ bind(Linitialize_object);
3693
3694 // Initialize remaining object fields.
3695 Register Rbase = Rtags;
3696 __ addi(Rinstance_size, Rinstance_size, 7 - (int)sizeof(oopDesc));
3697 __ addi(Rbase, RallocatedObject, sizeof(oopDesc));
3698 __ srdi(Rinstance_size, Rinstance_size, 3);
3699
3700 // Clear out the object, skipping the header. Also takes care of the zero-length case.
3701 __ clear_memory_doubleword(Rbase, Rinstance_size);
3702 // fallthru: __ b(Linitialize_header);
3703 }
3704
3705 // --------------------------------------------------------------------------
3706 // Init2: Initialize the header: mark, klass
3707 __ bind(Linitialize_header);
3708
3709 // Init mark.
3710 if (UseBiasedLocking) {
3711 __ ld(Rscratch, in_bytes(Klass::prototype_header_offset()), RinstanceKlass);
3712 } else {
3713 __ load_const_optimized(Rscratch, markOopDesc::prototype(), R0);
3714 }
3715 __ std(Rscratch, oopDesc::mark_offset_in_bytes(), RallocatedObject);
3716
3717 // Init klass.
3718 __ store_klass_gap(RallocatedObject);
3719 __ store_klass(RallocatedObject, RinstanceKlass, Rscratch); // klass (last for cms)
3720
3721 // Check and trigger dtrace event.
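// SkipIfEqualZero guards the block with a runtime test of the flag, so a
// disabled probe costs only a load and a branch. Roughly equivalent C logic
// (a sketch):
//
//   if (DTraceAllocProbes != 0) {
//     SharedRuntime::dtrace_object_alloc(obj); // tos is saved/restored around the call
//   }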
3722 {
3723 SkipIfEqualZero skip_if(_masm, Rscratch, &DTraceAllocProbes);
3724 __ push(atos);
3725 __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_object_alloc));
3726 __ pop(atos);
3727 }
3728 }
3729
3730 // continue
3731 __ bind(Ldone);
3732
3733 // Must prevent reordering of stores for object initialization with stores that publish the new object.
3734 __ membar(Assembler::StoreStore);
3735 }
3736
3737 void TemplateTable::newarray() {
3738 transition(itos, atos);
3739
3740 __ lbz(R4, 1, R14_bcp);
3741 __ extsw(R5, R17_tos);
3742 call_VM(R17_tos, CAST_FROM_FN_PTR(address, InterpreterRuntime::newarray), R4, R5 /* size */);
3743
3744 // Must prevent reordering of stores for object initialization with stores that publish the new object.
3745 __ membar(Assembler::StoreStore);
3746 }
3747
3748 void TemplateTable::anewarray() {
3749 transition(itos, atos);
3750
3751 __ get_constant_pool(R4);
3752 __ get_2_byte_integer_at_bcp(1, R5, InterpreterMacroAssembler::Unsigned);
3753 __ extsw(R6, R17_tos); // size
3754 call_VM(R17_tos, CAST_FROM_FN_PTR(address, InterpreterRuntime::anewarray), R4 /* pool */, R5 /* index */, R6 /* size */);
3755
3756 // Must prevent reordering of stores for object initialization with stores that publish the new object.
3757 __ membar(Assembler::StoreStore);
3758 }
3759
3760 // Allocate a multi-dimensional array.
3761 void TemplateTable::multianewarray() {
3762 transition(vtos, atos);
3763
3764 Register Rptr = R31; // Needs to survive C call.
3765
3766 // Put ndims * wordSize into Rptr.
3767 __ lbz(Rptr, 3, R14_bcp);
3768 __ sldi(Rptr, Rptr, Interpreter::logStackElementSize);
3769 // Esp points past last_dim, so set R4 to the first_dim address.
3770 __ add(R4, Rptr, R15_esp);
3771 call_VM(R17_tos, CAST_FROM_FN_PTR(address, InterpreterRuntime::multianewarray), R4 /* first_size_address */);
3772 // Pop all dimensions off the stack.
3773 __ add(R15_esp, Rptr, R15_esp);
3774
3775 // Must prevent reordering of stores for object initialization with stores that publish the new object.
3776 __ membar(Assembler::StoreStore);
3777 }
3778
3779 void TemplateTable::arraylength() {
3780 transition(atos, itos);
3781
3782 Label LnoException;
3783 __ verify_oop(R17_tos);
3784 __ null_check_throw(R17_tos, arrayOopDesc::length_offset_in_bytes(), R11_scratch1);
3785 __ lwa(R17_tos, arrayOopDesc::length_offset_in_bytes(), R17_tos);
3786 }
3787
3788 // ============================================================================
3789 // Typechecks
3790
3791 void TemplateTable::checkcast() {
3792 transition(atos, atos);
3793
3794 Label Ldone, Lis_null, Lquicked, Lresolved;
3795 Register Roffset = R6_ARG4,
3796 RobjKlass = R4_ARG2,
3797 RspecifiedKlass = R5_ARG3, // Generate_ClassCastException_verbose_handler will read value from this register.
3798 Rcpool = R11_scratch1,
3799 Rtags = R12_scratch2;
3800
3801 // Null does not pass.
3802 __ cmpdi(CCR0, R17_tos, 0);
3803 __ beq(CCR0, Lis_null);
3804
3805 // Get constant pool tag to find out if the bytecode has already been "quickened".
3806 __ get_cpool_and_tags(Rcpool, Rtags);
3807
3808 __ get_2_byte_integer_at_bcp(1, Roffset, InterpreterMacroAssembler::Unsigned);
3809
3810 __ addi(Rtags, Rtags, Array<u1>::base_offset_in_bytes());
3811 __ lbzx(Rtags, Rtags, Roffset);
3812
3813 __ cmpdi(CCR0, Rtags, JVM_CONSTANT_Class);
3814 __ beq(CCR0, Lquicked);
3815
3816 // Call into the VM to "quicken" checkcast.
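// "Quickening": the first execution resolves the class and flips the CP tag
// to JVM_CONSTANT_Class, so later executions take the Lquicked path and load
// the Klass* straight from the constant pool. The slow-path contract is
// roughly (a sketch; see InterpreterRuntime::quicken_io_cc):
//
//   Klass* k = resolve_klass_at_bcp(thread); // may GC => push_ptr/pop_ptr around it
//   thread->set_vm_result_2(k);              // fetched via get_vm_result_2 below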
3817 __ push_ptr(); // for GC
3818 call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::quicken_io_cc));
3819 __ get_vm_result_2(RspecifiedKlass);
3820 __ pop_ptr(); // Restore receiver.
3821 __ b(Lresolved);
3822
3823 // Extract target class from constant pool.
3824 __ bind(Lquicked);
3825 __ sldi(Roffset, Roffset, LogBytesPerWord);
3826 __ addi(Rcpool, Rcpool, sizeof(ConstantPool));
3827 __ isync(); // Order load of specified Klass wrt. tags.
3828 __ ldx(RspecifiedKlass, Rcpool, Roffset);
3829
3830 // Do the checkcast.
3831 __ bind(Lresolved);
3832 // Get value klass in RobjKlass.
3833 __ load_klass(RobjKlass, R17_tos);
3834 // Generate a fast subtype check. Branches to Ldone if the cast succeeds; falls through on failure.
3835 __ gen_subtype_check(RobjKlass, RspecifiedKlass, /*3 temp regs*/ Roffset, Rcpool, Rtags, /*target if subtype*/ Ldone);
3836
3837 // Not a subtype, so we must throw an exception.
3838 // Target class oop is in register R6_ARG4 == RspecifiedKlass by convention.
3839 __ load_dispatch_table(R11_scratch1, (address*)Interpreter::_throw_ClassCastException_entry);
3840 __ mtctr(R11_scratch1);
3841 __ bctr();
3842
3843 // Profile the null case.
3844 __ align(32, 12);
3845 __ bind(Lis_null);
3846 __ profile_null_seen(R11_scratch1, Rtags); // Rtags used as scratch.
3847
3848 __ align(32, 12);
3849 __ bind(Ldone);
3850 }
3851
3852 // Output:
3853 // - tos == 0: Obj was null or not an instance of class.
3854 // - tos == 1: Obj was an instance of class.
3855 void TemplateTable::instanceof() {
3856 transition(atos, itos);
3857
3858 Label Ldone, Lis_null, Lquicked, Lresolved;
3859 Register Roffset = R6_ARG4,
3860 RobjKlass = R4_ARG2,
3861 RspecifiedKlass = R5_ARG3,
3862 Rcpool = R11_scratch1,
3863 Rtags = R12_scratch2;
3864
3865 // Null does not pass.
3866 __ cmpdi(CCR0, R17_tos, 0);
3867 __ beq(CCR0, Lis_null);
3868
3869 // Get constant pool tag to find out if the bytecode has already been "quickened".
3870 __ get_cpool_and_tags(Rcpool, Rtags);
3871
3872 __ get_2_byte_integer_at_bcp(1, Roffset, InterpreterMacroAssembler::Unsigned);
3873
3874 __ addi(Rtags, Rtags, Array<u1>::base_offset_in_bytes());
3875 __ lbzx(Rtags, Rtags, Roffset);
3876
3877 __ cmpdi(CCR0, Rtags, JVM_CONSTANT_Class);
3878 __ beq(CCR0, Lquicked);
3879
3880 // Call into the VM to "quicken" instanceof.
3881 __ push_ptr(); // for GC
3882 call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::quicken_io_cc));
3883 __ get_vm_result_2(RspecifiedKlass);
3884 __ pop_ptr(); // Restore receiver.
3885 __ b(Lresolved);
3886
3887 // Extract target class from constant pool.
3888 __ bind(Lquicked);
3889 __ sldi(Roffset, Roffset, LogBytesPerWord);
3890 __ addi(Rcpool, Rcpool, sizeof(ConstantPool));
3891 __ isync(); // Order load of specified Klass wrt. tags.
3892 __ ldx(RspecifiedKlass, Rcpool, Roffset);
3893
3894 // Do the type check.
3895 __ bind(Lresolved);
3896 // Get value klass in RobjKlass.
3897 __ load_klass(RobjKlass, R17_tos);
3898 // Generate a fast subtype check. Branches to Ldone with tos == 1 if obj is a subtype; falls through to set tos to 0 otherwise.
3899 __ li(R17_tos, 1);
3900 __ gen_subtype_check(RobjKlass, RspecifiedKlass, /*3 temp regs*/ Roffset, Rcpool, Rtags, /*target if subtype*/ Ldone);
3901 __ li(R17_tos, 0);
3902
3903 if (ProfileInterpreter) {
3904 __ b(Ldone);
3905 }
3906
3907 // Profile the null case.
3908 __ align(32, 12);
3909 __ bind(Lis_null);
3910 __ profile_null_seen(Rcpool, Rtags); // Rcpool and Rtags used as scratch.
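// A null operand is never an instance of any type (JLS 15.20.2); e.g.
// `(null instanceof String)` evaluates to false without any subtype check.
// Conveniently, R17_tos still holds the null (i.e. 0) compared above, which
// is exactly the int result required, so no explicit `li(R17_tos, 0)` is
// needed on this path.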
3911
3912 __ align(32, 12);
3913 __ bind(Ldone);
3914 }
3915
3916 // =============================================================================
3917 // Breakpoints
3918
3919 void TemplateTable::_breakpoint() {
3920 transition(vtos, vtos);
3921
3922 // Get the unpatched bytecode.
3923 __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::get_original_bytecode_at), R19_method, R14_bcp);
3924 __ mr(R31, R3_RET);
3925
3926 // Post the breakpoint event.
3927 __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::_breakpoint), R19_method, R14_bcp);
3928
3929 // Complete the execution of the original bytecode.
3930 __ dispatch_Lbyte_code(vtos, R31, Interpreter::normal_table(vtos));
3931 }
3932
3933 // =============================================================================
3934 // Exceptions
3935
3936 void TemplateTable::athrow() {
3937 transition(atos, vtos);
3938
3939 // Exception oop is in tos.
3940 __ verify_oop(R17_tos);
3941
3942 __ null_check_throw(R17_tos, -1, R11_scratch1);
3943
3944 // The throw-exception interpreter entry expects the exception oop to be in R3.
3945 __ mr(R3_RET, R17_tos);
3946 __ load_dispatch_table(R11_scratch1, (address*)Interpreter::throw_exception_entry());
3947 __ mtctr(R11_scratch1);
3948 __ bctr();
3949 }
3950
3951 // =============================================================================
3952 // Synchronization
3953 // Searches the basic object lock list on the stack for a free slot
3954 // and uses it to lock the object in tos.
3955 //
3956 // Recursive locking is enabled by exiting the search if the same
3957 // object is already found in the list. Thus, a new basic object lock
3958 // is allocated "higher up" in the stack and is thus found first
3959 // at the next monitor exit.
3960 void TemplateTable::monitorenter() {
3961 transition(atos, vtos);
3962
3963 __ verify_oop(R17_tos);
3964
3965 Register Rcurrent_monitor = R11_scratch1,
3966 Rcurrent_obj = R12_scratch2,
3967 Robj_to_lock = R17_tos,
3968 Rscratch1 = R3_ARG1,
3969 Rscratch2 = R4_ARG2,
3970 Rscratch3 = R5_ARG3,
3971 Rcurrent_obj_addr = R6_ARG4;
3972
3973 // ------------------------------------------------------------------------------
3974 // Null pointer exception.
3975 __ null_check_throw(Robj_to_lock, -1, R11_scratch1);
3976
3977 // Try to acquire a lock on the object.
3978 // Repeat until succeeded (i.e., until monitorenter returns true).
3979
3980 // ------------------------------------------------------------------------------
3981 // Find a free slot in the monitor block.
3982 Label Lfound, Lexit, Lallocate_new;
3983 ConditionRegister found_free_slot = CCR0,
3984 found_same_obj = CCR1,
3985 reached_limit = CCR6;
3986 {
3987 Label Lloop, Lentry;
3988 Register Rlimit = Rcurrent_monitor;
3989
3990 // Set up search loop - start with topmost monitor.
3991 __ add(Rcurrent_obj_addr, BasicObjectLock::obj_offset_in_bytes(), R26_monitor);
3992
3993 __ ld(Rlimit, 0, R1_SP);
3994 __ addi(Rlimit, Rlimit, - (frame::ijava_state_size + frame::interpreter_frame_monitor_size_in_bytes() - BasicObjectLock::obj_offset_in_bytes())); // Monitor base
3995
3996 // Check if any slot is present => shortcut to allocation if not.
3997 __ cmpld(reached_limit, Rcurrent_obj_addr, Rlimit);
3998 __ bgt(reached_limit, Lallocate_new);
3999
4000 // Pre-load topmost slot.
4001 __ ld(Rcurrent_obj, 0, Rcurrent_obj_addr);
4002 __ addi(Rcurrent_obj_addr, Rcurrent_obj_addr, frame::interpreter_frame_monitor_size() * wordSize);
4003 // The search loop.
4004 __ bind(Lloop);
4005 // Found free slot?
4006 __ cmpdi(found_free_slot, Rcurrent_obj, 0);
4007 // Is this entry for same obj? If so, stop the search and take the found
4008 // free slot or allocate a new one to enable recursive locking.
4009 __ cmpd(found_same_obj, Rcurrent_obj, Robj_to_lock);
4010 __ cmpld(reached_limit, Rcurrent_obj_addr, Rlimit);
4011 __ beq(found_free_slot, Lexit);
4012 __ beq(found_same_obj, Lallocate_new);
4013 __ bgt(reached_limit, Lallocate_new);
4014 // Check if last allocated BasicObjectLock reached.
4015 __ ld(Rcurrent_obj, 0, Rcurrent_obj_addr);
4016 __ addi(Rcurrent_obj_addr, Rcurrent_obj_addr, frame::interpreter_frame_monitor_size() * wordSize);
4017 // Next iteration if unchecked BasicObjectLocks exist on the stack.
4018 __ b(Lloop);
4019 }
4020
4021 // ------------------------------------------------------------------------------
4022 // Check if we found a free slot.
4023 __ bind(Lexit);
4024
4025 __ addi(Rcurrent_monitor, Rcurrent_obj_addr, -(frame::interpreter_frame_monitor_size() * wordSize) - BasicObjectLock::obj_offset_in_bytes());
4026 __ addi(Rcurrent_obj_addr, Rcurrent_obj_addr, - frame::interpreter_frame_monitor_size() * wordSize);
4027 __ b(Lfound);
4028
4029 // We didn't find a free BasicObjectLock => allocate one.
4030 __ align(32, 12);
4031 __ bind(Lallocate_new);
4032 __ add_monitor_to_stack(false, Rscratch1, Rscratch2);
4033 __ mr(Rcurrent_monitor, R26_monitor);
4034 __ addi(Rcurrent_obj_addr, R26_monitor, BasicObjectLock::obj_offset_in_bytes());
4035
4036 // ------------------------------------------------------------------------------
4037 // We now have a slot to lock.
4038 __ bind(Lfound);
4039
4040 // Increment bcp to point to the next bytecode, so exception handling for async. exceptions works correctly.
4041 // The object has already been popped from the stack, so the expression stack looks correct.
4042 __ addi(R14_bcp, R14_bcp, 1);
4043
4044 __ std(Robj_to_lock, 0, Rcurrent_obj_addr);
4045 __ lock_object(Rcurrent_monitor, Robj_to_lock);
4046
4047 // Check if there's enough space on the stack for the monitors after locking.
4048 Label Lskip_stack_check;
4049 // Optimization: If the monitors stack section is less than a standard page size (4K), don't run
4050 // the stack check. There should be enough shadow pages to fit that in.
4051 __ ld(Rscratch3, 0, R1_SP);
4052 __ sub(Rscratch3, Rscratch3, R26_monitor);
4053 __ cmpdi(CCR0, Rscratch3, 4*K);
4054 __ blt(CCR0, Lskip_stack_check);
4055
4056 DEBUG_ONLY(__ untested("stack overflow check during monitor enter");)
4057 __ li(Rscratch1, 0);
4058 __ generate_stack_overflow_check_with_compare_and_throw(Rscratch1, Rscratch2);
4059
4060 __ align(32, 12);
4061 __ bind(Lskip_stack_check);
4062
4063 // The bcp has already been incremented. Just need to dispatch to next instruction.
4064 __ dispatch_next(vtos);
4065 }
4066
4067 void TemplateTable::monitorexit() {
4068 transition(atos, vtos);
4069 __ verify_oop(R17_tos);
4070
4071 Register Rcurrent_monitor = R11_scratch1,
4072 Rcurrent_obj = R12_scratch2,
4073 Robj_to_lock = R17_tos,
4074 Rcurrent_obj_addr = R3_ARG1,
4075 Rlimit = R4_ARG2;
4076 Label Lfound, Lillegal_monitor_state;
4077
4078 // Check corner case: unbalanced monitorEnter / Exit.
4079 __ ld(Rlimit, 0, R1_SP);
4080 __ addi(Rlimit, Rlimit, - (frame::ijava_state_size + frame::interpreter_frame_monitor_size_in_bytes())); // Monitor base
4081
4082 // Null pointer check.
4083 __ null_check_throw(Robj_to_lock, -1, R11_scratch1);
4084
4085 __ cmpld(CCR0, R26_monitor, Rlimit);
4086 __ bgt(CCR0, Lillegal_monitor_state);
4087
4088 // Find the corresponding slot in the monitors stack section.
4089 {
4090 Label Lloop;
4091
4092 // Start with topmost monitor.
4093 __ addi(Rcurrent_obj_addr, R26_monitor, BasicObjectLock::obj_offset_in_bytes());
4094 __ addi(Rlimit, Rlimit, BasicObjectLock::obj_offset_in_bytes());
4095 __ ld(Rcurrent_obj, 0, Rcurrent_obj_addr);
4096 __ addi(Rcurrent_obj_addr, Rcurrent_obj_addr, frame::interpreter_frame_monitor_size() * wordSize);
4097
4098 __ bind(Lloop);
4099 // Is this entry for same obj?
4100 __ cmpd(CCR0, Rcurrent_obj, Robj_to_lock);
4101 __ beq(CCR0, Lfound);
4102
4103 // Check if last allocated BasicObjectLock reached.
4104
4105 __ ld(Rcurrent_obj, 0, Rcurrent_obj_addr);
4106 __ cmpld(CCR0, Rcurrent_obj_addr, Rlimit);
4107 __ addi(Rcurrent_obj_addr, Rcurrent_obj_addr, frame::interpreter_frame_monitor_size() * wordSize);
4108
4109 // Next iteration if unchecked BasicObjectLocks exist on the stack.
4110 __ ble(CCR0, Lloop);
4111 }
4112
4113 // Fell through without finding the basic object lock => throw an IllegalMonitorStateException.
4114 __ bind(Lillegal_monitor_state);
4115 call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_illegal_monitor_state_exception));
4116 __ should_not_reach_here();
4117
4118 __ align(32, 12);
4119 __ bind(Lfound);
4120 __ addi(Rcurrent_monitor, Rcurrent_obj_addr,
4121 -(frame::interpreter_frame_monitor_size() * wordSize) - BasicObjectLock::obj_offset_in_bytes());
4122 __ unlock_object(Rcurrent_monitor);
4123 }
4124
4125 // ============================================================================
4126 // Wide bytecodes
4127
4128 // Wide instructions. Simply redirect to the wide entry point for that instruction.
4129 void TemplateTable::wide() {
4130 transition(vtos, vtos);
4131
4132 const Register Rtable = R11_scratch1,
4133 Rindex = R12_scratch2,
4134 Rtmp = R0;
4135
4136 __ lbz(Rindex, 1, R14_bcp);
4137
4138 __ load_dispatch_table(Rtable, Interpreter::_wentry_point);
4139
4140 __ slwi(Rindex, Rindex, LogBytesPerWord);
4141 __ ldx(Rtmp, Rtable, Rindex);
4142 __ mtctr(Rtmp);
4143 __ bctr();
4144 // Note: the bcp increment step is part of the individual wide bytecode implementations.
4145 }
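// Example (illustrative): `wide iload 300` is encoded as the four bytes
//
//   0xC4 0x15 0x01 0x2C   // wide, iload, 16-bit index 300 (0x012C)
//
// Dispatching through Interpreter::_wentry_point runs iload's wide variant,
// which reads the 2-byte index and advances the bcp by 4 instead of 2.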