/*
 * Copyright (c) 2000, 2019, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2012, 2019, SAP SE. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "asm/macroAssembler.inline.hpp"
#include "c1/c1_Compilation.hpp"
#include "c1/c1_LIRAssembler.hpp"
#include "c1/c1_MacroAssembler.hpp"
#include "c1/c1_Runtime1.hpp"
#include "c1/c1_ValueStack.hpp"
#include "ci/ciArrayKlass.hpp"
#include "ci/ciInstance.hpp"
#include "gc/shared/collectedHeap.hpp"
#include "gc/shared/barrierSet.hpp"
#include "gc/shared/cardTableBarrierSet.hpp"
#include "memory/universe.hpp"
#include "nativeInst_ppc.hpp"
#include "oops/compressedOops.hpp"
#include "oops/objArrayKlass.hpp"
#include "runtime/frame.inline.hpp"
#include "runtime/safepointMechanism.inline.hpp"
#include "runtime/sharedRuntime.hpp"

#define __ _masm->


const ConditionRegister LIR_Assembler::BOOL_RESULT = CCR5;


bool LIR_Assembler::is_small_constant(LIR_Opr opr) {
  Unimplemented(); return false; // Currently not used on this platform.
}


LIR_Opr LIR_Assembler::receiverOpr() {
  return FrameMap::R3_oop_opr;
}


LIR_Opr LIR_Assembler::osrBufferPointer() {
  return FrameMap::R3_opr;
}


// This specifies the stack pointer decrement needed to build the frame.
int LIR_Assembler::initial_frame_size_in_bytes() const {
  return in_bytes(frame_map()->framesize_in_bytes());
}


// Inline cache check: the inline cached class is in inline_cache_reg;
// we fetch the class of the receiver and compare it with the cached class.
// If they do not match we jump to the slow case.
int LIR_Assembler::check_icache() {
  int offset = __ offset();
  __ inline_cache_check(R3_ARG1, R19_inline_cache_reg);
  return offset;
}

void LIR_Assembler::clinit_barrier(ciMethod* method) {
  assert(!method->holder()->is_not_initialized(), "initialization should have been started");

  Label L_skip_barrier;
  Register klass = R20;

  metadata2reg(method->holder()->constant_encoding(), klass);
  __ clinit_barrier(klass, R16_thread, &L_skip_barrier /*L_fast_path*/);

  __ load_const_optimized(klass, SharedRuntime::get_handle_wrong_method_stub(), R0);
  __ mtctr(klass);
  __ bctr();

  __ bind(L_skip_barrier);
}

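// A worked example of the monitor offset math in osr_entry() below, using
// illustrative numbers (not taken from this file): with max_locals() == 3 and
// number_of_locks == 2, the OSR buffer holds three 8-byte local slots followed
// by two 2-word monitor entries, so
//   monitor_offset = 8 * 3 + 16 * (2 - 1) = 40,
// and the loop reads the lock/oop pair for monitor i at
//   slot_offset = monitor_offset - i * 16, i.e. 40 for i == 0, then 24.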
void LIR_Assembler::osr_entry() {
  // On-stack-replacement entry sequence:
  //
  //  1. Create a new compiled activation.
  //  2. Initialize local variables in the compiled activation. The expression
  //     stack must be empty at the osr_bci; it is not initialized.
  //  3. Jump to the continuation address in compiled code to resume execution.

  // OSR entry point
  offsets()->set_value(CodeOffsets::OSR_Entry, code_offset());
  BlockBegin* osr_entry = compilation()->hir()->osr_entry();
  ValueStack* entry_state = osr_entry->end()->state();
  int number_of_locks = entry_state->locks_size();

  // Create a frame for the compiled activation.
  __ build_frame(initial_frame_size_in_bytes(), bang_size_in_bytes());

  // OSR buffer is
  //
  //   locals[nlocals-1..0]
  //   monitors[number_of_locks-1..0]
  //
  // Locals is a direct copy of the interpreter frame so in the osr buffer
  // the first slot in the local array is the last local from the interpreter
  // and the last slot is local[0] (receiver) from the interpreter.
  //
  // Similarly with locks. The first lock slot in the osr buffer is the nth lock
  // from the interpreter frame, the nth lock slot in the osr buffer is the 0th
  // lock in the interpreter frame (the method lock if a sync method).

  // Initialize monitors in the compiled activation.
  //   R3: pointer to osr buffer
  //
  // All other registers are dead at this point and the locals will be
  // copied into place by code emitted in the IR.

  Register OSR_buf = osrBufferPointer()->as_register();
  { assert(frame::interpreter_frame_monitor_size() == BasicObjectLock::size(), "adjust code below");
    int monitor_offset = BytesPerWord * method()->max_locals() +
                         (2 * BytesPerWord) * (number_of_locks - 1);
    // SharedRuntime::OSR_migration_begin() packs BasicObjectLocks in
    // the OSR buffer using 2 word entries: first the lock and then
    // the oop.
    for (int i = 0; i < number_of_locks; i++) {
      int slot_offset = monitor_offset - ((i * 2) * BytesPerWord);
#ifdef ASSERT
      // Verify the interpreter's monitor has a non-null object.
      {
        Label L;
        __ ld(R0, slot_offset + 1*BytesPerWord, OSR_buf);
        __ cmpdi(CCR0, R0, 0);
        __ bne(CCR0, L);
        __ stop("locked object is NULL");
        __ bind(L);
      }
#endif // ASSERT
      // Copy the lock field into the compiled activation.
      Address ml = frame_map()->address_for_monitor_lock(i),
              mo = frame_map()->address_for_monitor_object(i);
      assert(ml.index() == noreg && mo.index() == noreg, "sanity");
      __ ld(R0, slot_offset + 0, OSR_buf);
      __ std(R0, ml.disp(), ml.base());
      __ ld(R0, slot_offset + 1*BytesPerWord, OSR_buf);
      __ std(R0, mo.disp(), mo.base());
    }
  }
}


int LIR_Assembler::emit_exception_handler() {
  // If the last instruction is a call (typically to do a throw which
  // is coming at the end after block reordering) the return address
  // must still point into the code area in order to avoid assertion
  // failures when searching for the corresponding bci => add a nop
  // (was bug 5/14/1999 - gri).
  __ nop();

  // Generate code for the exception handler.
  address handler_base = __ start_a_stub(exception_handler_size());

  if (handler_base == NULL) {
    // Not enough space left for the handler.
    bailout("exception handler overflow");
    return -1;
  }

  int offset = code_offset();
  address entry_point = CAST_FROM_FN_PTR(address, Runtime1::entry_for(Runtime1::handle_exception_from_callee_id));
  //__ load_const_optimized(R0, entry_point);
  __ add_const_optimized(R0, R29_TOC, MacroAssembler::offset_to_global_toc(entry_point));
  __ mtctr(R0);
  __ bctr();

  guarantee(code_offset() - offset <= exception_handler_size(), "overflow");
  __ end_a_stub();

  return offset;
}

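// Note on the handler entry sequence above: instead of materializing the full
// 64-bit entry_point (the commented-out load_const_optimized), the code adds a
// TOC-relative offset to R29_TOC, which usually needs fewer instructions. A
// minimal sketch of the idea, assuming the entry point is within reach of the
// global TOC (the exact instruction selection is up to add_const_optimized):
//
//   addis R0, R29_TOC, offset@ha   // high adjusted part of the offset
//   addi  R0, R0, offset@l         // low part
//   mtctr R0
//   bctr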
// Emit the code to remove the frame from the stack in the exception
// unwind path.
int LIR_Assembler::emit_unwind_handler() {
  _masm->block_comment("Unwind handler");

  int offset = code_offset();
  bool preserve_exception = method()->is_synchronized() || compilation()->env()->dtrace_method_probes();
  const Register Rexception = R3 /*LIRGenerator::exceptionOopOpr()*/, Rexception_save = R31;

  // Fetch the exception from TLS and clear out exception related thread state.
  __ ld(Rexception, in_bytes(JavaThread::exception_oop_offset()), R16_thread);
  __ li(R0, 0);
  __ std(R0, in_bytes(JavaThread::exception_oop_offset()), R16_thread);
  __ std(R0, in_bytes(JavaThread::exception_pc_offset()), R16_thread);

  __ bind(_unwind_handler_entry);
  __ verify_not_null_oop(Rexception);
  if (preserve_exception) { __ mr(Rexception_save, Rexception); }

  // Perform needed unlocking.
  MonitorExitStub* stub = NULL;
  if (method()->is_synchronized()) {
    monitor_address(0, FrameMap::R4_opr);
    stub = new MonitorExitStub(FrameMap::R4_opr, true, 0);
    __ unlock_object(R5, R6, R4, *stub->entry());
    __ bind(*stub->continuation());
  }

  if (compilation()->env()->dtrace_method_probes()) {
    Unimplemented();
  }

  // Dispatch to the unwind logic.
  address unwind_stub = Runtime1::entry_for(Runtime1::unwind_exception_id);
  //__ load_const_optimized(R0, unwind_stub);
  __ add_const_optimized(R0, R29_TOC, MacroAssembler::offset_to_global_toc(unwind_stub));
  if (preserve_exception) { __ mr(Rexception, Rexception_save); }
  __ mtctr(R0);
  __ bctr();

  // Emit the slow path assembly.
  if (stub != NULL) {
    stub->emit_code(this);
  }

  return offset;
}


int LIR_Assembler::emit_deopt_handler() {
  // If the last instruction is a call (typically to do a throw which
  // is coming at the end after block reordering) the return address
  // must still point into the code area in order to avoid assertion
  // failures when searching for the corresponding bci => add a nop
  // (was bug 5/14/1999 - gri).
  __ nop();

  // Generate code for the deopt handler.
  address handler_base = __ start_a_stub(deopt_handler_size());

  if (handler_base == NULL) {
    // Not enough space left for the handler.
    bailout("deopt handler overflow");
    return -1;
  }

  int offset = code_offset();
  __ bl64_patchable(SharedRuntime::deopt_blob()->unpack(), relocInfo::runtime_call_type);

  guarantee(code_offset() - offset <= deopt_handler_size(), "overflow");
  __ end_a_stub();

  return offset;
}


void LIR_Assembler::jobject2reg(jobject o, Register reg) {
  if (o == NULL) {
    __ li(reg, 0);
  } else {
    AddressLiteral addrlit = __ constant_oop_address(o);
    __ load_const(reg, addrlit, (reg != R0) ? R0 : noreg);
  }
}

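// The *_with_patching loaders below share one pattern: emit a fixed-size
// constant load with a placeholder value plus a relocation, register a
// PatchingStub covering those instructions, and let patching_epilog() arrange
// for the placeholder to be overwritten once the oop/klass is resolved. (This
// summarizes the code below; it is not a specification of the patching
// machinery.)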
void LIR_Assembler::jobject2reg_with_patching(Register reg, CodeEmitInfo *info) {
  // Allocate a new index in table to hold the object once it's been patched.
  int oop_index = __ oop_recorder()->allocate_oop_index(NULL);
  PatchingStub* patch = new PatchingStub(_masm, patching_id(info), oop_index);

  AddressLiteral addrlit((address)NULL, oop_Relocation::spec(oop_index));
  __ load_const(reg, addrlit, R0);

  patching_epilog(patch, lir_patch_normal, reg, info);
}


void LIR_Assembler::metadata2reg(Metadata* o, Register reg) {
  AddressLiteral md = __ constant_metadata_address(o); // Notify OOP recorder (don't need the relocation).
  __ load_const_optimized(reg, md.value(), (reg != R0) ? R0 : noreg);
}


void LIR_Assembler::klass2reg_with_patching(Register reg, CodeEmitInfo *info) {
  // Allocate a new index in table to hold the klass once it's been patched.
  int index = __ oop_recorder()->allocate_metadata_index(NULL);
  PatchingStub* patch = new PatchingStub(_masm, PatchingStub::load_klass_id, index);

  AddressLiteral addrlit((address)NULL, metadata_Relocation::spec(index));
  assert(addrlit.rspec().type() == relocInfo::metadata_type, "must be a metadata reloc");
  __ load_const(reg, addrlit, R0);

  patching_epilog(patch, lir_patch_normal, reg, info);
}


void LIR_Assembler::arithmetic_idiv(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Opr temp, LIR_Opr result, CodeEmitInfo* info) {
  const bool is_int = result->is_single_cpu();
  Register Rdividend = is_int ? left->as_register() : left->as_register_lo();
  Register Rdivisor  = noreg;
  Register Rscratch  = temp->as_register();
  Register Rresult   = is_int ? result->as_register() : result->as_register_lo();
  long divisor = -1;

  if (right->is_register()) {
    Rdivisor = is_int ? right->as_register() : right->as_register_lo();
  } else {
    divisor = is_int ? right->as_constant_ptr()->as_jint()
                     : right->as_constant_ptr()->as_jlong();
  }

  assert(Rdividend != Rscratch, "");
  assert(Rdivisor != Rscratch, "");
  assert(code == lir_idiv || code == lir_irem, "Must be irem or idiv");

  if (Rdivisor == noreg) {
    if (divisor == 1) { // stupid, but can happen
      if (code == lir_idiv) {
        __ mr_if_needed(Rresult, Rdividend);
      } else {
        __ li(Rresult, 0);
      }

    } else if (is_power_of_2(divisor)) {
      // Convert division by a power of two into some shifts and logical operations.
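      // Worked example (illustrative values): for is_int with divisor == 8,
      // log2 == 3 and Rdividend == -5, the code below computes
      //   srawi  Rscratch, Rdividend, 31        // Rscratch = -1 (sign mask)
      //   clrldi Rscratch, Rscratch, 61         // keep low log2 bits: Rscratch = 7
      //   add    Rscratch, Rdividend, Rscratch  // biased dividend: -5 + 7 = 2
      // so idiv yields srawi(2, 3) = 0 and irem yields -5 - clrrdi(2, 3) = -5,
      // matching Java's round-toward-zero semantics. For non-negative
      // dividends the bias is 0 and the shifts alone are exact.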
      int log2 = log2_intptr(divisor);

      // Round towards 0.
      if (divisor == 2) {
        if (is_int) {
          __ srwi(Rscratch, Rdividend, 31);
        } else {
          __ srdi(Rscratch, Rdividend, 63);
        }
      } else {
        if (is_int) {
          __ srawi(Rscratch, Rdividend, 31);
        } else {
          __ sradi(Rscratch, Rdividend, 63);
        }
        __ clrldi(Rscratch, Rscratch, 64-log2);
      }
      __ add(Rscratch, Rdividend, Rscratch);

      if (code == lir_idiv) {
        if (is_int) {
          __ srawi(Rresult, Rscratch, log2);
        } else {
          __ sradi(Rresult, Rscratch, log2);
        }
      } else { // lir_irem
        __ clrrdi(Rscratch, Rscratch, log2);
        __ sub(Rresult, Rdividend, Rscratch);
      }

    } else if (divisor == -1) {
      if (code == lir_idiv) {
        __ neg(Rresult, Rdividend);
      } else {
        __ li(Rresult, 0);
      }

    } else {
      __ load_const_optimized(Rscratch, divisor);
      if (code == lir_idiv) {
        if (is_int) {
          __ divw(Rresult, Rdividend, Rscratch); // Can't divide minint/-1.
        } else {
          __ divd(Rresult, Rdividend, Rscratch); // Can't divide minint/-1.
        }
      } else {
        assert(Rscratch != R0, "need both");
        if (is_int) {
          __ divw(R0, Rdividend, Rscratch); // Can't divide minint/-1.
          __ mullw(Rscratch, R0, Rscratch);
        } else {
          __ divd(R0, Rdividend, Rscratch); // Can't divide minint/-1.
          __ mulld(Rscratch, R0, Rscratch);
        }
        __ sub(Rresult, Rdividend, Rscratch);
      }

    }
    return;
  }

  Label regular, done;
  if (is_int) {
    __ cmpwi(CCR0, Rdivisor, -1);
  } else {
    __ cmpdi(CCR0, Rdivisor, -1);
  }
  __ bne(CCR0, regular);
  if (code == lir_idiv) {
    __ neg(Rresult, Rdividend);
    __ b(done);
    __ bind(regular);
    if (is_int) {
      __ divw(Rresult, Rdividend, Rdivisor); // Can't divide minint/-1.
    } else {
      __ divd(Rresult, Rdividend, Rdivisor); // Can't divide minint/-1.
    }
  } else { // lir_irem
    __ li(Rresult, 0);
    __ b(done);
    __ bind(regular);
    if (is_int) {
      __ divw(Rscratch, Rdividend, Rdivisor); // Can't divide minint/-1.
      __ mullw(Rscratch, Rscratch, Rdivisor);
    } else {
      __ divd(Rscratch, Rdividend, Rdivisor); // Can't divide minint/-1.
      __ mulld(Rscratch, Rscratch, Rdivisor);
    }
    __ sub(Rresult, Rdividend, Rscratch);
  }
  __ bind(done);
}


void LIR_Assembler::emit_op3(LIR_Op3* op) {
  switch (op->code()) {
    case lir_idiv:
    case lir_irem:
      arithmetic_idiv(op->code(), op->in_opr1(), op->in_opr2(), op->in_opr3(),
                      op->result_opr(), op->info());
      break;
    case lir_fmad:
      __ fmadd(op->result_opr()->as_double_reg(), op->in_opr1()->as_double_reg(),
               op->in_opr2()->as_double_reg(), op->in_opr3()->as_double_reg());
      break;
    case lir_fmaf:
      __ fmadds(op->result_opr()->as_float_reg(), op->in_opr1()->as_float_reg(),
                op->in_opr2()->as_float_reg(), op->in_opr3()->as_float_reg());
      break;
    default: ShouldNotReachHere(); break;
  }
}


void LIR_Assembler::emit_opBranch(LIR_OpBranch* op) {
#ifdef ASSERT
  assert(op->block() == NULL || op->block()->label() == op->label(), "wrong label");
  if (op->block() != NULL)  _branch_target_blocks.append(op->block());
  if (op->ublock() != NULL) _branch_target_blocks.append(op->ublock());
  assert(op->info() == NULL, "shouldn't have CodeEmitInfo");
#endif

  Label *L = op->label();
  if (op->cond() == lir_cond_always) {
    __ b(*L);
  } else {
    Label done;
    bool is_unordered = false;
    if (op->code() == lir_cond_float_branch) {
      assert(op->ublock() != NULL, "must have unordered successor");
      is_unordered = true;
    } else {
      assert(op->code() == lir_branch, "just checking");
    }

    bool positive = false;
    Assembler::Condition cond = Assembler::equal;
    switch (op->cond()) {
      case lir_cond_equal:        positive = true ; cond = Assembler::equal  ; is_unordered = false; break;
      case lir_cond_notEqual:     positive = false; cond = Assembler::equal  ; is_unordered = false; break;
      case lir_cond_less:         positive = true ; cond = Assembler::less   ; break;
      case lir_cond_belowEqual:   assert(op->code() != lir_cond_float_branch, ""); // fallthru
      case lir_cond_lessEqual:    positive = false; cond = Assembler::greater; break;
      case lir_cond_greater:      positive = true ; cond = Assembler::greater; break;
      case lir_cond_aboveEqual:   assert(op->code() != lir_cond_float_branch, ""); // fallthru
      case lir_cond_greaterEqual: positive = false; cond = Assembler::less   ; break;
      default:                    ShouldNotReachHere();
    }
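    // The branch is assembled from a BO/BI pair: BI selects one bit of the
    // condition register field BOOL_RESULT (set by the preceding compare), and
    // BO selects branch-if-set (bcondCRbiIs1) or branch-if-clear
    // (bcondCRbiIs0). This is why negated conditions (notEqual, lessEqual,
    // greaterEqual) reuse the complementary CR bit with positive == false.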
    int bo = positive ? Assembler::bcondCRbiIs1 : Assembler::bcondCRbiIs0;
    int bi = Assembler::bi0(BOOL_RESULT, cond);
    if (is_unordered) {
      if (positive) {
        if (op->ublock() == op->block()) {
          __ bc_far_optimized(Assembler::bcondCRbiIs1, __ bi0(BOOL_RESULT, Assembler::summary_overflow), *L);
        }
      } else {
        if (op->ublock() != op->block()) { __ bso(BOOL_RESULT, done); }
      }
    }
    __ bc_far_optimized(bo, bi, *L);
    __ bind(done);
  }
}


void LIR_Assembler::emit_opConvert(LIR_OpConvert* op) {
  Bytecodes::Code code = op->bytecode();
  LIR_Opr src = op->in_opr(),
          dst = op->result_opr();

  switch(code) {
    case Bytecodes::_i2l: {
      __ extsw(dst->as_register_lo(), src->as_register());
      break;
    }
    case Bytecodes::_l2i: {
      __ mr_if_needed(dst->as_register(), src->as_register_lo()); // high bits are garbage
      break;
    }
    case Bytecodes::_i2b: {
      __ extsb(dst->as_register(), src->as_register());
      break;
    }
    case Bytecodes::_i2c: {
      __ clrldi(dst->as_register(), src->as_register(), 64-16);
      break;
    }
    case Bytecodes::_i2s: {
      __ extsh(dst->as_register(), src->as_register());
      break;
    }
    case Bytecodes::_i2d:
    case Bytecodes::_l2d: {
      bool src_in_memory = !VM_Version::has_mtfprd();
      FloatRegister rdst = dst->as_double_reg();
      FloatRegister rsrc;
      if (src_in_memory) {
        rsrc = src->as_double_reg(); // via mem
      } else {
        // move src to dst register
        if (code == Bytecodes::_i2d) {
          __ mtfprwa(rdst, src->as_register());
        } else {
          __ mtfprd(rdst, src->as_register_lo());
        }
        rsrc = rdst;
      }
      __ fcfid(rdst, rsrc);
      break;
    }
    case Bytecodes::_i2f:
    case Bytecodes::_l2f: {
      bool src_in_memory = !VM_Version::has_mtfprd();
      FloatRegister rdst = dst->as_float_reg();
      FloatRegister rsrc;
      if (src_in_memory) {
        rsrc = src->as_double_reg(); // via mem
      } else {
        // move src to dst register
        if (code == Bytecodes::_i2f) {
          __ mtfprwa(rdst, src->as_register());
        } else {
          __ mtfprd(rdst, src->as_register_lo());
        }
        rsrc = rdst;
      }
      if (VM_Version::has_fcfids()) {
        __ fcfids(rdst, rsrc);
      } else {
        assert(code == Bytecodes::_i2f, "fcfid+frsp needs fixup code to avoid rounding incompatibility");
        __ fcfid(rdst, rsrc);
        __ frsp(rdst, rdst);
      }
      break;
    }
    case Bytecodes::_f2d: {
      __ fmr_if_needed(dst->as_double_reg(), src->as_float_reg());
      break;
    }
    case Bytecodes::_d2f: {
      __ frsp(dst->as_float_reg(), src->as_double_reg());
      break;
    }
    case Bytecodes::_d2i:
    case Bytecodes::_f2i: {
      bool dst_in_memory = !VM_Version::has_mtfprd();
      FloatRegister rsrc = (code == Bytecodes::_d2i) ? src->as_double_reg() : src->as_float_reg();
      Address addr = dst_in_memory ? frame_map()->address_for_slot(dst->double_stack_ix()) : NULL;
      Label L;
      // Result must be 0 if value is NaN; test by comparing value to itself.
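      // fcmpu of a value against itself is unordered exactly when the value
      // is NaN; that sets the summary-overflow bit of CCR0, so the bso below
      // skips the conversion and the pre-stored 0 remains the result, as
      // required for NaN by the Java spec.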
      __ fcmpu(CCR0, rsrc, rsrc);
      if (dst_in_memory) {
        __ li(R0, 0); // 0 in case of NaN
        __ std(R0, addr.disp(), addr.base());
      } else {
        __ li(dst->as_register(), 0);
      }
      __ bso(CCR0, L);
      __ fctiwz(rsrc, rsrc); // USE_KILL
      if (dst_in_memory) {
        __ stfd(rsrc, addr.disp(), addr.base());
      } else {
        __ mffprd(dst->as_register(), rsrc);
      }
      __ bind(L);
      break;
    }
    case Bytecodes::_d2l:
    case Bytecodes::_f2l: {
      bool dst_in_memory = !VM_Version::has_mtfprd();
      FloatRegister rsrc = (code == Bytecodes::_d2l) ? src->as_double_reg() : src->as_float_reg();
      Address addr = dst_in_memory ? frame_map()->address_for_slot(dst->double_stack_ix()) : NULL;
      Label L;
      // Result must be 0 if value is NaN; test by comparing value to itself.
      __ fcmpu(CCR0, rsrc, rsrc);
      if (dst_in_memory) {
        __ li(R0, 0); // 0 in case of NaN
        __ std(R0, addr.disp(), addr.base());
      } else {
        __ li(dst->as_register_lo(), 0);
      }
      __ bso(CCR0, L);
      __ fctidz(rsrc, rsrc); // USE_KILL
      if (dst_in_memory) {
        __ stfd(rsrc, addr.disp(), addr.base());
      } else {
        __ mffprd(dst->as_register_lo(), rsrc);
      }
      __ bind(L);
      break;
    }

    default: ShouldNotReachHere();
  }
}


void LIR_Assembler::align_call(LIR_Code) {
  // do nothing since all instructions are word aligned on ppc
}


bool LIR_Assembler::emit_trampoline_stub_for_call(address target, Register Rtoc) {
  int start_offset = __ offset();
  // Put the entry point as a constant into the constant pool.
  const address entry_point_toc_addr = __ address_constant(target, RelocationHolder::none);
  if (entry_point_toc_addr == NULL) {
    bailout("const section overflow");
    return false;
  }
  const int entry_point_toc_offset = __ offset_to_method_toc(entry_point_toc_addr);

  // Emit the trampoline stub which will be used by the branch-and-link below.
  address stub = __ emit_trampoline_stub(entry_point_toc_offset, start_offset, Rtoc);
  if (!stub) {
    bailout("no space for trampoline stub");
    return false;
  }
  return true;
}


void LIR_Assembler::call(LIR_OpJavaCall* op, relocInfo::relocType rtype) {
  assert(rtype == relocInfo::opt_virtual_call_type || rtype == relocInfo::static_call_type, "unexpected rtype");

  bool success = emit_trampoline_stub_for_call(op->addr());
  if (!success) { return; }

  __ relocate(rtype);
  // Note: At this point we do not have the address of the trampoline
  // stub, and the entry point might be too far away for bl, so __ pc()
  // serves as dummy and the bl will be patched later.
  __ code()->set_insts_mark();
  __ bl(__ pc());
  add_call_info(code_offset(), op->info());
}


void LIR_Assembler::ic_call(LIR_OpJavaCall* op) {
  __ calculate_address_from_global_toc(R2_TOC, __ method_toc());

  // Virtual call relocation will point to ic load.
  address virtual_call_meta_addr = __ pc();
  // Load a clear inline cache.
  AddressLiteral empty_ic((address) Universe::non_oop_word());
  bool success = __ load_const_from_method_toc(R19_inline_cache_reg, empty_ic, R2_TOC);
  if (!success) {
    bailout("const section overflow");
    return;
  }
  // Call to fixup routine. Fixup routine uses ScopeDesc info
  // to determine who we intended to call.
  __ relocate(virtual_call_Relocation::spec(virtual_call_meta_addr));

  success = emit_trampoline_stub_for_call(op->addr(), R2_TOC);
  if (!success) { return; }

  // Note: At this point we do not have the address of the trampoline
  // stub, and the entry point might be too far away for bl, so __ pc()
  // serves as dummy and the bl will be patched later.
  __ bl(__ pc());
  add_call_info(code_offset(), op->info());
}


void LIR_Assembler::vtable_call(LIR_OpJavaCall* op) {
  ShouldNotReachHere(); // ic_call is used instead.
}


void LIR_Assembler::explicit_null_check(Register addr, CodeEmitInfo* info) {
  ImplicitNullCheckStub* stub = new ImplicitNullCheckStub(code_offset(), info);
  __ null_check(addr, stub->entry());
  append_code_stub(stub);
}


// Attention: caller must encode oop if needed.
int LIR_Assembler::store(LIR_Opr from_reg, Register base, int offset, BasicType type, bool wide, bool unaligned) {
  int store_offset;
  if (!Assembler::is_simm16(offset)) {
    // For offsets larger than a simm16 we set up the offset.
    assert(wide && !from_reg->is_same_register(FrameMap::R0_opr), "large offset only supported in special case");
    __ load_const_optimized(R0, offset);
    store_offset = store(from_reg, base, R0, type, wide);
  } else {
    store_offset = code_offset();
    switch (type) {
      case T_BOOLEAN: // fall through
      case T_BYTE  : __ stb(from_reg->as_register(), offset, base); break;
      case T_CHAR  :
      case T_SHORT : __ sth(from_reg->as_register(), offset, base); break;
      case T_INT   : __ stw(from_reg->as_register(), offset, base); break;
      case T_LONG  : __ std(from_reg->as_register_lo(), offset, base); break;
      case T_ADDRESS:
      case T_METADATA: __ std(from_reg->as_register(), offset, base); break;
      case T_ARRAY : // fall through
      case T_OBJECT:
        {
          if (UseCompressedOops && !wide) {
            // Encoding done in caller.
            __ stw(from_reg->as_register(), offset, base);
            __ verify_coop(from_reg->as_register(), FILE_AND_LINE);
          } else {
            __ std(from_reg->as_register(), offset, base);
            __ verify_oop(from_reg->as_register(), FILE_AND_LINE);
          }
          break;
        }
      case T_FLOAT : __ stfs(from_reg->as_float_reg(), offset, base); break;
      case T_DOUBLE: __ stfd(from_reg->as_double_reg(), offset, base); break;
      default      : ShouldNotReachHere();
    }
  }
  return store_offset;
}


// Attention: caller must encode oop if needed.
int LIR_Assembler::store(LIR_Opr from_reg, Register base, Register disp, BasicType type, bool wide) {
  int store_offset = code_offset();
  switch (type) {
    case T_BOOLEAN: // fall through
    case T_BYTE  : __ stbx(from_reg->as_register(), base, disp); break;
    case T_CHAR  :
    case T_SHORT : __ sthx(from_reg->as_register(), base, disp); break;
    case T_INT   : __ stwx(from_reg->as_register(), base, disp); break;
    case T_LONG  :
#ifdef _LP64
      __ stdx(from_reg->as_register_lo(), base, disp);
#else
      Unimplemented();
#endif
      break;
    case T_ADDRESS:
      __ stdx(from_reg->as_register(), base, disp);
      break;
    case T_ARRAY : // fall through
    case T_OBJECT:
      {
        if (UseCompressedOops && !wide) {
          // Encoding done in caller.
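          // (The narrow oop is produced by the caller; e.g. reg2mem() runs
          // __ encode_heap_oop(R0, ...) before dispatching here, so the
          // 32-bit stwx stores a valid compressed oop.)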
          __ stwx(from_reg->as_register(), base, disp);
          __ verify_coop(from_reg->as_register(), FILE_AND_LINE); // kills R0
        } else {
          __ stdx(from_reg->as_register(), base, disp);
          __ verify_oop(from_reg->as_register(), FILE_AND_LINE); // kills R0
        }
        break;
      }
    case T_FLOAT : __ stfsx(from_reg->as_float_reg(), base, disp); break;
    case T_DOUBLE: __ stfdx(from_reg->as_double_reg(), base, disp); break;
    default      : ShouldNotReachHere();
  }
  return store_offset;
}


int LIR_Assembler::load(Register base, int offset, LIR_Opr to_reg, BasicType type, bool wide, bool unaligned) {
  int load_offset;
  if (!Assembler::is_simm16(offset)) {
    // For offsets larger than a simm16 we set up the offset.
    __ load_const_optimized(R0, offset);
    load_offset = load(base, R0, to_reg, type, wide);
  } else {
    load_offset = code_offset();
    switch(type) {
      case T_BOOLEAN: // fall through
      case T_BYTE  : __ lbz(to_reg->as_register(), offset, base);
                     __ extsb(to_reg->as_register(), to_reg->as_register()); break;
      case T_CHAR  : __ lhz(to_reg->as_register(), offset, base); break;
      case T_SHORT : __ lha(to_reg->as_register(), offset, base); break;
      case T_INT   : __ lwa(to_reg->as_register(), offset, base); break;
      case T_LONG  : __ ld(to_reg->as_register_lo(), offset, base); break;
      case T_METADATA: __ ld(to_reg->as_register(), offset, base); break;
      case T_ADDRESS:
        if (offset == oopDesc::klass_offset_in_bytes() && UseCompressedClassPointers) {
          __ lwz(to_reg->as_register(), offset, base);
          __ decode_klass_not_null(to_reg->as_register());
        } else {
          __ ld(to_reg->as_register(), offset, base);
        }
        break;
      case T_ARRAY : // fall through
      case T_OBJECT:
        {
          if (UseCompressedOops && !wide) {
            __ lwz(to_reg->as_register(), offset, base);
            __ decode_heap_oop(to_reg->as_register());
          } else {
            __ ld(to_reg->as_register(), offset, base);
          }
          // Emitting oop verification here makes the code exceed the
          // allowed size for PatchingStubs.
          // __ verify_oop(to_reg->as_register(), FILE_AND_LINE);
          break;
        }
      case T_FLOAT:  __ lfs(to_reg->as_float_reg(), offset, base); break;
      case T_DOUBLE: __ lfd(to_reg->as_double_reg(), offset, base); break;
      default      : ShouldNotReachHere();
    }
  }
  return load_offset;
}


int LIR_Assembler::load(Register base, Register disp, LIR_Opr to_reg, BasicType type, bool wide) {
  int load_offset = code_offset();
  switch(type) {
    case T_BOOLEAN: // fall through
    case T_BYTE  : __ lbzx(to_reg->as_register(), base, disp);
                   __ extsb(to_reg->as_register(), to_reg->as_register()); break;
    case T_CHAR  : __ lhzx(to_reg->as_register(), base, disp); break;
    case T_SHORT : __ lhax(to_reg->as_register(), base, disp); break;
    case T_INT   : __ lwax(to_reg->as_register(), base, disp); break;
    case T_ADDRESS: __ ldx(to_reg->as_register(), base, disp); break;
    case T_ARRAY : // fall through
    case T_OBJECT:
      {
        if (UseCompressedOops && !wide) {
          __ lwzx(to_reg->as_register(), base, disp);
          __ decode_heap_oop(to_reg->as_register());
        } else {
          __ ldx(to_reg->as_register(), base, disp);
        }
        // Emitting oop verification here makes the code exceed the
        // allowed size for PatchingStubs.
        //__ verify_oop(to_reg->as_register(), FILE_AND_LINE);
        break;
      }
    case T_FLOAT:  __ lfsx(to_reg->as_float_reg() , base, disp); break;
    case T_DOUBLE: __ lfdx(to_reg->as_double_reg(), base, disp); break;
    case T_LONG  :
#ifdef _LP64
      __ ldx(to_reg->as_register_lo(), base, disp);
#else
      Unimplemented();
#endif
      break;
    default      : ShouldNotReachHere();
  }
  return load_offset;
}


void LIR_Assembler::const2stack(LIR_Opr src, LIR_Opr dest) {
  LIR_Const* c = src->as_constant_ptr();
  Register src_reg = R0;
  switch (c->type()) {
    case T_INT:
    case T_FLOAT: {
      int value = c->as_jint_bits();
      __ load_const_optimized(src_reg, value);
      Address addr = frame_map()->address_for_slot(dest->single_stack_ix());
      __ stw(src_reg, addr.disp(), addr.base());
      break;
    }
    case T_ADDRESS: {
      int value = c->as_jint_bits();
      __ load_const_optimized(src_reg, value);
      Address addr = frame_map()->address_for_slot(dest->single_stack_ix());
      __ std(src_reg, addr.disp(), addr.base());
      break;
    }
    case T_OBJECT: {
      jobject2reg(c->as_jobject(), src_reg);
      Address addr = frame_map()->address_for_slot(dest->single_stack_ix());
      __ std(src_reg, addr.disp(), addr.base());
      break;
    }
    case T_LONG:
    case T_DOUBLE: {
      long value = c->as_jlong_bits(); // Use a long; an int would truncate the constant.
      __ load_const_optimized(src_reg, value);
      Address addr = frame_map()->address_for_double_slot(dest->double_stack_ix());
      __ std(src_reg, addr.disp(), addr.base());
      break;
    }
    default:
      Unimplemented();
  }
}


void LIR_Assembler::const2mem(LIR_Opr src, LIR_Opr dest, BasicType type, CodeEmitInfo* info, bool wide) {
  LIR_Const* c = src->as_constant_ptr();
  LIR_Address* addr = dest->as_address_ptr();
  Register base = addr->base()->as_pointer_register();
  LIR_Opr tmp = LIR_OprFact::illegalOpr;
  int offset = -1;
  // Null check for large offsets in LIRGenerator::do_StoreField.
  bool needs_explicit_null_check = !ImplicitNullChecks;

  if (info != NULL && needs_explicit_null_check) {
    explicit_null_check(base, info);
  }

  switch (c->type()) {
    case T_FLOAT: type = T_INT;
    case T_INT:
    case T_ADDRESS: {
      tmp = FrameMap::R0_opr;
      __ load_const_optimized(tmp->as_register(), c->as_jint_bits());
      break;
    }
    case T_DOUBLE: type = T_LONG;
    case T_LONG: {
      tmp = FrameMap::R0_long_opr;
      __ load_const_optimized(tmp->as_register_lo(), c->as_jlong_bits());
      break;
    }
    case T_OBJECT: {
      tmp = FrameMap::R0_opr;
      if (UseCompressedOops && !wide && c->as_jobject() != NULL) {
        AddressLiteral oop_addr = __ constant_oop_address(c->as_jobject());
        __ lis(R0, oop_addr.value() >> 16); // Don't care about sign extend (will use stw).
        __ relocate(oop_addr.rspec(), /*compressed format*/ 1);
        __ ori(R0, R0, oop_addr.value() & 0xffff);
      } else {
        jobject2reg(c->as_jobject(), R0);
      }
      break;
    }
    default:
      Unimplemented();
  }

  // Handle either reg+reg or reg+disp address.
  if (addr->index()->is_valid()) {
    assert(addr->disp() == 0, "must be zero");
    offset = store(tmp, base, addr->index()->as_pointer_register(), type, wide);
  } else {
    assert(Assembler::is_simm16(addr->disp()), "can't handle larger addresses");
    offset = store(tmp, base, addr->disp(), type, wide, false);
  }

  if (info != NULL) {
    assert(offset != -1, "offset should've been set");
    if (!needs_explicit_null_check) {
      add_debug_info_for_null_check(offset, info);
    }
  }
}


void LIR_Assembler::const2reg(LIR_Opr src, LIR_Opr dest, LIR_PatchCode patch_code, CodeEmitInfo* info) {
  LIR_Const* c = src->as_constant_ptr();
  LIR_Opr to_reg = dest;

  switch (c->type()) {
    case T_INT: {
      assert(patch_code == lir_patch_none, "no patching handled here");
      __ load_const_optimized(dest->as_register(), c->as_jint(), R0);
      break;
    }
    case T_ADDRESS: {
      assert(patch_code == lir_patch_none, "no patching handled here");
      __ load_const_optimized(dest->as_register(), c->as_jint(), R0);  // Yes, as_jint ...
      break;
    }
    case T_LONG: {
      assert(patch_code == lir_patch_none, "no patching handled here");
      __ load_const_optimized(dest->as_register_lo(), c->as_jlong(), R0);
      break;
    }

    case T_OBJECT: {
      if (patch_code == lir_patch_none) {
        jobject2reg(c->as_jobject(), to_reg->as_register());
      } else {
        jobject2reg_with_patching(to_reg->as_register(), info);
      }
      break;
    }

    case T_METADATA:
      {
        if (patch_code == lir_patch_none) {
          metadata2reg(c->as_metadata(), to_reg->as_register());
        } else {
          klass2reg_with_patching(to_reg->as_register(), info);
        }
      }
      break;

    case T_FLOAT:
      {
        if (to_reg->is_single_fpu()) {
          address const_addr = __ float_constant(c->as_jfloat());
          if (const_addr == NULL) {
            bailout("const section overflow");
            break;
          }
          RelocationHolder rspec = internal_word_Relocation::spec(const_addr);
          __ relocate(rspec);
          __ load_const(R0, const_addr);
          __ lfsx(to_reg->as_float_reg(), R0);
        } else {
          assert(to_reg->is_single_cpu(), "Must be a cpu register.");
          __ load_const_optimized(to_reg->as_register(), jint_cast(c->as_jfloat()), R0);
        }
      }
      break;

    case T_DOUBLE:
      {
        if (to_reg->is_double_fpu()) {
          address const_addr = __ double_constant(c->as_jdouble());
          if (const_addr == NULL) {
            bailout("const section overflow");
            break;
          }
          RelocationHolder rspec = internal_word_Relocation::spec(const_addr);
          __ relocate(rspec);
          __ load_const(R0, const_addr);
          __ lfdx(to_reg->as_double_reg(), R0);
        } else {
          assert(to_reg->is_double_cpu(), "Must be a long register.");
          __ load_const_optimized(to_reg->as_register_lo(), jlong_cast(c->as_jdouble()), R0);
        }
      }
      break;

    default:
      ShouldNotReachHere();
  }
}


Address LIR_Assembler::as_Address(LIR_Address* addr) {
  Unimplemented(); return Address();
}


inline RegisterOrConstant index_or_disp(LIR_Address* addr) {
  if (addr->index()->is_illegal()) {
    return (RegisterOrConstant)(addr->disp());
  } else {
    return (RegisterOrConstant)(addr->index()->as_pointer_register());
  }
}


void LIR_Assembler::stack2stack(LIR_Opr src, LIR_Opr dest, BasicType type) {
  const Register tmp = R0;
  switch (type) {
    case T_INT:
    case T_FLOAT: {
      Address from = frame_map()->address_for_slot(src->single_stack_ix());
      Address to   = frame_map()->address_for_slot(dest->single_stack_ix());
      __ lwz(tmp, from.disp(), from.base());
      __ stw(tmp, to.disp(), to.base());
      break;
    }
    case T_ADDRESS:
    case T_OBJECT: {
      Address from = frame_map()->address_for_slot(src->single_stack_ix());
      Address to   = frame_map()->address_for_slot(dest->single_stack_ix());
      __ ld(tmp, from.disp(), from.base());
      __ std(tmp, to.disp(), to.base());
      break;
    }
    case T_LONG:
    case T_DOUBLE: {
      Address from = frame_map()->address_for_double_slot(src->double_stack_ix());
      Address to   = frame_map()->address_for_double_slot(dest->double_stack_ix());
      __ ld(tmp, from.disp(), from.base());
      __ std(tmp, to.disp(), to.base());
      break;
    }

    default:
      ShouldNotReachHere();
  }
}


Address LIR_Assembler::as_Address_hi(LIR_Address* addr) {
  Unimplemented(); return Address();
}


Address LIR_Assembler::as_Address_lo(LIR_Address* addr) {
  Unimplemented(); return Address();
}


void LIR_Assembler::mem2reg(LIR_Opr src_opr, LIR_Opr dest, BasicType type,
                            LIR_PatchCode patch_code, CodeEmitInfo* info, bool wide, bool unaligned) {

  assert(type != T_METADATA, "load of metadata ptr not supported");
  LIR_Address* addr = src_opr->as_address_ptr();
  LIR_Opr to_reg = dest;

  Register src = addr->base()->as_pointer_register();
  Register disp_reg = noreg;
  int disp_value = addr->disp();
  bool needs_patching = (patch_code != lir_patch_none);
  // Null check for large offsets in LIRGenerator::do_LoadField.
  bool needs_explicit_null_check = !os::zero_page_read_protected() || !ImplicitNullChecks;

  if (info != NULL && needs_explicit_null_check) {
    explicit_null_check(src, info);
  }

  if (addr->base()->type() == T_OBJECT) {
    __ verify_oop(src, FILE_AND_LINE);
  }

  PatchingStub* patch = NULL;
  if (needs_patching) {
    patch = new PatchingStub(_masm, PatchingStub::access_field_id);
    assert(!to_reg->is_double_cpu() ||
           patch_code == lir_patch_none ||
           patch_code == lir_patch_normal, "patching doesn't match register");
  }

  if (addr->index()->is_illegal()) {
    if (!Assembler::is_simm16(disp_value)) {
      if (needs_patching) {
        __ load_const32(R0, 0); // patchable int
      } else {
        __ load_const_optimized(R0, disp_value);
      }
      disp_reg = R0;
    }
  } else {
    disp_reg = addr->index()->as_pointer_register();
    assert(disp_value == 0, "can't handle 3 operand addresses");
  }

  // Remember the offset of the load. The patching_epilog must be done
  // before the call to add_debug_info, otherwise the PcDescs don't get
  // entered in increasing order.
  int offset;

  if (disp_reg == noreg) {
    assert(Assembler::is_simm16(disp_value), "should have set this up");
    offset = load(src, disp_value, to_reg, type, wide, unaligned);
  } else {
    assert(!unaligned, "unexpected");
    offset = load(src, disp_reg, to_reg, type, wide);
  }

  if (patch != NULL) {
    patching_epilog(patch, patch_code, src, info);
  }
  if (info != NULL && !needs_explicit_null_check) {
    add_debug_info_for_null_check(offset, info);
  }
}


void LIR_Assembler::stack2reg(LIR_Opr src, LIR_Opr dest, BasicType type) {
  Address addr;
  if (src->is_single_word()) {
    addr = frame_map()->address_for_slot(src->single_stack_ix());
  } else if (src->is_double_word()) {
    addr = frame_map()->address_for_double_slot(src->double_stack_ix());
  }

  bool unaligned = (addr.disp() - STACK_BIAS) % 8 != 0;
  load(addr.base(), addr.disp(), dest, dest->type(), true /*wide*/, unaligned);
}


void LIR_Assembler::reg2stack(LIR_Opr from_reg, LIR_Opr dest, BasicType type, bool pop_fpu_stack) {
  Address addr;
  if (dest->is_single_word()) {
    addr = frame_map()->address_for_slot(dest->single_stack_ix());
  } else if (dest->is_double_word()) {
    addr = frame_map()->address_for_slot(dest->double_stack_ix());
  }
  bool unaligned = (addr.disp() - STACK_BIAS) % 8 != 0;
  store(from_reg, addr.base(), addr.disp(), from_reg->type(), true /*wide*/, unaligned);
}


void LIR_Assembler::reg2reg(LIR_Opr from_reg, LIR_Opr to_reg) {
  if (from_reg->is_float_kind() && to_reg->is_float_kind()) {
    if (from_reg->is_double_fpu()) {
      // double to double moves
      assert(to_reg->is_double_fpu(), "should match");
      __ fmr_if_needed(to_reg->as_double_reg(), from_reg->as_double_reg());
    } else {
      // float to float moves
      assert(to_reg->is_single_fpu(), "should match");
      __ fmr_if_needed(to_reg->as_float_reg(), from_reg->as_float_reg());
    }
  } else if (!from_reg->is_float_kind() && !to_reg->is_float_kind()) {
    if (from_reg->is_double_cpu()) {
      __ mr_if_needed(to_reg->as_pointer_register(), from_reg->as_pointer_register());
    } else if (to_reg->is_double_cpu()) {
      // int to long moves
      __ mr_if_needed(to_reg->as_register_lo(), from_reg->as_register());
    } else {
      // int to int moves
      __ mr_if_needed(to_reg->as_register(), from_reg->as_register());
    }
  } else {
    ShouldNotReachHere();
  }
  if (is_reference_type(to_reg->type())) {
    __ verify_oop(to_reg->as_register(), FILE_AND_LINE);
  }
}


void LIR_Assembler::reg2mem(LIR_Opr from_reg, LIR_Opr dest, BasicType type,
                            LIR_PatchCode patch_code, CodeEmitInfo* info, bool pop_fpu_stack,
                            bool wide, bool unaligned) {
  assert(type != T_METADATA, "store of metadata ptr not supported");
  LIR_Address* addr = dest->as_address_ptr();

  Register src = addr->base()->as_pointer_register();
  Register disp_reg = noreg;
  int disp_value = addr->disp();
  bool needs_patching = (patch_code != lir_patch_none);
  bool compress_oop = (is_reference_type(type)) && UseCompressedOops && !wide &&
                      CompressedOops::mode() != CompressedOops::UnscaledNarrowOop;
  bool load_disp = addr->index()->is_illegal() && !Assembler::is_simm16(disp_value);
  bool use_R29 = compress_oop && load_disp; // Avoid register conflict, also do null check before killing R29.
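  // Why R29: encoding the oop below needs R0 as scratch, so a large
  // displacement cannot be materialized in R0 at the same time. R29_TOC is
  // borrowed as the displacement register instead and reloaded from the
  // global TOC after the store (see the "reinit" below). While R29 is
  // clobbered an implicit null check cannot be taken safely, so the null
  // check must be explicit and happen first.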
1266 // Null check for large offsets in LIRGenerator::do_StoreField. 1267 bool needs_explicit_null_check = !ImplicitNullChecks || use_R29; 1268 1269 if (info != NULL && needs_explicit_null_check) { 1270 explicit_null_check(src, info); 1271 } 1272 1273 if (addr->base()->is_oop_register()) { 1274 __ verify_oop(src, FILE_AND_LINE); 1275 } 1276 1277 PatchingStub* patch = NULL; 1278 if (needs_patching) { 1279 patch = new PatchingStub(_masm, PatchingStub::access_field_id); 1280 assert(!from_reg->is_double_cpu() || 1281 patch_code == lir_patch_none || 1282 patch_code == lir_patch_normal, "patching doesn't match register"); 1283 } 1284 1285 if (addr->index()->is_illegal()) { 1286 if (load_disp) { 1287 disp_reg = use_R29 ? R29_TOC : R0; 1288 if (needs_patching) { 1289 __ load_const32(disp_reg, 0); // patchable int 1290 } else { 1291 __ load_const_optimized(disp_reg, disp_value); 1292 } 1293 } 1294 } else { 1295 disp_reg = addr->index()->as_pointer_register(); 1296 assert(disp_value == 0, "can't handle 3 operand addresses"); 1297 } 1298 1299 // remember the offset of the store. The patching_epilog must be done 1300 // before the call to add_debug_info_for_null_check, otherwise the PcDescs don't get 1301 // entered in increasing order. 1302 int offset; 1303 1304 if (compress_oop) { 1305 Register co = __ encode_heap_oop(R0, from_reg->as_register()); 1306 from_reg = FrameMap::as_opr(co); 1307 } 1308 1309 if (disp_reg == noreg) { 1310 assert(Assembler::is_simm16(disp_value), "should have set this up"); 1311 offset = store(from_reg, src, disp_value, type, wide, unaligned); 1312 } else { 1313 assert(!unaligned, "unexpected"); 1314 offset = store(from_reg, src, disp_reg, type, wide); 1315 } 1316 1317 if (use_R29) { 1318 __ load_const_optimized(R29_TOC, MacroAssembler::global_toc(), R0); // reinit 1319 } 1320 1321 if (patch != NULL) { 1322 patching_epilog(patch, patch_code, src, info); 1323 } 1324 1325 if (info != NULL && !needs_explicit_null_check) { 1326 add_debug_info_for_null_check(offset, info); 1327 } 1328 } 1329 1330 1331 void LIR_Assembler::return_op(LIR_Opr result) { 1332 const Register return_pc = R31; // Must survive C-call to enable_stack_reserved_zone(). 1333 const Register polling_page = R12; 1334 1335 // Pop the stack before the safepoint code. 1336 int frame_size = initial_frame_size_in_bytes(); 1337 if (Assembler::is_simm(frame_size, 16)) { 1338 __ addi(R1_SP, R1_SP, frame_size); 1339 } else { 1340 __ pop_frame(); 1341 } 1342 1343 if (SafepointMechanism::uses_thread_local_poll()) { 1344 __ ld(polling_page, in_bytes(Thread::polling_page_offset()), R16_thread); 1345 } else { 1346 __ load_const_optimized(polling_page, (long)(address) os::get_polling_page(), R0); 1347 } 1348 1349 // Restore return pc relative to callers' sp. 1350 __ ld(return_pc, _abi(lr), R1_SP); 1351 // Move return pc to LR. 1352 __ mtlr(return_pc); 1353 1354 if (StackReservedPages > 0 && compilation()->has_reserved_stack_access()) { 1355 __ reserved_stack_check(return_pc); 1356 } 1357 1358 // We need to mark the code position where the load from the safepoint 1359 // polling page was emitted as relocInfo::poll_return_type here. 1360 __ relocate(relocInfo::poll_return_type); 1361 __ load_from_polling_page(polling_page); 1362 1363 // Return. 
int LIR_Assembler::safepoint_poll(LIR_Opr tmp, CodeEmitInfo* info) {
  const Register poll_addr = tmp->as_register();
  if (SafepointMechanism::uses_thread_local_poll()) {
    __ ld(poll_addr, in_bytes(Thread::polling_page_offset()), R16_thread);
  } else {
    __ load_const_optimized(poll_addr, (intptr_t)os::get_polling_page(), R0);
  }
  if (info != NULL) {
    add_debug_info_for_branch(info);
  }
  int offset = __ offset();
  __ relocate(relocInfo::poll_type);
  __ load_from_polling_page(poll_addr);

  return offset;
}


void LIR_Assembler::emit_static_call_stub() {
  address call_pc = __ pc();
  address stub = __ start_a_stub(static_call_stub_size());
  if (stub == NULL) {
    bailout("static call stub overflow");
    return;
  }

  // For java_to_interp stubs we use R11_scratch1 as scratch register
  // and in call trampoline stubs we use R12_scratch2. This way we
  // can distinguish them (see is_NativeCallTrampolineStub_at()).
  const Register reg_scratch = R11_scratch1;

  // Create a static stub relocation which relates this stub
  // with the call instruction at insts_call_instruction_offset in the
  // instructions code-section.
  int start = __ offset();
  __ relocate(static_stub_Relocation::spec(call_pc));

  // Now, create the stub's code:
  // - load the TOC
  // - load the inline cache oop from the constant pool
  // - load the call target from the constant pool
  // - call
  __ calculate_address_from_global_toc(reg_scratch, __ method_toc());
  AddressLiteral ic = __ allocate_metadata_address((Metadata *)NULL);
  bool success = __ load_const_from_method_toc(R19_inline_cache_reg, ic, reg_scratch, /*fixed_size*/ true);

  if (ReoptimizeCallSequences) {
    __ b64_patchable((address)-1, relocInfo::none);
  } else {
    AddressLiteral a((address)-1);
    success = success && __ load_const_from_method_toc(reg_scratch, a, reg_scratch, /*fixed_size*/ true);
    __ mtctr(reg_scratch);
    __ bctr();
  }
  if (!success) {
    bailout("const section overflow");
    return;
  }

  assert(__ offset() - start <= static_call_stub_size(), "stub too big");
  __ end_a_stub();
}


void LIR_Assembler::comp_op(LIR_Condition condition, LIR_Opr opr1, LIR_Opr opr2, LIR_Op2* op) {
  bool unsigned_comp = (condition == lir_cond_belowEqual || condition == lir_cond_aboveEqual);
  if (opr1->is_single_fpu()) {
    __ fcmpu(BOOL_RESULT, opr1->as_float_reg(), opr2->as_float_reg());
  } else if (opr1->is_double_fpu()) {
    __ fcmpu(BOOL_RESULT, opr1->as_double_reg(), opr2->as_double_reg());
  } else if (opr1->is_single_cpu()) {
    if (opr2->is_constant()) {
      switch (opr2->as_constant_ptr()->type()) {
        case T_INT:
          {
            jint con = opr2->as_constant_ptr()->as_jint();
            if (unsigned_comp) {
              if (Assembler::is_uimm(con, 16)) {
                __ cmplwi(BOOL_RESULT, opr1->as_register(), con);
              } else {
                __ load_const_optimized(R0, con);
                __ cmplw(BOOL_RESULT, opr1->as_register(), R0);
              }
            } else {
              if (Assembler::is_simm(con, 16)) {
                __ cmpwi(BOOL_RESULT, opr1->as_register(), con);
              } else {
                __ load_const_optimized(R0, con);
                __ cmpw(BOOL_RESULT, opr1->as_register(), R0);
              }
            }
          }
          break;

        case T_OBJECT:
          // There are only equal/notequal comparisons on objects.
          {
            assert(condition == lir_cond_equal || condition == lir_cond_notEqual, "oops");
            jobject con = opr2->as_constant_ptr()->as_jobject();
            if (con == NULL) {
              __ cmpdi(BOOL_RESULT, opr1->as_register(), 0);
            } else {
              jobject2reg(con, R0);
              __ cmpd(BOOL_RESULT, opr1->as_register(), R0);
            }
          }
          break;

        default:
          ShouldNotReachHere();
          break;
      }
    } else {
      assert(opr1->type() != T_ADDRESS && opr2->type() != T_ADDRESS, "currently unsupported");
      if (is_reference_type(opr1->type())) {
        // There are only equal/notequal comparisons on objects.
        assert(condition == lir_cond_equal || condition == lir_cond_notEqual, "oops");
        __ cmpd(BOOL_RESULT, opr1->as_register(), opr2->as_register());
      } else {
        if (unsigned_comp) {
          __ cmplw(BOOL_RESULT, opr1->as_register(), opr2->as_register());
        } else {
          __ cmpw(BOOL_RESULT, opr1->as_register(), opr2->as_register());
        }
      }
    }
  } else if (opr1->is_double_cpu()) {
    if (opr2->is_constant()) {
      jlong con = opr2->as_constant_ptr()->as_jlong();
      if (unsigned_comp) {
        if (Assembler::is_uimm(con, 16)) {
          __ cmpldi(BOOL_RESULT, opr1->as_register_lo(), con);
        } else {
          __ load_const_optimized(R0, con);
          __ cmpld(BOOL_RESULT, opr1->as_register_lo(), R0);
        }
      } else {
        if (Assembler::is_simm(con, 16)) {
          __ cmpdi(BOOL_RESULT, opr1->as_register_lo(), con);
        } else {
          __ load_const_optimized(R0, con);
          __ cmpd(BOOL_RESULT, opr1->as_register_lo(), R0);
        }
      }
    } else if (opr2->is_register()) {
      if (unsigned_comp) {
        __ cmpld(BOOL_RESULT, opr1->as_register_lo(), opr2->as_register_lo());
      } else {
        __ cmpd(BOOL_RESULT, opr1->as_register_lo(), opr2->as_register_lo());
      }
    } else {
      ShouldNotReachHere();
    }
  } else {
    ShouldNotReachHere();
  }
}


void LIR_Assembler::comp_fl2i(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Opr dst, LIR_Op2* op) {
  const Register Rdst = dst->as_register();
  Label done;
  if (code == lir_cmp_fd2i || code == lir_ucmp_fd2i) {
    bool is_unordered_less = (code == lir_ucmp_fd2i);
    if (left->is_single_fpu()) {
      __ fcmpu(CCR0, left->as_float_reg(), right->as_float_reg());
    } else if (left->is_double_fpu()) {
      __ fcmpu(CCR0, left->as_double_reg(), right->as_double_reg());
    } else {
      ShouldNotReachHere();
    }
    __ li(Rdst, is_unordered_less ? -1 : 1);
    __ bso(CCR0, done);
  } else if (code == lir_cmp_l2i) {
    __ cmpd(CCR0, left->as_register_lo(), right->as_register_lo());
  } else {
    ShouldNotReachHere();
  }
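  // CR0 now holds the comparison; mfcr copies the whole condition register,
  // leaving CR0's LT and GT flags in the two most significant bits of the
  // low 32-bit word. With LT and GT read as 0/1, the sequence below computes:
  //   srwi  Rdst, R0, 30    // Rdst = 2*LT + GT
  //   srawi R0, R0, 31      // R0 = LT ? -1 : 0
  //   or    Rdst, R0, Rdst  // less: -1, equal: 0, greater: 1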
  __ mfcr(R0); // set bit 32..33 as follows: <: 0b10, =: 0b00, >: 0b01
  __ srwi(Rdst, R0, 30);
  __ srawi(R0, R0, 31);
  __ orr(Rdst, R0, Rdst); // set result as follows: <: -1, =: 0, >: 1
  __ bind(done);
}


inline void load_to_reg(LIR_Assembler *lasm, LIR_Opr src, LIR_Opr dst) {
  if (src->is_constant()) {
    lasm->const2reg(src, dst, lir_patch_none, NULL);
  } else if (src->is_register()) {
    lasm->reg2reg(src, dst);
  } else if (src->is_stack()) {
    lasm->stack2reg(src, dst, dst->type());
  } else {
    ShouldNotReachHere();
  }
}


void LIR_Assembler::cmove(LIR_Condition condition, LIR_Opr opr1, LIR_Opr opr2, LIR_Opr result, BasicType type) {
  if (opr1->is_equal(opr2) || opr1->is_same_register(opr2)) {
    load_to_reg(this, opr1, result); // Condition doesn't matter.
    return;
  }

  bool positive = false;
  Assembler::Condition cond = Assembler::equal;
  switch (condition) {
    case lir_cond_equal:        positive = true ; cond = Assembler::equal  ; break;
    case lir_cond_notEqual:     positive = false; cond = Assembler::equal  ; break;
    case lir_cond_less:         positive = true ; cond = Assembler::less   ; break;
    case lir_cond_belowEqual:
    case lir_cond_lessEqual:    positive = false; cond = Assembler::greater; break;
    case lir_cond_greater:      positive = true ; cond = Assembler::greater; break;
    case lir_cond_aboveEqual:
    case lir_cond_greaterEqual: positive = false; cond = Assembler::less   ; break;
    default:                    ShouldNotReachHere();
  }

  // Try to use isel on >=Power7.
  if (VM_Version::has_isel() && result->is_cpu_register()) {
    bool o1_is_reg = opr1->is_cpu_register(), o2_is_reg = opr2->is_cpu_register();
    const Register result_reg = result->is_single_cpu() ? result->as_register() : result->as_register_lo();

    // We can use result_reg to load one operand if not already in register.
    Register first  = o1_is_reg ? (opr1->is_single_cpu() ? opr1->as_register() : opr1->as_register_lo()) : result_reg,
             second = o2_is_reg ? (opr2->is_single_cpu() ? opr2->as_register() : opr2->as_register_lo()) : result_reg;

    if (first != second) {
      if (!o1_is_reg) {
        load_to_reg(this, opr1, result);
      }

      if (!o2_is_reg) {
        load_to_reg(this, opr2, result);
      }

      __ isel(result_reg, BOOL_RESULT, cond, !positive, first, second);
      return;
    }
  } // isel

  load_to_reg(this, opr1, result);

  Label skip;
  int bo = positive ? Assembler::bcondCRbiIs1 : Assembler::bcondCRbiIs0;
  int bi = Assembler::bi0(BOOL_RESULT, cond);
  __ bc(bo, bi, skip);

  load_to_reg(this, opr2, result);
  __ bind(skip);
}


void LIR_Assembler::arith_op(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Opr dest,
                             CodeEmitInfo* info, bool pop_fpu_stack) {
  assert(info == NULL, "unused on this code path");
  assert(left->is_register(), "wrong items state");
  assert(dest->is_register(), "wrong items state");

  if (right->is_register()) {
    if (dest->is_float_kind()) {

      FloatRegister lreg, rreg, res;
      if (right->is_single_fpu()) {
        lreg = left->as_float_reg();
        rreg = right->as_float_reg();
        res  = dest->as_float_reg();
        switch (code) {
          case lir_add: __ fadds(res, lreg, rreg); break;
          case lir_sub: __ fsubs(res, lreg, rreg); break;
          case lir_mul: // fall through
          case lir_mul_strictfp: __ fmuls(res, lreg, rreg); break;
          case lir_div: // fall through
          case lir_div_strictfp: __ fdivs(res, lreg, rreg); break;
          default: ShouldNotReachHere();
        }
      } else {
        lreg = left->as_double_reg();
        rreg = right->as_double_reg();
        res  = dest->as_double_reg();
        switch (code) {
          case lir_add: __ fadd(res, lreg, rreg); break;
          case lir_sub: __ fsub(res, lreg, rreg); break;
          case lir_mul: // fall through
          case lir_mul_strictfp: __ fmul(res, lreg, rreg); break;
          case lir_div: // fall through
          case lir_div_strictfp: __ fdiv(res, lreg, rreg); break;
          default: ShouldNotReachHere();
        }
      }

    } else if (dest->is_double_cpu()) {

      Register dst_lo = dest->as_register_lo();
      Register op1_lo = left->as_pointer_register();
      Register op2_lo = right->as_pointer_register();

      switch (code) {
        case lir_add: __ add(dst_lo, op1_lo, op2_lo); break;
        case lir_sub: __ sub(dst_lo, op1_lo, op2_lo); break;
        case lir_mul: __ mulld(dst_lo, op1_lo, op2_lo); break;
        default: ShouldNotReachHere();
      }
    } else {
      assert(right->is_single_cpu(), "Just Checking");

      Register lreg = left->as_register();
      Register res  = dest->as_register();
      Register rreg = right->as_register();
      switch (code) {
        case lir_add: __ add  (res, lreg, rreg); break;
        case lir_sub: __ sub  (res, lreg, rreg); break;
        case lir_mul: __ mullw(res, lreg, rreg); break;
        default: ShouldNotReachHere();
      }
    }
  } else {
    assert(right->is_constant(), "must be constant");

    if (dest->is_single_cpu()) {
      Register lreg = left->as_register();
      Register res  = dest->as_register();
      int simm16 = right->as_constant_ptr()->as_jint();

      switch (code) {
        case lir_sub: assert(Assembler::is_simm16(-simm16), "cannot encode"); // see do_ArithmeticOp_Int
                      simm16 = -simm16;
        case lir_add: if (res == lreg && simm16 == 0) break;
                      __ addi(res, lreg, simm16); break;
        case lir_mul: if (res == lreg && simm16 == 1) break;
                      __ mulli(res, lreg, simm16); break;
        default: ShouldNotReachHere();
      }
    } else {
      Register lreg = left->as_pointer_register();
      Register res  = dest->as_register_lo();
      long con = right->as_constant_ptr()->as_jlong();
      assert(Assembler::is_simm16(con), "must be simm16");

      switch (code) {
        case lir_sub: assert(Assembler::is_simm16(-con), "cannot encode"); // see do_ArithmeticOp_Long
                      con = -con;
        case lir_add: if (res == lreg && con == 0) break;
                      __ addi(res, lreg, (int)con); break;
addi(res, lreg, (int)con); break; 1713 case lir_mul: if (res == lreg && con == 1) break; 1714 __ mulli(res, lreg, (int)con); break; 1715 default: ShouldNotReachHere(); 1716 } 1717 } 1718 } 1719 } 1720 1721 1722 void LIR_Assembler::fpop() { 1723 Unimplemented(); 1724 // do nothing 1725 } 1726 1727 1728 void LIR_Assembler::intrinsic_op(LIR_Code code, LIR_Opr value, LIR_Opr thread, LIR_Opr dest, LIR_Op* op) { 1729 switch (code) { 1730 case lir_sqrt: { 1731 __ fsqrt(dest->as_double_reg(), value->as_double_reg()); 1732 break; 1733 } 1734 case lir_abs: { 1735 __ fabs(dest->as_double_reg(), value->as_double_reg()); 1736 break; 1737 } 1738 default: { 1739 ShouldNotReachHere(); 1740 break; 1741 } 1742 } 1743 } 1744 1745 1746 void LIR_Assembler::logic_op(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Opr dest) { 1747 if (right->is_constant()) { // see do_LogicOp 1748 long uimm; 1749 Register d, l; 1750 if (dest->is_single_cpu()) { 1751 uimm = right->as_constant_ptr()->as_jint(); 1752 d = dest->as_register(); 1753 l = left->as_register(); 1754 } else { 1755 uimm = right->as_constant_ptr()->as_jlong(); 1756 d = dest->as_register_lo(); 1757 l = left->as_register_lo(); 1758 } 1759 long uimms = (unsigned long)uimm >> 16, 1760 uimmss = (unsigned long)uimm >> 32; 1761 1762 switch (code) { 1763 case lir_logic_and: 1764 if (uimmss != 0 || (uimms != 0 && (uimm & 0xFFFF) != 0) || is_power_of_2_long(uimm)) { 1765 __ andi(d, l, uimm); // special cases 1766 } else if (uimms != 0) { __ andis_(d, l, uimms); } 1767 else { __ andi_(d, l, uimm); } 1768 break; 1769 1770 case lir_logic_or: 1771 if (uimms != 0) { assert((uimm & 0xFFFF) == 0, "sanity"); __ oris(d, l, uimms); } 1772 else { __ ori(d, l, uimm); } 1773 break; 1774 1775 case lir_logic_xor: 1776 if (uimm == -1) { __ nand(d, l, l); } // special case 1777 else if (uimms != 0) { assert((uimm & 0xFFFF) == 0, "sanity"); __ xoris(d, l, uimms); } 1778 else { __ xori(d, l, uimm); } 1779 break; 1780 1781 default: ShouldNotReachHere(); 1782 } 1783 } else { 1784 assert(right->is_register(), "right should be in register"); 1785 1786 if (dest->is_single_cpu()) { 1787 switch (code) { 1788 case lir_logic_and: __ andr(dest->as_register(), left->as_register(), right->as_register()); break; 1789 case lir_logic_or: __ orr (dest->as_register(), left->as_register(), right->as_register()); break; 1790 case lir_logic_xor: __ xorr(dest->as_register(), left->as_register(), right->as_register()); break; 1791 default: ShouldNotReachHere(); 1792 } 1793 } else { 1794 Register l = (left->is_single_cpu() && left->is_oop_register()) ? left->as_register() : 1795 left->as_register_lo(); 1796 Register r = (right->is_single_cpu() && right->is_oop_register()) ? 
right->as_register() : 1797 right->as_register_lo(); 1798 1799 switch (code) { 1800 case lir_logic_and: __ andr(dest->as_register_lo(), l, r); break; 1801 case lir_logic_or: __ orr (dest->as_register_lo(), l, r); break; 1802 case lir_logic_xor: __ xorr(dest->as_register_lo(), l, r); break; 1803 default: ShouldNotReachHere(); 1804 } 1805 } 1806 } 1807 } 1808 1809 1810 int LIR_Assembler::shift_amount(BasicType t) { 1811 int elem_size = type2aelembytes(t); 1812 switch (elem_size) { 1813 case 1 : return 0; 1814 case 2 : return 1; 1815 case 4 : return 2; 1816 case 8 : return 3; 1817 } 1818 ShouldNotReachHere(); 1819 return -1; 1820 } 1821 1822 1823 void LIR_Assembler::throw_op(LIR_Opr exceptionPC, LIR_Opr exceptionOop, CodeEmitInfo* info) { 1824 info->add_register_oop(exceptionOop); 1825 1826 // Reuse the debug info from the safepoint poll for the throw op itself. 1827 address pc_for_athrow = __ pc(); 1828 int pc_for_athrow_offset = __ offset(); 1829 //RelocationHolder rspec = internal_word_Relocation::spec(pc_for_athrow); 1830 //__ relocate(rspec); 1831 //__ load_const(exceptionPC->as_register(), pc_for_athrow, R0); 1832 __ calculate_address_from_global_toc(exceptionPC->as_register(), pc_for_athrow, true, true, /*add_relocation*/ true); 1833 add_call_info(pc_for_athrow_offset, info); // for exception handler 1834 1835 address stub = Runtime1::entry_for(compilation()->has_fpu_code() ? Runtime1::handle_exception_id 1836 : Runtime1::handle_exception_nofpu_id); 1837 //__ load_const_optimized(R0, stub); 1838 __ add_const_optimized(R0, R29_TOC, MacroAssembler::offset_to_global_toc(stub)); 1839 __ mtctr(R0); 1840 __ bctr(); 1841 } 1842 1843 1844 void LIR_Assembler::unwind_op(LIR_Opr exceptionOop) { 1845 // Note: Not used with EnableDebuggingOnDemand. 1846 assert(exceptionOop->as_register() == R3, "should match"); 1847 __ b(_unwind_handler_entry); 1848 } 1849 1850 1851 void LIR_Assembler::emit_arraycopy(LIR_OpArrayCopy* op) { 1852 Register src = op->src()->as_register(); 1853 Register dst = op->dst()->as_register(); 1854 Register src_pos = op->src_pos()->as_register(); 1855 Register dst_pos = op->dst_pos()->as_register(); 1856 Register length = op->length()->as_register(); 1857 Register tmp = op->tmp()->as_register(); 1858 Register tmp2 = R0; 1859 1860 int flags = op->flags(); 1861 ciArrayKlass* default_type = op->expected_type(); 1862 BasicType basic_type = default_type != NULL ? default_type->element_type()->basic_type() : T_ILLEGAL; 1863 if (basic_type == T_ARRAY) basic_type = T_OBJECT; 1864 1865 // Set up the arraycopy stub information. 1866 ArrayCopyStub* stub = op->stub(); 1867 const int frame_resize = frame::abi_reg_args_size - sizeof(frame::jit_abi); // C calls need larger frame. 1868 1869 // Always do stub if no type information is available. It's ok if 1870 // the known type isn't loaded since the code sanity checks 1871 // in debug mode and the type isn't required when we know the exact type 1872 // also check that the type is an array type. 1873 if (op->expected_type() == NULL) { 1874 assert(src->is_nonvolatile() && src_pos->is_nonvolatile() && dst->is_nonvolatile() && dst_pos->is_nonvolatile() && 1875 length->is_nonvolatile(), "must preserve"); 1876 address copyfunc_addr = StubRoutines::generic_arraycopy(); 1877 assert(copyfunc_addr != NULL, "generic arraycopy stub required"); 1878 1879 // 3 parms are int. Convert to long. 
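    // A sketch of the call being set up here (our reading of StubRoutines::generic_arraycopy()):
    //
    //   // jint parameters arrive sign-extended in 64-bit argument registers, hence the extsw below.
    //   int generic_arraycopy(oopDesc* src, jint src_pos, oopDesc* dst, jint dst_pos, jint length);
    //
    // The stub returns 0 on success, or ~(number of elements already copied) on partial failure;
    // the nand/subf/add sequence after the call recovers that count so the slow path only
    // handles the remaining tail.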
    __ mr(R3_ARG1, src);
    __ extsw(R4_ARG2, src_pos);
    __ mr(R5_ARG3, dst);
    __ extsw(R6_ARG4, dst_pos);
    __ extsw(R7_ARG5, length);

#ifndef PRODUCT
    if (PrintC1Statistics) {
      address counter = (address)&Runtime1::_generic_arraycopystub_cnt;
      int simm16_offs = __ load_const_optimized(tmp, counter, tmp2, true);
      __ lwz(R11_scratch1, simm16_offs, tmp);
      __ addi(R11_scratch1, R11_scratch1, 1);
      __ stw(R11_scratch1, simm16_offs, tmp);
    }
#endif
    __ call_c_with_frame_resize(copyfunc_addr, /*stub does not need resized frame*/ 0);

    __ nand(tmp, R3_RET, R3_RET);
    __ subf(length, tmp, length);
    __ add(src_pos, tmp, src_pos);
    __ add(dst_pos, tmp, dst_pos);

    __ cmpwi(CCR0, R3_RET, 0);
    __ bc_far_optimized(Assembler::bcondCRbiIs1, __ bi0(CCR0, Assembler::less), *stub->entry());
    __ bind(*stub->continuation());
    return;
  }

  assert(default_type != NULL && default_type->is_array_klass(), "must be true at this point");
  Label cont, slow, copyfunc;

  bool simple_check_flag_set = flags & (LIR_OpArrayCopy::src_null_check |
                                        LIR_OpArrayCopy::dst_null_check |
                                        LIR_OpArrayCopy::src_pos_positive_check |
                                        LIR_OpArrayCopy::dst_pos_positive_check |
                                        LIR_OpArrayCopy::length_positive_check);

  // Use only one conditional branch for simple checks.
  if (simple_check_flag_set) {
    ConditionRegister combined_check = CCR1, tmp_check = CCR1;

    // Make sure src and dst are non-null.
    if (flags & LIR_OpArrayCopy::src_null_check) {
      __ cmpdi(combined_check, src, 0);
      tmp_check = CCR0;
    }

    if (flags & LIR_OpArrayCopy::dst_null_check) {
      __ cmpdi(tmp_check, dst, 0);
      if (tmp_check != combined_check) {
        __ cror(combined_check, Assembler::equal, tmp_check, Assembler::equal);
      }
      tmp_check = CCR0;
    }

    // Clear combined_check.eq if not already used.
    if (tmp_check == combined_check) {
      __ crandc(combined_check, Assembler::equal, combined_check, Assembler::equal);
      tmp_check = CCR0;
    }

    if (flags & LIR_OpArrayCopy::src_pos_positive_check) {
      // Test src_pos register.
      __ cmpwi(tmp_check, src_pos, 0);
      __ cror(combined_check, Assembler::equal, tmp_check, Assembler::less);
    }

    if (flags & LIR_OpArrayCopy::dst_pos_positive_check) {
      // Test dst_pos register.
      __ cmpwi(tmp_check, dst_pos, 0);
      __ cror(combined_check, Assembler::equal, tmp_check, Assembler::less);
    }

    if (flags & LIR_OpArrayCopy::length_positive_check) {
      // Make sure length isn't negative.
      __ cmpwi(tmp_check, length, 0);
      __ cror(combined_check, Assembler::equal, tmp_check, Assembler::less);
    }

    __ beq(combined_check, slow);
  }

  // If the compiler was not able to prove that the exact type of the source or the destination
  // of the arraycopy is an array type, check at runtime if the source or the destination is
  // an instance type.
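  // Klass::layout_helper() is negative for array klasses and >= _lh_neutral_value (0) for
  // instance klasses, so the lwz/cmpwi/bge sequences below route anything that might not
  // be an array to the slow path.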
  if (flags & LIR_OpArrayCopy::type_check) {
    if (!(flags & LIR_OpArrayCopy::dst_objarray)) {
      __ load_klass(tmp, dst);
      __ lwz(tmp2, in_bytes(Klass::layout_helper_offset()), tmp);
      __ cmpwi(CCR0, tmp2, Klass::_lh_neutral_value);
      __ bge(CCR0, slow);
    }

    if (!(flags & LIR_OpArrayCopy::src_objarray)) {
      __ load_klass(tmp, src);
      __ lwz(tmp2, in_bytes(Klass::layout_helper_offset()), tmp);
      __ cmpwi(CCR0, tmp2, Klass::_lh_neutral_value);
      __ bge(CCR0, slow);
    }
  }

  // Upper 32 bits must be zero.
  __ extsw(length, length);

  __ extsw(src_pos, src_pos);
  if (flags & LIR_OpArrayCopy::src_range_check) {
    __ lwz(tmp2, arrayOopDesc::length_offset_in_bytes(), src);
    __ add(tmp, length, src_pos);
    __ cmpld(CCR0, tmp2, tmp);
    __ ble(CCR0, slow);
  }

  __ extsw(dst_pos, dst_pos);
  if (flags & LIR_OpArrayCopy::dst_range_check) {
    __ lwz(tmp2, arrayOopDesc::length_offset_in_bytes(), dst);
    __ add(tmp, length, dst_pos);
    __ cmpld(CCR0, tmp2, tmp);
    __ ble(CCR0, slow);
  }

  int shift = shift_amount(basic_type);

  if (!(flags & LIR_OpArrayCopy::type_check)) {
    __ b(cont);
  } else {
    // We don't know the array types are compatible.
    if (basic_type != T_OBJECT) {
      // Simple test for basic type arrays.
      if (UseCompressedClassPointers) {
        // No need to decode; we only need to compare.
        __ lwz(tmp, oopDesc::klass_offset_in_bytes(), src);
        __ lwz(tmp2, oopDesc::klass_offset_in_bytes(), dst);
        __ cmpw(CCR0, tmp, tmp2);
      } else {
        __ ld(tmp, oopDesc::klass_offset_in_bytes(), src);
        __ ld(tmp2, oopDesc::klass_offset_in_bytes(), dst);
        __ cmpd(CCR0, tmp, tmp2);
      }
      __ beq(CCR0, cont);
    } else {
      // For object arrays, if src is a subclass of dst then we can
      // safely do the copy.
      address copyfunc_addr = StubRoutines::checkcast_arraycopy();

      const Register sub_klass = R5, super_klass = R4; // like CheckCast/InstanceOf
      assert_different_registers(tmp, tmp2, sub_klass, super_klass);

      __ load_klass(sub_klass, src);
      __ load_klass(super_klass, dst);

      __ check_klass_subtype_fast_path(sub_klass, super_klass, tmp, tmp2,
                                       &cont, copyfunc_addr != NULL ? &copyfunc : &slow, NULL);

      address slow_stc = Runtime1::entry_for(Runtime1::slow_subtype_check_id);
      //__ load_const_optimized(tmp, slow_stc, tmp2);
      __ calculate_address_from_global_toc(tmp, slow_stc, true, true, false);
      __ mtctr(tmp);
      __ bctrl(); // sets CR0
      __ beq(CCR0, cont);

      if (copyfunc_addr != NULL) { // Use stub if available.
        __ bind(copyfunc);
        // Src is not a subclass of dst so we have to do a
        // per-element check.
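        // To our understanding the checkcast stub follows the same protocol as the generic
        // stub: R3_RET == 0 if everything was copied, otherwise ~(elements copied); the nand
        // further down converts that back into an element count.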
        int mask = LIR_OpArrayCopy::src_objarray|LIR_OpArrayCopy::dst_objarray;
        if ((flags & mask) != mask) {
          assert(flags & mask, "one of the two should be known to be an object array");

          if (!(flags & LIR_OpArrayCopy::src_objarray)) {
            __ load_klass(tmp, src);
          } else if (!(flags & LIR_OpArrayCopy::dst_objarray)) {
            __ load_klass(tmp, dst);
          }

          __ lwz(tmp2, in_bytes(Klass::layout_helper_offset()), tmp);

          jint objArray_lh = Klass::array_layout_helper(T_OBJECT);
          __ load_const_optimized(tmp, objArray_lh);
          __ cmpw(CCR0, tmp, tmp2);
          __ bne(CCR0, slow);
        }

        Register src_ptr = R3_ARG1;
        Register dst_ptr = R4_ARG2;
        Register len     = R5_ARG3;
        Register chk_off = R6_ARG4;
        Register super_k = R7_ARG5;

        __ addi(src_ptr, src, arrayOopDesc::base_offset_in_bytes(basic_type));
        __ addi(dst_ptr, dst, arrayOopDesc::base_offset_in_bytes(basic_type));
        if (shift == 0) {
          __ add(src_ptr, src_pos, src_ptr);
          __ add(dst_ptr, dst_pos, dst_ptr);
        } else {
          __ sldi(tmp, src_pos, shift);
          __ sldi(tmp2, dst_pos, shift);
          __ add(src_ptr, tmp, src_ptr);
          __ add(dst_ptr, tmp2, dst_ptr);
        }

        __ load_klass(tmp, dst);
        __ mr(len, length);

        int ek_offset = in_bytes(ObjArrayKlass::element_klass_offset());
        __ ld(super_k, ek_offset, tmp);

        int sco_offset = in_bytes(Klass::super_check_offset_offset());
        __ lwz(chk_off, sco_offset, super_k);

        __ call_c_with_frame_resize(copyfunc_addr, /*stub does not need resized frame*/ 0);

#ifndef PRODUCT
        if (PrintC1Statistics) {
          Label failed;
          __ cmpwi(CCR0, R3_RET, 0);
          __ bne(CCR0, failed);
          address counter = (address)&Runtime1::_arraycopy_checkcast_cnt;
          int simm16_offs = __ load_const_optimized(tmp, counter, tmp2, true);
          __ lwz(R11_scratch1, simm16_offs, tmp);
          __ addi(R11_scratch1, R11_scratch1, 1);
          __ stw(R11_scratch1, simm16_offs, tmp);
          __ bind(failed);
        }
#endif

        __ nand(tmp, R3_RET, R3_RET);
        __ cmpwi(CCR0, R3_RET, 0);
        __ beq(CCR0, *stub->continuation());

#ifndef PRODUCT
        if (PrintC1Statistics) {
          address counter = (address)&Runtime1::_arraycopy_checkcast_attempt_cnt;
          int simm16_offs = __ load_const_optimized(tmp, counter, tmp2, true);
          __ lwz(R11_scratch1, simm16_offs, tmp);
          __ addi(R11_scratch1, R11_scratch1, 1);
          __ stw(R11_scratch1, simm16_offs, tmp);
        }
#endif

        __ subf(length, tmp, length);
        __ add(src_pos, tmp, src_pos);
        __ add(dst_pos, tmp, dst_pos);
      }
    }
  }
  __ bind(slow);
  __ b(*stub->entry());
  __ bind(cont);

#ifdef ASSERT
  if (basic_type != T_OBJECT || !(flags & LIR_OpArrayCopy::type_check)) {
    // Sanity check the known type with the incoming class. For the
    // primitive case, the types must match exactly, with src.klass and
    // dst.klass each exactly matching the default type. For the object
    // array case, if no type check is needed then either the dst type
    // is exactly the expected type and the src type is a subtype (which
    // we can't check), or src is the same array as dst but not
    // necessarily exactly of type default_type.
    Label known_ok, halt;
    metadata2reg(op->expected_type()->constant_encoding(), tmp);
    if (UseCompressedClassPointers) {
      // Tmp holds the default type. It currently comes uncompressed after the
      // load of a constant, so encode it.
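      // Comparing narrow (compressed) klass words directly with lwz/cmpw avoids a decode
      // per load, which is why the constant is encoded here rather than the loads decoded.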
      __ encode_klass_not_null(tmp);
      // Load the raw value of the dst klass, since we will be comparing
      // uncompressed values directly.
      __ lwz(tmp2, oopDesc::klass_offset_in_bytes(), dst);
      __ cmpw(CCR0, tmp, tmp2);
      if (basic_type != T_OBJECT) {
        __ bne(CCR0, halt);
        // Load the raw value of the src klass.
        __ lwz(tmp2, oopDesc::klass_offset_in_bytes(), src);
        __ cmpw(CCR0, tmp, tmp2);
        __ beq(CCR0, known_ok);
      } else {
        __ beq(CCR0, known_ok);
        __ cmpw(CCR0, src, dst);
        __ beq(CCR0, known_ok);
      }
    } else {
      __ ld(tmp2, oopDesc::klass_offset_in_bytes(), dst);
      __ cmpd(CCR0, tmp, tmp2);
      if (basic_type != T_OBJECT) {
        __ bne(CCR0, halt);
        // Load the raw value of the src klass.
        __ ld(tmp2, oopDesc::klass_offset_in_bytes(), src);
        __ cmpd(CCR0, tmp, tmp2);
        __ beq(CCR0, known_ok);
      } else {
        __ beq(CCR0, known_ok);
        __ cmpd(CCR0, src, dst);
        __ beq(CCR0, known_ok);
      }
    }
    __ bind(halt);
    __ stop("incorrect type information in arraycopy");
    __ bind(known_ok);
  }
#endif

#ifndef PRODUCT
  if (PrintC1Statistics) {
    address counter = Runtime1::arraycopy_count_address(basic_type);
    int simm16_offs = __ load_const_optimized(tmp, counter, tmp2, true);
    __ lwz(R11_scratch1, simm16_offs, tmp);
    __ addi(R11_scratch1, R11_scratch1, 1);
    __ stw(R11_scratch1, simm16_offs, tmp);
  }
#endif

  Register src_ptr = R3_ARG1;
  Register dst_ptr = R4_ARG2;
  Register len     = R5_ARG3;

  __ addi(src_ptr, src, arrayOopDesc::base_offset_in_bytes(basic_type));
  __ addi(dst_ptr, dst, arrayOopDesc::base_offset_in_bytes(basic_type));
  if (shift == 0) {
    __ add(src_ptr, src_pos, src_ptr);
    __ add(dst_ptr, dst_pos, dst_ptr);
  } else {
    __ sldi(tmp, src_pos, shift);
    __ sldi(tmp2, dst_pos, shift);
    __ add(src_ptr, tmp, src_ptr);
    __ add(dst_ptr, tmp2, dst_ptr);
  }

  bool disjoint = (flags & LIR_OpArrayCopy::overlapping) == 0;
  bool aligned  = (flags & LIR_OpArrayCopy::unaligned) == 0;
  const char *name;
  address entry = StubRoutines::select_arraycopy_function(basic_type, aligned, disjoint, name, false);

  // Arraycopy stubs take a length in number of elements, so don't scale it.
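  // The selected stub is presumably of the shape void stub(from, to, count) with count in
  // elements; src_ptr/dst_ptr above already include the array base offset plus the scaled
  // position, e.g. for T_INT (shift == 2): src_ptr = src + base_offset_in_bytes(T_INT) + (src_pos << 2).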
  __ mr(len, length);
  __ call_c_with_frame_resize(entry, /*stub does not need resized frame*/ 0);

  __ bind(*stub->continuation());
}


void LIR_Assembler::shift_op(LIR_Code code, LIR_Opr left, LIR_Opr count, LIR_Opr dest, LIR_Opr tmp) {
  if (dest->is_single_cpu()) {
    __ rldicl(tmp->as_register(), count->as_register(), 0, 64-5);
#ifdef _LP64
    if (left->type() == T_OBJECT) {
      switch (code) {
        case lir_shl:  __ sld(dest->as_register(), left->as_register(), tmp->as_register()); break;
        case lir_shr:  __ srad(dest->as_register(), left->as_register(), tmp->as_register()); break;
        case lir_ushr: __ srd(dest->as_register(), left->as_register(), tmp->as_register()); break;
        default: ShouldNotReachHere();
      }
    } else
#endif
      switch (code) {
        case lir_shl:  __ slw(dest->as_register(), left->as_register(), tmp->as_register()); break;
        case lir_shr:  __ sraw(dest->as_register(), left->as_register(), tmp->as_register()); break;
        case lir_ushr: __ srw(dest->as_register(), left->as_register(), tmp->as_register()); break;
        default: ShouldNotReachHere();
      }
  } else {
    __ rldicl(tmp->as_register(), count->as_register(), 0, 64-6);
    switch (code) {
      case lir_shl:  __ sld(dest->as_register_lo(), left->as_register_lo(), tmp->as_register()); break;
      case lir_shr:  __ srad(dest->as_register_lo(), left->as_register_lo(), tmp->as_register()); break;
      case lir_ushr: __ srd(dest->as_register_lo(), left->as_register_lo(), tmp->as_register()); break;
      default: ShouldNotReachHere();
    }
  }
}


void LIR_Assembler::shift_op(LIR_Code code, LIR_Opr left, jint count, LIR_Opr dest) {
#ifdef _LP64
  if (left->type() == T_OBJECT) {
    count = count & 63; // Shift count is modulo 64 (8 * sizeof(intptr_t) - 1 is the maximum).
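    // Per JLS 15.19, shift distances are masked: & 0x3f for long shifts, & 0x1f for int
    // shifts; the rldicl masking in the register-count variant above implements the same rule.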
    if (count == 0) { __ mr_if_needed(dest->as_register_lo(), left->as_register()); }
    else {
      switch (code) {
        case lir_shl:  __ sldi(dest->as_register_lo(), left->as_register(), count); break;
        case lir_shr:  __ sradi(dest->as_register_lo(), left->as_register(), count); break;
        case lir_ushr: __ srdi(dest->as_register_lo(), left->as_register(), count); break;
        default: ShouldNotReachHere();
      }
    }
    return;
  }
#endif

  if (dest->is_single_cpu()) {
    count = count & 0x1F; // Java spec
    if (count == 0) { __ mr_if_needed(dest->as_register(), left->as_register()); }
    else {
      switch (code) {
        case lir_shl:  __ slwi(dest->as_register(), left->as_register(), count); break;
        case lir_shr:  __ srawi(dest->as_register(), left->as_register(), count); break;
        case lir_ushr: __ srwi(dest->as_register(), left->as_register(), count); break;
        default: ShouldNotReachHere();
      }
    }
  } else if (dest->is_double_cpu()) {
    count = count & 63; // Java spec
    if (count == 0) { __ mr_if_needed(dest->as_pointer_register(), left->as_pointer_register()); }
    else {
      switch (code) {
        case lir_shl:  __ sldi(dest->as_pointer_register(), left->as_pointer_register(), count); break;
        case lir_shr:  __ sradi(dest->as_pointer_register(), left->as_pointer_register(), count); break;
        case lir_ushr: __ srdi(dest->as_pointer_register(), left->as_pointer_register(), count); break;
        default: ShouldNotReachHere();
      }
    }
  } else {
    ShouldNotReachHere();
  }
}


void LIR_Assembler::emit_alloc_obj(LIR_OpAllocObj* op) {
  if (op->init_check()) {
    if (!os::zero_page_read_protected() || !ImplicitNullChecks) {
      explicit_null_check(op->klass()->as_register(), op->stub()->info());
    } else {
      add_debug_info_for_null_check_here(op->stub()->info());
    }
    __ lbz(op->tmp1()->as_register(),
           in_bytes(InstanceKlass::init_state_offset()), op->klass()->as_register());
    __ cmpwi(CCR0, op->tmp1()->as_register(), InstanceKlass::fully_initialized);
    __ bc_far_optimized(Assembler::bcondCRbiIs0, __ bi0(CCR0, Assembler::equal), *op->stub()->entry());
  }
  __ allocate_object(op->obj()->as_register(),
                     op->tmp1()->as_register(),
                     op->tmp2()->as_register(),
                     op->tmp3()->as_register(),
                     op->header_size(),
                     op->object_size(),
                     op->klass()->as_register(),
                     *op->stub()->entry());

  __ bind(*op->stub()->continuation());
  __ verify_oop(op->obj()->as_register(), FILE_AND_LINE);
}


void LIR_Assembler::emit_alloc_array(LIR_OpAllocArray* op) {
  LP64_ONLY( __ extsw(op->len()->as_register(), op->len()->as_register()); )
  if (UseSlowPath ||
      (!UseFastNewObjectArray && (is_reference_type(op->type()))) ||
      (!UseFastNewTypeArray   && (!is_reference_type(op->type())))) {
    __ b(*op->stub()->entry());
  } else {
    __ allocate_array(op->obj()->as_register(),
                      op->len()->as_register(),
                      op->tmp1()->as_register(),
                      op->tmp2()->as_register(),
                      op->tmp3()->as_register(),
                      arrayOopDesc::header_size(op->type()),
                      type2aelembytes(op->type()),
                      op->klass()->as_register(),
                      *op->stub()->entry());
  }
  __ bind(*op->stub()->continuation());
}


void LIR_Assembler::type_profile_helper(Register mdo, int mdo_offset_bias,
                                        ciMethodData *md, ciProfileData *data,
                                        Register recv, Register tmp1, Label* update_done) {
  uint i;
  for (i = 0; i < VirtualCallData::row_limit(); i++) {
    Label next_test;
    // See if the receiver is receiver[n].
    __ ld(tmp1, md->byte_offset_of_slot(data, ReceiverTypeData::receiver_offset(i)) - mdo_offset_bias, mdo);
    __ verify_klass_ptr(tmp1);
    __ cmpd(CCR0, recv, tmp1);
    __ bne(CCR0, next_test);

    __ ld(tmp1, md->byte_offset_of_slot(data, ReceiverTypeData::receiver_count_offset(i)) - mdo_offset_bias, mdo);
    __ addi(tmp1, tmp1, DataLayout::counter_increment);
    __ std(tmp1, md->byte_offset_of_slot(data, ReceiverTypeData::receiver_count_offset(i)) - mdo_offset_bias, mdo);
    __ b(*update_done);

    __ bind(next_test);
  }

  // Didn't find receiver; find next empty slot and fill it in.
  for (i = 0; i < VirtualCallData::row_limit(); i++) {
    Label next_test;
    __ ld(tmp1, md->byte_offset_of_slot(data, ReceiverTypeData::receiver_offset(i)) - mdo_offset_bias, mdo);
    __ cmpdi(CCR0, tmp1, 0);
    __ bne(CCR0, next_test);
    __ li(tmp1, DataLayout::counter_increment);
    __ std(recv, md->byte_offset_of_slot(data, ReceiverTypeData::receiver_offset(i)) - mdo_offset_bias, mdo);
    __ std(tmp1, md->byte_offset_of_slot(data, ReceiverTypeData::receiver_count_offset(i)) - mdo_offset_bias, mdo);
    __ b(*update_done);

    __ bind(next_test);
  }
}


void LIR_Assembler::setup_md_access(ciMethod* method, int bci,
                                    ciMethodData*& md, ciProfileData*& data, int& mdo_offset_bias) {
  md = method->method_data_or_null();
  assert(md != NULL, "Sanity");
  data = md->bci_to_data(bci);
  assert(data != NULL, "need data for checkcast");
  assert(data->is_ReceiverTypeData(), "need ReceiverTypeData for type check");
  if (!Assembler::is_simm16(md->byte_offset_of_slot(data, DataLayout::header_offset()) + data->size_in_bytes())) {
    // The offset is large so bias the mdo by the base of the slot so
    // that the ld can use simm16s to reference the slots of the data.
    mdo_offset_bias = md->byte_offset_of_slot(data, DataLayout::header_offset());
  }
}


void LIR_Assembler::emit_typecheck_helper(LIR_OpTypeCheck *op, Label* success, Label* failure, Label* obj_is_null) {
  const Register obj = op->object()->as_register(); // Needs to live in this register at safepoint (patching stub).
  Register k_RInfo = op->tmp1()->as_register();
  Register klass_RInfo = op->tmp2()->as_register();
  Register Rtmp1 = op->tmp3()->as_register();
  Register dst = op->result_opr()->as_register();
  ciKlass* k = op->klass();
  bool should_profile = op->should_profile();
  // Attention: do_temp(opTypeCheck->_object) is not used, i.e. obj may be the same as one of the temps.
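  // If obj aliases one of the temps, substitute the result register for that temp below;
  // this presumes the register allocator never assigns dst == obj in that situation,
  // otherwise the assert_different_registers check would fire.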
  bool reg_conflict = false;
  if (obj == k_RInfo) {
    k_RInfo = dst;
    reg_conflict = true;
  } else if (obj == klass_RInfo) {
    klass_RInfo = dst;
    reg_conflict = true;
  } else if (obj == Rtmp1) {
    Rtmp1 = dst;
    reg_conflict = true;
  }
  assert_different_registers(obj, k_RInfo, klass_RInfo, Rtmp1);

  __ cmpdi(CCR0, obj, 0);

  ciMethodData* md = NULL;
  ciProfileData* data = NULL;
  int mdo_offset_bias = 0;
  if (should_profile) {
    ciMethod* method = op->profiled_method();
    assert(method != NULL, "Should have method");
    setup_md_access(method, op->profiled_bci(), md, data, mdo_offset_bias);

    Register mdo = k_RInfo;
    Register data_val = Rtmp1;
    Label not_null;
    __ bne(CCR0, not_null);
    metadata2reg(md->constant_encoding(), mdo);
    __ add_const_optimized(mdo, mdo, mdo_offset_bias, R0);
    __ lbz(data_val, md->byte_offset_of_slot(data, DataLayout::flags_offset()) - mdo_offset_bias, mdo);
    __ ori(data_val, data_val, BitData::null_seen_byte_constant());
    __ stb(data_val, md->byte_offset_of_slot(data, DataLayout::flags_offset()) - mdo_offset_bias, mdo);
    __ b(*obj_is_null);
    __ bind(not_null);
  } else {
    __ beq(CCR0, *obj_is_null);
  }

  // get object class
  __ load_klass(klass_RInfo, obj);

  if (k->is_loaded()) {
    metadata2reg(k->constant_encoding(), k_RInfo);
  } else {
    klass2reg_with_patching(k_RInfo, op->info_for_patch());
  }

  Label profile_cast_failure, failure_restore_obj, profile_cast_success;
  Label *failure_target = should_profile ? &profile_cast_failure : failure;
  Label *success_target = should_profile ? &profile_cast_success : success;

  if (op->fast_check()) {
    assert_different_registers(klass_RInfo, k_RInfo);
    __ cmpd(CCR0, k_RInfo, klass_RInfo);
    if (should_profile) {
      __ bne(CCR0, *failure_target);
      // Fall through to success case.
    } else {
      __ beq(CCR0, *success);
      // Fall through to failure case.
    }
  } else {
    bool need_slow_path = true;
    if (k->is_loaded()) {
      if ((int) k->super_check_offset() != in_bytes(Klass::secondary_super_cache_offset())) {
        need_slow_path = false;
      }
      // Perform the fast part of the checking logic.
      __ check_klass_subtype_fast_path(klass_RInfo, k_RInfo, Rtmp1, R0, (need_slow_path ? success_target : NULL),
                                       failure_target, NULL, RegisterOrConstant(k->super_check_offset()));
    } else {
      // Perform the fast part of the checking logic.
      __ check_klass_subtype_fast_path(klass_RInfo, k_RInfo, Rtmp1, R0, success_target, failure_target);
    }
    if (!need_slow_path) {
      if (!should_profile) { __ b(*success); }
    } else {
      // Call out-of-line instance of __ check_klass_subtype_slow_path(...):
      address entry = Runtime1::entry_for(Runtime1::slow_subtype_check_id);
      // Stub needs fixed registers (tmp1-3).
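      // The slow-path stub expects its inputs in the originally allocated tmp1-3 registers,
      // so the (possibly substituted) values are moved back; obj is kept alive across the
      // call by parking it in R0 and/or dst as needed.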
      Register original_k_RInfo = op->tmp1()->as_register();
      Register original_klass_RInfo = op->tmp2()->as_register();
      Register original_Rtmp1 = op->tmp3()->as_register();
      bool keep_obj_alive = reg_conflict && (op->code() == lir_checkcast);
      bool keep_klass_RInfo_alive = (obj == original_klass_RInfo) && should_profile;
      if (keep_obj_alive && (obj != original_Rtmp1)) { __ mr(R0, obj); }
      __ mr_if_needed(original_k_RInfo, k_RInfo);
      __ mr_if_needed(original_klass_RInfo, klass_RInfo);
      if (keep_obj_alive) { __ mr(dst, (obj == original_Rtmp1) ? obj : R0); }
      //__ load_const_optimized(original_Rtmp1, entry, R0);
      __ calculate_address_from_global_toc(original_Rtmp1, entry, true, true, false);
      __ mtctr(original_Rtmp1);
      __ bctrl(); // sets CR0
      if (keep_obj_alive) {
        if (keep_klass_RInfo_alive) { __ mr(R0, obj); }
        __ mr(obj, dst);
      }
      if (should_profile) {
        __ bne(CCR0, *failure_target);
        if (keep_klass_RInfo_alive) { __ mr(klass_RInfo, keep_obj_alive ? R0 : obj); }
        // Fall through to success case.
      } else {
        __ beq(CCR0, *success);
        // Fall through to failure case.
      }
    }
  }

  if (should_profile) {
    Register mdo = k_RInfo, recv = klass_RInfo;
    assert_different_registers(mdo, recv, Rtmp1);
    __ bind(profile_cast_success);
    metadata2reg(md->constant_encoding(), mdo);
    __ add_const_optimized(mdo, mdo, mdo_offset_bias, R0);
    type_profile_helper(mdo, mdo_offset_bias, md, data, recv, Rtmp1, success);
    __ b(*success);

    // Cast failure case.
    __ bind(profile_cast_failure);
    metadata2reg(md->constant_encoding(), mdo);
    __ add_const_optimized(mdo, mdo, mdo_offset_bias, R0);
    __ ld(Rtmp1, md->byte_offset_of_slot(data, CounterData::count_offset()) - mdo_offset_bias, mdo);
    __ addi(Rtmp1, Rtmp1, -DataLayout::counter_increment);
    __ std(Rtmp1, md->byte_offset_of_slot(data, CounterData::count_offset()) - mdo_offset_bias, mdo);
  }

  __ bind(*failure);
}


void LIR_Assembler::emit_opTypeCheck(LIR_OpTypeCheck* op) {
  LIR_Code code = op->code();
  if (code == lir_store_check) {
    Register value = op->object()->as_register();
    Register array = op->array()->as_register();
    Register k_RInfo = op->tmp1()->as_register();
    Register klass_RInfo = op->tmp2()->as_register();
    Register Rtmp1 = op->tmp3()->as_register();
    bool should_profile = op->should_profile();

    __ verify_oop(value, FILE_AND_LINE);
    CodeStub* stub = op->stub();
    // Check if it needs to be profiled.
    ciMethodData* md = NULL;
    ciProfileData* data = NULL;
    int mdo_offset_bias = 0;
    if (should_profile) {
      ciMethod* method = op->profiled_method();
      assert(method != NULL, "Should have method");
      setup_md_access(method, op->profiled_bci(), md, data, mdo_offset_bias);
    }
    Label profile_cast_success, failure, done;
    Label *success_target = should_profile ? &profile_cast_success : &done;

    __ cmpdi(CCR0, value, 0);
    if (should_profile) {
      Label not_null;
      __ bne(CCR0, not_null);
      Register mdo = k_RInfo;
      Register data_val = Rtmp1;
      metadata2reg(md->constant_encoding(), mdo);
      __ add_const_optimized(mdo, mdo, mdo_offset_bias, R0);
      __ lbz(data_val, md->byte_offset_of_slot(data, DataLayout::flags_offset()) - mdo_offset_bias, mdo);
      __ ori(data_val, data_val, BitData::null_seen_byte_constant());
      __ stb(data_val, md->byte_offset_of_slot(data, DataLayout::flags_offset()) - mdo_offset_bias, mdo);
      __ b(done);
      __ bind(not_null);
    } else {
      __ beq(CCR0, done);
    }
    if (!os::zero_page_read_protected() || !ImplicitNullChecks) {
      explicit_null_check(array, op->info_for_exception());
    } else {
      add_debug_info_for_null_check_here(op->info_for_exception());
    }
    __ load_klass(k_RInfo, array);
    __ load_klass(klass_RInfo, value);

    // Get the array's element klass.
    __ ld(k_RInfo, in_bytes(ObjArrayKlass::element_klass_offset()), k_RInfo);
    // Perform the fast part of the checking logic.
    __ check_klass_subtype_fast_path(klass_RInfo, k_RInfo, Rtmp1, R0, success_target, &failure, NULL);

    // Call out-of-line instance of __ check_klass_subtype_slow_path(...):
    const address slow_path = Runtime1::entry_for(Runtime1::slow_subtype_check_id);
    //__ load_const_optimized(R0, slow_path);
    __ add_const_optimized(R0, R29_TOC, MacroAssembler::offset_to_global_toc(slow_path));
    __ mtctr(R0);
    __ bctrl(); // sets CR0
    if (!should_profile) {
      __ beq(CCR0, done);
      __ bind(failure);
    } else {
      __ bne(CCR0, failure);
      // Fall through to the success case.

      Register mdo = klass_RInfo, recv = k_RInfo, tmp1 = Rtmp1;
      assert_different_registers(value, mdo, recv, tmp1);
      __ bind(profile_cast_success);
      metadata2reg(md->constant_encoding(), mdo);
      __ add_const_optimized(mdo, mdo, mdo_offset_bias, R0);
      __ load_klass(recv, value);
      type_profile_helper(mdo, mdo_offset_bias, md, data, recv, tmp1, &done);
      __ b(done);

      // Cast failure case.
      __ bind(failure);
      metadata2reg(md->constant_encoding(), mdo);
      __ add_const_optimized(mdo, mdo, mdo_offset_bias, R0);
      Address data_addr(mdo, md->byte_offset_of_slot(data, CounterData::count_offset()) - mdo_offset_bias);
      __ ld(tmp1, md->byte_offset_of_slot(data, CounterData::count_offset()) - mdo_offset_bias, mdo);
      __ addi(tmp1, tmp1, -DataLayout::counter_increment);
      __ std(tmp1, md->byte_offset_of_slot(data, CounterData::count_offset()) - mdo_offset_bias, mdo);
    }
    __ b(*stub->entry());
    __ bind(done);

  } else if (code == lir_checkcast) {
    Label success, failure;
    emit_typecheck_helper(op, &success, /*fallthru*/&failure, &success);
    __ b(*op->stub()->entry());
    __ align(32, 12);
    __ bind(success);
    __ mr_if_needed(op->result_opr()->as_register(), op->object()->as_register());
  } else if (code == lir_instanceof) {
    Register dst = op->result_opr()->as_register();
    Label success, failure, done;
    emit_typecheck_helper(op, &success, /*fallthru*/&failure, &failure);
    __ li(dst, 0);
    __ b(done);
    __ align(32, 12);
    __ bind(success);
    __ li(dst, 1);
    __ bind(done);
  } else {
    ShouldNotReachHere();
  }
}


void LIR_Assembler::emit_compare_and_swap(LIR_OpCompareAndSwap* op) {
  Register addr = op->addr()->as_pointer_register();
  Register cmp_value = noreg, new_value = noreg;
  bool is_64bit = false;

  if (op->code() == lir_cas_long) {
    cmp_value = op->cmp_value()->as_register_lo();
    new_value = op->new_value()->as_register_lo();
    is_64bit = true;
  } else if (op->code() == lir_cas_int || op->code() == lir_cas_obj) {
    cmp_value = op->cmp_value()->as_register();
    new_value = op->new_value()->as_register();
    if (op->code() == lir_cas_obj) {
      if (UseCompressedOops) {
        Register t1 = op->tmp1()->as_register();
        Register t2 = op->tmp2()->as_register();
        cmp_value = __ encode_heap_oop(t1, cmp_value);
        new_value = __ encode_heap_oop(t2, new_value);
      } else {
        is_64bit = true;
      }
    }
  } else {
    Unimplemented();
  }

  if (is_64bit) {
    __ cmpxchgd(BOOL_RESULT, /*current_value=*/R0, cmp_value, new_value, addr,
                MacroAssembler::MemBarNone,
                MacroAssembler::cmpxchgx_hint_atomic_update(),
                noreg, NULL, /*check without ldarx first*/true);
  } else {
    __ cmpxchgw(BOOL_RESULT, /*current_value=*/R0, cmp_value, new_value, addr,
                MacroAssembler::MemBarNone,
                MacroAssembler::cmpxchgx_hint_atomic_update(),
                noreg, /*check without ldarx first*/true);
  }

  if (support_IRIW_for_not_multiple_copy_atomic_cpu) {
    __ isync();
  } else {
    __ sync();
  }
}


void LIR_Assembler::set_24bit_FPU() {
  Unimplemented();
}

void LIR_Assembler::reset_FPU() {
  Unimplemented();
}


void LIR_Assembler::breakpoint() {
  __ illtrap();
}


void LIR_Assembler::push(LIR_Opr opr) {
  Unimplemented();
}

void LIR_Assembler::pop(LIR_Opr opr) {
  Unimplemented();
}


void LIR_Assembler::monitor_address(int monitor_no, LIR_Opr dst_opr) {
  Address mon_addr = frame_map()->address_for_monitor_lock(monitor_no);
  Register dst = dst_opr->as_register();
  Register reg = mon_addr.base();
  int offset = mon_addr.disp();
  // Compute pointer to BasicLock.
  __ add_const_optimized(dst, reg, offset);
}


void LIR_Assembler::emit_lock(LIR_OpLock* op) {
  Register obj = op->obj_opr()->as_register();
  Register hdr = op->hdr_opr()->as_register();
  Register lock = op->lock_opr()->as_register();

  // Obj may not be an oop.
  if (op->code() == lir_lock) {
    MonitorEnterStub* stub = (MonitorEnterStub*)op->stub();
    if (UseFastLocking) {
      assert(BasicLock::displaced_header_offset_in_bytes() == 0, "lock_reg must point to the displaced header");
      // Add debug info for NullPointerException only if one is possible.
      if (op->info() != NULL) {
        if (!os::zero_page_read_protected() || !ImplicitNullChecks) {
          explicit_null_check(obj, op->info());
        } else {
          add_debug_info_for_null_check_here(op->info());
        }
      }
      __ lock_object(hdr, obj, lock, op->scratch_opr()->as_register(), *op->stub()->entry());
    } else {
      // always do slow locking
      // note: The slow locking code could be inlined here, however if we use
      //       slow locking, speed doesn't matter anyway and this solution is
      //       simpler and requires less duplicated code - additionally, the
      //       slow locking code is the same in either case which simplifies
      //       debugging.
      __ b(*op->stub()->entry());
    }
  } else {
    assert(op->code() == lir_unlock, "Invalid code, expected lir_unlock");
    if (UseFastLocking) {
      assert(BasicLock::displaced_header_offset_in_bytes() == 0, "lock_reg must point to the displaced header");
      __ unlock_object(hdr, obj, lock, *op->stub()->entry());
    } else {
      // always do slow unlocking
      // note: The slow unlocking code could be inlined here, however if we use
      //       slow unlocking, speed doesn't matter anyway and this solution is
      //       simpler and requires less duplicated code - additionally, the
      //       slow unlocking code is the same in either case which simplifies
      //       debugging.
      __ b(*op->stub()->entry());
    }
  }
  __ bind(*op->stub()->continuation());
}


void LIR_Assembler::emit_profile_call(LIR_OpProfileCall* op) {
  ciMethod* method = op->profiled_method();
  int bci = op->profiled_bci();
  ciMethod* callee = op->profiled_callee();

  // Update counter for all call types.
  ciMethodData* md = method->method_data_or_null();
  assert(md != NULL, "Sanity");
  ciProfileData* data = md->bci_to_data(bci);
  assert(data != NULL && data->is_CounterData(), "need CounterData for calls");
  assert(op->mdo()->is_single_cpu(), "mdo must be allocated");
  Register mdo = op->mdo()->as_register();
#ifdef _LP64
  assert(op->tmp1()->is_double_cpu(), "tmp1 must be allocated");
  Register tmp1 = op->tmp1()->as_register_lo();
#else
  assert(op->tmp1()->is_single_cpu(), "tmp1 must be allocated");
  Register tmp1 = op->tmp1()->as_register();
#endif
  metadata2reg(md->constant_encoding(), mdo);
  int mdo_offset_bias = 0;
  if (!Assembler::is_simm16(md->byte_offset_of_slot(data, CounterData::count_offset()) +
                            data->size_in_bytes())) {
    // The offset is large so bias the mdo by the base of the slot so
    // that the ld can use simm16s to reference the slots of the data.
    mdo_offset_bias = md->byte_offset_of_slot(data, CounterData::count_offset());
    __ add_const_optimized(mdo, mdo, mdo_offset_bias, R0);
  }

  // Perform additional virtual call profiling for invokevirtual and
  // invokeinterface bytecodes.
  if (op->should_profile_receiver_type()) {
    assert(op->recv()->is_single_cpu(), "recv must be allocated");
    Register recv = op->recv()->as_register();
    assert_different_registers(mdo, tmp1, recv);
    assert(data->is_VirtualCallData(), "need VirtualCallData for virtual calls");
    ciKlass* known_klass = op->known_holder();
    if (C1OptimizeVirtualCallProfiling && known_klass != NULL) {
      // We know the type that will be seen at this call site; we can
      // statically update the MethodData* rather than needing to do
      // dynamic tests on the receiver type.

      // NOTE: we should probably put a lock around this search to
      // avoid collisions by concurrent compilations.
      ciVirtualCallData* vc_data = (ciVirtualCallData*) data;
      uint i;
      for (i = 0; i < VirtualCallData::row_limit(); i++) {
        ciKlass* receiver = vc_data->receiver(i);
        if (known_klass->equals(receiver)) {
          __ ld(tmp1, md->byte_offset_of_slot(data, VirtualCallData::receiver_count_offset(i)) - mdo_offset_bias, mdo);
          __ addi(tmp1, tmp1, DataLayout::counter_increment);
          __ std(tmp1, md->byte_offset_of_slot(data, VirtualCallData::receiver_count_offset(i)) - mdo_offset_bias, mdo);
          return;
        }
      }

      // Receiver type not found in profile data; select an empty slot.

      // Note that this is less efficient than it should be because it
      // always does a write to the receiver part of the
      // VirtualCallData rather than just the first time.
      for (i = 0; i < VirtualCallData::row_limit(); i++) {
        ciKlass* receiver = vc_data->receiver(i);
        if (receiver == NULL) {
          metadata2reg(known_klass->constant_encoding(), tmp1);
          __ std(tmp1, md->byte_offset_of_slot(data, VirtualCallData::receiver_offset(i)) - mdo_offset_bias, mdo);

          __ ld(tmp1, md->byte_offset_of_slot(data, VirtualCallData::receiver_count_offset(i)) - mdo_offset_bias, mdo);
          __ addi(tmp1, tmp1, DataLayout::counter_increment);
          __ std(tmp1, md->byte_offset_of_slot(data, VirtualCallData::receiver_count_offset(i)) - mdo_offset_bias, mdo);
          return;
        }
      }
    } else {
      __ load_klass(recv, recv);
      Label update_done;
      type_profile_helper(mdo, mdo_offset_bias, md, data, recv, tmp1, &update_done);
      // Receiver did not match any saved receiver and there is no empty row for it.
      // Increment total counter to indicate polymorphic case.
      __ ld(tmp1, md->byte_offset_of_slot(data, CounterData::count_offset()) - mdo_offset_bias, mdo);
      __ addi(tmp1, tmp1, DataLayout::counter_increment);
      __ std(tmp1, md->byte_offset_of_slot(data, CounterData::count_offset()) - mdo_offset_bias, mdo);

      __ bind(update_done);
    }
  } else {
    // Static call.
    __ ld(tmp1, md->byte_offset_of_slot(data, CounterData::count_offset()) - mdo_offset_bias, mdo);
    __ addi(tmp1, tmp1, DataLayout::counter_increment);
    __ std(tmp1, md->byte_offset_of_slot(data, CounterData::count_offset()) - mdo_offset_bias, mdo);
  }
}


void LIR_Assembler::align_backward_branch_target() {
  __ align(32, 12); // Insert up to 3 nops to align with 32 byte boundary.
}


void LIR_Assembler::emit_delay(LIR_OpDelay* op) {
  Unimplemented();
}


void LIR_Assembler::negate(LIR_Opr left, LIR_Opr dest, LIR_Opr tmp) {
  // tmp must be unused
  assert(tmp->is_illegal(), "wasting a register if tmp is allocated");
  assert(left->is_register(), "can only handle registers");

  if (left->is_single_cpu()) {
    __ neg(dest->as_register(), left->as_register());
  } else if (left->is_single_fpu()) {
    __ fneg(dest->as_float_reg(), left->as_float_reg());
  } else if (left->is_double_fpu()) {
    __ fneg(dest->as_double_reg(), left->as_double_reg());
  } else {
    assert(left->is_double_cpu(), "Must be a long");
    __ neg(dest->as_register_lo(), left->as_register_lo());
  }
}


void LIR_Assembler::fxch(int i) {
  Unimplemented();
}

void LIR_Assembler::fld(int i) {
  Unimplemented();
}

void LIR_Assembler::ffree(int i) {
  Unimplemented();
}


void LIR_Assembler::rt_call(LIR_Opr result, address dest,
                            const LIR_OprList* args, LIR_Opr tmp, CodeEmitInfo* info) {
  // Stubs: Called via rt_call, but dest is a stub address (no function descriptor).
  if (dest == Runtime1::entry_for(Runtime1::register_finalizer_id) ||
      dest == Runtime1::entry_for(Runtime1::new_multi_array_id )) {
    //__ load_const_optimized(R0, dest);
    __ add_const_optimized(R0, R29_TOC, MacroAssembler::offset_to_global_toc(dest));
    __ mtctr(R0);
    __ bctrl();
    assert(info != NULL, "sanity");
    add_call_info_here(info);
    return;
  }

  __ call_c_with_frame_resize(dest, /*no resizing*/ 0);
  if (info != NULL) {
    add_call_info_here(info);
  }
}


void LIR_Assembler::volatile_move_op(LIR_Opr src, LIR_Opr dest, BasicType type, CodeEmitInfo* info) {
  ShouldNotReachHere(); // Not needed on _LP64.
}

void LIR_Assembler::membar() {
  __ fence();
}

void LIR_Assembler::membar_acquire() {
  __ acquire();
}

void LIR_Assembler::membar_release() {
  __ release();
}

void LIR_Assembler::membar_loadload() {
  __ membar(Assembler::LoadLoad);
}

void LIR_Assembler::membar_storestore() {
  __ membar(Assembler::StoreStore);
}

void LIR_Assembler::membar_loadstore() {
  __ membar(Assembler::LoadStore);
}

void LIR_Assembler::membar_storeload() {
  __ membar(Assembler::StoreLoad);
}

void LIR_Assembler::on_spin_wait() {
  Unimplemented();
}

void LIR_Assembler::leal(LIR_Opr addr_opr, LIR_Opr dest, LIR_PatchCode patch_code, CodeEmitInfo* info) {
  assert(patch_code == lir_patch_none, "Patch code not supported");
  LIR_Address* addr = addr_opr->as_address_ptr();
  assert(addr->scale() == LIR_Address::times_1, "no scaling on this platform");
  if (addr->index()->is_illegal()) {
    __ add_const_optimized(dest->as_pointer_register(), addr->base()->as_pointer_register(), addr->disp());
  } else {
    assert(addr->disp() == 0, "can't have both: index and disp");
    __ add(dest->as_pointer_register(), addr->index()->as_pointer_register(), addr->base()->as_pointer_register());
  }
}


void LIR_Assembler::get_thread(LIR_Opr result_reg) {
  ShouldNotReachHere();
}


#ifdef ASSERT
// Emit run-time assertion.
void LIR_Assembler::emit_assert(LIR_OpAssert* op) {
  Unimplemented();
}
#endif


void LIR_Assembler::peephole(LIR_List* lir) {
  // Optimize instruction pairs before emitting.
  LIR_OpList* inst = lir->instructions_list();
  for (int i = 1; i < inst->length(); i++) {
    LIR_Op* op = inst->at(i);

    // 2 register-register-moves
    if (op->code() == lir_move) {
      LIR_Opr in2  = ((LIR_Op1*)op)->in_opr(),
              res2 = ((LIR_Op1*)op)->result_opr();
      if (in2->is_register() && res2->is_register()) {
        LIR_Op* prev = inst->at(i - 1);
        if (prev && prev->code() == lir_move) {
          LIR_Opr in1  = ((LIR_Op1*)prev)->in_opr(),
                  res1 = ((LIR_Op1*)prev)->result_opr();
          if (in1->is_same_register(res2) && in2->is_same_register(res1)) {
            inst->remove_at(i);
          }
        }
      }
    }

  }
  return;
}


void LIR_Assembler::atomic_op(LIR_Code code, LIR_Opr src, LIR_Opr data, LIR_Opr dest, LIR_Opr tmp) {
  const LIR_Address *addr = src->as_address_ptr();
  assert(addr->disp() == 0 && addr->index()->is_illegal(), "use leal!");
  const Register Rptr = addr->base()->as_pointer_register(),
                 Rtmp = tmp->as_register();
  Register Rco = noreg;
  if (UseCompressedOops && data->is_oop()) {
    Rco = __ encode_heap_oop(Rtmp, data->as_register());
  }

  Label Lretry;
  __ bind(Lretry);

  if (data->type() == T_INT) {
    const Register Rold = dest->as_register(),
                   Rsrc = data->as_register();
    assert_different_registers(Rptr, Rtmp, Rold, Rsrc);
    __ lwarx(Rold, Rptr, MacroAssembler::cmpxchgx_hint_atomic_update());
    if (code == lir_xadd) {
      __ add(Rtmp, Rsrc, Rold);
      __ stwcx_(Rtmp, Rptr);
    } else {
      __ stwcx_(Rsrc, Rptr);
    }
  } else if (data->is_oop()) {
    assert(code == lir_xchg, "xadd for oops");
    const Register Rold = dest->as_register();
    if (UseCompressedOops) {
      assert_different_registers(Rptr, Rold, Rco);
      __ lwarx(Rold, Rptr, MacroAssembler::cmpxchgx_hint_atomic_update());
      __ stwcx_(Rco, Rptr);
    } else {
      const Register Robj = data->as_register();
      assert_different_registers(Rptr, Rold, Robj);
      __ ldarx(Rold, Rptr, MacroAssembler::cmpxchgx_hint_atomic_update());
      __ stdcx_(Robj, Rptr);
    }
  } else if (data->type() == T_LONG) {
    const Register Rold = dest->as_register_lo(),
                   Rsrc = data->as_register_lo();
    assert_different_registers(Rptr, Rtmp, Rold, Rsrc);
    __ ldarx(Rold, Rptr, MacroAssembler::cmpxchgx_hint_atomic_update());
    if (code == lir_xadd) {
      __ add(Rtmp, Rsrc, Rold);
      __ stdcx_(Rtmp, Rptr);
    } else {
      __ stdcx_(Rsrc, Rptr);
    }
  } else {
    ShouldNotReachHere();
  }

  if (UseStaticBranchPredictionInCompareAndSwapPPC64) {
    __ bne_predict_not_taken(CCR0, Lretry);
  } else {
    __ bne(                  CCR0, Lretry);
  }

  if (UseCompressedOops && data->is_oop()) {
    __ decode_heap_oop(dest->as_register());
  }
}


void LIR_Assembler::emit_profile_type(LIR_OpProfileType* op) {
  Register obj = op->obj()->as_register();
  Register tmp = op->tmp()->as_pointer_register();
  LIR_Address* mdo_addr = op->mdp()->as_address_ptr();
  ciKlass* exact_klass = op->exact_klass();
  intptr_t current_klass = op->current_klass();
  bool not_null = op->not_null();
  bool no_conflict = op->no_conflict();

  Label Lupdate, Ldo_update, Ldone;

  bool do_null = !not_null;
  bool exact_klass_set = exact_klass != NULL && ciTypeEntries::valid_ciklass(current_klass) == exact_klass;
  bool do_update = !TypeEntries::is_type_unknown(current_klass) && !exact_klass_set;

  assert(do_null || do_update, "why are we here?");
  assert(!TypeEntries::was_null_seen(current_klass) || do_update, "why are we here?");

  __ verify_oop(obj, FILE_AND_LINE);

  if (do_null) {
    if (!TypeEntries::was_null_seen(current_klass)) {
      __ cmpdi(CCR0, obj, 0);
      __ bne(CCR0, Lupdate);
      __ ld(R0, index_or_disp(mdo_addr), mdo_addr->base()->as_pointer_register());
      __ ori(R0, R0, TypeEntries::null_seen);
      if (do_update) {
        __ b(Ldo_update);
      } else {
        __ std(R0, index_or_disp(mdo_addr), mdo_addr->base()->as_pointer_register());
      }
    } else {
      if (do_update) {
        __ cmpdi(CCR0, obj, 0);
        __ beq(CCR0, Ldone);
      }
    }
#ifdef ASSERT
  } else {
    __ cmpdi(CCR0, obj, 0);
    __ bne(CCR0, Lupdate);
    __ stop("unexpected null obj", 0x9652);
#endif
  }

  __ bind(Lupdate);
  if (do_update) {
    Label Lnext;
    const Register klass = R29_TOC; // kill and reload
    bool klass_reg_used = false;
#ifdef ASSERT
    if (exact_klass != NULL) {
      Label ok;
      klass_reg_used = true;
      __ load_klass(klass, obj);
      metadata2reg(exact_klass->constant_encoding(), R0);
      __ cmpd(CCR0, klass, R0);
      __ beq(CCR0, ok);
      __ stop("exact klass and actual klass differ", 0x8564);
      __ bind(ok);
    }
#endif

    if (!no_conflict) {
      if (exact_klass == NULL || TypeEntries::is_type_none(current_klass)) {
        klass_reg_used = true;
        if (exact_klass != NULL) {
          __ ld(tmp, index_or_disp(mdo_addr), mdo_addr->base()->as_pointer_register());
          metadata2reg(exact_klass->constant_encoding(), klass);
        } else {
          __ load_klass(klass, obj);
          __ ld(tmp, index_or_disp(mdo_addr), mdo_addr->base()->as_pointer_register()); // may kill obj
        }

        // Like InterpreterMacroAssembler::profile_obj_type
        __ clrrdi(R0, tmp, exact_log2(-TypeEntries::type_klass_mask));
        // Basically same as andi(R0, tmp, TypeEntries::type_klass_mask);
        __ cmpd(CCR1, R0, klass);
        // Klass seen before, nothing to do (regardless of unknown bit).
        //beq(CCR1, do_nothing);

        __ andi_(R0, klass, TypeEntries::type_unknown);
        // Already unknown. Nothing to do anymore.
        //bne(CCR0, do_nothing);
        __ crorc(CCR0, Assembler::equal, CCR1, Assembler::equal); // cr0 eq = cr1 eq or cr0 ne
        __ beq(CCR0, Lnext);

        if (TypeEntries::is_type_none(current_klass)) {
          __ clrrdi_(R0, tmp, exact_log2(-TypeEntries::type_mask));
          __ orr(R0, klass, tmp); // Combine klass and null_seen bit (only used if (tmp & type_mask) == 0).
          __ beq(CCR0, Ldo_update); // First time here. Set profile type.
        }

      } else {
        assert(ciTypeEntries::valid_ciklass(current_klass) != NULL &&
               ciTypeEntries::valid_ciklass(current_klass) != exact_klass, "conflict only");

        __ ld(tmp, index_or_disp(mdo_addr), mdo_addr->base()->as_pointer_register());
        __ andi_(R0, tmp, TypeEntries::type_unknown);
        // Already unknown. Nothing to do anymore.
        __ bne(CCR0, Lnext);
      }

      // Different than before. Cannot keep accurate profile.
      __ ori(R0, tmp, TypeEntries::type_unknown);
    } else {
      // There's a single possible klass at this profile point.
      assert(exact_klass != NULL, "should be");
      __ ld(tmp, index_or_disp(mdo_addr), mdo_addr->base()->as_pointer_register());

      if (TypeEntries::is_type_none(current_klass)) {
        klass_reg_used = true;
        metadata2reg(exact_klass->constant_encoding(), klass);

        __ clrrdi(R0, tmp, exact_log2(-TypeEntries::type_klass_mask));
        // Basically same as andi(R0, tmp, TypeEntries::type_klass_mask);
        __ cmpd(CCR1, R0, klass);
        // Klass seen before, nothing to do (regardless of unknown bit).
        __ beq(CCR1, Lnext);
#ifdef ASSERT
        {
          Label ok;
          __ clrrdi_(R0, tmp, exact_log2(-TypeEntries::type_mask));
          __ beq(CCR0, ok); // First time here.

          __ stop("unexpected profiling mismatch", 0x7865);
          __ bind(ok);
        }
#endif
        // First time here. Set profile type.
        __ orr(R0, klass, tmp); // Combine klass and null_seen bit (only used if (tmp & type_mask) == 0).
      } else {
        assert(ciTypeEntries::valid_ciklass(current_klass) != NULL &&
               ciTypeEntries::valid_ciklass(current_klass) != exact_klass, "inconsistent");

        // Already unknown. Nothing to do anymore.
        __ andi_(R0, tmp, TypeEntries::type_unknown);
        __ bne(CCR0, Lnext);

        // Different than before. Cannot keep accurate profile.
        __ ori(R0, tmp, TypeEntries::type_unknown);
      }
    }

    __ bind(Ldo_update);
    __ std(R0, index_or_disp(mdo_addr), mdo_addr->base()->as_pointer_register());

    __ bind(Lnext);
    if (klass_reg_used) { __ load_const_optimized(R29_TOC, MacroAssembler::global_toc(), R0); } // reinit
  }
  __ bind(Ldone);
}


void LIR_Assembler::emit_updatecrc32(LIR_OpUpdateCRC32* op) {
  assert(op->crc()->is_single_cpu(), "crc must be register");
  assert(op->val()->is_single_cpu(), "byte value must be register");
  assert(op->result_opr()->is_single_cpu(), "result must be register");
  Register crc = op->crc()->as_register();
  Register val = op->val()->as_register();
  Register res = op->result_opr()->as_register();

  assert_different_registers(val, crc, res);

  __ load_const_optimized(res, StubRoutines::crc_table_addr(), R0);
  __ kernel_crc32_singleByteReg(crc, val, res, true);
  __ mr(res, crc);
}

#undef __