src/hotspot/cpu/aarch64/c1_LIRAssembler_aarch64.cpp

--- old/src/hotspot/cpu/aarch64/c1_LIRAssembler_aarch64.cpp

  17  * 2 along with this work; if not, write to the Free Software Foundation,
  18  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  19  *
  20  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  21  * or visit www.oracle.com if you need additional information or have any
  22  * questions.
  23  *
  24  */
  25 
  26 #include "precompiled.hpp"
  27 #include "asm/macroAssembler.inline.hpp"
  28 #include "asm/assembler.hpp"
  29 #include "c1/c1_CodeStubs.hpp"
  30 #include "c1/c1_Compilation.hpp"
  31 #include "c1/c1_LIRAssembler.hpp"
  32 #include "c1/c1_MacroAssembler.hpp"
  33 #include "c1/c1_Runtime1.hpp"
  34 #include "c1/c1_ValueStack.hpp"
  35 #include "ci/ciArrayKlass.hpp"
  36 #include "ci/ciInstance.hpp"
  37 #include "code/compiledIC.hpp"
  38 #include "gc/shared/barrierSet.hpp"
  39 #include "gc/shared/cardTableBarrierSet.hpp"
  40 #include "gc/shared/collectedHeap.hpp"
  41 #include "nativeInst_aarch64.hpp"
  42 #include "oops/objArrayKlass.hpp"
  43 #include "oops/oop.inline.hpp"
  44 #include "runtime/frame.inline.hpp"
  45 #include "runtime/sharedRuntime.hpp"
  46 #include "vmreg_aarch64.inline.hpp"
  47 
  48 
  49 
  50 #ifndef PRODUCT
  51 #define COMMENT(x)   do { __ block_comment(x); } while (0)
  52 #else
  53 #define COMMENT(x)
  54 #endif
  55 
  56 NEEDS_CLEANUP // remove these definitions?


 488   guarantee(code_offset() - offset <= deopt_handler_size(), "overflow");
 489   __ end_a_stub();
 490 
 491   return offset;
 492 }
 493 
 494 void LIR_Assembler::add_debug_info_for_branch(address adr, CodeEmitInfo* info) {
 495   _masm->code_section()->relocate(adr, relocInfo::poll_type);
 496   int pc_offset = code_offset();
 497   flush_debug_info(pc_offset);
 498   info->record_debug_info(compilation()->debug_info_recorder(), pc_offset);
 499   if (info->exception_handlers() != NULL) {
 500     compilation()->add_exception_handlers_for_pco(pc_offset, info->exception_handlers());
 501   }
 502 }
 503 
 504 void LIR_Assembler::return_op(LIR_Opr result) {
 505   assert(result->is_illegal() || !result->is_single_cpu() || result->as_register() == r0, "word returns are in r0,");
 506 
 507   ciMethod* method = compilation()->method();
 508   // Pop the stack before the safepoint code
 509   __ remove_frame(initial_frame_size_in_bytes(), needs_stack_repair());
 510 
 511   if (StackReservedPages > 0 && compilation()->has_reserved_stack_access()) {
 512     __ reserved_stack_check();
 513   }
 514 
 515   address polling_page(os::get_polling_page());
 516   __ read_polling_page(rscratch1, polling_page, relocInfo::poll_return_type);
 517   __ ret(lr);
 518 }
 519 
 520 void LIR_Assembler::store_value_type_fields_to_buf(ciValueKlass* vk) { 
 521   __ store_value_type_fields_to_buf(vk);
 522 }
 523 
 524 int LIR_Assembler::safepoint_poll(LIR_Opr tmp, CodeEmitInfo* info) {
 525   address polling_page(os::get_polling_page());
 526   guarantee(info != NULL, "Shouldn't be NULL");
 527   assert(os::is_poll_address(polling_page), "should be");
 528   __ get_polling_page(rscratch1, polling_page, relocInfo::poll_type);
 529   add_debug_info_for_branch(info);  // This isn't just debug info:
 530                                     // it's the oop map
 531   __ read_polling_page(rscratch1, relocInfo::poll_type);
 532   return __ offset();
 533 }
 534 
 535 
 536 void LIR_Assembler::move_regs(Register from_reg, Register to_reg) {
 537   if (from_reg == r31_sp)
 538     from_reg = sp;
 539   if (to_reg == r31_sp)
 540     to_reg = sp;
 541   __ mov(to_reg, from_reg);


 665 void LIR_Assembler::const2mem(LIR_Opr src, LIR_Opr dest, BasicType type, CodeEmitInfo* info, bool wide) {
 666   assert(src->is_constant(), "should not call otherwise");
 667   LIR_Const* c = src->as_constant_ptr();
 668   LIR_Address* to_addr = dest->as_address_ptr();
 669 
 670   void (Assembler::* insn)(Register Rt, const Address &adr);
 671 
 672   switch (type) {
 673   case T_ADDRESS:
 674     assert(c->as_jint() == 0, "should be");
 675     insn = &Assembler::str;
 676     break;
 677   case T_LONG:
 678     assert(c->as_jlong() == 0, "should be");
 679     insn = &Assembler::str;
 680     break;
 681   case T_INT:
 682     assert(c->as_jint() == 0, "should be");
 683     insn = &Assembler::strw;
 684     break;
  685   case T_VALUETYPE: // DMS CHECK: the code differs significantly from x86
 686   case T_OBJECT:
  687   case T_ARRAY:
  688     assert(c->as_jobject() == 0, "should be");
 689     if (UseCompressedOops && !wide) {
 690       insn = &Assembler::strw;
 691     } else {
 692       insn = &Assembler::str;
 693     }
 694     break;
 695   case T_CHAR:
 696   case T_SHORT:
 697     assert(c->as_jint() == 0, "should be");
 698     insn = &Assembler::strh;
 699     break;
 700   case T_BOOLEAN:
 701   case T_BYTE:
 702     assert(c->as_jint() == 0, "should be");
 703     insn = &Assembler::strb;
 704     break;
 705   default:
 706     ShouldNotReachHere();
 707     insn = &Assembler::str;  // unreachable


1623 
1624     __ ldr(tmp2, Address(left, oopDesc::mark_offset_in_bytes()));
1625     __ andr(tmp1, tmp1, tmp2);
1626 
1627     __ ldr(tmp2, Address(right, oopDesc::mark_offset_in_bytes()));
1628     __ andr(tmp1, tmp1, tmp2); 
1629 
1630     __ mov(tmp2, (intptr_t)markOopDesc::always_locked_pattern);
1631     __ cmp(tmp1, tmp2); 
1632     __ br(Assembler::NE, L_oops_not_equal);
1633   }
1634 
1635   // (3) Same klass check: if the operands are of different klasses, they are not substitutable.
1636   if (left_klass != NULL && left_klass->is_valuetype() && left_klass == right_klass) {
1637     // No need to load klass -- the operands are statically known to be the same value klass.
1638     __ b(*op->stub()->entry());
1639   } else {
1640     Register left_klass_op = op->left_klass_op()->as_register();
1641     Register right_klass_op = op->right_klass_op()->as_register();
1642 
1643     // DMS CHECK, likely x86 bug, make aarch64 implementation correct
1644     __ load_klass(left_klass_op, left);
 1645     __ load_klass(right_klass_op, right);
 1646     __ cmp(left_klass_op, right_klass_op);
 1647     __ br(Assembler::EQ, *op->stub()->entry()); // same klass -> do slow check
1648     // fall through to L_oops_not_equal
1649   }
1650 
1651   __ bind(L_oops_not_equal);
1652   move(op->not_equal_result(), op->result_opr());
1653   __ b(L_end);
1654 
1655   __ bind(L_oops_equal);
1656   move(op->equal_result(), op->result_opr());
1657   __ b(L_end);
1658 
1659   // We've returned from the stub. op->result_opr() contains 0x0 IFF the two
1660   // operands are not substitutable. (Don't compare against 0x1 in case the
1661   // C compiler is naughty)
1662   __ bind(*op->stub()->continuation());
1663 
1664   if (op->result_opr()->type() == T_LONG) {
1665     __ cbzw(op->result_opr()->as_register(), L_oops_not_equal); // (call_stub() == 0x0) -> not_equal
 1666   } else {


+++ new/src/hotspot/cpu/aarch64/c1_LIRAssembler_aarch64.cpp

  17  * 2 along with this work; if not, write to the Free Software Foundation,
  18  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  19  *
  20  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  21  * or visit www.oracle.com if you need additional information or have any
  22  * questions.
  23  *
  24  */
  25 
  26 #include "precompiled.hpp"
  27 #include "asm/macroAssembler.inline.hpp"
  28 #include "asm/assembler.hpp"
  29 #include "c1/c1_CodeStubs.hpp"
  30 #include "c1/c1_Compilation.hpp"
  31 #include "c1/c1_LIRAssembler.hpp"
  32 #include "c1/c1_MacroAssembler.hpp"
  33 #include "c1/c1_Runtime1.hpp"
  34 #include "c1/c1_ValueStack.hpp"
  35 #include "ci/ciArrayKlass.hpp"
  36 #include "ci/ciInstance.hpp"
  37 #include "ci/ciValueKlass.hpp"
  38 #include "code/compiledIC.hpp"
  39 #include "gc/shared/barrierSet.hpp"
  40 #include "gc/shared/cardTableBarrierSet.hpp"
  41 #include "gc/shared/collectedHeap.hpp"
  42 #include "nativeInst_aarch64.hpp"
  43 #include "oops/objArrayKlass.hpp"
  44 #include "oops/oop.inline.hpp"
  45 #include "runtime/frame.inline.hpp"
  46 #include "runtime/sharedRuntime.hpp"
  47 #include "vmreg_aarch64.inline.hpp"
  48 
  49 
  50 
  51 #ifndef PRODUCT
  52 #define COMMENT(x)   do { __ block_comment(x); } while (0)
  53 #else
  54 #define COMMENT(x)
  55 #endif
  56 
  57 NEEDS_CLEANUP // remove these definitions?


 489   guarantee(code_offset() - offset <= deopt_handler_size(), "overflow");
 490   __ end_a_stub();
 491 
 492   return offset;
 493 }
 494 
 495 void LIR_Assembler::add_debug_info_for_branch(address adr, CodeEmitInfo* info) {
 496   _masm->code_section()->relocate(adr, relocInfo::poll_type);
 497   int pc_offset = code_offset();
 498   flush_debug_info(pc_offset);
 499   info->record_debug_info(compilation()->debug_info_recorder(), pc_offset);
 500   if (info->exception_handlers() != NULL) {
 501     compilation()->add_exception_handlers_for_pco(pc_offset, info->exception_handlers());
 502   }
 503 }
 504 
 505 void LIR_Assembler::return_op(LIR_Opr result) {
 506   assert(result->is_illegal() || !result->is_single_cpu() || result->as_register() == r0, "word returns are in r0,");
 507 
 508   ciMethod* method = compilation()->method();
 509 
 510   if (ValueTypeReturnedAsFields && method->signature()->returns_never_null()) {
 511     ciType* return_type = method->return_type();
 512     if (return_type->is_valuetype()) {
 513       ciValueKlass* vk = return_type->as_value_klass();
 514       if (vk->can_be_returned_as_fields()) {
 515         address unpack_handler = vk->unpack_handler();
 516         assert(unpack_handler != NULL, "must be");
 517         __ far_call(RuntimeAddress(unpack_handler));
  518         // At this point, r0 points to the value object (for interpreter or C1 caller).
 519         // The fields of the object are copied into registers (for C2 caller).
 520       }
 521     }
 522   }
 523 
 524   // Pop the stack before the safepoint code
 525   __ remove_frame(initial_frame_size_in_bytes(), needs_stack_repair());
 526 
 527   if (StackReservedPages > 0 && compilation()->has_reserved_stack_access()) {
 528     __ reserved_stack_check();
 529   }
 530 
 531   address polling_page(os::get_polling_page());
 532   __ read_polling_page(rscratch1, polling_page, relocInfo::poll_return_type);
 533   __ ret(lr);
 534 }
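
The unpack handler called above bridges two return conventions for value types: interpreter and C1 callers receive a pointer to the buffered value object (in r0 on aarch64), while C2 callers expect the fields already scattered across registers. A rough, standalone C++ analogy of those two shapes (Point and the helper names are made up for illustration; this is not HotSpot code):

    struct Point { long x; long y; };

    // Buffered form: the caller gets a pointer to a heap/buffer copy (what an
    // interpreter or C1 caller sees in r0).
    Point* return_buffered(Point* buf) { return buf; }

    // Scalarized form: the fields travel in registers (what a C2 caller expects;
    // a 16-byte trivially copyable struct is returned in x0/x1 under AAPCS64).
    Point return_as_fields(const Point* p) { return Point{p->x, p->y}; }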
 535 
 536 int LIR_Assembler::store_value_type_fields_to_buf(ciValueKlass* vk) { 
 537  return (__ store_value_type_fields_to_buf(vk, false));
 538 }
 539 
 540 int LIR_Assembler::safepoint_poll(LIR_Opr tmp, CodeEmitInfo* info) {
 541   address polling_page(os::get_polling_page());
 542   guarantee(info != NULL, "Shouldn't be NULL");
 543   assert(os::is_poll_address(polling_page), "should be");
 544   __ get_polling_page(rscratch1, polling_page, relocInfo::poll_type);
 545   add_debug_info_for_branch(info);  // This isn't just debug info:
 546                                     // it's the oop map
 547   __ read_polling_page(rscratch1, relocInfo::poll_type);
 548   return __ offset();
 549 }
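
get_polling_page/read_polling_page above implement the usual HotSpot safepoint protocol: compiled code periodically loads from a dedicated page, and the VM brings threads to a safepoint by revoking read access so the next poll traps (the debug info recorded here is the oop map describing the frame at that trap site). A minimal POSIX sketch of the mechanism, with invented helper names and no fault handler (illustration only):

    #include <sys/mman.h>

    static void* polling_page;

    void init_polling_page() {
      polling_page = mmap(nullptr, 4096, PROT_READ,
                          MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
    }
    void arm_safepoint()    { mprotect(polling_page, 4096, PROT_NONE); }  // next poll faults
    void disarm_safepoint() { mprotect(polling_page, 4096, PROT_READ); }  // polls succeed again

    void poll() {
      // What read_polling_page boils down to: a cheap load when disarmed,
      // a SIGSEGV (handled elsewhere) when armed.
      *(volatile char*)polling_page;
    }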
 550 
 551 
 552 void LIR_Assembler::move_regs(Register from_reg, Register to_reg) {
 553   if (from_reg == r31_sp)
 554     from_reg = sp;
 555   if (to_reg == r31_sp)
 556     to_reg = sp;
 557   __ mov(to_reg, from_reg);


 681 void LIR_Assembler::const2mem(LIR_Opr src, LIR_Opr dest, BasicType type, CodeEmitInfo* info, bool wide) {
 682   assert(src->is_constant(), "should not call otherwise");
 683   LIR_Const* c = src->as_constant_ptr();
 684   LIR_Address* to_addr = dest->as_address_ptr();
 685 
 686   void (Assembler::* insn)(Register Rt, const Address &adr);
 687 
 688   switch (type) {
 689   case T_ADDRESS:
 690     assert(c->as_jint() == 0, "should be");
 691     insn = &Assembler::str;
 692     break;
 693   case T_LONG:
 694     assert(c->as_jlong() == 0, "should be");
 695     insn = &Assembler::str;
 696     break;
 697   case T_INT:
 698     assert(c->as_jint() == 0, "should be");
 699     insn = &Assembler::strw;
 700     break;
 701   case T_VALUETYPE: 
 702   case T_OBJECT:
 703   case T_ARRAY:
 704     // Non-null case is not handled on aarch64 but handled on x86
 705     // FIXME: do we need to add it here?
 706     assert(c->as_jobject() == 0, "should be");
 707     if (UseCompressedOops && !wide) {
 708       insn = &Assembler::strw;
 709     } else {
 710       insn = &Assembler::str;
 711     }
 712     break;
 713   case T_CHAR:
 714   case T_SHORT:
 715     assert(c->as_jint() == 0, "should be");
 716     insn = &Assembler::strh;
 717     break;
 718   case T_BOOLEAN:
 719   case T_BYTE:
 720     assert(c->as_jint() == 0, "should be");
 721     insn = &Assembler::strb;
 722     break;
 723   default:
 724     ShouldNotReachHere();
 725     insn = &Assembler::str;  // unreachable
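
The switch above only selects a store encoding; the rest of the function (elided from this hunk) presumably issues the store through the pointer-to-member insn, which is why every case asserts a zero constant. A tiny self-contained C++ sketch of that dispatch pattern, with a made-up Emitter type standing in for Assembler:

    #include <cstdio>

    struct Emitter {
      void str (int rt, long adr) { std::printf("str  x%d, [%ld]\n", rt, adr); }
      void strw(int rt, long adr) { std::printf("strw w%d, [%ld]\n", rt, adr); }
    };

    int main() {
      // Pick the emit routine once (as the switch does), then call through the
      // member pointer, mirroring (_masm->*insn)(Rt, adr).
      void (Emitter::*insn)(int, long) = &Emitter::strw;
      Emitter masm;
      (masm.*insn)(0, 0x1000);
      return 0;
    }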


1641 
1642     __ ldr(tmp2, Address(left, oopDesc::mark_offset_in_bytes()));
1643     __ andr(tmp1, tmp1, tmp2);
1644 
1645     __ ldr(tmp2, Address(right, oopDesc::mark_offset_in_bytes()));
1646     __ andr(tmp1, tmp1, tmp2); 
1647 
1648     __ mov(tmp2, (intptr_t)markOopDesc::always_locked_pattern);
1649     __ cmp(tmp1, tmp2); 
1650     __ br(Assembler::NE, L_oops_not_equal);
1651   }
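
After the two loads/ANDs above, tmp1 holds pattern & mark(left) & mark(right) (tmp1 is seeded with markOopDesc::always_locked_pattern in lines elided from this hunk), so the equality test passes only when both mark words carry every bit of the always-locked pattern, i.e. only when both operands are value objects. A small C++ check of that bit identity, using a placeholder pattern value purely for illustration:

    #include <cassert>
    #include <cstdint>

    int main() {
      const uintptr_t pattern = 0x405;             // placeholder, not the real constant
      uintptr_t value_mark_a  = pattern | 0x7000;  // value object, unrelated bits also set
      uintptr_t value_mark_b  = pattern;           // value object
      uintptr_t plain_mark    = 0x1;               // ordinary object, pattern bits absent

      // Mirrors: tmp1 = pattern; tmp1 &= mark(left); tmp1 &= mark(right); cmp tmp1, pattern
      assert((pattern & value_mark_a & value_mark_b) == pattern);  // both value objects
      assert((pattern & value_mark_a & plain_mark)   != pattern);  // one is not
      return 0;
    }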
1652 
1653   // (3) Same klass check: if the operands are of different klasses, they are not substitutable.
1654   if (left_klass != NULL && left_klass->is_valuetype() && left_klass == right_klass) {
1655     // No need to load klass -- the operands are statically known to be the same value klass.
1656     __ b(*op->stub()->entry());
1657   } else {
1658     Register left_klass_op = op->left_klass_op()->as_register();
1659     Register right_klass_op = op->right_klass_op()->as_register();
1660 
1661     if (UseCompressedOops) {
1662       __ ldrw(left_klass_op,  Address(left,  oopDesc::klass_offset_in_bytes()));
1663       __ ldrw(right_klass_op, Address(right, oopDesc::klass_offset_in_bytes()));
1664       __ cmpw(left_klass_op, right_klass_op);
1665     } else {
1666       __ ldr(left_klass_op,  Address(left,  oopDesc::klass_offset_in_bytes()));
1667       __ ldr(right_klass_op, Address(right, oopDesc::klass_offset_in_bytes()));
1668       __ cmp(left_klass_op, right_klass_op);
1669     }
1670 
1671     __ br(Assembler::EQ, *op->stub()->entry()); // same klass -> do slow check
1672     // fall through to L_oops_not_equal
1673   }
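
Note that with UseCompressedOops the two klass words are compared as raw 32-bit narrow values; no decoding is needed because the narrow-klass encoding is injective for a fixed base and shift, so equal encoded words imply equal Klass* and vice versa. A short C++ sketch of that argument (encode(), base and shift are stand-ins, not HotSpot's routines):

    #include <cassert>
    #include <cstdint>

    // Stand-in for narrow-klass encoding: (klass - base) >> shift is injective
    // over the class space, so comparing encoded words equals comparing pointers.
    static uint32_t encode(uintptr_t klass, uintptr_t base, unsigned shift) {
      return (uint32_t)((klass - base) >> shift);
    }

    int main() {
      const uintptr_t base  = 0x800000000ULL;  // assumed compressed-class base
      const unsigned  shift = 3;               // assumed shift
      uintptr_t k1 = base + 0x1000, k2 = base + 0x2000;

      assert(encode(k1, base, shift) == encode(k1, base, shift));  // same klass
      assert(encode(k1, base, shift) != encode(k2, base, shift));  // different klasses
      return 0;
    }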
1674 
1675   __ bind(L_oops_not_equal);
1676   move(op->not_equal_result(), op->result_opr());
1677   __ b(L_end);
1678 
1679   __ bind(L_oops_equal);
1680   move(op->equal_result(), op->result_opr());
1681   __ b(L_end);
1682 
1683   // We've returned from the stub. op->result_opr() contains 0x0 IFF the two
1684   // operands are not substitutable. (Don't compare against 0x1 in case the
1685   // C compiler is naughty)
1686   __ bind(*op->stub()->continuation());
1687 
1688   if (op->result_opr()->type() == T_LONG) {
1689     __ cbzw(op->result_opr()->as_register(), L_oops_not_equal); // (call_stub() == 0x0) -> not_equal
1690   } else {

