< prev index next >

src/hotspot/cpu/x86/assembler_x86.cpp

Print this page




4240   InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
4241   attributes.set_address_attributes(/* tuple_type */ EVEX_FVM, /* input_size_in_bits */ EVEX_NObit);
4242   simd_prefix(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
4243   emit_int8(0x00);
4244   emit_operand(dst, src);
4245 }
4246 
// PSHUFD xmm, xmm, imm8 (66 0F 70 /r ib): shuffle the four doublewords of
// src into dst according to the 2-bit fields of the imm8 selector 'mode'.
void Assembler::pshufd(XMMRegister dst, XMMRegister src, int mode) {
  assert(isByte(mode), "invalid value");  // selector is encoded as a single imm8
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  // On AVX-512 targets without VL support only 512-bit EVEX forms exist,
  // so widen the attribute vector length there.
  int vector_len = VM_Version::supports_avx512novl() ? AVX_512bit : AVX_128bit;
  InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  int encode = simd_prefix_and_encode(dst, xnoreg, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int8(0x70);
  emit_int8((unsigned char)(0xC0 | encode));  // ModRM, register-direct form
  emit_int8(mode & 0xFF);                     // imm8 shuffle selector
}
4257 
4258 void Assembler::vpshufd(XMMRegister dst, XMMRegister src, int mode, int vector_len) {
4259   assert(vector_len == AVX_128bit? VM_Version::supports_avx() :
4260          vector_len == AVX_256bit? VM_Version::supports_avx2() :
4261          0, "");
4262   NOT_LP64(assert(VM_Version::supports_sse2(), ""));
4263   InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
4264   int encode = simd_prefix_and_encode(dst, xnoreg, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
4265   emit_int8(0x70);
4266   emit_int8((unsigned char)(0xC0 | encode));
4267   emit_int8(mode & 0xFF);
4268 }
4269 
// PSHUFD xmm, m128, imm8: shuffle doublewords loaded from memory into dst.
void Assembler::pshufd(XMMRegister dst, Address src, int mode) {
  assert(isByte(mode), "invalid value");  // selector is encoded as a single imm8
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  // Legacy SSE form would require a 16-byte aligned operand; only allow AVX here.
  assert((UseAVX > 0), "SSE mode requires address alignment 16 bytes");
  InstructionMark im(this);
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  // EVEX full-vector tuple with 32-bit element size (affects disp8 scaling).
  attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_32bit);
  simd_prefix(dst, xnoreg, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int8(0x70);
  emit_operand(dst, src);   // ModRM + SIB + displacement for the memory operand
  emit_int8(mode & 0xFF);   // imm8 shuffle selector
}


// SHL r32, CL (D3 /4): shift dst left by the count held in CL.
void Assembler::shll(Register dst) {
  int encode = prefix_and_encode(dst->encoding());  // emits REX prefix if needed
  emit_int8((unsigned char)0xD3);
  emit_int8((unsigned char)(0xE0 | encode));  // ModRM: mod=11, /4, rm=dst
}
4725 
// SHR r32, imm8 (C1 /5 ib): shift dst right by the immediate count.
void Assembler::shrl(Register dst, int imm8) {
  assert(isShiftCount(imm8), "illegal shift count");  // count must fit the encoding
  int encode = prefix_and_encode(dst->encoding());    // emits REX prefix if needed
  emit_int8((unsigned char)0xC1);
  emit_int8((unsigned char)(0xE8 | encode));  // ModRM: mod=11, /5, rm=dst
  emit_int8(imm8);
}
4733 
// SHR r32, CL (D3 /5): shift dst right by the count held in CL.
void Assembler::shrl(Register dst) {
  int encode = prefix_and_encode(dst->encoding());  // emits REX prefix if needed
  emit_int8((unsigned char)0xD3);
  emit_int8((unsigned char)(0xE8 | encode));  // ModRM: mod=11, /5, rm=dst
}
4739 






























// MOVSD string move (opcode A5): copies a single (double)word from [esi] to [edi].
void Assembler::smovl() {
  emit_int8((unsigned char)0xA5);
}
4744 
// ROUNDSD xmm, xmm, imm8 (66 0F 3A 0B /r ib): round the low double of src
// into dst; 'rmode' is the imm8 rounding-control field.
void Assembler::roundsd(XMMRegister dst, XMMRegister src, int32_t rmode) {
  assert(VM_Version::supports_sse4_1(), "");
  // legacy_mode forces the non-EVEX encoding; no vector-length semantics apply.
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
  int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
  emit_int8(0x0B);
  emit_int8((unsigned char)(0xC0 | encode));  // ModRM, register-direct form
  emit_int8((unsigned char)rmode);            // imm8 rounding control
}
4753 
4754 void Assembler::roundsd(XMMRegister dst, Address src, int32_t rmode) {
4755   assert(VM_Version::supports_sse4_1(), "");
4756   InstructionMark im(this);
4757   InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
4758   simd_prefix(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
4759   emit_int8(0x0B);


6496 }
6497 
// VPAND dst, nds, m (66 0F DB /r): bitwise AND of nds with a memory operand.
void Assembler::vpand(XMMRegister dst, XMMRegister nds, Address src, int vector_len) {
  assert(UseAVX > 0, "requires some form of AVX");
  InstructionMark im(this);
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  // EVEX full-vector tuple, 32-bit elements (affects disp8 scaling).
  attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_32bit);
  vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int8((unsigned char)0xDB);
  emit_operand(dst, src);  // ModRM + SIB + displacement for the memory operand
}
6507 
// VPANDQ dst, nds, src: quadword-element AND; EVEX-only (vex_w = 1 selects
// the 64-bit element form).
void Assembler::vpandq(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
  assert(VM_Version::supports_evex(), "");
  InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int8((unsigned char)0xDB);
  emit_int8((unsigned char)(0xC0 | encode));  // ModRM, register-direct form
}
6515 

















6516 
// PANDN dst, src (66 0F DF /r): dst = ~dst & src.
void Assembler::pandn(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionAttr attributes(AVX_128bit, /* vex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  // Allow the W bit to be dropped when the legacy/VEX encoding is chosen.
  attributes.set_rex_vex_w_reverted();
  int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int8((unsigned char)0xDF);
  emit_int8((unsigned char)(0xC0 | encode));  // ModRM, register-direct form
}
6525 
// VPANDN dst, nds, src: dst = ~nds & src (three-operand AVX form of PANDN).
void Assembler::vpandn(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
  assert(UseAVX > 0, "requires some form of AVX");
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int8((unsigned char)0xDF);
  emit_int8((unsigned char)(0xC0 | encode));  // ModRM, register-direct form
}
6533 
6534 
6535 void Assembler::por(XMMRegister dst, XMMRegister src) {


8090 }
8091 
// POPA (61): pop all general-purpose registers; 32-bit mode only.
void Assembler::popa() { // 32bit
  emit_int8(0x61);
}
8095 
// PUSH imm32 (68 id), with relocation info attached so the immediate can be
// patched (e.g. for oops/addresses).
void Assembler::push_literal32(int32_t imm32, RelocationHolder const& rspec) {
  InstructionMark im(this);
  emit_int8(0x68);
  emit_data(imm32, rspec, 0);  // records the relocation for the 32-bit payload
}
8101 
// PUSHA (60): push all general-purpose registers; 32-bit mode only.
void Assembler::pusha() { // 32bit
  emit_int8(0x60);
}
8105 
// SETNZ dst (0F 95): set the byte register to 1 if ZF == 0, else 0.
// 32-bit-only variant: no REX handling, so dst must be a legacy byte register.
void Assembler::set_byte_if_not_zero(Register dst) {
  emit_int8(0x0F);
  emit_int8((unsigned char)0x95);
  emit_int8((unsigned char)(0xE0 | dst->encoding()));  // ModRM with rm = dst
}
8111 
// SHLD dst, src, CL (0F A5 /r): double-precision left shift; 32-bit-only
// variant that builds the ModRM byte directly (no prefix needed).
void Assembler::shldl(Register dst, Register src) {
  emit_int8(0x0F);
  emit_int8((unsigned char)0xA5);
  emit_int8((unsigned char)(0xC0 | src->encoding() << 3 | dst->encoding()));  // ModRM: reg=src, rm=dst
}
8117 
// 0F A4 / r ib
// SHLD dst, src, imm8: double-precision left shift by an immediate count;
// 32-bit-only variant that builds the ModRM byte directly.
void Assembler::shldl(Register dst, Register src, int8_t imm8) {
  emit_int8(0x0F);
  emit_int8((unsigned char)0xA4);
  emit_int8((unsigned char)(0xC0 | src->encoding() << 3 | dst->encoding()));  // ModRM: reg=src, rm=dst
  emit_int8(imm8);  // shift count
}
8125 
// SHRD dst, src, CL (0F AD /r): double-precision right shift; 32-bit-only
// variant that builds the ModRM byte directly.
void Assembler::shrdl(Register dst, Register src) {
  emit_int8(0x0F);
  emit_int8((unsigned char)0xAD);
  emit_int8((unsigned char)(0xC0 | src->encoding() << 3 | dst->encoding()));  // ModRM: reg=src, rm=dst
}
8131 
8132 #else // LP64
8133 
// SETNZ dst (0F 95), 64-bit variant: prefix_and_encode(..., true) emits a REX
// prefix when needed so any byte register (spl..r15b) is encodable.
void Assembler::set_byte_if_not_zero(Register dst) {
  int enc = prefix_and_encode(dst->encoding(), true);
  emit_int8(0x0F);
  emit_int8((unsigned char)0x95);
  emit_int8((unsigned char)(0xE0 | enc));  // ModRM with rm = dst
}
8140 
8141 // 64bit only pieces of the assembler
8142 // This should only be used by 64bit instructions that can use rip-relative
8143 // it cannot be used by instructions that want an immediate value.
8144 
8145 bool Assembler::reachable(AddressLiteral adr) {
8146   int64_t disp;
8147   // None will force a 64bit literal to the code stream. Likely a placeholder
8148   // for something that will be patched later and we need to certain it will
8149   // always be reachable.




4240   InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
4241   attributes.set_address_attributes(/* tuple_type */ EVEX_FVM, /* input_size_in_bits */ EVEX_NObit);
4242   simd_prefix(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
4243   emit_int8(0x00);
4244   emit_operand(dst, src);
4245 }
4246 
// PSHUFD xmm, xmm, imm8 (66 0F 70 /r ib): shuffle the four doublewords of
// src into dst according to the 2-bit fields of the imm8 selector 'mode'.
void Assembler::pshufd(XMMRegister dst, XMMRegister src, int mode) {
  assert(isByte(mode), "invalid value");  // selector is encoded as a single imm8
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  // On AVX-512 targets without VL support only 512-bit EVEX forms exist,
  // so widen the attribute vector length there.
  int vector_len = VM_Version::supports_avx512novl() ? AVX_512bit : AVX_128bit;
  InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  int encode = simd_prefix_and_encode(dst, xnoreg, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int8(0x70);
  emit_int8((unsigned char)(0xC0 | encode));  // ModRM, register-direct form
  emit_int8(mode & 0xFF);                     // imm8 shuffle selector
}
4257 
4258 void Assembler::vpshufd(XMMRegister dst, XMMRegister src, int mode, int vector_len) {
4259   assert(vector_len == AVX_128bit? VM_Version::supports_avx() :
4260          (vector_len == AVX_256bit? VM_Version::supports_avx2() :
4261          (vector_len == AVX_512bit? VM_Version::supports_evex() : 0)), "");
4262   NOT_LP64(assert(VM_Version::supports_sse2(), ""));
4263   InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
4264   int encode = simd_prefix_and_encode(dst, xnoreg, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
4265   emit_int8(0x70);
4266   emit_int8((unsigned char)(0xC0 | encode));
4267   emit_int8(mode & 0xFF);
4268 }
4269 
// PSHUFD xmm, m128, imm8: shuffle doublewords loaded from memory into dst.
void Assembler::pshufd(XMMRegister dst, Address src, int mode) {
  assert(isByte(mode), "invalid value");  // selector is encoded as a single imm8
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  // Legacy SSE form would require a 16-byte aligned operand; only allow AVX here.
  assert((UseAVX > 0), "SSE mode requires address alignment 16 bytes");
  InstructionMark im(this);
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  // EVEX full-vector tuple with 32-bit element size (affects disp8 scaling).
  attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_32bit);
  simd_prefix(dst, xnoreg, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int8(0x70);
  emit_operand(dst, src);   // ModRM + SIB + displacement for the memory operand
  emit_int8(mode & 0xFF);   // imm8 shuffle selector
}


// SHL r32, CL (D3 /4): shift dst left by the count held in CL.
void Assembler::shll(Register dst) {
  int encode = prefix_and_encode(dst->encoding());  // emits REX prefix if needed
  emit_int8((unsigned char)0xD3);
  emit_int8((unsigned char)(0xE0 | encode));  // ModRM: mod=11, /4, rm=dst
}
4725 
// SHR r32, imm8 (C1 /5 ib): shift dst right by the immediate count.
void Assembler::shrl(Register dst, int imm8) {
  assert(isShiftCount(imm8), "illegal shift count");  // count must fit the encoding
  int encode = prefix_and_encode(dst->encoding());    // emits REX prefix if needed
  emit_int8((unsigned char)0xC1);
  emit_int8((unsigned char)(0xE8 | encode));  // ModRM: mod=11, /5, rm=dst
  emit_int8(imm8);
}
4733 
// SHR r32, CL (D3 /5): shift dst right by the count held in CL.
void Assembler::shrl(Register dst) {
  int encode = prefix_and_encode(dst->encoding());  // emits REX prefix if needed
  emit_int8((unsigned char)0xD3);
  emit_int8((unsigned char)(0xE8 | encode));  // ModRM: mod=11, /5, rm=dst
}
4739 
// SHLD dst, src, CL (0F A5 /r): double-precision left shift. This variant
// uses prefix_and_encode so it is valid on both 32- and 64-bit targets.
void Assembler::shldl(Register dst, Register src) {
  int encode = prefix_and_encode(src->encoding(), dst->encoding());  // ModRM reg=src, rm=dst
  emit_int8(0x0F);
  emit_int8((unsigned char)0xA5);
  emit_int8((unsigned char)(0xC0 | encode));
}
4746 
// SHLD dst, src, imm8 (0F A4 /r ib): double-precision left shift by an
// immediate count.
void Assembler::shldl(Register dst, Register src, int8_t imm8) {
  int encode = prefix_and_encode(src->encoding(), dst->encoding());  // ModRM reg=src, rm=dst
  emit_int8(0x0F);
  emit_int8((unsigned char)0xA4);
  emit_int8((unsigned char)(0xC0 | encode));
  emit_int8(imm8);  // shift count
}
4754 
// SHRD dst, src, CL (0F AD /r): double-precision right shift.
void Assembler::shrdl(Register dst, Register src) {
  int encode = prefix_and_encode(src->encoding(), dst->encoding());  // ModRM reg=src, rm=dst
  emit_int8(0x0F);
  emit_int8((unsigned char)0xAD);
  emit_int8((unsigned char)(0xC0 | encode));
}
4761 
// SHRD dst, src, imm8 (0F AC /r ib): double-precision right shift by an
// immediate count.
void Assembler::shrdl(Register dst, Register src, int8_t imm8) {
  int encode = prefix_and_encode(src->encoding(), dst->encoding());  // ModRM reg=src, rm=dst
  emit_int8(0x0F);
  emit_int8((unsigned char)0xAC);
  emit_int8((unsigned char)(0xC0 | encode));
  emit_int8(imm8);  // shift count
}
4769 
// MOVSD string move (opcode A5): copies a single (double)word from [esi] to [edi].
void Assembler::smovl() {
  emit_int8((unsigned char)0xA5);
}
4774 
// ROUNDSD xmm, xmm, imm8 (66 0F 3A 0B /r ib): round the low double of src
// into dst; 'rmode' is the imm8 rounding-control field.
void Assembler::roundsd(XMMRegister dst, XMMRegister src, int32_t rmode) {
  assert(VM_Version::supports_sse4_1(), "");
  // legacy_mode forces the non-EVEX encoding; no vector-length semantics apply.
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
  int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
  emit_int8(0x0B);
  emit_int8((unsigned char)(0xC0 | encode));  // ModRM, register-direct form
  emit_int8((unsigned char)rmode);            // imm8 rounding control
}
4783 
4784 void Assembler::roundsd(XMMRegister dst, Address src, int32_t rmode) {
4785   assert(VM_Version::supports_sse4_1(), "");
4786   InstructionMark im(this);
4787   InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
4788   simd_prefix(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
4789   emit_int8(0x0B);


6526 }
6527 
// VPAND dst, nds, m (66 0F DB /r): bitwise AND of nds with a memory operand.
void Assembler::vpand(XMMRegister dst, XMMRegister nds, Address src, int vector_len) {
  assert(UseAVX > 0, "requires some form of AVX");
  InstructionMark im(this);
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  // EVEX full-vector tuple, 32-bit elements (affects disp8 scaling).
  attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_32bit);
  vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int8((unsigned char)0xDB);
  emit_operand(dst, src);  // ModRM + SIB + displacement for the memory operand
}
6537 
// VPANDQ dst, nds, src: quadword-element AND; EVEX-only (vex_w = 1 selects
// the 64-bit element form).
void Assembler::vpandq(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
  assert(VM_Version::supports_evex(), "");
  InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int8((unsigned char)0xDB);
  emit_int8((unsigned char)(0xC0 | encode));  // ModRM, register-direct form
}
6545 
// VPSHLDVD dst, src, shift (EVEX.66.0F38 71 /r): AVX-512 VBMI2
// concatenate-and-variable-shift-left of doubleword elements.
void Assembler::vpshldvd(XMMRegister dst, XMMRegister src, XMMRegister shift, int vector_len) {
  assert(UseVBMI2, "requires vbmi2");
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_is_evex_instruction();  // VBMI2 instructions are EVEX-only
  int encode = vex_prefix_and_encode(dst->encoding(), src->encoding(), shift->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int8(0x71);
  emit_int8((unsigned char)(0xC0 | encode));  // ModRM, register-direct form
}
6554 
// VPSHRDVD dst, src, shift (EVEX.66.0F38 73 /r): AVX-512 VBMI2
// concatenate-and-variable-shift-right of doubleword elements.
void Assembler::vpshrdvd(XMMRegister dst, XMMRegister src, XMMRegister shift, int vector_len) {
  assert(UseVBMI2, "requires vbmi2");
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_is_evex_instruction();  // VBMI2 instructions are EVEX-only
  int encode = vex_prefix_and_encode(dst->encoding(), src->encoding(), shift->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int8(0x73);
  emit_int8((unsigned char)(0xC0 | encode));  // ModRM, register-direct form
}
6563 
// PANDN dst, src (66 0F DF /r): dst = ~dst & src.
void Assembler::pandn(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionAttr attributes(AVX_128bit, /* vex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  // Allow the W bit to be dropped when the legacy/VEX encoding is chosen.
  attributes.set_rex_vex_w_reverted();
  int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int8((unsigned char)0xDF);
  emit_int8((unsigned char)(0xC0 | encode));  // ModRM, register-direct form
}
6572 
// VPANDN dst, nds, src: dst = ~nds & src (three-operand AVX form of PANDN).
void Assembler::vpandn(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
  assert(UseAVX > 0, "requires some form of AVX");
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int8((unsigned char)0xDF);
  emit_int8((unsigned char)(0xC0 | encode));  // ModRM, register-direct form
}
6580 
6581 
6582 void Assembler::por(XMMRegister dst, XMMRegister src) {


8137 }
8138 
// POPA (61): pop all general-purpose registers; 32-bit mode only.
void Assembler::popa() { // 32bit
  emit_int8(0x61);
}
8142 
// PUSH imm32 (68 id), with relocation info attached so the immediate can be
// patched (e.g. for oops/addresses).
void Assembler::push_literal32(int32_t imm32, RelocationHolder const& rspec) {
  InstructionMark im(this);
  emit_int8(0x68);
  emit_data(imm32, rspec, 0);  // records the relocation for the 32-bit payload
}
8148 
// PUSHA (60): push all general-purpose registers; 32-bit mode only.
void Assembler::pusha() { // 32bit
  emit_int8(0x60);
}
8152 
// SETNZ dst (0F 95): set the byte register to 1 if ZF == 0, else 0.
// 32-bit-only variant: no REX handling, so dst must be a legacy byte register.
void Assembler::set_byte_if_not_zero(Register dst) {
  emit_int8(0x0F);
  emit_int8((unsigned char)0x95);
  emit_int8((unsigned char)(0xE0 | dst->encoding()));  // ModRM with rm = dst
}
8158 
8159 #else // LP64
8160 
// SETNZ dst (0F 95), 64-bit variant: prefix_and_encode(..., true) emits a REX
// prefix when needed so any byte register (spl..r15b) is encodable.
void Assembler::set_byte_if_not_zero(Register dst) {
  int enc = prefix_and_encode(dst->encoding(), true);
  emit_int8(0x0F);
  emit_int8((unsigned char)0x95);
  emit_int8((unsigned char)(0xE0 | enc));  // ModRM with rm = dst
}
8167 
8168 // 64bit only pieces of the assembler
8169 // This should only be used by 64bit instructions that can use rip-relative
8170 // it cannot be used by instructions that want an immediate value.
8171 
8172 bool Assembler::reachable(AddressLiteral adr) {
8173   int64_t disp;
8174   // None will force a 64bit literal to the code stream. Likely a placeholder
8175   // for something that will be patched later and we need to certain it will
8176   // always be reachable.


< prev index next >