/*
 * Copyright (c) 1997, 2020, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "asm/assembler.hpp"
#include "asm/assembler.inline.hpp"
#include "gc/shared/cardTableBarrierSet.hpp"
#include "gc/shared/collectedHeap.inline.hpp"
#include "interpreter/interpreter.hpp"
#include "memory/resourceArea.hpp"
#include "prims/methodHandles.hpp"
#include "runtime/biasedLocking.hpp"
#include "runtime/objectMonitor.hpp"
#include "runtime/os.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubRoutines.hpp"
#include "utilities/macros.hpp"

#ifdef PRODUCT
#define BLOCK_COMMENT(str) /* nothing */
#define STOP(error) stop(error)
#else
#define BLOCK_COMMENT(str) block_comment(str)
#define STOP(error) block_comment(error); stop(error)
#endif

#define BIND(label) bind(label); BLOCK_COMMENT(#label ":")
// Implementation of AddressLiteral

// A 2-D table for managing compressed displacement(disp8) on EVEX enabled platforms.
unsigned char tuple_table[Assembler::EVEX_ETUP + 1][Assembler::AVX_512bit + 1] = {
  // -----------------Table 4.5 -------------------- //
  16, 32, 64,  // EVEX_FV(0)
  4,  4,  4,   // EVEX_FV(1) - with Evex.b
  16, 32, 64,  // EVEX_FV(2) - with Evex.w
  8,  8,  8,   // EVEX_FV(3) - with Evex.w and Evex.b
  8,  16, 32,  // EVEX_HV(0)
  4,  4,  4,   // EVEX_HV(1) - with Evex.b
  // -----------------Table 4.6 -------------------- //
  16, 32, 64,  // EVEX_FVM(0)
  1,  1,  1,   // EVEX_T1S(0)
  2,  2,  2,   // EVEX_T1S(1)
  4,  4,  4,   // EVEX_T1S(2)
  8,  8,  8,   // EVEX_T1S(3)
  4,  4,  4,   // EVEX_T1F(0)
  8,  8,  8,   // EVEX_T1F(1)
  8,  8,  8,   // EVEX_T2(0)
  0,  16, 16,  // EVEX_T2(1)
  0,  16, 16,  // EVEX_T4(0)
  0,  0,  32,  // EVEX_T4(1)
  0,  0,  32,  // EVEX_T8(0)
  8,  16, 32,  // EVEX_HVM(0)
  4,  8,  16,  // EVEX_QVM(0)
  2,  4,  8,   // EVEX_OVM(0)
  16, 16, 16,  // EVEX_M128(0)
  8,  32, 64,  // EVEX_DUP(0)
  0,  0,  0    // EVEX_NTUP
};
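// Editor's worked example (not from the original source): the table above
// supplies the scaling factor N for EVEX disp8*N compression. Assuming a
// full-vector (EVEX_FV) 512-bit operand without Evex.b or Evex.w, the table
// row gives N = 64, so a displacement of 128 is emitted as the compressed
// disp8 value 128 / 64 = 2, while a displacement of 100 is not a multiple
// of 64 and must be emitted as a full disp32.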

AddressLiteral::AddressLiteral(address target, relocInfo::relocType rtype) {
  _is_lval = false;
  _target = target;
  switch (rtype) {
  case relocInfo::oop_type:
  case relocInfo::metadata_type:
    // Oops are a special case. Normally they would be their own section
    // but in cases like icBuffer they are literals in the code stream that
    // we don't have a section for. We use none so that we get a literal address
    // which is always patchable.
    break;
  case relocInfo::external_word_type:
    _rspec = external_word_Relocation::spec(target);
    break;
  case relocInfo::internal_word_type:
    _rspec = internal_word_Relocation::spec(target);
    break;
  case relocInfo::opt_virtual_call_type:
    _rspec = opt_virtual_call_Relocation::spec();
    break;
  case relocInfo::static_call_type:
    _rspec = static_call_Relocation::spec();
    break;
  case relocInfo::runtime_call_type:
    _rspec = runtime_call_Relocation::spec();
    break;
  case relocInfo::poll_type:
  case relocInfo::poll_return_type:
    _rspec = Relocation::spec_simple(rtype);
    break;
  case relocInfo::none:
    break;
  default:
    ShouldNotReachHere();
    break;
  }
}

// Implementation of Address

#ifdef _LP64

Address Address::make_array(ArrayAddress adr) {
  // Not implementable on 64bit machines
  // Should have been handled higher up the call chain.
  ShouldNotReachHere();
  return Address();
}

// exceedingly dangerous constructor
Address::Address(int disp, address loc, relocInfo::relocType rtype) {
  _base  = noreg;
  _index = noreg;
  _scale = no_scale;
  _disp  = disp;
  _xmmindex = xnoreg;
  _isxmmindex = false;
  switch (rtype) {
    case relocInfo::external_word_type:
      _rspec = external_word_Relocation::spec(loc);
      break;
    case relocInfo::internal_word_type:
      _rspec = internal_word_Relocation::spec(loc);
      break;
    case relocInfo::runtime_call_type:
      // HMM
      _rspec = runtime_call_Relocation::spec();
      break;
    case relocInfo::poll_type:
    case relocInfo::poll_return_type:
      _rspec = Relocation::spec_simple(rtype);
      break;
    case relocInfo::none:
      break;
    default:
      ShouldNotReachHere();
  }
}
#else // LP64

Address Address::make_array(ArrayAddress adr) {
  AddressLiteral base = adr.base();
  Address index = adr.index();
  assert(index._disp == 0, "must not have disp"); // maybe it can?
  Address array(index._base, index._index, index._scale, (intptr_t) base.target());
  array._rspec = base._rspec;
  return array;
}

// exceedingly dangerous constructor
Address::Address(address loc, RelocationHolder spec) {
  _base  = noreg;
  _index = noreg;
  _scale = no_scale;
  _disp  = (intptr_t) loc;
  _rspec = spec;
  _xmmindex = xnoreg;
  _isxmmindex = false;
}

#endif // _LP64


// Convert the raw encoding form into the form expected by the constructor for
// Address. An index of 4 (rsp) corresponds to having no index, so convert
// that to noreg for the Address constructor.
Address Address::make_raw(int base, int index, int scale, int disp, relocInfo::relocType disp_reloc) {
  RelocationHolder rspec;
  if (disp_reloc != relocInfo::none) {
    rspec = Relocation::spec_simple(disp_reloc);
  }
  bool valid_index = index != rsp->encoding();
  if (valid_index) {
    Address madr(as_Register(base), as_Register(index), (Address::ScaleFactor)scale, in_ByteSize(disp));
    madr._rspec = rspec;
    return madr;
  } else {
    Address madr(as_Register(base), noreg, Address::no_scale, in_ByteSize(disp));
    madr._rspec = rspec;
    return madr;
  }
}

// Implementation of Assembler

int AbstractAssembler::code_fill_byte() {
  return (u_char)'\xF4'; // hlt
}

// make this go away someday
void Assembler::emit_data(jint data, relocInfo::relocType rtype, int format) {
  if (rtype == relocInfo::none)
    emit_int32(data);
  else
    emit_data(data, Relocation::spec_simple(rtype), format);
}

void Assembler::emit_data(jint data, RelocationHolder const& rspec, int format) {
  assert(imm_operand == 0, "default format must be immediate in this file");
  assert(inst_mark() != NULL, "must be inside InstructionMark");
  if (rspec.type() != relocInfo::none) {
#ifdef ASSERT
    check_relocation(rspec, format);
#endif
    // Do not use AbstractAssembler::relocate, which is not intended for
    // embedded words. Instead, relocate to the enclosing instruction.

    // hack. call32 is too wide for mask so use disp32
    if (format == call32_operand)
      code_section()->relocate(inst_mark(), rspec, disp32_operand);
    else
      code_section()->relocate(inst_mark(), rspec, format);
  }
  emit_int32(data);
}

static int encode(Register r) {
  int enc = r->encoding();
  if (enc >= 8) {
    enc -= 8;
  }
  return enc;
}

void Assembler::emit_arith_b(int op1, int op2, Register dst, int imm8) {
  assert(dst->has_byte_register(), "must have byte register");
  assert(isByte(op1) && isByte(op2), "wrong opcode");
  assert(isByte(imm8), "not a byte");
  assert((op1 & 0x01) == 0, "should be 8bit operation");
  emit_int24(op1, op2 | encode(dst), imm8);
}


void Assembler::emit_arith(int op1, int op2, Register dst, int32_t imm32) {
  assert(isByte(op1) && isByte(op2), "wrong opcode");
  assert((op1 & 0x01) == 1, "should be 32bit operation");
  assert((op1 & 0x02) == 0, "sign-extension bit should not be set");
  if (is8bit(imm32)) {
    // set sign bit
    emit_int24(op1 | 0x02, op2 | encode(dst), imm32 & 0xFF);
  } else {
    emit_int16(op1, op2 | encode(dst));
    emit_int32(imm32);
  }
}

// Force generation of a 4 byte immediate value even if it fits into 8bit
void Assembler::emit_arith_imm32(int op1, int op2, Register dst, int32_t imm32) {
  assert(isByte(op1) && isByte(op2), "wrong opcode");
  assert((op1 & 0x01) == 1, "should be 32bit operation");
  assert((op1 & 0x02) == 0, "sign-extension bit should not be set");
  emit_int16(op1, op2 | encode(dst));
  emit_int32(imm32);
}

// immediate-to-memory forms
void Assembler::emit_arith_operand(int op1, Register rm, Address adr, int32_t imm32) {
  assert((op1 & 0x01) == 1, "should be 32bit operation");
  assert((op1 & 0x02) == 0, "sign-extension bit should not be set");
  if (is8bit(imm32)) {
    emit_int8(op1 | 0x02); // set sign bit
    emit_operand(rm, adr, 1);
    emit_int8(imm32 & 0xFF);
  } else {
    emit_int8(op1);
    emit_operand(rm, adr, 4);
    emit_int32(imm32);
  }
}
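// Editor's worked example (not from the original source): with op1 = 0x81
// and op2 = 0xC0, emit_arith encodes "addl rbx, imm". An immediate that
// fits in a signed byte takes the sign-extended form, so addl(rbx, 5)
// emits the bytes 83 C3 05, while addl(rbx, 0x1234) emits the full form
// 81 C3 34 12 00 00.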

void Assembler::emit_arith(int op1, int op2, Register dst, Register src) {
  assert(isByte(op1) && isByte(op2), "wrong opcode");
  emit_int16(op1, op2 | encode(dst) << 3 | encode(src));
}


bool Assembler::query_compressed_disp_byte(int disp, bool is_evex_inst, int vector_len,
                                           int cur_tuple_type, int in_size_in_bits, int cur_encoding) {
  int mod_idx = 0;
  // We will test if the displacement fits the compressed format and if so
  // apply the compression to the displacement iff the result is 8bit.
  if (VM_Version::supports_evex() && is_evex_inst) {
    switch (cur_tuple_type) {
    case EVEX_FV:
      if ((cur_encoding & VEX_W) == VEX_W) {
        mod_idx = ((cur_encoding & EVEX_Rb) == EVEX_Rb) ? 3 : 2;
      } else {
        mod_idx = ((cur_encoding & EVEX_Rb) == EVEX_Rb) ? 1 : 0;
      }
      break;

    case EVEX_HV:
      mod_idx = ((cur_encoding & EVEX_Rb) == EVEX_Rb) ? 1 : 0;
      break;

    case EVEX_FVM:
      break;

    case EVEX_T1S:
      switch (in_size_in_bits) {
      case EVEX_8bit:
        break;

      case EVEX_16bit:
        mod_idx = 1;
        break;

      case EVEX_32bit:
        mod_idx = 2;
        break;

      case EVEX_64bit:
        mod_idx = 3;
        break;
      }
      break;

    case EVEX_T1F:
    case EVEX_T2:
    case EVEX_T4:
      mod_idx = (in_size_in_bits == EVEX_64bit) ? 1 : 0;
      break;

    case EVEX_T8:
      break;

    case EVEX_HVM:
      break;

    case EVEX_QVM:
      break;

    case EVEX_OVM:
      break;

    case EVEX_M128:
      break;

    case EVEX_DUP:
      break;

    default:
      assert(0, "no valid evex tuple_table entry");
      break;
    }

    if (vector_len >= AVX_128bit && vector_len <= AVX_512bit) {
      int disp_factor = tuple_table[cur_tuple_type + mod_idx][vector_len];
      if ((disp % disp_factor) == 0) {
        int new_disp = disp / disp_factor;
        if ((-0x80 <= new_disp && new_disp < 0x80)) {
          disp = new_disp;
        }
      } else {
        return false;
      }
    }
  }
  return (-0x80 <= disp && disp < 0x80);
}


bool Assembler::emit_compressed_disp_byte(int &disp) {
  int mod_idx = 0;
  // We will test if the displacement fits the compressed format and if so
  // apply the compression to the displacement iff the result is 8bit.
  if (VM_Version::supports_evex() && _attributes && _attributes->is_evex_instruction()) {
    int evex_encoding = _attributes->get_evex_encoding();
    int tuple_type = _attributes->get_tuple_type();
    switch (tuple_type) {
    case EVEX_FV:
      if ((evex_encoding & VEX_W) == VEX_W) {
        mod_idx = ((evex_encoding & EVEX_Rb) == EVEX_Rb) ? 3 : 2;
      } else {
        mod_idx = ((evex_encoding & EVEX_Rb) == EVEX_Rb) ? 1 : 0;
      }
      break;

    case EVEX_HV:
      mod_idx = ((evex_encoding & EVEX_Rb) == EVEX_Rb) ? 1 : 0;
      break;

    case EVEX_FVM:
      break;

    case EVEX_T1S:
      switch (_attributes->get_input_size()) {
      case EVEX_8bit:
        break;

      case EVEX_16bit:
        mod_idx = 1;
        break;

      case EVEX_32bit:
        mod_idx = 2;
        break;

      case EVEX_64bit:
        mod_idx = 3;
        break;
      }
      break;

    case EVEX_T1F:
    case EVEX_T2:
    case EVEX_T4:
      mod_idx = (_attributes->get_input_size() == EVEX_64bit) ? 1 : 0;
      break;

    case EVEX_T8:
      break;

    case EVEX_HVM:
      break;

    case EVEX_QVM:
      break;

    case EVEX_OVM:
      break;

    case EVEX_M128:
      break;

    case EVEX_DUP:
      break;

    default:
      assert(0, "no valid evex tuple_table entry");
      break;
    }

    int vector_len = _attributes->get_vector_len();
    if (vector_len >= AVX_128bit && vector_len <= AVX_512bit) {
      int disp_factor = tuple_table[tuple_type + mod_idx][vector_len];
      if ((disp % disp_factor) == 0) {
        int new_disp = disp / disp_factor;
        if (is8bit(new_disp)) {
          disp = new_disp;
        }
      } else {
        return false;
      }
    }
  }
  return is8bit(disp);
}
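// Illustrative note (not from the original source): for an EVEX_T1S operand
// with 64-bit input size, mod_idx selects the EVEX_T1S(3) row, giving a
// factor of 8. A displacement of 40 therefore compresses to the disp8 value
// 40 / 8 = 5 and the shorter mod=01 encoding is used; a displacement of 41
// is not divisible by 8 and forces the disp32 form.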

void Assembler::emit_operand(Register reg, Register base, Register index,
                             Address::ScaleFactor scale, int disp,
                             RelocationHolder const& rspec,
                             int rip_relative_correction) {
  bool no_relocation = rspec.type() == relocInfo::none;

  // Encode the registers as needed in the fields they are used in
  int regenc = encode(reg) << 3;
  if (base->is_valid()) {
    int baseenc = encode(base);
    if (index->is_valid()) {
      assert(scale != Address::no_scale, "inconsistent address");
      // [base + index*scale + disp]
      int indexenc = encode(index) << 3;
      if (disp == 0 && no_relocation &&
          base != rbp LP64_ONLY(&& base != r13)) {
        // [base + index*scale]
        // [00 reg 100][ss index base]
        assert(index != rsp, "illegal addressing mode");
        emit_int16(0x04 | regenc, scale << 6 | indexenc | baseenc);
      } else if (emit_compressed_disp_byte(disp) && no_relocation) {
        // [base + index*scale + imm8]
        // [01 reg 100][ss index base] imm8
        assert(index != rsp, "illegal addressing mode");
        emit_int24(0x44 | regenc, scale << 6 | indexenc | baseenc, disp & 0xFF);
      } else {
        // [base + index*scale + disp32]
        // [10 reg 100][ss index base] disp32
        assert(index != rsp, "illegal addressing mode");
        emit_int16(0x84 | regenc, scale << 6 | indexenc | baseenc);
        emit_data(disp, rspec, disp32_operand);
      }
    } else if (base == rsp LP64_ONLY(|| base == r12)) {
      // [rsp + disp]
      if (disp == 0 && no_relocation) {
        // [rsp]
        // [00 reg 100][00 100 100]
        emit_int16(0x04 | regenc, 0x24);
      } else if (emit_compressed_disp_byte(disp) && no_relocation) {
        // [rsp + imm8]
        // [01 reg 100][00 100 100] disp8
        emit_int24(0x44 | regenc, 0x24, disp & 0xFF);
      } else {
        // [rsp + imm32]
        // [10 reg 100][00 100 100] disp32
        emit_int16(0x84 | regenc, 0x24);
        emit_data(disp, rspec, disp32_operand);
      }
    } else {
      // [base + disp]
      assert(base != rsp LP64_ONLY(&& base != r12), "illegal addressing mode");
      if (disp == 0 && no_relocation &&
          base != rbp LP64_ONLY(&& base != r13)) {
        // [base]
        // [00 reg base]
        emit_int8(0x00 | regenc | baseenc);
      } else if (emit_compressed_disp_byte(disp) && no_relocation) {
        // [base + disp8]
        // [01 reg base] disp8
        emit_int16(0x40 | regenc | baseenc, disp & 0xFF);
      } else {
        // [base + disp32]
        // [10 reg base] disp32
        emit_int8(0x80 | regenc | baseenc);
        emit_data(disp, rspec, disp32_operand);
      }
    }
  } else {
    if (index->is_valid()) {
      assert(scale != Address::no_scale, "inconsistent address");
      // [index*scale + disp]
      // [00 reg 100][ss index 101] disp32
      assert(index != rsp, "illegal addressing mode");
      emit_int16(0x04 | regenc, scale << 6 | (encode(index) << 3) | 0x05);
      emit_data(disp, rspec, disp32_operand);
    } else if (!no_relocation) {
      // [disp] (64bit) RIP-RELATIVE (32bit) abs
      // [00 000 101] disp32

      emit_int8(0x05 | regenc);
      // Note that the RIP-rel. correction applies to the generated
      // disp field, but _not_ to the target address in the rspec.

      // disp was created by converting the target address minus the pc
      // at the start of the instruction. That needs more correction here.
      // intptr_t disp = target - next_ip;
      assert(inst_mark() != NULL, "must be inside InstructionMark");
      address next_ip = pc() + sizeof(int32_t) + rip_relative_correction;
      int64_t adjusted = disp;
      // Do rip-rel adjustment for 64bit
      LP64_ONLY(adjusted -= (next_ip - inst_mark()));
      assert(is_simm32(adjusted),
             "must be 32bit offset (RIP relative address)");
      emit_data((int32_t) adjusted, rspec, disp32_operand);

    } else {
      // 32bit never did this, did everything as the rip-rel/disp code above
      // [disp] ABSOLUTE
      // [00 reg 100][00 100 101] disp32
      emit_int16(0x04 | regenc, 0x25);
      emit_data(disp, rspec, disp32_operand);
    }
  }
}
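// Editor's worked example (not from the original source): for reg = rax and
// the operand [rbx + rcx*4 + 8], the encoder above takes the disp8 branch
// and emits ModRM = [01 000 100] = 0x44, SIB = [10 001 011] = 0x8B, and
// disp8 = 0x08, i.e. the bytes 44 8B 08 following the opcode.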

void Assembler::emit_operand(XMMRegister reg, Register base, Register index,
                             Address::ScaleFactor scale, int disp,
                             RelocationHolder const& rspec) {
  if (UseAVX > 2) {
    int xreg_enc = reg->encoding();
    if (xreg_enc > 15) {
      XMMRegister new_reg = as_XMMRegister(xreg_enc & 0xf);
      emit_operand((Register)new_reg, base, index, scale, disp, rspec);
      return;
    }
  }
  emit_operand((Register)reg, base, index, scale, disp, rspec);
}

void Assembler::emit_operand(XMMRegister reg, Register base, XMMRegister index,
                             Address::ScaleFactor scale, int disp,
                             RelocationHolder const& rspec) {
  if (UseAVX > 2) {
    int xreg_enc = reg->encoding();
    int xmmindex_enc = index->encoding();
    XMMRegister new_reg = as_XMMRegister(xreg_enc & 0xf);
    XMMRegister new_index = as_XMMRegister(xmmindex_enc & 0xf);
    emit_operand((Register)new_reg, base, (Register)new_index, scale, disp, rspec);
  } else {
    emit_operand((Register)reg, base, (Register)index, scale, disp, rspec);
  }
}


// Secret local extension to Assembler::WhichOperand:
#define end_pc_operand (_WhichOperand_limit)

address Assembler::locate_operand(address inst, WhichOperand which) {
  // Decode the given instruction, and return the address of
  // an embedded 32-bit operand word.

  // If "which" is disp32_operand, selects the displacement portion
  // of an effective address specifier.
  // If "which" is imm64_operand, selects the trailing immediate constant.
  // If "which" is call32_operand, selects the displacement of a call or jump.
  // Caller is responsible for ensuring that there is such an operand,
  // and that it is 32/64 bits wide.

  // If "which" is end_pc_operand, find the end of the instruction.

  address ip = inst;
  bool is_64bit = false;

  debug_only(bool has_disp32 = false);
  int tail_size = 0; // other random bytes (#32, #16, etc.) at end of insn

  again_after_prefix:
  switch (0xFF & *ip++) {

  // These convenience macros generate groups of "case" labels for the switch.
#define REP4(x) (x)+0: case (x)+1: case (x)+2: case (x)+3
#define REP8(x) (x)+0: case (x)+1: case (x)+2: case (x)+3: \
             case (x)+4: case (x)+5: case (x)+6: case (x)+7
#define REP16(x) REP8((x)+0): \
              case REP8((x)+8)

  case CS_segment:
  case SS_segment:
  case DS_segment:
  case ES_segment:
  case FS_segment:
  case GS_segment:
    // Seems dubious
    LP64_ONLY(assert(false, "shouldn't have that prefix"));
    assert(ip == inst+1, "only one prefix allowed");
    goto again_after_prefix;

  case 0x67:
  case REX:
  case REX_B:
  case REX_X:
  case REX_XB:
  case REX_R:
  case REX_RB:
  case REX_RX:
  case REX_RXB:
    NOT_LP64(assert(false, "64bit prefixes"));
    goto again_after_prefix;

  case REX_W:
  case REX_WB:
  case REX_WX:
  case REX_WXB:
  case REX_WR:
  case REX_WRB:
  case REX_WRX:
  case REX_WRXB:
    NOT_LP64(assert(false, "64bit prefixes"));
    is_64bit = true;
    goto again_after_prefix;

  case 0xFF: // pushq a; decl a; incl a; call a; jmp a
  case 0x88: // movb a, r
  case 0x89: // movl a, r
  case 0x8A: // movb r, a
  case 0x8B: // movl r, a
  case 0x8F: // popl a
    debug_only(has_disp32 = true);
    break;

  case 0x68: // pushq #32
    if (which == end_pc_operand) {
      return ip + 4;
    }
    assert(which == imm_operand && !is_64bit, "pushl has no disp32 or 64bit immediate");
    return ip; // not produced by emit_operand

  case 0x66: // movw ... (size prefix)
    again_after_size_prefix2:
    switch (0xFF & *ip++) {
    case REX:
    case REX_B:
    case REX_X:
    case REX_XB:
    case REX_R:
    case REX_RB:
    case REX_RX:
    case REX_RXB:
    case REX_W:
    case REX_WB:
    case REX_WX:
    case REX_WXB:
    case REX_WR:
    case REX_WRB:
    case REX_WRX:
    case REX_WRXB:
      NOT_LP64(assert(false, "64bit prefix found"));
      goto again_after_size_prefix2;
    case 0x8B: // movw r, a
    case 0x89: // movw a, r
      debug_only(has_disp32 = true);
      break;
    case 0xC7: // movw a, #16
      debug_only(has_disp32 = true);
      tail_size = 2; // the imm16
      break;
    case 0x0F: // several SSE/SSE2 variants
      ip--;    // reparse the 0x0F
      goto again_after_prefix;
    default:
      ShouldNotReachHere();
    }
    break;

  case REP8(0xB8): // movl/q r, #32/#64(oop?)
    if (which == end_pc_operand)  return ip + (is_64bit ? 8 : 4);
    // these asserts are somewhat nonsensical
#ifndef _LP64
    assert(which == imm_operand || which == disp32_operand,
           "which %d is_64_bit %d ip " INTPTR_FORMAT, which, is_64bit, p2i(ip));
#else
    assert((which == call32_operand || which == imm_operand) && is_64bit ||
           which == narrow_oop_operand && !is_64bit,
           "which %d is_64_bit %d ip " INTPTR_FORMAT, which, is_64bit, p2i(ip));
#endif // _LP64
    return ip;

  case 0x69: // imul r, a, #32
  case 0xC7: // movl a, #32(oop?)
    tail_size = 4;
    debug_only(has_disp32 = true); // has both kinds of operands!
    break;

  case 0x0F: // movx..., etc.
    switch (0xFF & *ip++) {
    case 0x3A: // pcmpestri
      tail_size = 1;
    case 0x38: // ptest, pmovzxbw
      ip++; // skip opcode
      debug_only(has_disp32 = true); // has both kinds of operands!
      break;

    case 0x70: // pshufd r, r/a, #8
      debug_only(has_disp32 = true); // has both kinds of operands!
    case 0x73: // psrldq r, #8
      tail_size = 1;
      break;

    case 0x12: // movlps
    case 0x28: // movaps
    case 0x2E: // ucomiss
    case 0x2F: // comiss
    case 0x54: // andps
    case 0x55: // andnps
    case 0x56: // orps
    case 0x57: // xorps
    case 0x58: // addpd
    case 0x59: // mulpd
    case 0x6E: // movd
    case 0x7E: // movd
    case 0x6F: // movdq
    case 0x7F: // movdq
    case 0xAE: // ldmxcsr, stmxcsr, fxrstor, fxsave, clflush
    case 0xFE: // paddd
      debug_only(has_disp32 = true);
      break;

    case 0xAD: // shrd r, a, %cl
    case 0xAF: // imul r, a
    case 0xBE: // movsbl r, a (movsxb)
    case 0xBF: // movswl r, a (movsxw)
    case 0xB6: // movzbl r, a (movzxb)
    case 0xB7: // movzwl r, a (movzxw)
    case REP16(0x40): // cmovl cc, r, a
    case 0xB0: // cmpxchgb
    case 0xB1: // cmpxchg
    case 0xC1: // xaddl
    case 0xC7: // cmpxchg8
    case REP16(0x90): // setcc a
      debug_only(has_disp32 = true);
      // fall out of the switch to decode the address
      break;

    case 0xC4: // pinsrw r, a, #8
      debug_only(has_disp32 = true);
    case 0xC5: // pextrw r, r, #8
      tail_size = 1; // the imm8
      break;

    case 0xAC: // shrd r, a, #8
      debug_only(has_disp32 = true);
      tail_size = 1; // the imm8
      break;

    case REP16(0x80): // jcc rdisp32
      if (which == end_pc_operand)  return ip + 4;
      assert(which == call32_operand, "jcc has no disp32 or imm");
      return ip;
    default:
      ShouldNotReachHere();
    }
    break;

  case 0x81: // addl a, #32; addl r, #32
    // also: orl, adcl, sbbl, andl, subl, xorl, cmpl
    // on 32bit in the case of cmpl, the imm might be an oop
    tail_size = 4;
    debug_only(has_disp32 = true); // has both kinds of operands!
    break;

  case 0x83: // addl a, #8; addl r, #8
    // also: orl, adcl, sbbl, andl, subl, xorl, cmpl
    debug_only(has_disp32 = true); // has both kinds of operands!
    tail_size = 1;
    break;

  case 0x9B:
    switch (0xFF & *ip++) {
    case 0xD9: // fnstcw a
      debug_only(has_disp32 = true);
      break;
    default:
      ShouldNotReachHere();
    }
    break;

  case REP4(0x00): // addb a, r; addl a, r; addb r, a; addl r, a
  case REP4(0x10): // adc...
  case REP4(0x20): // and...
  case REP4(0x30): // xor...
  case REP4(0x08): // or...
  case REP4(0x18): // sbb...
  case REP4(0x28): // sub...
  case 0xF7: // mull a
  case 0x8D: // lea r, a
  case 0x87: // xchg r, a
  case REP4(0x38): // cmp...
  case 0x85: // test r, a
    debug_only(has_disp32 = true); // has both kinds of operands!
    break;

  case 0xC1: // sal a, #8; sar a, #8; shl a, #8; shr a, #8
  case 0xC6: // movb a, #8
  case 0x80: // cmpb a, #8
  case 0x6B: // imul r, a, #8
    debug_only(has_disp32 = true); // has both kinds of operands!
    tail_size = 1; // the imm8
    break;

  case 0xC4: // VEX_3bytes
  case 0xC5: // VEX_2bytes
    assert((UseAVX > 0), "shouldn't have VEX prefix");
    assert(ip == inst+1, "no prefixes allowed");
    // C4 and C5 are also used as opcodes for PINSRW and PEXTRW instructions
    // but they have prefix 0x0F and are processed when 0x0F is processed above.
    //
    // In 32-bit mode the VEX first byte C4 and C5 alias onto LDS and LES
    // instructions (these instructions are not supported in 64-bit mode).
    // To distinguish them bits [7:6] are set in the VEX second byte since
    // ModRM byte can not be of the form 11xxxxxx in 32-bit mode. To set
    // those VEX bits REX and vvvv bits are inverted.
    //
    // Fortunately C2 doesn't generate these instructions so we don't need
    // to check for them in product version.

    // Check second byte
    NOT_LP64(assert((0xC0 & *ip) == 0xC0, "shouldn't have LDS and LES instructions"));

    int vex_opcode;
    // First byte
    if ((0xFF & *inst) == VEX_3bytes) {
      vex_opcode = VEX_OPCODE_MASK & *ip;
      ip++; // third byte
      is_64bit = ((VEX_W & *ip) == VEX_W);
    } else {
      vex_opcode = VEX_OPCODE_0F;
    }
    ip++; // opcode
    // To find the end of instruction (which == end_pc_operand).
    switch (vex_opcode) {
      case VEX_OPCODE_0F:
        switch (0xFF & *ip) {
          case 0x70: // pshufd r, r/a, #8
          case 0x71: // ps[rl|ra|ll]w r, #8
          case 0x72: // ps[rl|ra|ll]d r, #8
          case 0x73: // ps[rl|ra|ll]q r, #8
          case 0xC2: // cmp[ps|pd|ss|sd] r, r, r/a, #8
          case 0xC4: // pinsrw r, r, r/a, #8
          case 0xC5: // pextrw r/a, r, #8
          case 0xC6: // shufp[s|d] r, r, r/a, #8
            tail_size = 1; // the imm8
            break;
        }
        break;
      case VEX_OPCODE_0F_3A:
        tail_size = 1;
        break;
    }
    ip++; // skip opcode
    debug_only(has_disp32 = true); // has both kinds of operands!
    break;

  case 0x62: // EVEX_4bytes
    assert(VM_Version::supports_evex(), "shouldn't have EVEX prefix");
    assert(ip == inst+1, "no prefixes allowed");
    // no EVEX collisions, all instructions that have 0x62 opcodes
    // have EVEX versions and are subopcodes of 0x66
    ip++; // skip P0 and examine W in P1
    is_64bit = ((VEX_W & *ip) == VEX_W);
    ip++; // move to P2
    ip++; // skip P2, move to opcode
    // To find the end of instruction (which == end_pc_operand).
    switch (0xFF & *ip) {
      case 0x22: // pinsrd r, r/a, #8
      case 0x61: // pcmpestri r, r/a, #8
      case 0x70: // pshufd r, r/a, #8
      case 0x73: // psrldq r, #8
        tail_size = 1; // the imm8
        break;
      default:
        break;
    }
    ip++; // skip opcode
    debug_only(has_disp32 = true); // has both kinds of operands!
    break;

  case 0xD1: // sal a, 1; sar a, 1; shl a, 1; shr a, 1
  case 0xD3: // sal a, %cl; sar a, %cl; shl a, %cl; shr a, %cl
  case 0xD9: // fld_s a; fst_s a; fstp_s a; fldcw a
  case 0xDD: // fld_d a; fst_d a; fstp_d a
  case 0xDB: // fild_s a; fistp_s a; fld_x a; fstp_x a
  case 0xDF: // fild_d a; fistp_d a
  case 0xD8: // fadd_s a; fsubr_s a; fmul_s a; fdivr_s a; fcomp_s a
  case 0xDC: // fadd_d a; fsubr_d a; fmul_d a; fdivr_d a; fcomp_d a
  case 0xDE: // faddp_d a; fsubrp_d a; fmulp_d a; fdivrp_d a; fcompp_d a
    debug_only(has_disp32 = true);
    break;

  case 0xE8: // call rdisp32
  case 0xE9: // jmp  rdisp32
    if (which == end_pc_operand)  return ip + 4;
    assert(which == call32_operand, "call has no disp32 or imm");
    return ip;

  case 0xF0: // Lock
    goto again_after_prefix;

  case 0xF3: // For SSE
  case 0xF2: // For SSE2
    switch (0xFF & *ip++) {
    case REX:
    case REX_B:
    case REX_X:
    case REX_XB:
    case REX_R:
    case REX_RB:
    case REX_RX:
    case REX_RXB:
    case REX_W:
    case REX_WB:
    case REX_WX:
    case REX_WXB:
    case REX_WR:
    case REX_WRB:
    case REX_WRX:
    case REX_WRXB:
      NOT_LP64(assert(false, "found 64bit prefix"));
      ip++;
    default:
      ip++;
    }
    debug_only(has_disp32 = true); // has both kinds of operands!
    break;

  default:
    ShouldNotReachHere();

#undef REP8
#undef REP16
  }

  assert(which != call32_operand, "instruction is not a call, jmp, or jcc");
#ifdef _LP64
  assert(which != imm_operand, "instruction is not a movq reg, imm64");
#else
  // assert(which != imm_operand || has_imm32, "instruction has no imm32 field");
  assert(which != imm_operand || has_disp32, "instruction has no imm32 field");
#endif // LP64
  assert(which != disp32_operand || has_disp32, "instruction has no disp32 field");

  // parse the output of emit_operand
  int op2 = 0xFF & *ip++;
  int base = op2 & 0x07;
  int op3 = -1;
  const int b100 = 4;
  const int b101 = 5;
  if (base == b100 && (op2 >> 6) != 3) {
    op3 = 0xFF & *ip++;
    base = op3 & 0x07; // refetch the base
  }
  // now ip points at the disp (if any)

  switch (op2 >> 6) {
  case 0:
    // [00 reg  100][ss index base]
    // [00 reg  100][00   100  esp]
    // [00 reg base]
    // [00 reg  100][ss index  101][disp32]
    // [00 reg  101]               [disp32]

    if (base == b101) {
      if (which == disp32_operand)
        return ip;      // caller wants the disp32
      ip += 4;          // skip the disp32
    }
    break;

  case 1:
    // [01 reg  100][ss index base][disp8]
    // [01 reg  100][00   100  esp][disp8]
    // [01 reg base]               [disp8]
    ip += 1;            // skip the disp8
    break;

  case 2:
    // [10 reg  100][ss index base][disp32]
    // [10 reg  100][00   100  esp][disp32]
    // [10 reg base]               [disp32]
    if (which == disp32_operand)
      return ip;        // caller wants the disp32
    ip += 4;            // skip the disp32
    break;

  case 3:
    // [11 reg base] (not a memory addressing mode)
    break;
  }

  if (which == end_pc_operand) {
    return ip + tail_size;
  }

#ifdef _LP64
  assert(which == narrow_oop_operand && !is_64bit, "instruction is not a movl adr, imm32");
#else
  assert(which == imm_operand, "instruction has only an imm field");
#endif // LP64
  return ip;
}

address Assembler::locate_next_instruction(address inst) {
  // Secretly share code with locate_operand:
  return locate_operand(inst, end_pc_operand);
}


#ifdef ASSERT
void Assembler::check_relocation(RelocationHolder const& rspec, int format) {
  address inst = inst_mark();
  assert(inst != NULL && inst < pc(), "must point to beginning of instruction");
  address opnd;

  Relocation* r = rspec.reloc();
  if (r->type() == relocInfo::none) {
    return;
  } else if (r->is_call() || format == call32_operand) {
    // assert(format == imm32_operand, "cannot specify a nonzero format");
    opnd = locate_operand(inst, call32_operand);
  } else if (r->is_data()) {
    assert(format == imm_operand || format == disp32_operand
           LP64_ONLY(|| format == narrow_oop_operand), "format ok");
    opnd = locate_operand(inst, (WhichOperand)format);
  } else {
    assert(format == imm_operand, "cannot specify a format");
    return;
  }
  assert(opnd == pc(), "must put operand where relocs can find it");
}
#endif // ASSERT
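// Illustrative note (not from the original source): locate_operand and
// locate_next_instruction let patching code walk emitted instructions, e.g.
//
//   address next = Assembler::locate_next_instruction(inst); // end of insn
//   address disp = Assembler::locate_operand(inst, Assembler::disp32_operand);
//
// assuming the caller already knows that "inst" carries a disp32 field.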

void Assembler::emit_operand32(Register reg, Address adr) {
  assert(reg->encoding() < 8, "no extended registers");
  assert(!adr.base_needs_rex() && !adr.index_needs_rex(), "no extended registers");
  emit_operand(reg, adr._base, adr._index, adr._scale, adr._disp,
               adr._rspec);
}

void Assembler::emit_operand(Register reg, Address adr,
                             int rip_relative_correction) {
  emit_operand(reg, adr._base, adr._index, adr._scale, adr._disp,
               adr._rspec,
               rip_relative_correction);
}

void Assembler::emit_operand(XMMRegister reg, Address adr) {
  if (adr.isxmmindex()) {
    emit_operand(reg, adr._base, adr._xmmindex, adr._scale, adr._disp, adr._rspec);
  } else {
    emit_operand(reg, adr._base, adr._index, adr._scale, adr._disp,
                 adr._rspec);
  }
}

// MMX operations
void Assembler::emit_operand(MMXRegister reg, Address adr) {
  assert(!adr.base_needs_rex() && !adr.index_needs_rex(), "no extended registers");
  emit_operand((Register)reg, adr._base, adr._index, adr._scale, adr._disp, adr._rspec);
}

// work around gcc (3.2.1-7a) bug
void Assembler::emit_operand(Address adr, MMXRegister reg) {
  assert(!adr.base_needs_rex() && !adr.index_needs_rex(), "no extended registers");
  emit_operand((Register)reg, adr._base, adr._index, adr._scale, adr._disp, adr._rspec);
}


void Assembler::emit_farith(int b1, int b2, int i) {
  assert(isByte(b1) && isByte(b2), "wrong opcode");
  assert(0 <= i && i < 8, "illegal stack offset");
  emit_int16(b1, b2 + i);
}


// Now the Assembler instructions (identical for 32/64 bits)

void Assembler::adcl(Address dst, int32_t imm32) {
  InstructionMark im(this);
  prefix(dst);
  emit_arith_operand(0x81, rdx, dst, imm32);
}

void Assembler::adcl(Address dst, Register src) {
  InstructionMark im(this);
  prefix(dst, src);
  emit_int8(0x11);
  emit_operand(src, dst);
}

void Assembler::adcl(Register dst, int32_t imm32) {
  prefix(dst);
  emit_arith(0x81, 0xD0, dst, imm32);
}

void Assembler::adcl(Register dst, Address src) {
  InstructionMark im(this);
  prefix(src, dst);
  emit_int8(0x13);
  emit_operand(dst, src);
}

void Assembler::adcl(Register dst, Register src) {
  (void) prefix_and_encode(dst->encoding(), src->encoding());
  emit_arith(0x13, 0xC0, dst, src);
}

void Assembler::addl(Address dst, int32_t imm32) {
  InstructionMark im(this);
  prefix(dst);
  emit_arith_operand(0x81, rax, dst, imm32);
}

void Assembler::addb(Address dst, int imm8) {
  InstructionMark im(this);
  prefix(dst);
  emit_int8((unsigned char)0x80);
  emit_operand(rax, dst, 1);
  emit_int8(imm8);
}

void Assembler::addw(Address dst, int imm16) {
  InstructionMark im(this);
  emit_int8(0x66);
  prefix(dst);
  emit_int8((unsigned char)0x81);
  emit_operand(rax, dst, 2);
  emit_int16(imm16);
}

void Assembler::addl(Address dst, Register src) {
  InstructionMark im(this);
  prefix(dst, src);
  emit_int8(0x01);
  emit_operand(src, dst);
}

void Assembler::addl(Register dst, int32_t imm32) {
  prefix(dst);
  emit_arith(0x81, 0xC0, dst, imm32);
}

void Assembler::addl(Register dst, Address src) {
  InstructionMark im(this);
  prefix(src, dst);
  emit_int8(0x03);
  emit_operand(dst, src);
}

void Assembler::addl(Register dst, Register src) {
  (void) prefix_and_encode(dst->encoding(), src->encoding());
  emit_arith(0x03, 0xC0, dst, src);
}
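// Illustrative note (not from the original source): the immediate-to-memory
// path above goes through emit_arith_operand, so addl(Address(rbx, 8), 5)
// emits the sign-extended form 83 43 08 05, while an immediate that does
// not fit in a signed byte falls back to 81 /0 with a full imm32.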

void Assembler::addr_nop_4() {
  assert(UseAddressNop, "no CPU support");
  // 4 bytes: NOP DWORD PTR [EAX+0]
  emit_int32(0x0F,
             0x1F,
             0x40, // emit_rm(cbuf, 0x1, EAX_enc, EAX_enc);
             0);   // 8-bits offset (1 byte)
}

void Assembler::addr_nop_5() {
  assert(UseAddressNop, "no CPU support");
  // 5 bytes: NOP DWORD PTR [EAX+EAX*0+0] 8-bits offset
  emit_int32(0x0F,
             0x1F,
             0x44,  // emit_rm(cbuf, 0x1, EAX_enc, 0x4);
             0x00); // emit_rm(cbuf, 0x0, EAX_enc, EAX_enc);
  emit_int8(0);     // 8-bits offset (1 byte)
}

void Assembler::addr_nop_7() {
  assert(UseAddressNop, "no CPU support");
  // 7 bytes: NOP DWORD PTR [EAX+0] 32-bits offset
  emit_int24(0x0F,
             0x1F,
             (unsigned char)0x80);
             // emit_rm(cbuf, 0x2, EAX_enc, EAX_enc);
  emit_int32(0); // 32-bits offset (4 bytes)
}

void Assembler::addr_nop_8() {
  assert(UseAddressNop, "no CPU support");
  // 8 bytes: NOP DWORD PTR [EAX+EAX*0+0] 32-bits offset
  emit_int32(0x0F,
             0x1F,
             (unsigned char)0x84,
                    // emit_rm(cbuf, 0x2, EAX_enc, 0x4);
             0x00); // emit_rm(cbuf, 0x0, EAX_enc, EAX_enc);
  emit_int32(0);    // 32-bits offset (4 bytes)
}
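// Editor's note (not from the original source): the full byte sequences
// produced above are the Intel-recommended multi-byte NOPs:
//   addr_nop_4: 0F 1F 40 00
//   addr_nop_5: 0F 1F 44 00 00
//   addr_nop_7: 0F 1F 80 00 00 00 00
//   addr_nop_8: 0F 1F 84 00 00 00 00 00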

void Assembler::addsd(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionAttr attributes(AVX_128bit, /* rex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
  attributes.set_rex_vex_w_reverted();
  int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_F2, VEX_OPCODE_0F, &attributes);
  emit_int16(0x58, (unsigned char)(0xC0 | encode));
}

void Assembler::addsd(XMMRegister dst, Address src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionMark im(this);
  InstructionAttr attributes(AVX_128bit, /* rex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
  attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_64bit);
  attributes.set_rex_vex_w_reverted();
  simd_prefix(dst, dst, src, VEX_SIMD_F2, VEX_OPCODE_0F, &attributes);
  emit_int8(0x58);
  emit_operand(dst, src);
}

void Assembler::addss(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse(), ""));
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
  int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_F3, VEX_OPCODE_0F, &attributes);
  emit_int16(0x58, (unsigned char)(0xC0 | encode));
}

void Assembler::addss(XMMRegister dst, Address src) {
  NOT_LP64(assert(VM_Version::supports_sse(), ""));
  InstructionMark im(this);
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
  attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_32bit);
  simd_prefix(dst, dst, src, VEX_SIMD_F3, VEX_OPCODE_0F, &attributes);
  emit_int8(0x58);
  emit_operand(dst, src);
}

void Assembler::aesdec(XMMRegister dst, Address src) {
  assert(VM_Version::supports_aes(), "");
  InstructionMark im(this);
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
  simd_prefix(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int8((unsigned char)0xDE);
  emit_operand(dst, src);
}

void Assembler::aesdec(XMMRegister dst, XMMRegister src) {
  assert(VM_Version::supports_aes(), "");
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
  int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int16((unsigned char)0xDE, 0xC0 | encode);
}

void Assembler::vaesdec(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
  assert(VM_Version::supports_avx512_vaes(), "");
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_is_evex_instruction();
  int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int16((unsigned char)0xDE, (unsigned char)(0xC0 | encode));
}


void Assembler::aesdeclast(XMMRegister dst, Address src) {
  assert(VM_Version::supports_aes(), "");
  InstructionMark im(this);
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
  simd_prefix(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int8((unsigned char)0xDF);
  emit_operand(dst, src);
}

void Assembler::aesdeclast(XMMRegister dst, XMMRegister src) {
  assert(VM_Version::supports_aes(), "");
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
  int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int16((unsigned char)0xDF, (unsigned char)(0xC0 | encode));
}

void Assembler::vaesdeclast(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
  assert(VM_Version::supports_avx512_vaes(), "");
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_is_evex_instruction();
  int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int16((unsigned char)0xDF, (unsigned char)(0xC0 | encode));
}

void Assembler::aesenc(XMMRegister dst, Address src) {
  assert(VM_Version::supports_aes(), "");
  InstructionMark im(this);
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
  simd_prefix(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int8((unsigned char)0xDC);
  emit_operand(dst, src);
}

void Assembler::aesenc(XMMRegister dst, XMMRegister src) {
  assert(VM_Version::supports_aes(), "");
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
  int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int16((unsigned char)0xDC, 0xC0 | encode);
}

void Assembler::vaesenc(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
  assert(VM_Version::supports_avx512_vaes(), "requires vaes support/enabling");
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_is_evex_instruction();
  int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int16((unsigned char)0xDC, (unsigned char)(0xC0 | encode));
}

void Assembler::aesenclast(XMMRegister dst, Address src) {
  assert(VM_Version::supports_aes(), "");
  InstructionMark im(this);
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
  simd_prefix(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int8((unsigned char)0xDD);
  emit_operand(dst, src);
}

void Assembler::aesenclast(XMMRegister dst, XMMRegister src) {
  assert(VM_Version::supports_aes(), "");
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
  int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int16((unsigned char)0xDD, (unsigned char)(0xC0 | encode));
}

void Assembler::vaesenclast(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
  assert(VM_Version::supports_avx512_vaes(), "requires vaes support/enabling");
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_is_evex_instruction();
  int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int16((unsigned char)0xDD, (unsigned char)(0xC0 | encode));
}

void Assembler::andl(Address dst, int32_t imm32) {
  InstructionMark im(this);
  prefix(dst);
  emit_int8((unsigned char)0x81);
  emit_operand(rsp, dst, 4);
  emit_int32(imm32);
}

void Assembler::andl(Register dst, int32_t imm32) {
  prefix(dst);
  emit_arith(0x81, 0xE0, dst, imm32);
}

void Assembler::andl(Register dst, Address src) {
  InstructionMark im(this);
  prefix(src, dst);
  emit_int8(0x23);
  emit_operand(dst, src);
}

void Assembler::andl(Register dst, Register src) {
  (void) prefix_and_encode(dst->encoding(), src->encoding());
  emit_arith(0x23, 0xC0, dst, src);
}

void Assembler::andnl(Register dst, Register src1, Register src2) {
  assert(VM_Version::supports_bmi1(), "bit manipulation instructions not supported");
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
  int encode = vex_prefix_and_encode(dst->encoding(), src1->encoding(), src2->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_38, &attributes);
  emit_int16((unsigned char)0xF2, (unsigned char)(0xC0 | encode));
}

void Assembler::andnl(Register dst, Register src1, Address src2) {
  assert(VM_Version::supports_bmi1(), "bit manipulation instructions not supported");
  InstructionMark im(this);
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
  vex_prefix(src2, src1->encoding(), dst->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_38, &attributes);
  emit_int8((unsigned char)0xF2);
  emit_operand(dst, src2);
}

void Assembler::bsfl(Register dst, Register src) {
  int encode = prefix_and_encode(dst->encoding(), src->encoding());
  emit_int24(0x0F, (unsigned char)0xBC, (unsigned char)(0xC0 | encode));
}

void Assembler::bsrl(Register dst, Register src) {
  int encode = prefix_and_encode(dst->encoding(), src->encoding());
  emit_int24(0x0F, (unsigned char)0xBD, (unsigned char)(0xC0 | encode));
}

void Assembler::bswapl(Register reg) { // bswap
  int encode = prefix_and_encode(reg->encoding());
  emit_int16(0x0F, (unsigned char)(0xC8 | encode));
}

void Assembler::blsil(Register dst, Register src) {
  assert(VM_Version::supports_bmi1(), "bit manipulation instructions not supported");
  InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
  int encode = vex_prefix_and_encode(rbx->encoding(), dst->encoding(), src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_38, &attributes);
  emit_int16((unsigned char)0xF3, (unsigned char)(0xC0 | encode));
}

void Assembler::blsil(Register dst, Address src) {
  assert(VM_Version::supports_bmi1(), "bit manipulation instructions not supported");
  InstructionMark im(this);
  InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
  vex_prefix(src, dst->encoding(), rbx->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_38, &attributes);
  emit_int8((unsigned char)0xF3);
  emit_operand(rbx, src);
}

void Assembler::blsmskl(Register dst, Register src) {
  assert(VM_Version::supports_bmi1(), "bit manipulation instructions not supported");
  InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
  int encode = vex_prefix_and_encode(rdx->encoding(), dst->encoding(), src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_38, &attributes);
  emit_int16((unsigned char)0xF3, (unsigned char)(0xC0 | encode));
}

void Assembler::blsmskl(Register dst, Address src) {
  assert(VM_Version::supports_bmi1(), "bit manipulation instructions not supported");
  InstructionMark im(this);
  InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
  vex_prefix(src, dst->encoding(), rdx->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_38, &attributes);
  emit_int8((unsigned char)0xF3);
  emit_operand(rdx, src);
}

void Assembler::blsrl(Register dst, Register src) {
  assert(VM_Version::supports_bmi1(), "bit manipulation instructions not supported");
  InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
  int encode = vex_prefix_and_encode(rcx->encoding(), dst->encoding(), src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_38, &attributes);
  emit_int16((unsigned char)0xF3, (unsigned char)(0xC0 | encode));
}

void Assembler::blsrl(Register dst, Address src) {
  assert(VM_Version::supports_bmi1(), "bit manipulation instructions not supported");
  InstructionMark im(this);
  InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
  vex_prefix(src, dst->encoding(), rcx->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_38, &attributes);
  emit_int8((unsigned char)0xF3);
  emit_operand(rcx, src);
}
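// Illustrative note (not from the original source): the BMI1 helpers above
// implement, in dst = f(src) form:
//   andn dst, src1, src2  =>  dst = ~src1 & src2
//   blsi dst, src         =>  dst = src & -src      (isolate lowest set bit)
//   blsmsk dst, src       =>  dst = src ^ (src - 1) (mask up to lowest set bit)
//   blsr dst, src         =>  dst = src & (src - 1) (clear lowest set bit)
// The fixed registers passed as the first encoding argument (rbx, rdx, rcx)
// are not operands; they supply the /3, /2, /1 opcode-extension fields.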

void Assembler::call(Label& L, relocInfo::relocType rtype) {
  // suspect disp32 is always good
  int operand = LP64_ONLY(disp32_operand) NOT_LP64(imm_operand);

  if (L.is_bound()) {
    const int long_size = 5;
    int offs = (int)( target(L) - pc() );
    assert(offs <= 0, "assembler error");
    InstructionMark im(this);
    // 1110 1000 #32-bit disp
    emit_int8((unsigned char)0xE8);
    emit_data(offs - long_size, rtype, operand);
  } else {
    InstructionMark im(this);
    // 1110 1000 #32-bit disp
    L.add_patch_at(code(), locator());

    emit_int8((unsigned char)0xE8);
    emit_data(int(0), rtype, operand);
  }
}

void Assembler::call(Register dst) {
  int encode = prefix_and_encode(dst->encoding());
  emit_int16((unsigned char)0xFF, (unsigned char)(0xD0 | encode));
}


void Assembler::call(Address adr) {
  InstructionMark im(this);
  prefix(adr);
  emit_int8((unsigned char)0xFF);
  emit_operand(rdx, adr);
}

void Assembler::call_literal(address entry, RelocationHolder const& rspec) {
  InstructionMark im(this);
  emit_int8((unsigned char)0xE8);
  intptr_t disp = entry - (pc() + sizeof(int32_t));
  // Entry is NULL in case of a scratch emit.
  assert(entry == NULL || is_simm32(disp), "disp=" INTPTR_FORMAT " must be 32bit offset (call2)", disp);
  // Technically, should use call32_operand, but this format is
  // implied by the fact that we're emitting a call instruction.

  int operand = LP64_ONLY(disp32_operand) NOT_LP64(call32_operand);
  emit_data((int) disp, rspec, operand);
}

void Assembler::cdql() {
  emit_int8((unsigned char)0x99);
}

void Assembler::cld() {
  emit_int8((unsigned char)0xFC);
}

void Assembler::cmovl(Condition cc, Register dst, Register src) {
  NOT_LP64(guarantee(VM_Version::supports_cmov(), "illegal instruction"));
  int encode = prefix_and_encode(dst->encoding(), src->encoding());
  emit_int24(0x0F, 0x40 | cc, (unsigned char)(0xC0 | encode));
}


void Assembler::cmovl(Condition cc, Register dst, Address src) {
  NOT_LP64(guarantee(VM_Version::supports_cmov(), "illegal instruction"));
  prefix(src, dst);
  emit_int16(0x0F, 0x40 | cc);
  emit_operand(dst, src);
}

void Assembler::cmpb(Address dst, int imm8) {
  InstructionMark im(this);
  prefix(dst);
  emit_int8((unsigned char)0x80);
  emit_operand(rdi, dst, 1);
  emit_int8(imm8);
}

void Assembler::cmpl(Address dst, int32_t imm32) {
  InstructionMark im(this);
  prefix(dst);
  emit_int8((unsigned char)0x81);
  emit_operand(rdi, dst, 4);
  emit_int32(imm32);
}

void Assembler::cmpl(Register dst, int32_t imm32) {
  prefix(dst);
  emit_arith(0x81, 0xF8, dst, imm32);
}

void Assembler::cmpl(Register dst, Register src) {
  (void) prefix_and_encode(dst->encoding(), src->encoding());
  emit_arith(0x3B, 0xC0, dst, src);
}

void Assembler::cmpl(Register dst, Address src) {
  InstructionMark im(this);
  prefix(src, dst);
  emit_int8((unsigned char)0x3B);
  emit_operand(dst, src);
}

void Assembler::cmpw(Address dst, int imm16) {
  InstructionMark im(this);
  assert(!dst.base_needs_rex() && !dst.index_needs_rex(), "no extended registers");
  emit_int16(0x66, (unsigned char)0x81);
  emit_operand(rdi, dst, 2);
  emit_int16(imm16);
}

// The 32-bit cmpxchg compares the value at adr with the contents of rax,
// and stores reg into adr if so; otherwise, the value at adr is loaded into rax.
// The ZF is set if the compared values were equal, and cleared otherwise.
void Assembler::cmpxchgl(Register reg, Address adr) { // cmpxchg
  InstructionMark im(this);
  prefix(adr, reg);
  emit_int16(0x0F, (unsigned char)0xB1);
  emit_operand(reg, adr);
}

// The 8-bit cmpxchg compares the value at adr with the contents of rax,
// and stores reg into adr if so; otherwise, the value at adr is loaded into rax.
// The ZF is set if the compared values were equal, and cleared otherwise.
void Assembler::cmpxchgb(Register reg, Address adr) { // cmpxchg
  InstructionMark im(this);
  prefix(adr, reg, true);
  emit_int16(0x0F, (unsigned char)0xB0);
  emit_operand(reg, adr);
}
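// Illustrative sketch (not from the original source): callers wanting an
// atomic compare-and-swap typically emit a lock prefix first, along the
// lines of
//
//   __ lock();
//   __ cmpxchgl(new_value, addr); // rax holds the expected value
//
// where new_value and addr are hypothetical names; ZF then reports whether
// the swap happened.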

void Assembler::comisd(XMMRegister dst, Address src) {
  // NOTE: dbx seems to decode this as comiss even though the
  // 0x66 is there. Strangely ucomisd comes out correct
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionMark im(this);
  InstructionAttr attributes(AVX_128bit, /* rex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
  attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_64bit);
  attributes.set_rex_vex_w_reverted();
  simd_prefix(dst, xnoreg, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int8(0x2F);
  emit_operand(dst, src);
}

void Assembler::comisd(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionAttr attributes(AVX_128bit, /* rex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
  attributes.set_rex_vex_w_reverted();
  int encode = simd_prefix_and_encode(dst, xnoreg, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int16(0x2F, (unsigned char)(0xC0 | encode));
}

void Assembler::comiss(XMMRegister dst, Address src) {
  NOT_LP64(assert(VM_Version::supports_sse(), ""));
  InstructionMark im(this);
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
  attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_32bit);
  simd_prefix(dst, xnoreg, src, VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes);
  emit_int8(0x2F);
  emit_operand(dst, src);
}

void Assembler::comiss(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse(), ""));
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
  int encode = simd_prefix_and_encode(dst, xnoreg, src, VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes);
  emit_int16(0x2F, (unsigned char)(0xC0 | encode));
}
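// Illustrative note (not from the original source): comiss/comisd set
// ZF/PF/CF like an unsigned integer compare, so a typical sequence is
//
//   __ comiss(xmm0, xmm1);
//   __ jcc(Assembler::above, L_greater); // taken only if xmm0 > xmm1
//
// where L_greater is a hypothetical label; on an unordered (NaN) result
// ZF, PF and CF are all set, which PF distinguishes.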
v 1673 // 1674 // F2 REX.W 0F 38 F1 / r CRC32 r64, r / m64 RM Valid N.E. Accumulate CRC32 on r / m64. v 1675 void Assembler::crc32(Register crc, Register v, int8_t sizeInBytes) { 1676 assert(VM_Version::supports_sse4_2(), ""); 1677 int8_t w = 0x01; 1678 Prefix p = Prefix_EMPTY; 1679 1680 emit_int8((int8_t)0xF2); 1681 switch (sizeInBytes) { 1682 case 1: 1683 w = 0; 1684 break; 1685 case 2: 1686 case 4: 1687 break; 1688 LP64_ONLY(case 8:) 1689 // This instruction is not valid in 32 bits 1690 // Note: 1691 // http://www.intel.com/content/dam/www/public/us/en/documents/manuals/64-ia-32-architectures-software-developer-instruction-set-reference-manual-325383.pdf 1692 // 1693 // Page B - 72 Vol. 2C says 1694 // qwreg2 to qwreg 1111 0010 : 0100 1R0B : 0000 1111 : 0011 1000 : 1111 0000 : 11 qwreg1 qwreg2 1695 // mem64 to qwreg 1111 0010 : 0100 1R0B : 0000 1111 : 0011 1000 : 1111 0000 : mod qwreg r / m 1696 // F0!!! 1697 // while 3 - 208 Vol. 2A 1698 // F2 REX.W 0F 38 F1 / r CRC32 r64, r / m64 RM Valid N.E.Accumulate CRC32 on r / m64. 1699 // 1700 // the 0 on a last bit is reserved for a different flavor of this instruction : 1701 // F2 REX.W 0F 38 F0 / r CRC32 r64, r / m8 RM Valid N.E.Accumulate CRC32 on r / m8. 1702 p = REX_W; 1703 break; 1704 default: 1705 assert(0, "Unsupported value for a sizeInBytes argument"); 1706 break; 1707 } 1708 LP64_ONLY(prefix(crc, v, p);) 1709 emit_int32((int8_t)0x0F, 1710 0x38, 1711 (int8_t)(0xF0 | w), 1712 0xC0 | ((crc->encoding() & 0x7) << 3) | (v->encoding() & 7)); 1713 } 1714 1715 void Assembler::crc32(Register crc, Address adr, int8_t sizeInBytes) { 1716 assert(VM_Version::supports_sse4_2(), ""); 1717 InstructionMark im(this); 1718 int8_t w = 0x01; 1719 Prefix p = Prefix_EMPTY; 1720 1721 emit_int8((int8_t)0xF2); 1722 switch (sizeInBytes) { 1723 case 1: 1724 w = 0; 1725 break; 1726 case 2: 1727 case 4: 1728 break; 1729 LP64_ONLY(case 8:) 1730 // This instruction is not valid in 32 bits 1731 p = REX_W; 1732 break; 1733 default: 1734 assert(0, "Unsupported value for a sizeInBytes argument"); 1735 break; 1736 } 1737 LP64_ONLY(prefix(crc, adr, p);) 1738 emit_int24(0x0F, 0x38, (unsigned char)(0xF0 | w)); 1739 emit_operand(crc, adr); 1740 } 1741 1742 void Assembler::cvtdq2pd(XMMRegister dst, XMMRegister src) { 1743 NOT_LP64(assert(VM_Version::supports_sse2(), "")); 1744 InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); 1745 int encode = simd_prefix_and_encode(dst, xnoreg, src, VEX_SIMD_F3, VEX_OPCODE_0F, &attributes); 1746 emit_int16((unsigned char)0xE6, (unsigned char)(0xC0 | encode)); 1747 } 1748 1749 void Assembler::cvtdq2ps(XMMRegister dst, XMMRegister src) { 1750 NOT_LP64(assert(VM_Version::supports_sse2(), "")); 1751 InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); 1752 int encode = simd_prefix_and_encode(dst, xnoreg, src, VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes); 1753 emit_int16(0x5B, (unsigned char)(0xC0 | encode)); 1754 } 1755 1756 void Assembler::cvtsd2ss(XMMRegister dst, XMMRegister src) { 1757 NOT_LP64(assert(VM_Version::supports_sse2(), "")); 1758 InstructionAttr attributes(AVX_128bit, /* rex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false); 1759 attributes.set_rex_vex_w_reverted(); 1760 int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_F2, VEX_OPCODE_0F, &attributes); 1761 emit_int16(0x5A, (unsigned char)(0xC0 | encode)); 1762 } 1763 
void Assembler::cvtsd2ss(XMMRegister dst, Address src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionMark im(this);
  InstructionAttr attributes(AVX_128bit, /* rex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
  attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_64bit);
  attributes.set_rex_vex_w_reverted();
  simd_prefix(dst, dst, src, VEX_SIMD_F2, VEX_OPCODE_0F, &attributes);
  emit_int8(0x5A);
  emit_operand(dst, src);
}

void Assembler::cvtsi2sdl(XMMRegister dst, Register src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
  int encode = simd_prefix_and_encode(dst, dst, as_XMMRegister(src->encoding()), VEX_SIMD_F2, VEX_OPCODE_0F, &attributes);
  emit_int16(0x2A, (unsigned char)(0xC0 | encode));
}

void Assembler::cvtsi2sdl(XMMRegister dst, Address src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionMark im(this);
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
  attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_32bit);
  simd_prefix(dst, dst, src, VEX_SIMD_F2, VEX_OPCODE_0F, &attributes);
  emit_int8(0x2A);
  emit_operand(dst, src);
}

void Assembler::cvtsi2ssl(XMMRegister dst, Register src) {
  NOT_LP64(assert(VM_Version::supports_sse(), ""));
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
  int encode = simd_prefix_and_encode(dst, dst, as_XMMRegister(src->encoding()), VEX_SIMD_F3, VEX_OPCODE_0F, &attributes);
  emit_int16(0x2A, (unsigned char)(0xC0 | encode));
}

void Assembler::cvtsi2ssl(XMMRegister dst, Address src) {
  NOT_LP64(assert(VM_Version::supports_sse(), ""));
  InstructionMark im(this);
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
  attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_32bit);
  simd_prefix(dst, dst, src, VEX_SIMD_F3, VEX_OPCODE_0F, &attributes);
  emit_int8(0x2A);
  emit_operand(dst, src);
}

void Assembler::cvtsi2ssq(XMMRegister dst, Register src) {
  NOT_LP64(assert(VM_Version::supports_sse(), ""));
  InstructionAttr attributes(AVX_128bit, /* rex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
  int encode = simd_prefix_and_encode(dst, dst, as_XMMRegister(src->encoding()), VEX_SIMD_F3, VEX_OPCODE_0F, &attributes);
  emit_int16(0x2A, (unsigned char)(0xC0 | encode));
}

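// Illustrative sketch (not part of the original code): with hypothetical
// operands and no AVX in use, cvtsi2sdl(xmm0, rcx) emits F2 0F 2A C1
// (CVTSI2SD xmm0, ecx); the 'q' variant sets REX.W to convert from a
// 64-bit integer source instead.
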
void Assembler::cvtss2sd(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
  int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_F3, VEX_OPCODE_0F, &attributes);
  emit_int16(0x5A, (unsigned char)(0xC0 | encode));
}

void Assembler::cvtss2sd(XMMRegister dst, Address src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionMark im(this);
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
  attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_32bit);
  simd_prefix(dst, dst, src, VEX_SIMD_F3, VEX_OPCODE_0F, &attributes);
  emit_int8(0x5A);
  emit_operand(dst, src);
}

void Assembler::cvttsd2sil(Register dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
  int encode = simd_prefix_and_encode(as_XMMRegister(dst->encoding()), xnoreg, src, VEX_SIMD_F2, VEX_OPCODE_0F, &attributes);
  emit_int16(0x2C, (unsigned char)(0xC0 | encode));
}

void Assembler::cvttss2sil(Register dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse(), ""));
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
  int encode = simd_prefix_and_encode(as_XMMRegister(dst->encoding()), xnoreg, src, VEX_SIMD_F3, VEX_OPCODE_0F, &attributes);
  emit_int16(0x2C, (unsigned char)(0xC0 | encode));
}

void Assembler::cvttpd2dq(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  int vector_len = VM_Version::supports_avx512novl() ? AVX_512bit : AVX_128bit;
  InstructionAttr attributes(vector_len, /* rex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  int encode = simd_prefix_and_encode(dst, xnoreg, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int16((unsigned char)0xE6, (unsigned char)(0xC0 | encode));
}

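// Illustrative note (not part of the original code): the cvtt* forms convert
// with truncation toward zero. On NaN or out-of-range input the hardware
// returns the integer-indefinite value 0x80000000, which callers fix up
// separately where Java cast semantics are required.
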
void Assembler::pabsb(XMMRegister dst, XMMRegister src) {
  assert(VM_Version::supports_ssse3(), "");
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
  int encode = simd_prefix_and_encode(dst, xnoreg, src, VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int16(0x1C, (unsigned char)(0xC0 | encode));
}

void Assembler::pabsw(XMMRegister dst, XMMRegister src) {
  assert(VM_Version::supports_ssse3(), "");
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
  int encode = simd_prefix_and_encode(dst, xnoreg, src, VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int16(0x1D, (unsigned char)(0xC0 | encode));
}

void Assembler::pabsd(XMMRegister dst, XMMRegister src) {
  assert(VM_Version::supports_ssse3(), "");
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  int encode = simd_prefix_and_encode(dst, xnoreg, src, VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int16(0x1E, (unsigned char)(0xC0 | encode));
}

void Assembler::vpabsb(XMMRegister dst, XMMRegister src, int vector_len) {
  assert(vector_len == AVX_128bit? VM_Version::supports_avx() :
         vector_len == AVX_256bit? VM_Version::supports_avx2() :
         vector_len == AVX_512bit? VM_Version::supports_avx512bw() : 0, "");
  InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
  int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int16(0x1C, (unsigned char)(0xC0 | encode));
}

void Assembler::vpabsw(XMMRegister dst, XMMRegister src, int vector_len) {
  assert(vector_len == AVX_128bit? VM_Version::supports_avx() :
         vector_len == AVX_256bit? VM_Version::supports_avx2() :
         vector_len == AVX_512bit? VM_Version::supports_avx512bw() : 0, "");
  InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
  int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int16(0x1D, (unsigned char)(0xC0 | encode));
}

void Assembler::vpabsd(XMMRegister dst, XMMRegister src, int vector_len) {
  assert(vector_len == AVX_128bit? VM_Version::supports_avx() :
         vector_len == AVX_256bit? VM_Version::supports_avx2() :
         vector_len == AVX_512bit? VM_Version::supports_evex() : 0, "");
  InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int16(0x1E, (unsigned char)(0xC0 | encode));
}

void Assembler::evpabsq(XMMRegister dst, XMMRegister src, int vector_len) {
  assert(UseAVX > 2, "");
  InstructionAttr attributes(vector_len, /* rex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_is_evex_instruction();
  int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int16(0x1F, (unsigned char)(0xC0 | encode));
}

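// Illustrative sketch (not part of the original code): with hypothetical
// operands, vpabsd(xmm1, xmm2, AVX_256bit) requires AVX2 and encodes as
// VEX.256.66.0F38.WIG 1E /r; the 512-bit byte/word forms additionally
// require AVX512BW, as the asserts above spell out.
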
void Assembler::decl(Address dst) {
  // Don't use it directly. Use MacroAssembler::decrement() instead.
  InstructionMark im(this);
  prefix(dst);
  emit_int8((unsigned char)0xFF);
  emit_operand(rcx, dst);
}

void Assembler::divsd(XMMRegister dst, Address src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionMark im(this);
  InstructionAttr attributes(AVX_128bit, /* rex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
  attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_64bit);
  attributes.set_rex_vex_w_reverted();
  simd_prefix(dst, dst, src, VEX_SIMD_F2, VEX_OPCODE_0F, &attributes);
  emit_int8(0x5E);
  emit_operand(dst, src);
}

void Assembler::divsd(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionAttr attributes(AVX_128bit, /* rex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
  attributes.set_rex_vex_w_reverted();
  int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_F2, VEX_OPCODE_0F, &attributes);
  emit_int16(0x5E, (unsigned char)(0xC0 | encode));
}

void Assembler::divss(XMMRegister dst, Address src) {
  NOT_LP64(assert(VM_Version::supports_sse(), ""));
  InstructionMark im(this);
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
  attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_32bit);
  simd_prefix(dst, dst, src, VEX_SIMD_F3, VEX_OPCODE_0F, &attributes);
  emit_int8(0x5E);
  emit_operand(dst, src);
}

void Assembler::divss(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse(), ""));
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
  int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_F3, VEX_OPCODE_0F, &attributes);
  emit_int16(0x5E, (unsigned char)(0xC0 | encode));
}

void Assembler::emms() {
  NOT_LP64(assert(VM_Version::supports_mmx(), ""));
  emit_int16(0x0F, 0x77);
}

void Assembler::hlt() {
  emit_int8((unsigned char)0xF4);
}

void Assembler::idivl(Register src) {
  int encode = prefix_and_encode(src->encoding());
  emit_int16((unsigned char)0xF7, (unsigned char)(0xF8 | encode));
}

void Assembler::divl(Register src) { // Unsigned
  int encode = prefix_and_encode(src->encoding());
  emit_int16((unsigned char)0xF7, (unsigned char)(0xF0 | encode));
}

void Assembler::imull(Register src) {
  int encode = prefix_and_encode(src->encoding());
  emit_int16((unsigned char)0xF7, (unsigned char)(0xE8 | encode));
}

void Assembler::imull(Register dst, Register src) {
  int encode = prefix_and_encode(dst->encoding(), src->encoding());
  emit_int24(0x0F, (unsigned char)0xAF, (unsigned char)(0xC0 | encode));
}

void Assembler::imull(Register dst, Register src, int value) {
  int encode = prefix_and_encode(dst->encoding(), src->encoding());
  if (is8bit(value)) {
    emit_int24(0x6B, (unsigned char)(0xC0 | encode), value & 0xFF);
  } else {
    emit_int16(0x69, (unsigned char)(0xC0 | encode));
    emit_int32(value);
  }
}

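// Illustrative sketch (not part of the original code): with hypothetical
// operands, imull(rax, rbx, 16) fits in an imm8 and emits 6B C3 10, while
// imull(rax, rbx, 1000) falls back to the imm32 form 69 C3 E8 03 00 00.
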
void Assembler::imull(Register dst, Address src) {
  InstructionMark im(this);
  prefix(src, dst);
  emit_int16(0x0F, (unsigned char)0xAF);
  emit_operand(dst, src);
}

void Assembler::incl(Address dst) {
  // Don't use it directly. Use MacroAssembler::increment() instead.
  InstructionMark im(this);
  prefix(dst);
  emit_int8((unsigned char)0xFF);
  emit_operand(rax, dst);
}

void Assembler::jcc(Condition cc, Label& L, bool maybe_short) {
  InstructionMark im(this);
  assert((0 <= cc) && (cc < 16), "illegal cc");
  if (L.is_bound()) {
    address dst = target(L);
    assert(dst != NULL, "jcc most probably wrong");

    const int short_size = 2;
    const int long_size = 6;
    intptr_t offs = (intptr_t)dst - (intptr_t)pc();
    if (maybe_short && is8bit(offs - short_size)) {
      // 0111 tttn #8-bit disp
      emit_int16(0x70 | cc, (offs - short_size) & 0xFF);
    } else {
      // 0000 1111 1000 tttn #32-bit disp
      assert(is_simm32(offs - long_size),
             "must be 32bit offset (call4)");
      emit_int16(0x0F, (unsigned char)(0x80 | cc));
      emit_int32(offs - long_size);
    }
  } else {
    // Note: we could eliminate conditional jumps to this jump if the
    // condition is the same; however, that seems to be a rather unlikely case.
    // Note: use jccb() if the label to be bound is very close, to get
    // an 8-bit displacement.
    L.add_patch_at(code(), locator());
    emit_int16(0x0F, (unsigned char)(0x80 | cc));
    emit_int32(0);
  }
}

void Assembler::jccb_0(Condition cc, Label& L, const char* file, int line) {
  if (L.is_bound()) {
    const int short_size = 2;
    address entry = target(L);
#ifdef ASSERT
    intptr_t dist = (intptr_t)entry - ((intptr_t)pc() + short_size);
    intptr_t delta = short_branch_delta();
    if (delta != 0) {
      dist += (dist < 0 ? (-delta) : delta);
    }
    assert(is8bit(dist), "Displacement too large for a short jmp at %s:%d", file, line);
#endif
    intptr_t offs = (intptr_t)entry - (intptr_t)pc();
    // 0111 tttn #8-bit disp
    emit_int16(0x70 | cc, (offs - short_size) & 0xFF);
  } else {
    InstructionMark im(this);
    L.add_patch_at(code(), locator(), file, line);
    emit_int16(0x70 | cc, 0);
  }
}

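// Illustrative sketch (not part of the original code): for a bound backward
// target 16 bytes behind the current pc, offs = -16, so with maybe_short set
// jcc(equal, L) can use the short form and emits 74 EE (0x74 = 0x70 | equal,
// disp8 = -16 - 2 = 0xEE); otherwise it falls back to the 6-byte 0F 84 form.
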
void Assembler::jmp(Address adr) {
  InstructionMark im(this);
  prefix(adr);
  emit_int8((unsigned char)0xFF);
  emit_operand(rsp, adr);
}

void Assembler::jmp(Label& L, bool maybe_short) {
  if (L.is_bound()) {
    address entry = target(L);
    assert(entry != NULL, "jmp most probably wrong");
    InstructionMark im(this);
    const int short_size = 2;
    const int long_size = 5;
    intptr_t offs = entry - pc();
    if (maybe_short && is8bit(offs - short_size)) {
      emit_int16((unsigned char)0xEB, (offs - short_size) & 0xFF);
    } else {
      emit_int8((unsigned char)0xE9);
      emit_int32(offs - long_size);
    }
  } else {
    // By default, forward jumps are always 32-bit displacements, since
    // we can't yet know where the label will be bound. If you're sure that
    // the forward jump will not run beyond 256 bytes, use jmpb to
    // force an 8-bit displacement.
    InstructionMark im(this);
    L.add_patch_at(code(), locator());
    emit_int8((unsigned char)0xE9);
    emit_int32(0);
  }
}

void Assembler::jmp(Register entry) {
  int encode = prefix_and_encode(entry->encoding());
  emit_int16((unsigned char)0xFF, (unsigned char)(0xE0 | encode));
}

void Assembler::jmp_literal(address dest, RelocationHolder const& rspec) {
  InstructionMark im(this);
  emit_int8((unsigned char)0xE9);
  assert(dest != NULL, "must have a target");
  intptr_t disp = dest - (pc() + sizeof(int32_t));
  assert(is_simm32(disp), "must be 32bit offset (jmp)");
  emit_data(disp, rspec.reloc(), call32_operand);
}

void Assembler::jmpb_0(Label& L, const char* file, int line) {
  if (L.is_bound()) {
    const int short_size = 2;
    address entry = target(L);
    assert(entry != NULL, "jmp most probably wrong");
#ifdef ASSERT
    intptr_t dist = (intptr_t)entry - ((intptr_t)pc() + short_size);
    intptr_t delta = short_branch_delta();
    if (delta != 0) {
      dist += (dist < 0 ? (-delta) : delta);
    }
    assert(is8bit(dist), "Displacement too large for a short jmp at %s:%d", file, line);
#endif
    intptr_t offs = entry - pc();
    emit_int16((unsigned char)0xEB, (offs - short_size) & 0xFF);
  } else {
    InstructionMark im(this);
    L.add_patch_at(code(), locator(), file, line);
    emit_int16((unsigned char)0xEB, 0);
  }
}

void Assembler::ldmxcsr(Address src) {
  if (UseAVX > 0) {
    InstructionMark im(this);
    InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
    vex_prefix(src, 0, 0, VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes);
    emit_int8((unsigned char)0xAE);
    emit_operand(as_Register(2), src);
  } else {
    NOT_LP64(assert(VM_Version::supports_sse(), ""));
    InstructionMark im(this);
    prefix(src);
    emit_int16(0x0F, (unsigned char)0xAE);
    emit_operand(as_Register(2), src);
  }
}

void Assembler::leal(Register dst, Address src) {
  InstructionMark im(this);
#ifdef _LP64
  emit_int8(0x67); // addr32
  prefix(src, dst);
#endif // LP64
  emit_int8((unsigned char)0x8D);
  emit_operand(dst, src);
}

void Assembler::lfence() {
  emit_int24(0x0F, (unsigned char)0xAE, (unsigned char)0xE8);
}

void Assembler::lock() {
  emit_int8((unsigned char)0xF0);
}

void Assembler::lzcntl(Register dst, Register src) {
  assert(VM_Version::supports_lzcnt(), "encoding is treated as BSR");
  emit_int8((unsigned char)0xF3);
  int encode = prefix_and_encode(dst->encoding(), src->encoding());
  emit_int24(0x0F, (unsigned char)0xBD, (unsigned char)(0xC0 | encode));
}

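// Illustrative sketch (not part of the original code): with hypothetical
// operands, lzcntl(rax, rbx) emits F3 0F BD C3; on CPUs without LZCNT the
// F3 prefix is ignored and the same bytes decode as BSR, which is why the
// assert above insists on VM_Version::supports_lzcnt().
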
// Emit mfence instruction
void Assembler::mfence() {
  NOT_LP64(assert(VM_Version::supports_sse2(), "unsupported");)
  emit_int24(0x0F, (unsigned char)0xAE, (unsigned char)0xF0);
}

// Emit sfence instruction
void Assembler::sfence() {
  NOT_LP64(assert(VM_Version::supports_sse2(), "unsupported");)
  emit_int24(0x0F, (unsigned char)0xAE, (unsigned char)0xF8);
}

void Assembler::mov(Register dst, Register src) {
  LP64_ONLY(movq(dst, src)) NOT_LP64(movl(dst, src));
}

void Assembler::movapd(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  int vector_len = VM_Version::supports_avx512novl() ? AVX_512bit : AVX_128bit;
  InstructionAttr attributes(vector_len, /* rex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_rex_vex_w_reverted();
  int encode = simd_prefix_and_encode(dst, xnoreg, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int16(0x28, (unsigned char)(0xC0 | encode));
}

void Assembler::movaps(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse(), ""));
  int vector_len = VM_Version::supports_avx512novl() ? AVX_512bit : AVX_128bit;
  InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  int encode = simd_prefix_and_encode(dst, xnoreg, src, VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes);
  emit_int16(0x28, (unsigned char)(0xC0 | encode));
}

void Assembler::movlhps(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse(), ""));
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  int encode = simd_prefix_and_encode(dst, src, src, VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes);
  emit_int16(0x16, (unsigned char)(0xC0 | encode));
}

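// Illustrative sketch (not part of the original code): with hypothetical
// operands and no AVX in use, movaps(xmm1, xmm2) emits 0F 28 CA; the movapd
// form differs only in its mandatory 66 prefix.
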
void Assembler::movb(Register dst, Address src) {
  NOT_LP64(assert(dst->has_byte_register(), "must have byte register"));
  InstructionMark im(this);
  prefix(src, dst, true);
  emit_int8((unsigned char)0x8A);
  emit_operand(dst, src);
}

void Assembler::movddup(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse3(), ""));
  int vector_len = VM_Version::supports_avx512novl() ? AVX_512bit : AVX_128bit;
  InstructionAttr attributes(vector_len, /* rex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_rex_vex_w_reverted();
  int encode = simd_prefix_and_encode(dst, xnoreg, src, VEX_SIMD_F2, VEX_OPCODE_0F, &attributes);
  emit_int16(0x12, 0xC0 | encode);
}

void Assembler::kmovbl(KRegister dst, Register src) {
  assert(VM_Version::supports_avx512dq(), "");
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
  int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int16((unsigned char)0x92, (unsigned char)(0xC0 | encode));
}

void Assembler::kmovbl(Register dst, KRegister src) {
  assert(VM_Version::supports_avx512dq(), "");
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
  int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int16((unsigned char)0x93, (unsigned char)(0xC0 | encode));
}

void Assembler::kmovwl(KRegister dst, Register src) {
  assert(VM_Version::supports_evex(), "");
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
  int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes);
  emit_int16((unsigned char)0x92, (unsigned char)(0xC0 | encode));
}

void Assembler::kmovwl(Register dst, KRegister src) {
  assert(VM_Version::supports_evex(), "");
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
  int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes);
  emit_int16((unsigned char)0x93, (unsigned char)(0xC0 | encode));
}

void Assembler::kmovwl(KRegister dst, Address src) {
  assert(VM_Version::supports_evex(), "");
  InstructionMark im(this);
  InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
  vex_prefix(src, 0, dst->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes);
  emit_int8((unsigned char)0x90);
  emit_operand((Register)dst, src);
}

void Assembler::kmovdl(KRegister dst, Register src) {
  assert(VM_Version::supports_avx512bw(), "");
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
  int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_F2, VEX_OPCODE_0F, &attributes);
  emit_int16((unsigned char)0x92, (unsigned char)(0xC0 | encode));
}

void Assembler::kmovdl(Register dst, KRegister src) {
  assert(VM_Version::supports_avx512bw(), "");
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
  int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_F2, VEX_OPCODE_0F, &attributes);
  emit_int16((unsigned char)0x93, (unsigned char)(0xC0 | encode));
}

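// Illustrative sketch (not part of the original code): with hypothetical
// operands, kmovwl(k1, rax) copies the low 16 bits of rax into mask register
// k1 (VEX.L0.0F.W0 92 /r); the b/w/d/q suffixes select the 8-, 16-, 32- and
// 64-bit mask widths, with the differing CPU-feature asserts seen above.
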
void Assembler::kmovql(KRegister dst, KRegister src) {
  assert(VM_Version::supports_avx512bw(), "");
  InstructionAttr attributes(AVX_128bit, /* rex_w */ true, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
  int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes);
  emit_int16((unsigned char)0x90, (unsigned char)(0xC0 | encode));
}

void Assembler::kmovql(KRegister dst, Address src) {
  assert(VM_Version::supports_avx512bw(), "");
  InstructionMark im(this);
  InstructionAttr attributes(AVX_128bit, /* vex_w */ true, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
  vex_prefix(src, 0, dst->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes);
  emit_int8((unsigned char)0x90);
  emit_operand((Register)dst, src);
}

void Assembler::kmovql(Address dst, KRegister src) {
  assert(VM_Version::supports_avx512bw(), "");
  InstructionMark im(this);
  InstructionAttr attributes(AVX_128bit, /* vex_w */ true, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
  vex_prefix(dst, 0, src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes);
  emit_int8((unsigned char)0x90);
  emit_operand((Register)src, dst);
}

void Assembler::kmovql(KRegister dst, Register src) {
  assert(VM_Version::supports_avx512bw(), "");
  InstructionAttr attributes(AVX_128bit, /* rex_w */ true, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
  int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_F2, VEX_OPCODE_0F, &attributes);
  emit_int16((unsigned char)0x92, (unsigned char)(0xC0 | encode));
}

void Assembler::kmovql(Register dst, KRegister src) {
  assert(VM_Version::supports_avx512bw(), "");
  InstructionAttr attributes(AVX_128bit, /* rex_w */ true, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
  int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_F2, VEX_OPCODE_0F, &attributes);
  emit_int16((unsigned char)0x93, (unsigned char)(0xC0 | encode));
}

void Assembler::knotwl(KRegister dst, KRegister src) {
  assert(VM_Version::supports_evex(), "");
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
  int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes);
  emit_int16((unsigned char)0x44, (unsigned char)(0xC0 | encode));
}

// This instruction produces ZF or CF flags
void Assembler::kortestbl(KRegister src1, KRegister src2) {
  assert(VM_Version::supports_avx512dq(), "");
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
  int encode = vex_prefix_and_encode(src1->encoding(), 0, src2->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int16((unsigned char)0x98, (unsigned char)(0xC0 | encode));
}

// This instruction produces ZF or CF flags
void Assembler::kortestwl(KRegister src1, KRegister src2) {
  assert(VM_Version::supports_evex(), "");
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
  int encode = vex_prefix_and_encode(src1->encoding(), 0, src2->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes);
  emit_int16((unsigned char)0x98, (unsigned char)(0xC0 | encode));
}

// This instruction produces ZF or CF flags
void Assembler::kortestdl(KRegister src1, KRegister src2) {
  assert(VM_Version::supports_avx512bw(), "");
  InstructionAttr attributes(AVX_128bit, /* rex_w */ true, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
  int encode = vex_prefix_and_encode(src1->encoding(), 0, src2->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int16((unsigned char)0x98, (unsigned char)(0xC0 | encode));
}

// This instruction produces ZF or CF flags
void Assembler::kortestql(KRegister src1, KRegister src2) {
  assert(VM_Version::supports_avx512bw(), "");
  InstructionAttr attributes(AVX_128bit, /* rex_w */ true, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
  int encode = vex_prefix_and_encode(src1->encoding(), 0, src2->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes);
  emit_int16((unsigned char)0x98, (unsigned char)(0xC0 | encode));
}

// This instruction produces ZF or CF flags
void Assembler::ktestql(KRegister src1, KRegister src2) {
  assert(VM_Version::supports_avx512bw(), "");
  InstructionAttr attributes(AVX_128bit, /* rex_w */ true, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
  int encode = vex_prefix_and_encode(src1->encoding(), 0, src2->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes);
  emit_int16((unsigned char)0x99, (unsigned char)(0xC0 | encode));
}

void Assembler::ktestq(KRegister src1, KRegister src2) {
  assert(VM_Version::supports_avx512bw(), "");
  InstructionAttr attributes(AVX_128bit, /* rex_w */ true, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
  int encode = vex_prefix_and_encode(src1->encoding(), 0, src2->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes);
  emit_int16((unsigned char)0x99, (unsigned char)(0xC0 | encode));
}

void Assembler::ktestd(KRegister src1, KRegister src2) {
  assert(VM_Version::supports_avx512bw(), "");
  InstructionAttr attributes(AVX_128bit, /* rex_w */ true, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
  int encode = vex_prefix_and_encode(src1->encoding(), 0, src2->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int16((unsigned char)0x99, (unsigned char)(0xC0 | encode));
}

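// Illustrative sketch (not part of the original code): a typical use of the
// kortest family is to branch on a vector comparison, e.g. compare into k1
// and then kortestwl(k1, k1), which sets ZF iff the 16-bit mask is all zero
// and CF iff it is all ones, so a following jcc can test "no lanes matched".
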
void Assembler::movb(Address dst, int imm8) {
  InstructionMark im(this);
  prefix(dst);
  emit_int8((unsigned char)0xC6);
  emit_operand(rax, dst, 1);
  emit_int8(imm8);
}

void Assembler::movb(Address dst, Register src) {
  assert(src->has_byte_register(), "must have byte register");
  InstructionMark im(this);
  prefix(dst, src, true);
  emit_int8((unsigned char)0x88);
  emit_operand(src, dst);
}

void Assembler::movdl(XMMRegister dst, Register src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
  int encode = simd_prefix_and_encode(dst, xnoreg, as_XMMRegister(src->encoding()), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int16(0x6E, (unsigned char)(0xC0 | encode));
}

void Assembler::movdl(Register dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
  // swap src/dst to get correct prefix
  int encode = simd_prefix_and_encode(src, xnoreg, as_XMMRegister(dst->encoding()), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int16(0x7E, (unsigned char)(0xC0 | encode));
}

void Assembler::movdl(XMMRegister dst, Address src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionMark im(this);
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
  attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_32bit);
  simd_prefix(dst, xnoreg, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int8(0x6E);
  emit_operand(dst, src);
}

void Assembler::movdl(Address dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionMark im(this);
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
  attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_32bit);
  simd_prefix(src, xnoreg, dst, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int8(0x7E);
  emit_operand(src, dst);
}

void Assembler::movdqa(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  int encode = simd_prefix_and_encode(dst, xnoreg, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int16(0x6F, (unsigned char)(0xC0 | encode));
}

void Assembler::movdqa(XMMRegister dst, Address src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionMark im(this);
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_address_attributes(/* tuple_type */ EVEX_FVM, /* input_size_in_bits */ EVEX_NObit);
  simd_prefix(dst, xnoreg, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int8(0x6F);
  emit_operand(dst, src);
}

void Assembler::movdqu(XMMRegister dst, Address src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionMark im(this);
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_address_attributes(/* tuple_type */ EVEX_FVM, /* input_size_in_bits */ EVEX_NObit);
  simd_prefix(dst, xnoreg, src, VEX_SIMD_F3, VEX_OPCODE_0F, &attributes);
  emit_int8(0x6F);
  emit_operand(dst, src);
}

void Assembler::movdqu(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  int encode = simd_prefix_and_encode(dst, xnoreg, src, VEX_SIMD_F3, VEX_OPCODE_0F, &attributes);
  emit_int16(0x6F, (unsigned char)(0xC0 | encode));
}

void Assembler::movdqu(Address dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionMark im(this);
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_address_attributes(/* tuple_type */ EVEX_FVM, /* input_size_in_bits */ EVEX_NObit);
  attributes.reset_is_clear_context();
  simd_prefix(src, xnoreg, dst, VEX_SIMD_F3, VEX_OPCODE_0F, &attributes);
  emit_int8(0x7F);
  emit_operand(src, dst);
}

// Move Unaligned 256bit Vector
void Assembler::vmovdqu(XMMRegister dst, XMMRegister src) {
  assert(UseAVX > 0, "");
  InstructionAttr attributes(AVX_256bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_F3, VEX_OPCODE_0F, &attributes);
  emit_int16(0x6F, (unsigned char)(0xC0 | encode));
}

void Assembler::vmovdqu(XMMRegister dst, Address src) {
  assert(UseAVX > 0, "");
  InstructionMark im(this);
  InstructionAttr attributes(AVX_256bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_address_attributes(/* tuple_type */ EVEX_FVM, /* input_size_in_bits */ EVEX_NObit);
  vex_prefix(src, 0, dst->encoding(), VEX_SIMD_F3, VEX_OPCODE_0F, &attributes);
  emit_int8(0x6F);
  emit_operand(dst, src);
}

void Assembler::vmovdqu(Address dst, XMMRegister src) {
  assert(UseAVX > 0, "");
  InstructionMark im(this);
  InstructionAttr attributes(AVX_256bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_address_attributes(/* tuple_type */ EVEX_FVM, /* input_size_in_bits */ EVEX_NObit);
  attributes.reset_is_clear_context();
  // swap src<->dst for encoding
  assert(src != xnoreg, "sanity");
  vex_prefix(dst, 0, src->encoding(), VEX_SIMD_F3, VEX_OPCODE_0F, &attributes);
  emit_int8(0x7F);
  emit_operand(src, dst);
}

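// Illustrative sketch (not part of the original code): with hypothetical
// operands, on a pre-AVX-512 machine vmovdqu(xmm0, Address(rsi, 0)) typically
// emits the 256-bit load C5 FE 6F 06 (VEX.256.F3.0F 6F /r); ymm registers
// are addressed here through their XMMRegister aliases.
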
// Move Unaligned EVEX enabled Vector (programmable: 8, 16, 32, 64)
void Assembler::evmovdqub(XMMRegister dst, XMMRegister src, int vector_len) {
  assert(VM_Version::supports_evex(), "");
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_is_evex_instruction();
  int prefix = (_legacy_mode_bw) ? VEX_SIMD_F2 : VEX_SIMD_F3;
  int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), (Assembler::VexSimdPrefix)prefix, VEX_OPCODE_0F, &attributes);
  emit_int16(0x6F, (unsigned char)(0xC0 | encode));
}

void Assembler::evmovdqub(XMMRegister dst, Address src, int vector_len) {
  assert(VM_Version::supports_evex(), "");
  InstructionMark im(this);
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
  int prefix = (_legacy_mode_bw) ? VEX_SIMD_F2 : VEX_SIMD_F3;
  attributes.set_address_attributes(/* tuple_type */ EVEX_FVM, /* input_size_in_bits */ EVEX_NObit);
  attributes.set_is_evex_instruction();
  vex_prefix(src, 0, dst->encoding(), (Assembler::VexSimdPrefix)prefix, VEX_OPCODE_0F, &attributes);
  emit_int8(0x6F);
  emit_operand(dst, src);
}

void Assembler::evmovdqub(Address dst, XMMRegister src, int vector_len) {
  assert(VM_Version::supports_evex(), "");
  assert(src != xnoreg, "sanity");
  InstructionMark im(this);
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
  int prefix = (_legacy_mode_bw) ? VEX_SIMD_F2 : VEX_SIMD_F3;
  attributes.set_address_attributes(/* tuple_type */ EVEX_FVM, /* input_size_in_bits */ EVEX_NObit);
  attributes.set_is_evex_instruction();
  vex_prefix(dst, 0, src->encoding(), (Assembler::VexSimdPrefix)prefix, VEX_OPCODE_0F, &attributes);
  emit_int8(0x7F);
  emit_operand(src, dst);
}

void Assembler::evmovdqub(XMMRegister dst, KRegister mask, Address src, int vector_len) {
  assert(VM_Version::supports_avx512vlbw(), "");
  InstructionMark im(this);
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
  attributes.set_address_attributes(/* tuple_type */ EVEX_FVM, /* input_size_in_bits */ EVEX_NObit);
  attributes.set_embedded_opmask_register_specifier(mask);
  attributes.set_is_evex_instruction();
  vex_prefix(src, 0, dst->encoding(), VEX_SIMD_F2, VEX_OPCODE_0F, &attributes);
  emit_int8(0x6F);
  emit_operand(dst, src);
}

void Assembler::evmovdquw(XMMRegister dst, Address src, int vector_len) {
  assert(VM_Version::supports_evex(), "");
  InstructionMark im(this);
  InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_address_attributes(/* tuple_type */ EVEX_FVM, /* input_size_in_bits */ EVEX_NObit);
  attributes.set_is_evex_instruction();
  int prefix = (_legacy_mode_bw) ? VEX_SIMD_F2 : VEX_SIMD_F3;
  vex_prefix(src, 0, dst->encoding(), (Assembler::VexSimdPrefix)prefix, VEX_OPCODE_0F, &attributes);
  emit_int8(0x6F);
  emit_operand(dst, src);
}

void Assembler::evmovdquw(XMMRegister dst, KRegister mask, Address src, int vector_len) {
  assert(VM_Version::supports_avx512vlbw(), "");
  InstructionMark im(this);
  InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
  attributes.set_address_attributes(/* tuple_type */ EVEX_FVM, /* input_size_in_bits */ EVEX_NObit);
  attributes.set_embedded_opmask_register_specifier(mask);
  attributes.set_is_evex_instruction();
  vex_prefix(src, 0, dst->encoding(), VEX_SIMD_F2, VEX_OPCODE_0F, &attributes);
  emit_int8(0x6F);
  emit_operand(dst, src);
}

void Assembler::evmovdquw(Address dst, XMMRegister src, int vector_len) {
  assert(VM_Version::supports_evex(), "");
  assert(src != xnoreg, "sanity");
  InstructionMark im(this);
  InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_address_attributes(/* tuple_type */ EVEX_FVM, /* input_size_in_bits */ EVEX_NObit);
  attributes.set_is_evex_instruction();
  int prefix = (_legacy_mode_bw) ? VEX_SIMD_F2 : VEX_SIMD_F3;
  vex_prefix(dst, 0, src->encoding(), (Assembler::VexSimdPrefix)prefix, VEX_OPCODE_0F, &attributes);
  emit_int8(0x7F);
  emit_operand(src, dst);
}

void Assembler::evmovdquw(Address dst, KRegister mask, XMMRegister src, int vector_len) {
  assert(VM_Version::supports_avx512vlbw(), "");
  assert(src != xnoreg, "sanity");
  InstructionMark im(this);
  InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
  attributes.set_address_attributes(/* tuple_type */ EVEX_FVM, /* input_size_in_bits */ EVEX_NObit);
  attributes.reset_is_clear_context();
  attributes.set_embedded_opmask_register_specifier(mask);
  attributes.set_is_evex_instruction();
  vex_prefix(dst, 0, src->encoding(), VEX_SIMD_F2, VEX_OPCODE_0F, &attributes);
  emit_int8(0x7F);
  emit_operand(src, dst);
}

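// Illustrative sketch (not part of the original code): the KRegister
// overloads emit an EVEX write-masked move; e.g., with hypothetical operands,
// evmovdqub(xmm0, k2, Address(rsi, 0), AVX_512bit) loads only the byte lanes
// whose bits are set in k2, which is how partial (tail) vector copies are
// expressed.
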
void Assembler::evmovdqul(XMMRegister dst, XMMRegister src, int vector_len) {
  assert(VM_Version::supports_evex(), "");
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_is_evex_instruction();
  int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_F3, VEX_OPCODE_0F, &attributes);
  emit_int16(0x6F, (unsigned char)(0xC0 | encode));
}

void Assembler::evmovdqul(XMMRegister dst, Address src, int vector_len) {
  assert(VM_Version::supports_evex(), "");
  InstructionMark im(this);
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_address_attributes(/* tuple_type */ EVEX_FVM, /* input_size_in_bits */ EVEX_NObit);
  attributes.set_is_evex_instruction();
  vex_prefix(src, 0, dst->encoding(), VEX_SIMD_F3, VEX_OPCODE_0F, &attributes);
  emit_int8(0x6F);
  emit_operand(dst, src);
}

void Assembler::evmovdqul(Address dst, XMMRegister src, int vector_len) {
  assert(VM_Version::supports_evex(), "");
  assert(src != xnoreg, "sanity");
  InstructionMark im(this);
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_address_attributes(/* tuple_type */ EVEX_FVM, /* input_size_in_bits */ EVEX_NObit);
  attributes.reset_is_clear_context();
  attributes.set_is_evex_instruction();
  vex_prefix(dst, 0, src->encoding(), VEX_SIMD_F3, VEX_OPCODE_0F, &attributes);
  emit_int8(0x7F);
  emit_operand(src, dst);
}

void Assembler::evmovdquq(XMMRegister dst, XMMRegister src, int vector_len) {
  assert(VM_Version::supports_evex(), "");
  InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_is_evex_instruction();
  int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_F3, VEX_OPCODE_0F, &attributes);
  emit_int16(0x6F, (unsigned char)(0xC0 | encode));
}

void Assembler::evmovdquq(XMMRegister dst, Address src, int vector_len) {
  assert(VM_Version::supports_evex(), "");
  InstructionMark im(this);
  InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_address_attributes(/* tuple_type */ EVEX_FVM, /* input_size_in_bits */ EVEX_NObit);
  attributes.set_is_evex_instruction();
  vex_prefix(src, 0, dst->encoding(), VEX_SIMD_F3, VEX_OPCODE_0F, &attributes);
  emit_int8(0x6F);
  emit_operand(dst, src);
}

void Assembler::evmovdquq(Address dst, XMMRegister src, int vector_len) {
  assert(VM_Version::supports_evex(), "");
  assert(src != xnoreg, "sanity");
  InstructionMark im(this);
  InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_address_attributes(/* tuple_type */ EVEX_FVM, /* input_size_in_bits */ EVEX_NObit);
  attributes.reset_is_clear_context();
  attributes.set_is_evex_instruction();
  vex_prefix(dst, 0, src->encoding(), VEX_SIMD_F3, VEX_OPCODE_0F, &attributes);
  emit_int8(0x7F);
  emit_operand(src, dst);
}

// Uses zero extension on 64-bit

void Assembler::movl(Register dst, int32_t imm32) {
  int encode = prefix_and_encode(dst->encoding());
  emit_int8((unsigned char)(0xB8 | encode));
  emit_int32(imm32);
}

void Assembler::movl(Register dst, Register src) {
  int encode = prefix_and_encode(dst->encoding(), src->encoding());
  emit_int16((unsigned char)0x8B, (unsigned char)(0xC0 | encode));
}

void Assembler::movl(Register dst, Address src) {
  InstructionMark im(this);
  prefix(src, dst);
  emit_int8((unsigned char)0x8B);
  emit_operand(dst, src);
}

void Assembler::movl(Address dst, int32_t imm32) {
  InstructionMark im(this);
  prefix(dst);
  emit_int8((unsigned char)0xC7);
  emit_operand(rax, dst, 4);
  emit_int32(imm32);
}

void Assembler::movl(Address dst, Register src) {
  InstructionMark im(this);
  prefix(dst, src);
  emit_int8((unsigned char)0x89);
  emit_operand(src, dst);
}

// Newer CPUs require the use of movsd and movss to avoid a partial register
// stall when loading from memory. But for old Opterons, use movlpd instead
// of movsd. The selection is done in MacroAssembler::movdbl() and movflt().
void Assembler::movlpd(XMMRegister dst, Address src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionMark im(this);
  InstructionAttr attributes(AVX_128bit, /* rex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_64bit);
  attributes.set_rex_vex_w_reverted();
  simd_prefix(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int8(0x12);
  emit_operand(dst, src);
}

void Assembler::movq(MMXRegister dst, Address src) {
  assert(VM_Version::supports_mmx(), "");
  emit_int16(0x0F, 0x6F);
  emit_operand(dst, src);
}

void Assembler::movq(Address dst, MMXRegister src) {
  assert(VM_Version::supports_mmx(), "");
  emit_int16(0x0F, 0x7F);
  // workaround gcc (3.2.1-7a) bug
  // In that version of gcc with only an emit_operand(MMX, Address)
  // gcc will tail jump and try to reverse the parameters completely,
  // obliterating dst in the process. By having a version available
  // that doesn't need to swap the args at the tail jump, the bug is
  // avoided.
  emit_operand(dst, src);
}

void Assembler::movq(XMMRegister dst, Address src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionMark im(this);
  InstructionAttr attributes(AVX_128bit, /* rex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
  attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_64bit);
  attributes.set_rex_vex_w_reverted();
  simd_prefix(dst, xnoreg, src, VEX_SIMD_F3, VEX_OPCODE_0F, &attributes);
  emit_int8(0x7E);
  emit_operand(dst, src);
}

void Assembler::movq(Address dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionMark im(this);
  InstructionAttr attributes(AVX_128bit, /* rex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
  attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_64bit);
  attributes.set_rex_vex_w_reverted();
  simd_prefix(src, xnoreg, dst, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int8((unsigned char)0xD6);
  emit_operand(src, dst);
}

void Assembler::movsbl(Register dst, Address src) { // movsxb
  InstructionMark im(this);
  prefix(src, dst);
  emit_int16(0x0F, (unsigned char)0xBE);
  emit_operand(dst, src);
}

void Assembler::movsbl(Register dst, Register src) { // movsxb
  NOT_LP64(assert(src->has_byte_register(), "must have byte register"));
  int encode = prefix_and_encode(dst->encoding(), false, src->encoding(), true);
  emit_int24(0x0F, (unsigned char)0xBE, (unsigned char)(0xC0 | encode));
}

void Assembler::movsd(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionAttr attributes(AVX_128bit, /* rex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
  attributes.set_rex_vex_w_reverted();
  int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_F2, VEX_OPCODE_0F, &attributes);
  emit_int16(0x10, (unsigned char)(0xC0 | encode));
}

void Assembler::movsd(XMMRegister dst, Address src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionMark im(this);
  InstructionAttr attributes(AVX_128bit, /* rex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
  attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_64bit);
  attributes.set_rex_vex_w_reverted();
  simd_prefix(dst, xnoreg, src, VEX_SIMD_F2, VEX_OPCODE_0F, &attributes);
  emit_int8(0x10);
  emit_operand(dst, src);
}

void Assembler::movsd(Address dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionMark im(this);
  InstructionAttr attributes(AVX_128bit, /* rex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
  attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_64bit);
  attributes.reset_is_clear_context();
  attributes.set_rex_vex_w_reverted();
  simd_prefix(src, xnoreg, dst, VEX_SIMD_F2, VEX_OPCODE_0F, &attributes);
  emit_int8(0x11);
  emit_operand(src, dst);
}

void Assembler::movss(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse(), ""));
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
  int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_F3, VEX_OPCODE_0F, &attributes);
  emit_int16(0x10, (unsigned char)(0xC0 | encode));
}

void Assembler::movss(XMMRegister dst, Address src) {
  NOT_LP64(assert(VM_Version::supports_sse(), ""));
  InstructionMark im(this);
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
  attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_32bit);
  simd_prefix(dst, xnoreg, src, VEX_SIMD_F3, VEX_OPCODE_0F, &attributes);
  emit_int8(0x10);
  emit_operand(dst, src);
}

void Assembler::movss(Address dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse(), ""));
  InstructionMark im(this);
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
  attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_32bit);
  attributes.reset_is_clear_context();
  simd_prefix(src, xnoreg, dst, VEX_SIMD_F3, VEX_OPCODE_0F, &attributes);
  emit_int8(0x11);
  emit_operand(src, dst);
}

void Assembler::movswl(Register dst, Address src) { // movsxw
  InstructionMark im(this);
  prefix(src, dst);
  emit_int16(0x0F, (unsigned char)0xBF);
  emit_operand(dst, src);
}

void Assembler::movswl(Register dst, Register src) { // movsxw
  int encode = prefix_and_encode(dst->encoding(), src->encoding());
  emit_int24(0x0F, (unsigned char)0xBF, (unsigned char)(0xC0 | encode));
}

void Assembler::movw(Address dst, int imm16) {
  InstructionMark im(this);
  emit_int8(0x66); // switch to 16-bit mode
  prefix(dst);
  emit_int8((unsigned char)0xC7);
  emit_operand(rax, dst, 2);
  emit_int16(imm16);
}

void Assembler::movw(Register dst, Address src) {
  InstructionMark im(this);
  emit_int8(0x66);
  prefix(src, dst);
  emit_int8((unsigned char)0x8B);
  emit_operand(dst, src);
}

void Assembler::movw(Address dst, Register src) {
  InstructionMark im(this);
  emit_int8(0x66);
  prefix(dst, src);
  emit_int8((unsigned char)0x89);
  emit_operand(src, dst);
}

void Assembler::movzbl(Register dst, Address src) { // movzxb
  InstructionMark im(this);
  prefix(src, dst);
  emit_int16(0x0F, (unsigned char)0xB6);
  emit_operand(dst, src);
}

void Assembler::movzbl(Register dst, Register src) { // movzxb
  NOT_LP64(assert(src->has_byte_register(), "must have byte register"));
  int encode = prefix_and_encode(dst->encoding(), false, src->encoding(), true);
  emit_int24(0x0F, (unsigned char)0xB6, 0xC0 | encode);
}

void Assembler::movzwl(Register dst, Address src) { // movzxw
  InstructionMark im(this);
  prefix(src, dst);
  emit_int16(0x0F, (unsigned char)0xB7);
  emit_operand(dst, src);
}

void Assembler::movzwl(Register dst, Register src) { // movzxw
  int encode = prefix_and_encode(dst->encoding(), src->encoding());
  emit_int24(0x0F, (unsigned char)0xB7, 0xC0 | encode);
}

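// Illustrative sketch (not part of the original code): with hypothetical
// operands, movzbl(rcx, rax) zero-extends al into ecx and emits 0F B6 C8,
// while movsbl sign-extends via 0F BE; the w-variants (B7/BF) do the same
// for 16-bit sources.
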
prefix(src); 2900 emit_int8((unsigned char)0xF7); 2901 emit_operand(rsp, src); 2902 } 2903
2904 void Assembler::mull(Register src) { 2905 int encode = prefix_and_encode(src->encoding()); 2906 emit_int16((unsigned char)0xF7, (unsigned char)(0xE0 | encode)); 2907 } 2908
2909 void Assembler::mulsd(XMMRegister dst, Address src) { 2910 NOT_LP64(assert(VM_Version::supports_sse2(), "")); 2911 InstructionMark im(this); 2912 InstructionAttr attributes(AVX_128bit, /* rex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false); 2913 attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_64bit); 2914 attributes.set_rex_vex_w_reverted(); 2915 simd_prefix(dst, dst, src, VEX_SIMD_F2, VEX_OPCODE_0F, &attributes); 2916 emit_int8(0x59); 2917 emit_operand(dst, src); 2918 } 2919
2920 void Assembler::mulsd(XMMRegister dst, XMMRegister src) { 2921 NOT_LP64(assert(VM_Version::supports_sse2(), "")); 2922 InstructionAttr attributes(AVX_128bit, /* rex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false); 2923 attributes.set_rex_vex_w_reverted(); 2924 int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_F2, VEX_OPCODE_0F, &attributes); 2925 emit_int16(0x59, (unsigned char)(0xC0 | encode)); 2926 } 2927
2928 void Assembler::mulss(XMMRegister dst, Address src) { 2929 NOT_LP64(assert(VM_Version::supports_sse(), "")); 2930 InstructionMark im(this); 2931 InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false); 2932 attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_32bit); 2933 simd_prefix(dst, dst, src, VEX_SIMD_F3, VEX_OPCODE_0F, &attributes); 2934 emit_int8(0x59); 2935 emit_operand(dst, src); 2936 } 2937
2938 void Assembler::mulss(XMMRegister dst, XMMRegister src) { 2939 NOT_LP64(assert(VM_Version::supports_sse(), "")); 2940 InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false); 2941 int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_F3, VEX_OPCODE_0F, &attributes); 2942 emit_int16(0x59, (unsigned char)(0xC0 | encode)); 2943 } 2944
2945 void Assembler::negl(Register dst) { 2946 int encode = prefix_and_encode(dst->encoding()); 2947 emit_int16((unsigned char)0xF7, (unsigned char)(0xD8 | encode)); 2948 } 2949
2950 void Assembler::nop(int i) { 2951 #ifdef ASSERT 2952 assert(i > 0, " "); 2953 // The fancy nops aren't currently recognized by debuggers, which makes it a 2954 // pain to disassemble code while debugging. If asserts are on, speed is 2955 // clearly not an issue, so simply use the traditional single-byte nop 2956 // for alignment.
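  // Illustrative example (editorial note): under ASSERT, nop(5) therefore emits
  // five 0x90 bytes, where the product paths below would emit the single
  // 5-byte form 0x0F 0x1F 0x44 0x00 0x00.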
2957 2958 for (; i > 0 ; i--) emit_int8((unsigned char)0x90); 2959 return; 2960
2961 #endif // ASSERT 2962
2963 if (UseAddressNop && VM_Version::is_intel()) { 2964 // 2965 // Using multi-byte nops "0x0F 0x1F [address]" for Intel 2966 // 1: 0x90 2967 // 2: 0x66 0x90 2968 // 3: 0x66 0x66 0x90 (don't use "0x0F 0x1F 0x00" - need patching safe padding) 2969 // 4: 0x0F 0x1F 0x40 0x00 2970 // 5: 0x0F 0x1F 0x44 0x00 0x00 2971 // 6: 0x66 0x0F 0x1F 0x44 0x00 0x00 2972 // 7: 0x0F 0x1F 0x80 0x00 0x00 0x00 0x00 2973 // 8: 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00 2974 // 9: 0x66 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00 2975 // 10: 0x66 0x66 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00 2976 // 11: 0x66 0x66 0x66 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00 2977
2978 // The remaining encodings are Intel-specific - don't use consecutive address nops 2979
2980 // 12: 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00 0x66 0x66 0x66 0x90 2981 // 13: 0x66 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00 0x66 0x66 0x66 0x90 2982 // 14: 0x66 0x66 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00 0x66 0x66 0x66 0x90 2983 // 15: 0x66 0x66 0x66 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00 0x66 0x66 0x66 0x90 2984
2985 while(i >= 15) { 2986 // For Intel don't generate consecutive address nops (mix with regular nops) 2987 i -= 15; 2988 emit_int24(0x66, 0x66, 0x66); 2989 addr_nop_8(); 2990 emit_int32(0x66, 0x66, 0x66, (unsigned char)0x90); 2991 } 2992 switch (i) { 2993 case 14: 2994 emit_int8(0x66); // size prefix 2995 case 13: 2996 emit_int8(0x66); // size prefix 2997 case 12: 2998 addr_nop_8(); 2999 emit_int32(0x66, 0x66, 0x66, (unsigned char)0x90); 3000 break; 3001 case 11: 3002 emit_int8(0x66); // size prefix 3003 case 10: 3004 emit_int8(0x66); // size prefix 3005 case 9: 3006 emit_int8(0x66); // size prefix 3007 case 8: 3008 addr_nop_8(); 3009 break; 3010 case 7: 3011 addr_nop_7(); 3012 break; 3013 case 6: 3014 emit_int8(0x66); // size prefix 3015 case 5: 3016 addr_nop_5(); 3017 break; 3018 case 4: 3019 addr_nop_4(); 3020 break; 3021 case 3: 3022 // Don't use "0x0F 0x1F 0x00" - need patching safe padding 3023 emit_int8(0x66); // size prefix 3024 case 2: 3025 emit_int8(0x66); // size prefix 3026 case 1: 3027 emit_int8((unsigned char)0x90); 3028 // nop 3029 break; 3030 default: 3031 assert(i == 0, " "); 3032 } 3033 return; 3034 } 3035 if (UseAddressNop && VM_Version::is_amd_family()) { 3036 // 3037 // Using multi-byte nops "0x0F 0x1F [address]" for AMD.
3038 // 1: 0x90 3039 // 2: 0x66 0x90 3040 // 3: 0x66 0x66 0x90 (don't use "0x0F 0x1F 0x00" - need patching safe padding) 3041 // 4: 0x0F 0x1F 0x40 0x00 3042 // 5: 0x0F 0x1F 0x44 0x00 0x00 3043 // 6: 0x66 0x0F 0x1F 0x44 0x00 0x00 3044 // 7: 0x0F 0x1F 0x80 0x00 0x00 0x00 0x00 3045 // 8: 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00 3046 // 9: 0x66 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00 3047 // 10: 0x66 0x66 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00 3048 // 11: 0x66 0x66 0x66 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00 3049
3050 // The remaining encodings are AMD-specific - use consecutive address nops 3051
3052 // 12: 0x66 0x0F 0x1F 0x44 0x00 0x00 0x66 0x0F 0x1F 0x44 0x00 0x00 3053 // 13: 0x0F 0x1F 0x80 0x00 0x00 0x00 0x00 0x66 0x0F 0x1F 0x44 0x00 0x00 3054 // 14: 0x0F 0x1F 0x80 0x00 0x00 0x00 0x00 0x0F 0x1F 0x80 0x00 0x00 0x00 0x00 3055 // 15: 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00 0x0F 0x1F 0x80 0x00 0x00 0x00 0x00 3056 // 16: 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00 3057 // Size prefixes (0x66) are added for larger sizes 3058
3059 while(i >= 22) { 3060 i -= 11; 3061 emit_int24(0x66, 0x66, 0x66); 3062 addr_nop_8(); 3063 } 3064 // Generate the first nop for sizes from 12 to 21 3065 switch (i) { 3066 case 21: 3067 i -= 1; 3068 emit_int8(0x66); // size prefix 3069 case 20: 3070 case 19: 3071 i -= 1; 3072 emit_int8(0x66); // size prefix 3073 case 18: 3074 case 17: 3075 i -= 1; 3076 emit_int8(0x66); // size prefix 3077 case 16: 3078 case 15: 3079 i -= 8; 3080 addr_nop_8(); 3081 break; 3082 case 14: 3083 case 13: 3084 i -= 7; 3085 addr_nop_7(); 3086 break; 3087 case 12: 3088 i -= 6; 3089 emit_int8(0x66); // size prefix 3090 addr_nop_5(); 3091 break; 3092 default: 3093 assert(i < 12, " "); 3094 } 3095
3096 // Generate the second nop for sizes from 1 to 11 3097 switch (i) { 3098 case 11: 3099 emit_int8(0x66); // size prefix 3100 case 10: 3101 emit_int8(0x66); // size prefix 3102 case 9: 3103 emit_int8(0x66); // size prefix 3104 case 8: 3105 addr_nop_8(); 3106 break; 3107 case 7: 3108 addr_nop_7(); 3109 break; 3110 case 6: 3111 emit_int8(0x66); // size prefix 3112 case 5: 3113 addr_nop_5(); 3114 break; 3115 case 4: 3116 addr_nop_4(); 3117 break; 3118 case 3: 3119 // Don't use "0x0F 0x1F 0x00" - need patching safe padding 3120 emit_int8(0x66); // size prefix 3121 case 2: 3122 emit_int8(0x66); // size prefix 3123 case 1: 3124 emit_int8((unsigned char)0x90); 3125 // nop 3126 break; 3127 default: 3128 assert(i == 0, " "); 3129 } 3130 return; 3131 } 3132
3133 if (UseAddressNop && VM_Version::is_zx()) { 3134 // 3135 // Using multi-byte nops "0x0F 0x1F [address]" for ZX 3136 // 1: 0x90 3137 // 2: 0x66 0x90 3138 // 3: 0x66 0x66 0x90 (don't use "0x0F 0x1F 0x00" - need patching safe padding) 3139 // 4: 0x0F 0x1F 0x40 0x00 3140 // 5: 0x0F 0x1F 0x44 0x00 0x00 3141 // 6: 0x66 0x0F 0x1F 0x44 0x00 0x00 3142 // 7: 0x0F 0x1F 0x80 0x00 0x00 0x00 0x00 3143 // 8: 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00 3144 // 9: 0x66 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00 3145 // 10: 0x66 0x66 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00 3146 // 11: 0x66 0x66 0x66 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00 3147
3148 // The remaining encodings are ZX-specific - don't use consecutive address nops 3149
3150 // 12: 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00 0x66 0x66 0x66 0x90 3151 // 13: 0x66 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00 0x66 0x66 0x66 0x90 3152 // 14: 0x66 0x66 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00 0x66 0x66 0x66 0x90 3153 // 15: 0x66 0x66 0x66 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00 0x66 0x66 0x66 0x90 3154
3155 while (i >= 15) {
3156 // For ZX don't generate consecutive address nops (mix with regular nops) 3157 i -= 15; 3158 emit_int24(0x66, 0x66, 0x66); 3159 addr_nop_8(); 3160 emit_int32(0x66, 0x66, 0x66, (unsigned char)0x90); 3161 } 3162 switch (i) { 3163 case 14: 3164 emit_int8(0x66); // size prefix 3165 case 13: 3166 emit_int8(0x66); // size prefix 3167 case 12: 3168 addr_nop_8(); 3169 emit_int32(0x66, 0x66, 0x66, (unsigned char)0x90); 3170 break; 3171 case 11: 3172 emit_int8(0x66); // size prefix 3173 case 10: 3174 emit_int8(0x66); // size prefix 3175 case 9: 3176 emit_int8(0x66); // size prefix 3177 case 8: 3178 addr_nop_8(); 3179 break; 3180 case 7: 3181 addr_nop_7(); 3182 break; 3183 case 6: 3184 emit_int8(0x66); // size prefix 3185 case 5: 3186 addr_nop_5(); 3187 break; 3188 case 4: 3189 addr_nop_4(); 3190 break; 3191 case 3: 3192 // Don't use "0x0F 0x1F 0x00" - need patching safe padding 3193 emit_int8(0x66); // size prefix 3194 case 2: 3195 emit_int8(0x66); // size prefix 3196 case 1: 3197 emit_int8((unsigned char)0x90); 3198 // nop 3199 break; 3200 default: 3201 assert(i == 0, " "); 3202 } 3203 return; 3204 } 3205
3206 // Using nops with size prefixes "0x66 0x90". 3207 // From AMD Optimization Guide: 3208 // 1: 0x90 3209 // 2: 0x66 0x90 3210 // 3: 0x66 0x66 0x90 3211 // 4: 0x66 0x66 0x66 0x90 3212 // 5: 0x66 0x66 0x90 0x66 0x90 3213 // 6: 0x66 0x66 0x90 0x66 0x66 0x90 3214 // 7: 0x66 0x66 0x66 0x90 0x66 0x66 0x90 3215 // 8: 0x66 0x66 0x66 0x90 0x66 0x66 0x66 0x90 3216 // 9: 0x66 0x66 0x90 0x66 0x66 0x90 0x66 0x66 0x90 3217 // 10: 0x66 0x66 0x66 0x90 0x66 0x66 0x90 0x66 0x66 0x90 3218 // 3219 while (i > 12) { 3220 i -= 4; 3221 emit_int32(0x66, 0x66, 0x66, (unsigned char)0x90); 3222 } 3223 // 1 - 12 nops 3224 if (i > 8) { 3225 if (i > 9) { 3226 i -= 1; 3227 emit_int8(0x66); 3228 } 3229 i -= 3; 3230 emit_int24(0x66, 0x66, (unsigned char)0x90); 3231 } 3232 // 1 - 8 nops 3233 if (i > 4) { 3234 if (i > 6) { 3235 i -= 1; 3236 emit_int8(0x66); 3237 } 3238 i -= 3; 3239 emit_int24(0x66, 0x66, (unsigned char)0x90); 3240 } 3241 switch (i) { 3242 case 4: 3243 emit_int8(0x66); 3244 case 3: 3245 emit_int8(0x66); 3246 case 2: 3247 emit_int8(0x66); 3248 case 1: 3249 emit_int8((unsigned char)0x90); 3250 break; 3251 default: 3252 assert(i == 0, " "); 3253 } 3254 } 3255
3256 void Assembler::notl(Register dst) { 3257 int encode = prefix_and_encode(dst->encoding()); 3258 emit_int16((unsigned char)0xF7, (unsigned char)(0xD0 | encode)); 3259 } 3260
3261 void Assembler::orl(Address dst, int32_t imm32) { 3262 InstructionMark im(this); 3263 prefix(dst); 3264 emit_arith_operand(0x81, rcx, dst, imm32); 3265 } 3266
3267 void Assembler::orl(Register dst, int32_t imm32) { 3268 prefix(dst); 3269 emit_arith(0x81, 0xC8, dst, imm32); 3270 } 3271
3272 void Assembler::orl(Register dst, Address src) { 3273 InstructionMark im(this); 3274 prefix(src, dst); 3275 emit_int8(0x0B); 3276 emit_operand(dst, src); 3277 } 3278
3279 void Assembler::orl(Register dst, Register src) { 3280 (void) prefix_and_encode(dst->encoding(), src->encoding()); 3281 emit_arith(0x0B, 0xC0, dst, src); 3282 } 3283
3284 void Assembler::orl(Address dst, Register src) { 3285 InstructionMark im(this); 3286 prefix(dst, src); 3287 emit_int8(0x09); 3288 emit_operand(src, dst); 3289 } 3290
3291 void Assembler::orb(Address dst, int imm8) { 3292 InstructionMark im(this); 3293 prefix(dst); 3294 emit_int8((unsigned char)0x80); 3295 emit_operand(rcx, dst, 1); 3296 emit_int8(imm8); 3297 } 3298
3299 void Assembler::packuswb(XMMRegister dst, Address src) { 3300
NOT_LP64(assert(VM_Version::supports_sse2(), "")); 3301 assert((UseAVX > 0), "SSE mode requires address alignment 16 bytes"); 3302 InstructionMark im(this); 3303 InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true); 3304 attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_32bit); 3305 simd_prefix(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes); 3306 emit_int8(0x67); 3307 emit_operand(dst, src); 3308 } 3309 3310 void Assembler::packuswb(XMMRegister dst, XMMRegister src) { 3311 NOT_LP64(assert(VM_Version::supports_sse2(), "")); 3312 InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true); 3313 int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes); 3314 emit_int16(0x67, (unsigned char)(0xC0 | encode)); 3315 } 3316 3317 void Assembler::vpackuswb(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { 3318 assert(UseAVX > 0, "some form of AVX must be enabled"); 3319 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true); 3320 int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes); 3321 emit_int16(0x67, (unsigned char)(0xC0 | encode)); 3322 } 3323 3324 void Assembler::vpermq(XMMRegister dst, XMMRegister src, int imm8, int vector_len) { 3325 assert(VM_Version::supports_avx2(), ""); 3326 InstructionAttr attributes(vector_len, /* rex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); 3327 int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes); 3328 emit_int24(0x00, (unsigned char)(0xC0 | encode), imm8); 3329 } 3330 3331 void Assembler::vpermq(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { 3332 assert(UseAVX > 2, "requires AVX512F"); 3333 InstructionAttr attributes(vector_len, /* rex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); 3334 attributes.set_is_evex_instruction(); 3335 int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes); 3336 emit_int16((unsigned char)0x36, (unsigned char)(0xC0 | encode)); 3337 } 3338 3339 void Assembler::vperm2i128(XMMRegister dst, XMMRegister nds, XMMRegister src, int imm8) { 3340 assert(VM_Version::supports_avx2(), ""); 3341 InstructionAttr attributes(AVX_256bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false); 3342 int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes); 3343 emit_int24(0x46, 0xC0 | encode, imm8); 3344 } 3345 3346 void Assembler::vperm2f128(XMMRegister dst, XMMRegister nds, XMMRegister src, int imm8) { 3347 assert(VM_Version::supports_avx(), ""); 3348 InstructionAttr attributes(AVX_256bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false); 3349 int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes); 3350 emit_int24(0x06, 0xC0 | encode, imm8); 3351 } 3352 3353 void Assembler::evpermi2q(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { 3354 assert(VM_Version::supports_evex(), ""); 3355 
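  // Editorial note: VPERMI2Q is EVEX.66.0F38.W1 76 /r; vex_w, VEX_SIMD_66, VEX_OPCODE_0F_38 and opcode 0x76 below select exactly that encoding.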
InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); 3356 attributes.set_is_evex_instruction(); 3357 int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes); 3358 emit_int16(0x76, (unsigned char)(0xC0 | encode)); 3359 } 3360 3361 3362 void Assembler::pause() { 3363 emit_int16((unsigned char)0xF3, (unsigned char)0x90); 3364 } 3365 3366 void Assembler::ud2() { 3367 emit_int16(0x0F, 0x0B); 3368 } 3369 3370 void Assembler::pcmpestri(XMMRegister dst, Address src, int imm8) { 3371 assert(VM_Version::supports_sse4_2(), ""); 3372 InstructionMark im(this); 3373 InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false); 3374 simd_prefix(dst, xnoreg, src, VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes); 3375 emit_int8(0x61); 3376 emit_operand(dst, src); 3377 emit_int8(imm8); 3378 } 3379 3380 void Assembler::pcmpestri(XMMRegister dst, XMMRegister src, int imm8) { 3381 assert(VM_Version::supports_sse4_2(), ""); 3382 InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false); 3383 int encode = simd_prefix_and_encode(dst, xnoreg, src, VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes); 3384 emit_int24(0x61, (unsigned char)(0xC0 | encode), imm8); 3385 } 3386 3387 // In this context, the dst vector contains the components that are equal, non equal components are zeroed in dst 3388 void Assembler::pcmpeqb(XMMRegister dst, XMMRegister src) { 3389 assert(VM_Version::supports_sse2(), ""); 3390 InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false); 3391 int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes); 3392 emit_int16(0x74, (unsigned char)(0xC0 | encode)); 3393 } 3394 3395 // In this context, the dst vector contains the components that are equal, non equal components are zeroed in dst 3396 void Assembler::vpcmpeqb(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { 3397 assert(VM_Version::supports_avx(), ""); 3398 InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false); 3399 int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes); 3400 emit_int16(0x74, (unsigned char)(0xC0 | encode)); 3401 } 3402 3403 // In this context, kdst is written the mask used to process the equal components 3404 void Assembler::evpcmpeqb(KRegister kdst, XMMRegister nds, XMMRegister src, int vector_len) { 3405 assert(VM_Version::supports_avx512bw(), ""); 3406 InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); 3407 attributes.set_is_evex_instruction(); 3408 int encode = vex_prefix_and_encode(kdst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes); 3409 emit_int16(0x74, (unsigned char)(0xC0 | encode)); 3410 } 3411 3412 void Assembler::evpcmpgtb(KRegister kdst, XMMRegister nds, Address src, int vector_len) { 3413 assert(VM_Version::supports_avx512vlbw(), ""); 3414 InstructionMark im(this); 3415 InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); 3416 attributes.set_address_attributes(/* tuple_type */ EVEX_FVM, /* 
input_size_in_bits */ EVEX_NObit); 3417 attributes.set_is_evex_instruction(); 3418 int dst_enc = kdst->encoding(); 3419 vex_prefix(src, nds->encoding(), dst_enc, VEX_SIMD_66, VEX_OPCODE_0F, &attributes); 3420 emit_int8(0x64); 3421 emit_operand(as_Register(dst_enc), src); 3422 } 3423 3424 void Assembler::evpcmpgtb(KRegister kdst, KRegister mask, XMMRegister nds, Address src, int vector_len) { 3425 assert(VM_Version::supports_avx512vlbw(), ""); 3426 InstructionMark im(this); 3427 InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true); 3428 attributes.set_address_attributes(/* tuple_type */ EVEX_FVM, /* input_size_in_bits */ EVEX_NObit); 3429 attributes.reset_is_clear_context(); 3430 attributes.set_embedded_opmask_register_specifier(mask); 3431 attributes.set_is_evex_instruction(); 3432 int dst_enc = kdst->encoding(); 3433 vex_prefix(src, nds->encoding(), dst_enc, VEX_SIMD_66, VEX_OPCODE_0F, &attributes); 3434 emit_int8(0x64); 3435 emit_operand(as_Register(dst_enc), src); 3436 } 3437 3438 void Assembler::evpcmpuw(KRegister kdst, XMMRegister nds, XMMRegister src, ComparisonPredicate vcc, int vector_len) { 3439 assert(VM_Version::supports_avx512vlbw(), ""); 3440 InstructionAttr attributes(vector_len, /* rex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); 3441 attributes.set_is_evex_instruction(); 3442 int encode = vex_prefix_and_encode(kdst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes); 3443 emit_int24(0x3E, (unsigned char)(0xC0 | encode), vcc); 3444 } 3445 3446 void Assembler::evpcmpuw(KRegister kdst, KRegister mask, XMMRegister nds, XMMRegister src, ComparisonPredicate vcc, int vector_len) { 3447 assert(VM_Version::supports_avx512vlbw(), ""); 3448 InstructionAttr attributes(vector_len, /* rex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true); 3449 attributes.reset_is_clear_context(); 3450 attributes.set_embedded_opmask_register_specifier(mask); 3451 attributes.set_is_evex_instruction(); 3452 int encode = vex_prefix_and_encode(kdst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes); 3453 emit_int24(0x3E, (unsigned char)(0xC0 | encode), vcc); 3454 } 3455 3456 void Assembler::evpcmpuw(KRegister kdst, XMMRegister nds, Address src, ComparisonPredicate vcc, int vector_len) { 3457 assert(VM_Version::supports_avx512vlbw(), ""); 3458 InstructionMark im(this); 3459 InstructionAttr attributes(vector_len, /* rex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); 3460 attributes.set_address_attributes(/* tuple_type */ EVEX_FVM, /* input_size_in_bits */ EVEX_NObit); 3461 attributes.set_is_evex_instruction(); 3462 int dst_enc = kdst->encoding(); 3463 vex_prefix(src, nds->encoding(), kdst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes); 3464 emit_int8(0x3E); 3465 emit_operand(as_Register(dst_enc), src); 3466 emit_int8(vcc); 3467 } 3468 3469 void Assembler::evpcmpeqb(KRegister kdst, XMMRegister nds, Address src, int vector_len) { 3470 assert(VM_Version::supports_avx512bw(), ""); 3471 InstructionMark im(this); 3472 InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); 3473 attributes.set_is_evex_instruction(); 3474 attributes.set_address_attributes(/* tuple_type */ EVEX_FVM, /* input_size_in_bits */ EVEX_NObit); 3475 int dst_enc = kdst->encoding(); 3476 vex_prefix(src, 
nds->encoding(), dst_enc, VEX_SIMD_66, VEX_OPCODE_0F, &attributes); 3477 emit_int8(0x74); 3478 emit_operand(as_Register(dst_enc), src); 3479 } 3480 3481 void Assembler::evpcmpeqb(KRegister kdst, KRegister mask, XMMRegister nds, Address src, int vector_len) { 3482 assert(VM_Version::supports_avx512vlbw(), ""); 3483 InstructionMark im(this); 3484 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_reg_mask */ false, /* uses_vl */ true); 3485 attributes.set_address_attributes(/* tuple_type */ EVEX_FVM, /* input_size_in_bits */ EVEX_NObit); 3486 attributes.reset_is_clear_context(); 3487 attributes.set_embedded_opmask_register_specifier(mask); 3488 attributes.set_is_evex_instruction(); 3489 vex_prefix(src, nds->encoding(), kdst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes); 3490 emit_int8(0x74); 3491 emit_operand(as_Register(kdst->encoding()), src); 3492 } 3493 3494 // In this context, the dst vector contains the components that are equal, non equal components are zeroed in dst 3495 void Assembler::pcmpeqw(XMMRegister dst, XMMRegister src) { 3496 assert(VM_Version::supports_sse2(), ""); 3497 InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false); 3498 int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes); 3499 emit_int16(0x75, (unsigned char)(0xC0 | encode)); 3500 } 3501 3502 // In this context, the dst vector contains the components that are equal, non equal components are zeroed in dst 3503 void Assembler::vpcmpeqw(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { 3504 assert(VM_Version::supports_avx(), ""); 3505 InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false); 3506 int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes); 3507 emit_int16(0x75, (unsigned char)(0xC0 | encode)); 3508 } 3509 3510 // In this context, kdst is written the mask used to process the equal components 3511 void Assembler::evpcmpeqw(KRegister kdst, XMMRegister nds, XMMRegister src, int vector_len) { 3512 assert(VM_Version::supports_avx512bw(), ""); 3513 InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); 3514 attributes.set_is_evex_instruction(); 3515 int encode = vex_prefix_and_encode(kdst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes); 3516 emit_int16(0x75, (unsigned char)(0xC0 | encode)); 3517 } 3518 3519 void Assembler::evpcmpeqw(KRegister kdst, XMMRegister nds, Address src, int vector_len) { 3520 assert(VM_Version::supports_avx512bw(), ""); 3521 InstructionMark im(this); 3522 InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); 3523 attributes.set_address_attributes(/* tuple_type */ EVEX_FVM, /* input_size_in_bits */ EVEX_NObit); 3524 attributes.set_is_evex_instruction(); 3525 int dst_enc = kdst->encoding(); 3526 vex_prefix(src, nds->encoding(), dst_enc, VEX_SIMD_66, VEX_OPCODE_0F, &attributes); 3527 emit_int8(0x75); 3528 emit_operand(as_Register(dst_enc), src); 3529 } 3530 3531 // In this context, the dst vector contains the components that are equal, non equal components are zeroed in dst 3532 void Assembler::pcmpeqd(XMMRegister dst, XMMRegister src) { 3533 assert(VM_Version::supports_sse2(), ""); 3534 
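  // Editorial note: PCMPEQD xmm1, xmm2/m128 is 66 0F 76 /r.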
InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false); 3535 int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes); 3536 emit_int8(0x76); 3537 emit_int8((unsigned char)(0xC0 | encode)); 3538 } 3539 3540 // In this context, the dst vector contains the components that are equal, non equal components are zeroed in dst 3541 void Assembler::vpcmpeqd(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { 3542 assert(VM_Version::supports_avx(), ""); 3543 InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false); 3544 int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes); 3545 emit_int16(0x76, (unsigned char)(0xC0 | encode)); 3546 } 3547 3548 // In this context, kdst is written the mask used to process the equal components 3549 void Assembler::evpcmpeqd(KRegister kdst, XMMRegister nds, XMMRegister src, int vector_len) { 3550 assert(VM_Version::supports_evex(), ""); 3551 InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); 3552 attributes.set_is_evex_instruction(); 3553 attributes.reset_is_clear_context(); 3554 int encode = vex_prefix_and_encode(kdst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes); 3555 emit_int16(0x76, (unsigned char)(0xC0 | encode)); 3556 } 3557 3558 void Assembler::evpcmpeqd(KRegister kdst, XMMRegister nds, Address src, int vector_len) { 3559 assert(VM_Version::supports_evex(), ""); 3560 InstructionMark im(this); 3561 InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); 3562 attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_32bit); 3563 attributes.reset_is_clear_context(); 3564 attributes.set_is_evex_instruction(); 3565 int dst_enc = kdst->encoding(); 3566 vex_prefix(src, nds->encoding(), dst_enc, VEX_SIMD_66, VEX_OPCODE_0F, &attributes); 3567 emit_int8(0x76); 3568 emit_operand(as_Register(dst_enc), src); 3569 } 3570 3571 // In this context, the dst vector contains the components that are equal, non equal components are zeroed in dst 3572 void Assembler::pcmpeqq(XMMRegister dst, XMMRegister src) { 3573 assert(VM_Version::supports_sse4_1(), ""); 3574 InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false); 3575 int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes); 3576 emit_int16(0x29, (unsigned char)(0xC0 | encode)); 3577 } 3578 3579 // In this context, the dst vector contains the components that are equal, non equal components are zeroed in dst 3580 void Assembler::vpcmpeqq(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { 3581 assert(VM_Version::supports_avx(), ""); 3582 InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false); 3583 int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes); 3584 emit_int16(0x29, (unsigned char)(0xC0 | encode)); 3585 } 3586 3587 // In this context, kdst is written the mask used to process the equal components 3588 void Assembler::evpcmpeqq(KRegister kdst, XMMRegister nds, XMMRegister src, 
int vector_len) { 3589 assert(VM_Version::supports_evex(), ""); 3590 InstructionAttr attributes(vector_len, /* rex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); 3591 attributes.reset_is_clear_context(); 3592 attributes.set_is_evex_instruction(); 3593 int encode = vex_prefix_and_encode(kdst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes); 3594 emit_int16(0x29, (unsigned char)(0xC0 | encode)); 3595 } 3596 3597 // In this context, kdst is written the mask used to process the equal components 3598 void Assembler::evpcmpeqq(KRegister kdst, XMMRegister nds, Address src, int vector_len) { 3599 assert(VM_Version::supports_evex(), ""); 3600 InstructionMark im(this); 3601 InstructionAttr attributes(vector_len, /* rex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); 3602 attributes.reset_is_clear_context(); 3603 attributes.set_is_evex_instruction(); 3604 attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_64bit); 3605 int dst_enc = kdst->encoding(); 3606 vex_prefix(src, nds->encoding(), dst_enc, VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes); 3607 emit_int8(0x29); 3608 emit_operand(as_Register(dst_enc), src); 3609 } 3610 3611 void Assembler::pmovmskb(Register dst, XMMRegister src) { 3612 assert(VM_Version::supports_sse2(), ""); 3613 InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false); 3614 int encode = simd_prefix_and_encode(as_XMMRegister(dst->encoding()), xnoreg, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes); 3615 emit_int16((unsigned char)0xD7, (unsigned char)(0xC0 | encode)); 3616 } 3617 3618 void Assembler::vpmovmskb(Register dst, XMMRegister src) { 3619 assert(VM_Version::supports_avx2(), ""); 3620 InstructionAttr attributes(AVX_256bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false); 3621 int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes); 3622 emit_int16((unsigned char)0xD7, (unsigned char)(0xC0 | encode)); 3623 } 3624 3625 void Assembler::pextrd(Register dst, XMMRegister src, int imm8) { 3626 assert(VM_Version::supports_sse4_1(), ""); 3627 InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_dq, /* no_mask_reg */ true, /* uses_vl */ true); 3628 int encode = simd_prefix_and_encode(src, xnoreg, as_XMMRegister(dst->encoding()), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes); 3629 emit_int24(0x16, (unsigned char)(0xC0 | encode), imm8); 3630 } 3631 3632 void Assembler::pextrd(Address dst, XMMRegister src, int imm8) { 3633 assert(VM_Version::supports_sse4_1(), ""); 3634 InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_dq, /* no_mask_reg */ true, /* uses_vl */ true); 3635 attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_32bit); 3636 simd_prefix(src, xnoreg, dst, VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes); 3637 emit_int8(0x16); 3638 emit_operand(src, dst); 3639 emit_int8(imm8); 3640 } 3641 3642 void Assembler::pextrq(Register dst, XMMRegister src, int imm8) { 3643 assert(VM_Version::supports_sse4_1(), ""); 3644 InstructionAttr attributes(AVX_128bit, /* rex_w */ true, /* legacy_mode */ _legacy_mode_dq, /* no_mask_reg */ true, /* uses_vl */ true); 3645 int encode = simd_prefix_and_encode(src, xnoreg, as_XMMRegister(dst->encoding()), VEX_SIMD_66, VEX_OPCODE_0F_3A, 
&attributes); 3646 emit_int24(0x16, (unsigned char)(0xC0 | encode), imm8); 3647 } 3648 3649 void Assembler::pextrq(Address dst, XMMRegister src, int imm8) { 3650 assert(VM_Version::supports_sse4_1(), ""); 3651 InstructionAttr attributes(AVX_128bit, /* rex_w */ true, /* legacy_mode */ _legacy_mode_dq, /* no_mask_reg */ true, /* uses_vl */ true); 3652 attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_64bit); 3653 simd_prefix(src, xnoreg, dst, VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes); 3654 emit_int8(0x16); 3655 emit_operand(src, dst); 3656 emit_int8(imm8); 3657 } 3658 3659 void Assembler::pextrw(Register dst, XMMRegister src, int imm8) { 3660 assert(VM_Version::supports_sse2(), ""); 3661 InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true); 3662 int encode = simd_prefix_and_encode(as_XMMRegister(dst->encoding()), xnoreg, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes); 3663 emit_int24((unsigned char)0xC5, (unsigned char)(0xC0 | encode), imm8); 3664 } 3665 3666 void Assembler::pextrw(Address dst, XMMRegister src, int imm8) { 3667 assert(VM_Version::supports_sse4_1(), ""); 3668 InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true); 3669 attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_16bit); 3670 simd_prefix(src, xnoreg, dst, VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes); 3671 emit_int8((unsigned char)0x15); 3672 emit_operand(src, dst); 3673 emit_int8(imm8); 3674 } 3675 3676 void Assembler::pextrb(Address dst, XMMRegister src, int imm8) { 3677 assert(VM_Version::supports_sse4_1(), ""); 3678 InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true); 3679 attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_8bit); 3680 simd_prefix(src, xnoreg, dst, VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes); 3681 emit_int8(0x14); 3682 emit_operand(src, dst); 3683 emit_int8(imm8); 3684 } 3685 3686 void Assembler::pinsrd(XMMRegister dst, Register src, int imm8) { 3687 assert(VM_Version::supports_sse4_1(), ""); 3688 InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_dq, /* no_mask_reg */ true, /* uses_vl */ true); 3689 int encode = simd_prefix_and_encode(dst, dst, as_XMMRegister(src->encoding()), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes); 3690 emit_int24(0x22, (unsigned char)(0xC0 | encode), imm8); 3691 } 3692 3693 void Assembler::pinsrd(XMMRegister dst, Address src, int imm8) { 3694 assert(VM_Version::supports_sse4_1(), ""); 3695 InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_dq, /* no_mask_reg */ true, /* uses_vl */ true); 3696 attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_32bit); 3697 simd_prefix(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes); 3698 emit_int8(0x22); 3699 emit_operand(dst,src); 3700 emit_int8(imm8); 3701 } 3702 3703 void Assembler::pinsrq(XMMRegister dst, Register src, int imm8) { 3704 assert(VM_Version::supports_sse4_1(), ""); 3705 InstructionAttr attributes(AVX_128bit, /* rex_w */ true, /* legacy_mode */ _legacy_mode_dq, /* no_mask_reg */ true, /* uses_vl */ true); 3706 int encode = simd_prefix_and_encode(dst, dst, as_XMMRegister(src->encoding()), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes); 3707 
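  // Editorial note: PINSRQ xmm, r64, imm8 is 66 REX.W 0F 3A 22 /r ib; rex_w above supplies the REX.W bit.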
emit_int24(0x22, (unsigned char)(0xC0 | encode), imm8); 3708 } 3709 3710 void Assembler::pinsrq(XMMRegister dst, Address src, int imm8) { 3711 assert(VM_Version::supports_sse4_1(), ""); 3712 InstructionAttr attributes(AVX_128bit, /* rex_w */ true, /* legacy_mode */ _legacy_mode_dq, /* no_mask_reg */ true, /* uses_vl */ true); 3713 attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_64bit); 3714 simd_prefix(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes); 3715 emit_int8(0x22); 3716 emit_operand(dst, src); 3717 emit_int8(imm8); 3718 } 3719 3720 void Assembler::pinsrw(XMMRegister dst, Register src, int imm8) { 3721 assert(VM_Version::supports_sse2(), ""); 3722 InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true); 3723 int encode = simd_prefix_and_encode(dst, dst, as_XMMRegister(src->encoding()), VEX_SIMD_66, VEX_OPCODE_0F, &attributes); 3724 emit_int24((unsigned char)0xC4, (unsigned char)(0xC0 | encode), imm8); 3725 } 3726 3727 void Assembler::pinsrw(XMMRegister dst, Address src, int imm8) { 3728 assert(VM_Version::supports_sse2(), ""); 3729 InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true); 3730 attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_16bit); 3731 simd_prefix(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes); 3732 emit_int8((unsigned char)0xC4); 3733 emit_operand(dst, src); 3734 emit_int8(imm8); 3735 } 3736 3737 void Assembler::pinsrb(XMMRegister dst, Address src, int imm8) { 3738 assert(VM_Version::supports_sse4_1(), ""); 3739 InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true); 3740 attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_8bit); 3741 simd_prefix(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes); 3742 emit_int8(0x20); 3743 emit_operand(dst, src); 3744 emit_int8(imm8); 3745 } 3746 3747 void Assembler::pmovzxbw(XMMRegister dst, Address src) { 3748 assert(VM_Version::supports_sse4_1(), ""); 3749 InstructionMark im(this); 3750 InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true); 3751 attributes.set_address_attributes(/* tuple_type */ EVEX_HVM, /* input_size_in_bits */ EVEX_NObit); 3752 simd_prefix(dst, xnoreg, src, VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes); 3753 emit_int8(0x30); 3754 emit_operand(dst, src); 3755 } 3756 3757 void Assembler::pmovzxbw(XMMRegister dst, XMMRegister src) { 3758 assert(VM_Version::supports_sse4_1(), ""); 3759 InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true); 3760 int encode = simd_prefix_and_encode(dst, xnoreg, src, VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes); 3761 emit_int16(0x30, (unsigned char)(0xC0 | encode)); 3762 } 3763 3764 void Assembler::pmovsxbw(XMMRegister dst, XMMRegister src) { 3765 assert(VM_Version::supports_sse4_1(), ""); 3766 InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true); 3767 int encode = simd_prefix_and_encode(dst, xnoreg, src, VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes); 3768 emit_int16(0x20, (unsigned char)(0xC0 | encode)); 3769 } 3770 3771 void 
Assembler::vpmovzxbw(XMMRegister dst, Address src, int vector_len) { 3772 assert(VM_Version::supports_avx(), ""); 3773 InstructionMark im(this); 3774 assert(dst != xnoreg, "sanity"); 3775 InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true); 3776 attributes.set_address_attributes(/* tuple_type */ EVEX_HVM, /* input_size_in_bits */ EVEX_NObit); 3777 vex_prefix(src, 0, dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes); 3778 emit_int8(0x30); 3779 emit_operand(dst, src); 3780 } 3781 3782 void Assembler::vpmovzxbw(XMMRegister dst, XMMRegister src, int vector_len) { 3783 assert(vector_len == AVX_128bit? VM_Version::supports_avx() : 3784 vector_len == AVX_256bit? VM_Version::supports_avx2() : 3785 vector_len == AVX_512bit? VM_Version::supports_avx512bw() : 0, ""); 3786 InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true); 3787 int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes); 3788 emit_int16(0x30, (unsigned char) (0xC0 | encode)); 3789 } 3790 3791 void Assembler::vpmovsxbw(XMMRegister dst, XMMRegister src, int vector_len) { 3792 assert(vector_len == AVX_128bit? VM_Version::supports_avx() : 3793 vector_len == AVX_256bit? VM_Version::supports_avx2() : 3794 vector_len == AVX_512bit? VM_Version::supports_avx512bw() : 0, ""); 3795 InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true); 3796 int encode = simd_prefix_and_encode(dst, xnoreg, src, VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes); 3797 emit_int16(0x20, (unsigned char)(0xC0 | encode)); 3798 } 3799 3800 void Assembler::evpmovzxbw(XMMRegister dst, KRegister mask, Address src, int vector_len) { 3801 assert(VM_Version::supports_avx512vlbw(), ""); 3802 assert(dst != xnoreg, "sanity"); 3803 InstructionMark im(this); 3804 InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true); 3805 attributes.set_address_attributes(/* tuple_type */ EVEX_HVM, /* input_size_in_bits */ EVEX_NObit); 3806 attributes.set_embedded_opmask_register_specifier(mask); 3807 attributes.set_is_evex_instruction(); 3808 vex_prefix(src, 0, dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes); 3809 emit_int8(0x30); 3810 emit_operand(dst, src); 3811 } 3812 void Assembler::evpmovwb(Address dst, XMMRegister src, int vector_len) { 3813 assert(VM_Version::supports_avx512vlbw(), ""); 3814 assert(src != xnoreg, "sanity"); 3815 InstructionMark im(this); 3816 InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); 3817 attributes.set_address_attributes(/* tuple_type */ EVEX_HVM, /* input_size_in_bits */ EVEX_NObit); 3818 attributes.set_is_evex_instruction(); 3819 vex_prefix(dst, 0, src->encoding(), VEX_SIMD_F3, VEX_OPCODE_0F_38, &attributes); 3820 emit_int8(0x30); 3821 emit_operand(src, dst); 3822 } 3823 3824 void Assembler::evpmovwb(Address dst, KRegister mask, XMMRegister src, int vector_len) { 3825 assert(VM_Version::supports_avx512vlbw(), ""); 3826 assert(src != xnoreg, "sanity"); 3827 InstructionMark im(this); 3828 InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true); 3829 attributes.set_address_attributes(/* tuple_type */ EVEX_HVM, /* 
input_size_in_bits */ EVEX_NObit); 3830 attributes.reset_is_clear_context(); 3831 attributes.set_embedded_opmask_register_specifier(mask); 3832 attributes.set_is_evex_instruction(); 3833 vex_prefix(dst, 0, src->encoding(), VEX_SIMD_F3, VEX_OPCODE_0F_38, &attributes); 3834 emit_int8(0x30); 3835 emit_operand(src, dst); 3836 } 3837 3838 void Assembler::evpmovdb(Address dst, XMMRegister src, int vector_len) { 3839 assert(VM_Version::supports_evex(), ""); 3840 assert(src != xnoreg, "sanity"); 3841 InstructionMark im(this); 3842 InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); 3843 attributes.set_address_attributes(/* tuple_type */ EVEX_QVM, /* input_size_in_bits */ EVEX_NObit); 3844 attributes.set_is_evex_instruction(); 3845 vex_prefix(dst, 0, src->encoding(), VEX_SIMD_F3, VEX_OPCODE_0F_38, &attributes); 3846 emit_int8(0x31); 3847 emit_operand(src, dst); 3848 } 3849 3850 void Assembler::vpmovzxwd(XMMRegister dst, XMMRegister src, int vector_len) { 3851 assert(vector_len == AVX_128bit? VM_Version::supports_avx() : 3852 vector_len == AVX_256bit? VM_Version::supports_avx2() : 3853 vector_len == AVX_512bit? VM_Version::supports_evex() : 0, " "); 3854 InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true); 3855 int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes); 3856 emit_int16(0x33, (unsigned char)(0xC0 | encode)); 3857 } 3858 3859 void Assembler::pmaddwd(XMMRegister dst, XMMRegister src) { 3860 NOT_LP64(assert(VM_Version::supports_sse2(), "")); 3861 InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true); 3862 int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes); 3863 emit_int16((unsigned char)0xF5, (unsigned char)(0xC0 | encode)); 3864 } 3865 3866 void Assembler::vpmaddwd(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { 3867 assert(vector_len == AVX_128bit ? VM_Version::supports_avx() : 3868 (vector_len == AVX_256bit ? VM_Version::supports_avx2() : 3869 (vector_len == AVX_512bit ? 
VM_Version::supports_evex() : 0)), ""); 3870 InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true); 3871 int encode = simd_prefix_and_encode(dst, nds, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes); 3872 emit_int16((unsigned char)0xF5, (unsigned char)(0xC0 | encode)); 3873 } 3874 3875 void Assembler::evpdpwssd(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { 3876 assert(VM_Version::supports_evex(), ""); 3877 assert(VM_Version::supports_avx512_vnni(), "must support vnni"); 3878 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); 3879 attributes.set_is_evex_instruction(); 3880 int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes); 3881 emit_int16(0x52, (unsigned char)(0xC0 | encode)); 3882 } 3883 3884 // generic 3885 void Assembler::pop(Register dst) { 3886 int encode = prefix_and_encode(dst->encoding()); 3887 emit_int8(0x58 | encode); 3888 } 3889 3890 void Assembler::popcntl(Register dst, Address src) { 3891 assert(VM_Version::supports_popcnt(), "must support"); 3892 InstructionMark im(this); 3893 emit_int8((unsigned char)0xF3); 3894 prefix(src, dst); 3895 emit_int16(0x0F, (unsigned char)0xB8); 3896 emit_operand(dst, src); 3897 } 3898 3899 void Assembler::popcntl(Register dst, Register src) { 3900 assert(VM_Version::supports_popcnt(), "must support"); 3901 emit_int8((unsigned char)0xF3); 3902 int encode = prefix_and_encode(dst->encoding(), src->encoding()); 3903 emit_int24(0x0F, (unsigned char)0xB8, (unsigned char)(0xC0 | encode)); 3904 } 3905 3906 void Assembler::vpopcntd(XMMRegister dst, XMMRegister src, int vector_len) { 3907 assert(VM_Version::supports_avx512_vpopcntdq(), "must support vpopcntdq feature"); 3908 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); 3909 attributes.set_is_evex_instruction(); 3910 int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes); 3911 emit_int16(0x55, (unsigned char)(0xC0 | encode)); 3912 } 3913 3914 void Assembler::popf() { 3915 emit_int8((unsigned char)0x9D); 3916 } 3917 3918 #ifndef _LP64 // no 32bit push/pop on amd64 3919 void Assembler::popl(Address dst) { 3920 // NOTE: this will adjust stack by 8byte on 64bits 3921 InstructionMark im(this); 3922 prefix(dst); 3923 emit_int8((unsigned char)0x8F); 3924 emit_operand(rax, dst); 3925 } 3926 #endif 3927 3928 void Assembler::prefetchnta(Address src) { 3929 NOT_LP64(assert(VM_Version::supports_sse(), "must support")); 3930 InstructionMark im(this); 3931 prefix(src); 3932 emit_int16(0x0F, 0x18); 3933 emit_operand(rax, src); // 0, src 3934 } 3935 3936 void Assembler::prefetchr(Address src) { 3937 assert(VM_Version::supports_3dnow_prefetch(), "must support"); 3938 InstructionMark im(this); 3939 prefix(src); 3940 emit_int16(0x0F, 0x0D); 3941 emit_operand(rax, src); // 0, src 3942 } 3943 3944 void Assembler::prefetcht0(Address src) { 3945 NOT_LP64(assert(VM_Version::supports_sse(), "must support")); 3946 InstructionMark im(this); 3947 prefix(src); 3948 emit_int16(0x0F, 0x18); 3949 emit_operand(rcx, src); // 1, src 3950 } 3951 3952 void Assembler::prefetcht1(Address src) { 3953 NOT_LP64(assert(VM_Version::supports_sse(), "must support")); 3954 InstructionMark im(this); 3955 prefix(src); 3956 emit_int16(0x0F, 0x18); 3957 
emit_operand(rdx, src); // 2, src 3958 } 3959 3960 void Assembler::prefetcht2(Address src) { 3961 NOT_LP64(assert(VM_Version::supports_sse(), "must support")); 3962 InstructionMark im(this); 3963 prefix(src); 3964 emit_int16(0x0F, 0x18); 3965 emit_operand(rbx, src); // 3, src 3966 } 3967 3968 void Assembler::prefetchw(Address src) { 3969 assert(VM_Version::supports_3dnow_prefetch(), "must support"); 3970 InstructionMark im(this); 3971 prefix(src); 3972 emit_int16(0x0F, 0x0D); 3973 emit_operand(rcx, src); // 1, src 3974 } 3975 3976 void Assembler::prefix(Prefix p) { 3977 emit_int8(p); 3978 } 3979 3980 void Assembler::pshufb(XMMRegister dst, XMMRegister src) { 3981 assert(VM_Version::supports_ssse3(), ""); 3982 InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true); 3983 int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes); 3984 emit_int16(0x00, (unsigned char)(0xC0 | encode)); 3985 } 3986 3987 void Assembler::vpshufb(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { 3988 assert(vector_len == AVX_128bit? VM_Version::supports_avx() : 3989 vector_len == AVX_256bit? VM_Version::supports_avx2() : 3990 vector_len == AVX_512bit? VM_Version::supports_avx512bw() : 0, ""); 3991 InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true); 3992 int encode = simd_prefix_and_encode(dst, nds, src, VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes); 3993 emit_int16(0x00, (unsigned char)(0xC0 | encode)); 3994 } 3995 3996 void Assembler::pshufb(XMMRegister dst, Address src) { 3997 assert(VM_Version::supports_ssse3(), ""); 3998 InstructionMark im(this); 3999 InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true); 4000 attributes.set_address_attributes(/* tuple_type */ EVEX_FVM, /* input_size_in_bits */ EVEX_NObit); 4001 simd_prefix(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes); 4002 emit_int8(0x00); 4003 emit_operand(dst, src); 4004 } 4005 4006 void Assembler::pshufd(XMMRegister dst, XMMRegister src, int mode) { 4007 assert(isByte(mode), "invalid value"); 4008 NOT_LP64(assert(VM_Version::supports_sse2(), "")); 4009 int vector_len = VM_Version::supports_avx512novl() ? AVX_512bit : AVX_128bit; 4010 InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); 4011 int encode = simd_prefix_and_encode(dst, xnoreg, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes); 4012 emit_int24(0x70, (unsigned char)(0xC0 | encode), mode & 0xFF); 4013 } 4014 4015 void Assembler::vpshufd(XMMRegister dst, XMMRegister src, int mode, int vector_len) { 4016 assert(vector_len == AVX_128bit? VM_Version::supports_avx() : 4017 (vector_len == AVX_256bit? VM_Version::supports_avx2() : 4018 (vector_len == AVX_512bit? 
VM_Version::supports_evex() : 0)), ""); 4019 NOT_LP64(assert(VM_Version::supports_sse2(), "")); 4020 InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); 4021 int encode = simd_prefix_and_encode(dst, xnoreg, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes); 4022 emit_int24(0x70, (unsigned char)(0xC0 | encode), mode & 0xFF); 4023 } 4024
4025 void Assembler::pshufd(XMMRegister dst, Address src, int mode) { 4026 assert(isByte(mode), "invalid value"); 4027 NOT_LP64(assert(VM_Version::supports_sse2(), "")); 4028 assert((UseAVX > 0), "SSE mode requires address alignment 16 bytes"); 4029 InstructionMark im(this); 4030 InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); 4031 attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_32bit); 4032 simd_prefix(dst, xnoreg, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes); 4033 emit_int8(0x70); 4034 emit_operand(dst, src); 4035 emit_int8(mode & 0xFF); 4036 } 4037
4038 void Assembler::pshuflw(XMMRegister dst, XMMRegister src, int mode) { 4039 assert(isByte(mode), "invalid value"); 4040 NOT_LP64(assert(VM_Version::supports_sse2(), "")); 4041 InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true); 4042 int encode = simd_prefix_and_encode(dst, xnoreg, src, VEX_SIMD_F2, VEX_OPCODE_0F, &attributes); 4043 emit_int24(0x70, (unsigned char)(0xC0 | encode), mode & 0xFF); 4044 } 4045
4046 void Assembler::pshuflw(XMMRegister dst, Address src, int mode) { 4047 assert(isByte(mode), "invalid value"); 4048 NOT_LP64(assert(VM_Version::supports_sse2(), "")); 4049 assert((UseAVX > 0), "SSE mode requires address alignment 16 bytes"); 4050 InstructionMark im(this); 4051 InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true); 4052 attributes.set_address_attributes(/* tuple_type */ EVEX_FVM, /* input_size_in_bits */ EVEX_NObit); 4053 simd_prefix(dst, xnoreg, src, VEX_SIMD_F2, VEX_OPCODE_0F, &attributes); 4054 emit_int8(0x70); 4055 emit_operand(dst, src); 4056 emit_int8(mode & 0xFF); 4057 } 4058
4059 void Assembler::evshufi64x2(XMMRegister dst, XMMRegister nds, XMMRegister src, int imm8, int vector_len) { 4060 assert(VM_Version::supports_evex(), "requires EVEX support"); 4061 assert(vector_len == Assembler::AVX_256bit || vector_len == Assembler::AVX_512bit, ""); 4062 InstructionAttr attributes(vector_len, /* vex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); 4063 attributes.set_is_evex_instruction(); 4064 int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes); 4065 emit_int24(0x43, (unsigned char)(0xC0 | encode), imm8 & 0xFF); 4066 } 4067
4068 void Assembler::psrldq(XMMRegister dst, int shift) { 4069 // Shift right 128 bit value in dst XMMRegister by shift number of bytes.
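  // XMM3 is for /3 encoding: 66 0F 73 /3 ib (editorial note, mirroring the /7 comment in pslldq below)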
4070 NOT_LP64(assert(VM_Version::supports_sse2(), "")); 4071 InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true); 4072 int encode = simd_prefix_and_encode(xmm3, dst, dst, VEX_SIMD_66, VEX_OPCODE_0F, &attributes); 4073 emit_int24(0x73, (unsigned char)(0xC0 | encode), shift); 4074 } 4075 4076 void Assembler::vpsrldq(XMMRegister dst, XMMRegister src, int shift, int vector_len) { 4077 assert(vector_len == AVX_128bit ? VM_Version::supports_avx() : 4078 vector_len == AVX_256bit ? VM_Version::supports_avx2() : 4079 vector_len == AVX_512bit ? VM_Version::supports_avx512bw() : 0, ""); 4080 InstructionAttr attributes(vector_len, /*vex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true); 4081 int encode = vex_prefix_and_encode(xmm3->encoding(), dst->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes); 4082 emit_int24(0x73, (unsigned char)(0xC0 | encode), shift & 0xFF); 4083 } 4084 4085 void Assembler::pslldq(XMMRegister dst, int shift) { 4086 // Shift left 128 bit value in dst XMMRegister by shift number of bytes. 4087 NOT_LP64(assert(VM_Version::supports_sse2(), "")); 4088 InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true); 4089 // XMM7 is for /7 encoding: 66 0F 73 /7 ib 4090 int encode = simd_prefix_and_encode(xmm7, dst, dst, VEX_SIMD_66, VEX_OPCODE_0F, &attributes); 4091 emit_int24(0x73, (unsigned char)(0xC0 | encode), shift); 4092 } 4093 4094 void Assembler::vpslldq(XMMRegister dst, XMMRegister src, int shift, int vector_len) { 4095 assert(vector_len == AVX_128bit ? VM_Version::supports_avx() : 4096 vector_len == AVX_256bit ? VM_Version::supports_avx2() : 4097 vector_len == AVX_512bit ? 
VM_Version::supports_avx512bw() : 0, ""); 4098 InstructionAttr attributes(vector_len, /*vex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true); 4099 int encode = vex_prefix_and_encode(xmm7->encoding(), dst->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes); 4100 emit_int24(0x73, (unsigned char)(0xC0 | encode), shift & 0xFF); 4101 } 4102 4103 void Assembler::ptest(XMMRegister dst, Address src) { 4104 assert(VM_Version::supports_sse4_1(), ""); 4105 assert((UseAVX > 0), "SSE mode requires address alignment 16 bytes"); 4106 InstructionMark im(this); 4107 InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false); 4108 simd_prefix(dst, xnoreg, src, VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes); 4109 emit_int8(0x17); 4110 emit_operand(dst, src); 4111 } 4112 4113 void Assembler::ptest(XMMRegister dst, XMMRegister src) { 4114 assert(VM_Version::supports_sse4_1() || VM_Version::supports_avx(), ""); 4115 InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false); 4116 int encode = simd_prefix_and_encode(dst, xnoreg, src, VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes); 4117 emit_int8(0x17); 4118 emit_int8((unsigned char)(0xC0 | encode)); 4119 } 4120 4121 void Assembler::vptest(XMMRegister dst, Address src) { 4122 assert(VM_Version::supports_avx(), ""); 4123 InstructionMark im(this); 4124 InstructionAttr attributes(AVX_256bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false); 4125 assert(dst != xnoreg, "sanity"); 4126 // swap src<->dst for encoding 4127 vex_prefix(src, 0, dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes); 4128 emit_int8(0x17); 4129 emit_operand(dst, src); 4130 } 4131 4132 void Assembler::vptest(XMMRegister dst, XMMRegister src) { 4133 assert(VM_Version::supports_avx(), ""); 4134 InstructionAttr attributes(AVX_256bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false); 4135 int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes); 4136 emit_int16(0x17, (unsigned char)(0xC0 | encode)); 4137 } 4138 4139 void Assembler::punpcklbw(XMMRegister dst, Address src) { 4140 NOT_LP64(assert(VM_Version::supports_sse2(), "")); 4141 assert((UseAVX > 0), "SSE mode requires address alignment 16 bytes"); 4142 InstructionMark im(this); 4143 InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_vlbw, /* no_mask_reg */ true, /* uses_vl */ true); 4144 attributes.set_address_attributes(/* tuple_type */ EVEX_FVM, /* input_size_in_bits */ EVEX_NObit); 4145 simd_prefix(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes); 4146 emit_int8(0x60); 4147 emit_operand(dst, src); 4148 } 4149 4150 void Assembler::punpcklbw(XMMRegister dst, XMMRegister src) { 4151 NOT_LP64(assert(VM_Version::supports_sse2(), "")); 4152 InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_vlbw, /* no_mask_reg */ true, /* uses_vl */ true); 4153 int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes); 4154 emit_int16(0x60, (unsigned char)(0xC0 | encode)); 4155 } 4156 4157 void Assembler::punpckldq(XMMRegister dst, Address src) { 4158 NOT_LP64(assert(VM_Version::supports_sse2(), "")); 4159 assert((UseAVX > 0), "SSE mode requires address alignment 16 bytes"); 4160 InstructionMark im(this); 4161 
InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); 4162 attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_32bit); 4163 simd_prefix(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes); 4164 emit_int8(0x62); 4165 emit_operand(dst, src); 4166 } 4167 4168 void Assembler::punpckldq(XMMRegister dst, XMMRegister src) { 4169 NOT_LP64(assert(VM_Version::supports_sse2(), "")); 4170 InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); 4171 int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes); 4172 emit_int16(0x62, (unsigned char)(0xC0 | encode)); 4173 } 4174 4175 void Assembler::punpcklqdq(XMMRegister dst, XMMRegister src) { 4176 NOT_LP64(assert(VM_Version::supports_sse2(), "")); 4177 InstructionAttr attributes(AVX_128bit, /* rex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); 4178 attributes.set_rex_vex_w_reverted(); 4179 int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes); 4180 emit_int16(0x6C, (unsigned char)(0xC0 | encode)); 4181 } 4182 4183 void Assembler::push(int32_t imm32) { 4184 // in 64-bit mode we push 64 bits onto the stack but only 4185 // take a 32-bit immediate 4186 emit_int8(0x68); 4187 emit_int32(imm32); 4188 } 4189 4190 void Assembler::push(Register src) { 4191 int encode = prefix_and_encode(src->encoding()); 4192 emit_int8(0x50 | encode); 4193 } 4194 4195 void Assembler::pushf() { 4196 emit_int8((unsigned char)0x9C); 4197 } 4198 4199 #ifndef _LP64 // no 32bit push/pop on amd64 4200 void Assembler::pushl(Address src) { 4201 // 32-bit only; pushes a 32-bit value from memory 4202 InstructionMark im(this); 4203 prefix(src); 4204 emit_int8((unsigned char)0xFF); 4205 emit_operand(rsi, src); 4206 } 4207 #endif 4208 4209 void Assembler::rcll(Register dst, int imm8) { 4210 assert(isShiftCount(imm8), "illegal shift count"); 4211 int encode = prefix_and_encode(dst->encoding()); 4212 if (imm8 == 1) { 4213 emit_int16((unsigned char)0xD1, (unsigned char)(0xD0 | encode)); 4214 } else { 4215 emit_int24((unsigned char)0xC1, (unsigned char)(0xD0 | encode), imm8); 4216 } 4217 } 4218 4219 void Assembler::rcpps(XMMRegister dst, XMMRegister src) { 4220 NOT_LP64(assert(VM_Version::supports_sse(), "")); 4221 InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false); 4222 int encode = simd_prefix_and_encode(dst, xnoreg, src, VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes); 4223 emit_int16(0x53, (unsigned char)(0xC0 | encode)); 4224 } 4225 4226 void Assembler::rcpss(XMMRegister dst, XMMRegister src) { 4227 NOT_LP64(assert(VM_Version::supports_sse(), "")); 4228 InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false); 4229 int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_F3, VEX_OPCODE_0F, &attributes); 4230 emit_int16(0x53, (unsigned char)(0xC0 | encode)); 4231 } 4232 4233 void Assembler::rdtsc() { 4234 emit_int16((unsigned char)0x0F, (unsigned char)0x31); 4235 } 4236 4237 // copies data from [esi] to [edi] using rcx pointer-sized words 4238 // generic 4239 void Assembler::rep_mov() { 4240 // REP 4241 // LP64:MOVSQ, LP32:MOVSD 4242 LP64_ONLY(emit_int24((unsigned char)0xF3, REX_W, (unsigned char)0xA5);) 4243 NOT_LP64( emit_int16((unsigned char)0xF3, (unsigned char)0xA5);) 4244 }
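// The REP/REPNE string-op emitters here share one shape: an F3/F2 prefix, a REX.W on LP64 where the
// pointer-sized form is wanted, then the one-byte string opcode. A minimal illustrative use (assuming the
// usual "#define __ masm->" shorthand, with rcx/rsi/rdi already set up as the comments here require):
//   __ rep_mov();   // emits F3 48 A5 on LP64, i.e. REP MOVSQ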
4245 4246 // sets rcx bytes at [edi] to the byte value in rax (al) 4247 void Assembler::rep_stosb() { 4248 // REP 4249 // STOSB 4250 LP64_ONLY(emit_int24((unsigned char)0xF3, REX_W, (unsigned char)0xAA);) 4251 NOT_LP64( emit_int16((unsigned char)0xF3, (unsigned char)0xAA);) 4252 } 4253 4254 // sets rcx pointer-sized words at [edi] to the value in rax 4255 // generic 4256 void Assembler::rep_stos() { 4257 // REP 4258 // LP64:STOSQ, LP32:STOSD 4259 LP64_ONLY(emit_int24((unsigned char)0xF3, REX_W, (unsigned char)0xAB);) 4260 NOT_LP64( emit_int16((unsigned char)0xF3, (unsigned char)0xAB);) 4261 } 4262 4263 // scans rcx pointer-sized words at [edi] for an occurrence of rax 4264 // generic 4265 void Assembler::repne_scan() { // repne_scan 4266 // LP64:SCASQ, LP32:SCASD 4267 LP64_ONLY(emit_int24((unsigned char)0xF2, REX_W, (unsigned char)0xAF);) 4268 NOT_LP64( emit_int16((unsigned char)0xF2, (unsigned char)0xAF);) 4269 } 4270 4271 #ifdef _LP64 4272 // scans rcx 4-byte words at [edi] for an occurrence of rax 4273 // generic 4274 void Assembler::repne_scanl() { // repne_scan 4275 // SCASL 4276 emit_int16((unsigned char)0xF2, (unsigned char)0xAF); 4277 } 4278 #endif 4279 4280 void Assembler::ret(int imm16) { 4281 if (imm16 == 0) { 4282 emit_int8((unsigned char)0xC3); 4283 } else { 4284 emit_int8((unsigned char)0xC2); 4285 emit_int16(imm16); 4286 } 4287 } 4288 4289 void Assembler::sahf() { 4290 #ifdef _LP64 4291 // Not supported in 64bit mode 4292 ShouldNotReachHere(); 4293 #endif 4294 emit_int8((unsigned char)0x9E); 4295 } 4296 4297 void Assembler::sarl(Register dst, int imm8) { 4298 int encode = prefix_and_encode(dst->encoding()); 4299 assert(isShiftCount(imm8), "illegal shift count"); 4300 if (imm8 == 1) { 4301 emit_int16((unsigned char)0xD1, (unsigned char)(0xF8 | encode)); 4302 } else { 4303 emit_int24((unsigned char)0xC1, (unsigned char)(0xF8 | encode), imm8); 4304 } 4305 } 4306 4307 void Assembler::sarl(Register dst) { 4308 int encode = prefix_and_encode(dst->encoding()); 4309 emit_int16((unsigned char)0xD3, (unsigned char)(0xF8 | encode)); 4310 } 4311 4312 void Assembler::sbbl(Address dst, int32_t imm32) { 4313 InstructionMark im(this); 4314 prefix(dst); 4315 emit_arith_operand(0x81, rbx, dst, imm32); 4316 } 4317 4318 void Assembler::sbbl(Register dst, int32_t imm32) { 4319 prefix(dst); 4320 emit_arith(0x81, 0xD8, dst, imm32); 4321 } 4322 4323 4324 void Assembler::sbbl(Register dst, Address src) { 4325 InstructionMark im(this); 4326 prefix(src, dst); 4327 emit_int8(0x1B); 4328 emit_operand(dst, src); 4329 } 4330 4331 void Assembler::sbbl(Register dst, Register src) { 4332 (void) prefix_and_encode(dst->encoding(), src->encoding()); 4333 emit_arith(0x1B, 0xC0, dst, src); 4334 } 4335 4336 void Assembler::setb(Condition cc, Register dst) { 4337 assert(0 <= cc && cc < 16, "illegal cc"); 4338 int encode = prefix_and_encode(dst->encoding(), true); 4339 emit_int24(0x0F, (unsigned char)0x90 | cc, (unsigned char)(0xC0 | encode)); 4340 } 4341 4342 void Assembler::palignr(XMMRegister dst, XMMRegister src, int imm8) { 4343 assert(VM_Version::supports_ssse3(), ""); 4344 InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true); 4345 int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes); 4346 emit_int24((unsigned char)0x0F, (unsigned char)(0xC0 | encode), imm8); 4347 } 4348 4349 void Assembler::vpalignr(XMMRegister dst, XMMRegister nds, XMMRegister src, int imm8, int vector_len) { 4350 assert(vector_len == AVX_128bit?
VM_Version::supports_avx() : 4351 vector_len == AVX_256bit? VM_Version::supports_avx2() : 4352 0, ""); 4353 InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true); 4354 int encode = simd_prefix_and_encode(dst, nds, src, VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes); 4355 emit_int24((unsigned char)0x0F, (unsigned char)(0xC0 | encode), imm8); 4356 } 4357 4358 void Assembler::evalignq(XMMRegister dst, XMMRegister nds, XMMRegister src, uint8_t imm8) { 4359 assert(VM_Version::supports_evex(), ""); 4360 InstructionAttr attributes(AVX_512bit, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); 4361 attributes.set_is_evex_instruction(); 4362 int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes); 4363 emit_int24(0x3, (unsigned char)(0xC0 | encode), imm8); 4364 } 4365 4366 void Assembler::pblendw(XMMRegister dst, XMMRegister src, int imm8) { 4367 assert(VM_Version::supports_sse4_1(), ""); 4368 InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false); 4369 int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes); 4370 emit_int24((unsigned char)0x0E, (unsigned char)(0xC0 | encode), imm8); 4371 } 4372 4373 void Assembler::sha1rnds4(XMMRegister dst, XMMRegister src, int imm8) { 4374 assert(VM_Version::supports_sha(), ""); 4375 int encode = rex_prefix_and_encode(dst->encoding(), src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_3A, /* rex_w */ false); 4376 emit_int24((unsigned char)0xCC, (unsigned char)(0xC0 | encode), (unsigned char)imm8); 4377 } 4378 4379 void Assembler::sha1nexte(XMMRegister dst, XMMRegister src) { 4380 assert(VM_Version::supports_sha(), ""); 4381 int encode = rex_prefix_and_encode(dst->encoding(), src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_38, /* rex_w */ false); 4382 emit_int16((unsigned char)0xC8, (unsigned char)(0xC0 | encode)); 4383 } 4384 4385 void Assembler::sha1msg1(XMMRegister dst, XMMRegister src) { 4386 assert(VM_Version::supports_sha(), ""); 4387 int encode = rex_prefix_and_encode(dst->encoding(), src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_38, /* rex_w */ false); 4388 emit_int16((unsigned char)0xC9, (unsigned char)(0xC0 | encode)); 4389 } 4390 4391 void Assembler::sha1msg2(XMMRegister dst, XMMRegister src) { 4392 assert(VM_Version::supports_sha(), ""); 4393 int encode = rex_prefix_and_encode(dst->encoding(), src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_38, /* rex_w */ false); 4394 emit_int16((unsigned char)0xCA, (unsigned char)(0xC0 | encode)); 4395 } 4396 4397 // xmm0 is implicit additional source to this instruction. 
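// (SHA256RNDS2 performs two SHA-256 rounds; the two message-plus-constant words for those rounds are read
// from the implicit xmm0 operand, so callers must load xmm0 before emitting this instruction.)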
4398 void Assembler::sha256rnds2(XMMRegister dst, XMMRegister src) { 4399 assert(VM_Version::supports_sha(), ""); 4400 int encode = rex_prefix_and_encode(dst->encoding(), src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_38, /* rex_w */ false); 4401 emit_int16((unsigned char)0xCB, (unsigned char)(0xC0 | encode)); 4402 } 4403 4404 void Assembler::sha256msg1(XMMRegister dst, XMMRegister src) { 4405 assert(VM_Version::supports_sha(), ""); 4406 int encode = rex_prefix_and_encode(dst->encoding(), src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_38, /* rex_w */ false); 4407 emit_int16((unsigned char)0xCC, (unsigned char)(0xC0 | encode)); 4408 } 4409 4410 void Assembler::sha256msg2(XMMRegister dst, XMMRegister src) { 4411 assert(VM_Version::supports_sha(), ""); 4412 int encode = rex_prefix_and_encode(dst->encoding(), src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_38, /* rex_w */ false); 4413 emit_int16((unsigned char)0xCD, (unsigned char)(0xC0 | encode)); 4414 } 4415 4416 4417 void Assembler::shll(Register dst, int imm8) { 4418 assert(isShiftCount(imm8), "illegal shift count"); 4419 int encode = prefix_and_encode(dst->encoding()); 4420 if (imm8 == 1 ) { 4421 emit_int16((unsigned char)0xD1, (unsigned char)(0xE0 | encode)); 4422 } else { 4423 emit_int24((unsigned char)0xC1, (unsigned char)(0xE0 | encode), imm8); 4424 } 4425 } 4426 4427 void Assembler::shll(Register dst) { 4428 int encode = prefix_and_encode(dst->encoding()); 4429 emit_int16((unsigned char)0xD3, (unsigned char)(0xE0 | encode)); 4430 } 4431 4432 void Assembler::shrl(Register dst, int imm8) { 4433 assert(isShiftCount(imm8), "illegal shift count"); 4434 int encode = prefix_and_encode(dst->encoding()); 4435 emit_int24((unsigned char)0xC1, (unsigned char)(0xE8 | encode), imm8); 4436 } 4437 4438 void Assembler::shrl(Register dst) { 4439 int encode = prefix_and_encode(dst->encoding()); 4440 emit_int16((unsigned char)0xD3, (unsigned char)(0xE8 | encode)); 4441 } 4442 4443 void Assembler::shldl(Register dst, Register src) { 4444 int encode = prefix_and_encode(src->encoding(), dst->encoding()); 4445 emit_int24(0x0F, (unsigned char)0xA5, (unsigned char)(0xC0 | encode)); 4446 } 4447 4448 void Assembler::shldl(Register dst, Register src, int8_t imm8) { 4449 int encode = prefix_and_encode(src->encoding(), dst->encoding()); 4450 emit_int32(0x0F, (unsigned char)0xA4, (unsigned char)(0xC0 | encode), imm8); 4451 } 4452 4453 void Assembler::shrdl(Register dst, Register src) { 4454 int encode = prefix_and_encode(src->encoding(), dst->encoding()); 4455 emit_int24(0x0F, (unsigned char)0xAD, (unsigned char)(0xC0 | encode)); 4456 } 4457 4458 void Assembler::shrdl(Register dst, Register src, int8_t imm8) { 4459 int encode = prefix_and_encode(src->encoding(), dst->encoding()); 4460 emit_int32(0x0F, (unsigned char)0xAC, (unsigned char)(0xC0 | encode), imm8); 4461 } 4462 4463 // copies a single word from [esi] to [edi] 4464 void Assembler::smovl() { 4465 emit_int8((unsigned char)0xA5); 4466 } 4467 4468 void Assembler::roundsd(XMMRegister dst, XMMRegister src, int32_t rmode) { 4469 assert(VM_Version::supports_sse4_1(), ""); 4470 InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false); 4471 int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes); 4472 emit_int24(0x0B, (unsigned char)(0xC0 | encode), (unsigned char)rmode); 4473 } 4474 4475 void Assembler::roundsd(XMMRegister dst, Address src, int32_t rmode) { 4476 assert(VM_Version::supports_sse4_1(), ""); 4477 
InstructionMark im(this); 4478 InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false); 4479 simd_prefix(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes); 4480 emit_int8(0x0B); 4481 emit_operand(dst, src); 4482 emit_int8((unsigned char)rmode); 4483 } 4484 4485 void Assembler::sqrtsd(XMMRegister dst, XMMRegister src) { 4486 NOT_LP64(assert(VM_Version::supports_sse2(), "")); 4487 InstructionAttr attributes(AVX_128bit, /* rex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false); 4488 attributes.set_rex_vex_w_reverted(); 4489 int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_F2, VEX_OPCODE_0F, &attributes); 4490 emit_int16(0x51, (unsigned char)(0xC0 | encode)); 4491 } 4492 4493 void Assembler::sqrtsd(XMMRegister dst, Address src) { 4494 NOT_LP64(assert(VM_Version::supports_sse2(), "")); 4495 InstructionMark im(this); 4496 InstructionAttr attributes(AVX_128bit, /* rex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false); 4497 attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_64bit); 4498 attributes.set_rex_vex_w_reverted(); 4499 simd_prefix(dst, dst, src, VEX_SIMD_F2, VEX_OPCODE_0F, &attributes); 4500 emit_int8(0x51); 4501 emit_operand(dst, src); 4502 } 4503 4504 void Assembler::sqrtss(XMMRegister dst, XMMRegister src) { 4505 NOT_LP64(assert(VM_Version::supports_sse(), "")); 4506 InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false); 4507 int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_F3, VEX_OPCODE_0F, &attributes); 4508 emit_int16(0x51, (unsigned char)(0xC0 | encode)); 4509 } 4510 4511 void Assembler::std() { 4512 emit_int8((unsigned char)0xFD); 4513 } 4514 4515 void Assembler::sqrtss(XMMRegister dst, Address src) { 4516 NOT_LP64(assert(VM_Version::supports_sse(), "")); 4517 InstructionMark im(this); 4518 InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false); 4519 attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_32bit); 4520 simd_prefix(dst, dst, src, VEX_SIMD_F3, VEX_OPCODE_0F, &attributes); 4521 emit_int8(0x51); 4522 emit_operand(dst, src); 4523 } 4524 4525 void Assembler::stmxcsr( Address dst) { 4526 if (UseAVX > 0 ) { 4527 assert(VM_Version::supports_avx(), ""); 4528 InstructionMark im(this); 4529 InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false); 4530 vex_prefix(dst, 0, 0, VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes); 4531 emit_int8((unsigned char)0xAE); 4532 emit_operand(as_Register(3), dst); 4533 } else { 4534 NOT_LP64(assert(VM_Version::supports_sse(), "")); 4535 InstructionMark im(this); 4536 prefix(dst); 4537 emit_int16(0x0F, (unsigned char)0xAE); 4538 emit_operand(as_Register(3), dst); 4539 } 4540 } 4541 4542 void Assembler::subl(Address dst, int32_t imm32) { 4543 InstructionMark im(this); 4544 prefix(dst); 4545 emit_arith_operand(0x81, rbp, dst, imm32); 4546 } 4547 4548 void Assembler::subl(Address dst, Register src) { 4549 InstructionMark im(this); 4550 prefix(dst, src); 4551 emit_int8(0x29); 4552 emit_operand(src, dst); 4553 } 4554 4555 void Assembler::subl(Register dst, int32_t imm32) { 4556 prefix(dst); 4557 emit_arith(0x81, 0xE8, dst, imm32); 4558 } 4559 4560 // Force 
generation of a 4 byte immediate value even if it fits into 8bit 4561 void Assembler::subl_imm32(Register dst, int32_t imm32) { 4562 prefix(dst); 4563 emit_arith_imm32(0x81, 0xE8, dst, imm32); 4564 } 4565 4566 void Assembler::subl(Register dst, Address src) { 4567 InstructionMark im(this); 4568 prefix(src, dst); 4569 emit_int8(0x2B); 4570 emit_operand(dst, src); 4571 } 4572 4573 void Assembler::subl(Register dst, Register src) { 4574 (void) prefix_and_encode(dst->encoding(), src->encoding()); 4575 emit_arith(0x2B, 0xC0, dst, src); 4576 } 4577 4578 void Assembler::subsd(XMMRegister dst, XMMRegister src) { 4579 NOT_LP64(assert(VM_Version::supports_sse2(), "")); 4580 InstructionAttr attributes(AVX_128bit, /* rex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false); 4581 attributes.set_rex_vex_w_reverted(); 4582 int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_F2, VEX_OPCODE_0F, &attributes); 4583 emit_int16(0x5C, (unsigned char)(0xC0 | encode)); 4584 } 4585 4586 void Assembler::subsd(XMMRegister dst, Address src) { 4587 NOT_LP64(assert(VM_Version::supports_sse2(), "")); 4588 InstructionMark im(this); 4589 InstructionAttr attributes(AVX_128bit, /* rex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false); 4590 attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_64bit); 4591 attributes.set_rex_vex_w_reverted(); 4592 simd_prefix(dst, dst, src, VEX_SIMD_F2, VEX_OPCODE_0F, &attributes); 4593 emit_int8(0x5C); 4594 emit_operand(dst, src); 4595 } 4596 4597 void Assembler::subss(XMMRegister dst, XMMRegister src) { 4598 NOT_LP64(assert(VM_Version::supports_sse(), "")); 4599 InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true , /* uses_vl */ false); 4600 int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_F3, VEX_OPCODE_0F, &attributes); 4601 emit_int16(0x5C, (unsigned char)(0xC0 | encode)); 4602 } 4603 4604 void Assembler::subss(XMMRegister dst, Address src) { 4605 NOT_LP64(assert(VM_Version::supports_sse(), "")); 4606 InstructionMark im(this); 4607 InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false); 4608 attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_32bit); 4609 simd_prefix(dst, dst, src, VEX_SIMD_F3, VEX_OPCODE_0F, &attributes); 4610 emit_int8(0x5C); 4611 emit_operand(dst, src); 4612 } 4613 4614 void Assembler::testb(Register dst, int imm8) { 4615 NOT_LP64(assert(dst->has_byte_register(), "must have byte register")); 4616 (void) prefix_and_encode(dst->encoding(), true); 4617 emit_arith_b(0xF6, 0xC0, dst, imm8); 4618 } 4619 4620 void Assembler::testb(Address dst, int imm8) { 4621 InstructionMark im(this); 4622 prefix(dst); 4623 emit_int8((unsigned char)0xF6); 4624 emit_operand(rax, dst, 1); 4625 emit_int8(imm8); 4626 } 4627 4628 void Assembler::testl(Register dst, int32_t imm32) { 4629 // not using emit_arith because test 4630 // doesn't support sign-extension of 4631 // 8bit operands 4632 int encode = dst->encoding(); 4633 if (encode == 0) { 4634 emit_int8((unsigned char)0xA9); 4635 } else { 4636 encode = prefix_and_encode(encode); 4637 emit_int16((unsigned char)0xF7, (unsigned char)(0xC0 | encode)); 4638 } 4639 emit_int32(imm32); 4640 } 4641 4642 void Assembler::testl(Register dst, Register src) { 4643 (void) prefix_and_encode(dst->encoding(), src->encoding()); 4644 
emit_arith(0x85, 0xC0, dst, src); 4645 } 4646 4647 void Assembler::testl(Register dst, Address src) { 4648 InstructionMark im(this); 4649 prefix(src, dst); 4650 emit_int8((unsigned char)0x85); 4651 emit_operand(dst, src); 4652 } 4653 4654 void Assembler::tzcntl(Register dst, Register src) { 4655 assert(VM_Version::supports_bmi1(), "tzcnt instruction not supported"); 4656 emit_int8((unsigned char)0xF3); 4657 int encode = prefix_and_encode(dst->encoding(), src->encoding()); 4658 emit_int24(0x0F, (unsigned char)0xBC, (unsigned char)0xC0 | encode); 4659 } 4660 4661 void Assembler::tzcntq(Register dst, Register src) { 4662 assert(VM_Version::supports_bmi1(), "tzcnt instruction not supported"); 4663 emit_int8((unsigned char)0xF3); 4664 int encode = prefixq_and_encode(dst->encoding(), src->encoding()); 4665 emit_int24(0x0F, (unsigned char)0xBC, (unsigned char)(0xC0 | encode)); 4666 } 4667 4668 void Assembler::ucomisd(XMMRegister dst, Address src) { 4669 NOT_LP64(assert(VM_Version::supports_sse2(), "")); 4670 InstructionMark im(this); 4671 InstructionAttr attributes(AVX_128bit, /* rex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false); 4672 attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_64bit); 4673 attributes.set_rex_vex_w_reverted(); 4674 simd_prefix(dst, xnoreg, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes); 4675 emit_int8(0x2E); 4676 emit_operand(dst, src); 4677 } 4678 4679 void Assembler::ucomisd(XMMRegister dst, XMMRegister src) { 4680 NOT_LP64(assert(VM_Version::supports_sse2(), "")); 4681 InstructionAttr attributes(AVX_128bit, /* rex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false); 4682 attributes.set_rex_vex_w_reverted(); 4683 int encode = simd_prefix_and_encode(dst, xnoreg, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes); 4684 emit_int16(0x2E, (unsigned char)(0xC0 | encode)); 4685 } 4686 4687 void Assembler::ucomiss(XMMRegister dst, Address src) { 4688 NOT_LP64(assert(VM_Version::supports_sse(), "")); 4689 InstructionMark im(this); 4690 InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false); 4691 attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_32bit); 4692 simd_prefix(dst, xnoreg, src, VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes); 4693 emit_int8(0x2E); 4694 emit_operand(dst, src); 4695 } 4696 4697 void Assembler::ucomiss(XMMRegister dst, XMMRegister src) { 4698 NOT_LP64(assert(VM_Version::supports_sse(), "")); 4699 InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false); 4700 int encode = simd_prefix_and_encode(dst, xnoreg, src, VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes); 4701 emit_int16(0x2E, (unsigned char)(0xC0 | encode)); 4702 } 4703 4704 void Assembler::xabort(int8_t imm8) { 4705 emit_int24((unsigned char)0xC6, (unsigned char)0xF8, (unsigned char)(imm8 & 0xFF)); 4706 } 4707 4708 void Assembler::xaddb(Address dst, Register src) { 4709 InstructionMark im(this); 4710 prefix(dst, src, true); 4711 emit_int16(0x0F, (unsigned char)0xC0); 4712 emit_operand(src, dst); 4713 } 4714 4715 void Assembler::xaddw(Address dst, Register src) { 4716 InstructionMark im(this); 4717 emit_int8(0x66); 4718 prefix(dst, src); 4719 emit_int16(0x0F, (unsigned char)0xC1); 4720 emit_operand(src, dst); 4721 } 4722 4723 void Assembler::xaddl(Address dst, Register src) { 4724 
InstructionMark im(this); 4725 prefix(dst, src); 4726 emit_int16(0x0F, (unsigned char)0xC1); 4727 emit_operand(src, dst); 4728 } 4729 4730 void Assembler::xbegin(Label& abort, relocInfo::relocType rtype) { 4731 InstructionMark im(this); 4732 relocate(rtype); 4733 if (abort.is_bound()) { 4734 address entry = target(abort); 4735 assert(entry != NULL, "abort entry NULL"); 4736 intptr_t offset = entry - pc(); 4737 emit_int16((unsigned char)0xC7, (unsigned char)0xF8); 4738 emit_int32(offset - 6); // 2 opcode bytes + 4 displacement bytes 4739 } else { 4740 abort.add_patch_at(code(), locator()); 4741 emit_int16((unsigned char)0xC7, (unsigned char)0xF8); 4742 emit_int32(0); 4743 } 4744 } 4745 4746 void Assembler::xchgb(Register dst, Address src) { // xchg 4747 InstructionMark im(this); 4748 prefix(src, dst, true); 4749 emit_int8((unsigned char)0x86); 4750 emit_operand(dst, src); 4751 } 4752 4753 void Assembler::xchgw(Register dst, Address src) { // xchg 4754 InstructionMark im(this); 4755 emit_int8(0x66); 4756 prefix(src, dst); 4757 emit_int8((unsigned char)0x87); 4758 emit_operand(dst, src); 4759 } 4760 4761 void Assembler::xchgl(Register dst, Address src) { // xchg 4762 InstructionMark im(this); 4763 prefix(src, dst); 4764 emit_int8((unsigned char)0x87); 4765 emit_operand(dst, src); 4766 } 4767 4768 void Assembler::xchgl(Register dst, Register src) { 4769 int encode = prefix_and_encode(dst->encoding(), src->encoding()); 4770 emit_int16((unsigned char)0x87, (unsigned char)(0xC0 | encode)); 4771 } 4772 4773 void Assembler::xend() { 4774 emit_int24((unsigned char)0x0F, (unsigned char)0x01, (unsigned char)0xD5); 4775 } 4776 4777 void Assembler::xgetbv() { 4778 emit_int24(0x0F, 0x01, (unsigned char)0xD0); 4779 } 4780 4781 void Assembler::xorl(Register dst, int32_t imm32) { 4782 prefix(dst); 4783 emit_arith(0x81, 0xF0, dst, imm32); 4784 } 4785 4786 void Assembler::xorl(Register dst, Address src) { 4787 InstructionMark im(this); 4788 prefix(src, dst); 4789 emit_int8(0x33); 4790 emit_operand(dst, src); 4791 } 4792 4793 void Assembler::xorl(Register dst, Register src) { 4794 (void) prefix_and_encode(dst->encoding(), src->encoding()); 4795 emit_arith(0x33, 0xC0, dst, src); 4796 } 4797 4798 void Assembler::xorb(Register dst, Address src) { 4799 InstructionMark im(this); 4800 prefix(src, dst); 4801 emit_int8(0x32); 4802 emit_operand(dst, src); 4803 } 4804 4805 // AVX three-operand scalar floating-point arithmetic instructions 4806 4807 void Assembler::vaddsd(XMMRegister dst, XMMRegister nds, Address src) { 4808 assert(VM_Version::supports_avx(), ""); 4809 InstructionMark im(this); 4810 InstructionAttr attributes(AVX_128bit, /* vex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false); 4811 attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_64bit); 4812 attributes.set_rex_vex_w_reverted(); 4813 vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_F2, VEX_OPCODE_0F, &attributes); 4814 emit_int8(0x58); 4815 emit_operand(dst, src); 4816 } 4817 4818 void Assembler::vaddsd(XMMRegister dst, XMMRegister nds, XMMRegister src) { 4819 assert(VM_Version::supports_avx(), ""); 4820 InstructionAttr attributes(AVX_128bit, /* vex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false); 4821 attributes.set_rex_vex_w_reverted(); 4822 int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_F2, VEX_OPCODE_0F, &attributes); 4823 emit_int16(0x58, (unsigned char)(0xC0 |
encode)); 4824 } 4825 4826 void Assembler::vaddss(XMMRegister dst, XMMRegister nds, Address src) { 4827 assert(VM_Version::supports_avx(), ""); 4828 InstructionMark im(this); 4829 InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false); 4830 attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_32bit); 4831 vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_F3, VEX_OPCODE_0F, &attributes); 4832 emit_int8(0x58); 4833 emit_operand(dst, src); 4834 } 4835 4836 void Assembler::vaddss(XMMRegister dst, XMMRegister nds, XMMRegister src) { 4837 assert(VM_Version::supports_avx(), ""); 4838 InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false); 4839 int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_F3, VEX_OPCODE_0F, &attributes); 4840 emit_int16(0x58, (unsigned char)(0xC0 | encode)); 4841 } 4842 4843 void Assembler::vdivsd(XMMRegister dst, XMMRegister nds, Address src) { 4844 assert(VM_Version::supports_avx(), ""); 4845 InstructionMark im(this); 4846 InstructionAttr attributes(AVX_128bit, /* vex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false); 4847 attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_64bit); 4848 attributes.set_rex_vex_w_reverted(); 4849 vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_F2, VEX_OPCODE_0F, &attributes); 4850 emit_int8(0x5E); 4851 emit_operand(dst, src); 4852 } 4853 4854 void Assembler::vdivsd(XMMRegister dst, XMMRegister nds, XMMRegister src) { 4855 assert(VM_Version::supports_avx(), ""); 4856 InstructionAttr attributes(AVX_128bit, /* vex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false); 4857 attributes.set_rex_vex_w_reverted(); 4858 int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_F2, VEX_OPCODE_0F, &attributes); 4859 emit_int16(0x5E, (unsigned char)(0xC0 | encode)); 4860 } 4861 4862 void Assembler::vdivss(XMMRegister dst, XMMRegister nds, Address src) { 4863 assert(VM_Version::supports_avx(), ""); 4864 InstructionMark im(this); 4865 InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false); 4866 attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_32bit); 4867 vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_F3, VEX_OPCODE_0F, &attributes); 4868 emit_int8(0x5E); 4869 emit_operand(dst, src); 4870 } 4871 4872 void Assembler::vdivss(XMMRegister dst, XMMRegister nds, XMMRegister src) { 4873 assert(VM_Version::supports_avx(), ""); 4874 InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false); 4875 int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_F3, VEX_OPCODE_0F, &attributes); 4876 emit_int16(0x5E, (unsigned char)(0xC0 | encode)); 4877 } 4878 4879 void Assembler::vfmadd231sd(XMMRegister dst, XMMRegister src1, XMMRegister src2) { 4880 assert(VM_Version::supports_fma(), ""); 4881 InstructionAttr attributes(AVX_128bit, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false); 4882 int encode = vex_prefix_and_encode(dst->encoding(), src1->encoding(), src2->encoding(), VEX_SIMD_66, 
VEX_OPCODE_0F_38, &attributes); 4883 emit_int16((unsigned char)0xB9, (unsigned char)(0xC0 | encode)); 4884 } 4885 4886 void Assembler::vfmadd231ss(XMMRegister dst, XMMRegister src1, XMMRegister src2) { 4887 assert(VM_Version::supports_fma(), ""); 4888 InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false); 4889 int encode = vex_prefix_and_encode(dst->encoding(), src1->encoding(), src2->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes); 4890 emit_int16((unsigned char)0xB9, (unsigned char)(0xC0 | encode)); 4891 } 4892 4893 void Assembler::vmulsd(XMMRegister dst, XMMRegister nds, Address src) { 4894 assert(VM_Version::supports_avx(), ""); 4895 InstructionMark im(this); 4896 InstructionAttr attributes(AVX_128bit, /* vex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false); 4897 attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_64bit); 4898 attributes.set_rex_vex_w_reverted(); 4899 vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_F2, VEX_OPCODE_0F, &attributes); 4900 emit_int8(0x59); 4901 emit_operand(dst, src); 4902 } 4903 4904 void Assembler::vmulsd(XMMRegister dst, XMMRegister nds, XMMRegister src) { 4905 assert(VM_Version::supports_avx(), ""); 4906 InstructionAttr attributes(AVX_128bit, /* vex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false); 4907 attributes.set_rex_vex_w_reverted(); 4908 int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_F2, VEX_OPCODE_0F, &attributes); 4909 emit_int16(0x59, (unsigned char)(0xC0 | encode)); 4910 } 4911 4912 void Assembler::vmulss(XMMRegister dst, XMMRegister nds, Address src) { 4913 assert(VM_Version::supports_avx(), ""); 4914 InstructionMark im(this); 4915 InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false); 4916 attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_32bit); 4917 vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_F3, VEX_OPCODE_0F, &attributes); 4918 emit_int8(0x59); 4919 emit_operand(dst, src); 4920 } 4921 4922 void Assembler::vmulss(XMMRegister dst, XMMRegister nds, XMMRegister src) { 4923 assert(VM_Version::supports_avx(), ""); 4924 InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false); 4925 int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_F3, VEX_OPCODE_0F, &attributes); 4926 emit_int16(0x59, (unsigned char)(0xC0 | encode)); 4927 } 4928 4929 void Assembler::vsubsd(XMMRegister dst, XMMRegister nds, Address src) { 4930 assert(VM_Version::supports_avx(), ""); 4931 InstructionMark im(this); 4932 InstructionAttr attributes(AVX_128bit, /* vex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false); 4933 attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_64bit); 4934 attributes.set_rex_vex_w_reverted(); 4935 vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_F2, VEX_OPCODE_0F, &attributes); 4936 emit_int8(0x5C); 4937 emit_operand(dst, src); 4938 } 4939 4940 void Assembler::vsubsd(XMMRegister dst, XMMRegister nds, XMMRegister src) { 4941 assert(VM_Version::supports_avx(), ""); 4942 InstructionAttr attributes(AVX_128bit, /* vex_w */ 
VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false); 4943 attributes.set_rex_vex_w_reverted(); 4944 int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_F2, VEX_OPCODE_0F, &attributes); 4945 emit_int16(0x5C, (unsigned char)(0xC0 | encode)); 4946 } 4947 4948 void Assembler::vsubss(XMMRegister dst, XMMRegister nds, Address src) { 4949 assert(VM_Version::supports_avx(), ""); 4950 InstructionMark im(this); 4951 InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false); 4952 attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_32bit); 4953 vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_F3, VEX_OPCODE_0F, &attributes); 4954 emit_int8(0x5C); 4955 emit_operand(dst, src); 4956 } 4957 4958 void Assembler::vsubss(XMMRegister dst, XMMRegister nds, XMMRegister src) { 4959 assert(VM_Version::supports_avx(), ""); 4960 InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false); 4961 int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_F3, VEX_OPCODE_0F, &attributes); 4962 emit_int16(0x5C, (unsigned char)(0xC0 | encode)); 4963 } 4964 4965 //====================VECTOR ARITHMETIC===================================== 4966 4967 // Floating-point vector arithmetic 4968 4969 void Assembler::addpd(XMMRegister dst, XMMRegister src) { 4970 NOT_LP64(assert(VM_Version::supports_sse2(), "")); 4971 InstructionAttr attributes(AVX_128bit, /* rex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); 4972 attributes.set_rex_vex_w_reverted(); 4973 int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes); 4974 emit_int16(0x58, (unsigned char)(0xC0 | encode)); 4975 } 4976 4977 void Assembler::addpd(XMMRegister dst, Address src) { 4978 NOT_LP64(assert(VM_Version::supports_sse2(), "")); 4979 InstructionMark im(this); 4980 InstructionAttr attributes(AVX_128bit, /* rex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); 4981 attributes.set_rex_vex_w_reverted(); 4982 attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_64bit); 4983 simd_prefix(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes); 4984 emit_int8(0x58); 4985 emit_operand(dst, src); 4986 } 4987 4988 4989 void Assembler::addps(XMMRegister dst, XMMRegister src) { 4990 NOT_LP64(assert(VM_Version::supports_sse2(), "")); 4991 InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); 4992 int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes); 4993 emit_int16(0x58, (unsigned char)(0xC0 | encode)); 4994 } 4995 4996 void Assembler::vaddpd(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { 4997 assert(VM_Version::supports_avx(), ""); 4998 InstructionAttr attributes(vector_len, /* vex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); 4999 attributes.set_rex_vex_w_reverted(); 5000 int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes); 5001 emit_int16(0x58, (unsigned char)(0xC0 | encode)); 5002 } 5003 5004 void Assembler::vaddps(XMMRegister dst,
XMMRegister nds, XMMRegister src, int vector_len) { 5005 assert(VM_Version::supports_avx(), ""); 5006 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); 5007 int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes); 5008 emit_int16(0x58, (unsigned char)(0xC0 | encode)); 5009 } 5010 5011 void Assembler::vaddpd(XMMRegister dst, XMMRegister nds, Address src, int vector_len) { 5012 assert(VM_Version::supports_avx(), ""); 5013 InstructionMark im(this); 5014 InstructionAttr attributes(vector_len, /* vex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); 5015 attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_64bit); 5016 attributes.set_rex_vex_w_reverted(); 5017 vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes); 5018 emit_int8(0x58); 5019 emit_operand(dst, src); 5020 } 5021 5022 void Assembler::vaddps(XMMRegister dst, XMMRegister nds, Address src, int vector_len) { 5023 assert(VM_Version::supports_avx(), ""); 5024 InstructionMark im(this); 5025 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); 5026 attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_32bit); 5027 vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes); 5028 emit_int8(0x58); 5029 emit_operand(dst, src); 5030 } 5031 5032 void Assembler::subpd(XMMRegister dst, XMMRegister src) { 5033 NOT_LP64(assert(VM_Version::supports_sse2(), "")); 5034 InstructionAttr attributes(AVX_128bit, /* rex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); 5035 attributes.set_rex_vex_w_reverted(); 5036 int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes); 5037 emit_int16(0x5C, (unsigned char)(0xC0 | encode)); 5038 } 5039 5040 void Assembler::subps(XMMRegister dst, XMMRegister src) { 5041 NOT_LP64(assert(VM_Version::supports_sse2(), "")); 5042 InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); 5043 int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes); 5044 emit_int16(0x5C, (unsigned char)(0xC0 | encode)); 5045 } 5046 5047 void Assembler::vsubpd(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { 5048 assert(VM_Version::supports_avx(), ""); 5049 InstructionAttr attributes(vector_len, /* vex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); 5050 attributes.set_rex_vex_w_reverted(); 5051 int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes); 5052 emit_int16(0x5C, (unsigned char)(0xC0 | encode)); 5053 } 5054 5055 void Assembler::vsubps(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { 5056 assert(VM_Version::supports_avx(), ""); 5057 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); 5058 int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes); 5059 emit_int16(0x5C, (unsigned char)(0xC0 | encode)); 5060 } 5061 5062 void 
Assembler::vsubpd(XMMRegister dst, XMMRegister nds, Address src, int vector_len) { 5063 assert(VM_Version::supports_avx(), ""); 5064 InstructionMark im(this); 5065 InstructionAttr attributes(vector_len, /* vex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); 5066 attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_64bit); 5067 attributes.set_rex_vex_w_reverted(); 5068 vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes); 5069 emit_int8(0x5C); 5070 emit_operand(dst, src); 5071 } 5072 5073 void Assembler::vsubps(XMMRegister dst, XMMRegister nds, Address src, int vector_len) { 5074 assert(VM_Version::supports_avx(), ""); 5075 InstructionMark im(this); 5076 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); 5077 attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_32bit); 5078 vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes); 5079 emit_int8(0x5C); 5080 emit_operand(dst, src); 5081 } 5082 5083 void Assembler::mulpd(XMMRegister dst, XMMRegister src) { 5084 NOT_LP64(assert(VM_Version::supports_sse2(), "")); 5085 InstructionAttr attributes(AVX_128bit, /* rex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); 5086 attributes.set_rex_vex_w_reverted(); 5087 int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes); 5088 emit_int16(0x59, (unsigned char)(0xC0 | encode)); 5089 } 5090 5091 void Assembler::mulpd(XMMRegister dst, Address src) { 5092 NOT_LP64(assert(VM_Version::supports_sse2(), "")); 5093 InstructionMark im(this); 5094 InstructionAttr attributes(AVX_128bit, /* vex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); 5095 attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_64bit); 5096 attributes.set_rex_vex_w_reverted(); 5097 simd_prefix(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes); 5098 emit_int8(0x59); 5099 emit_operand(dst, src); 5100 } 5101 5102 void Assembler::mulps(XMMRegister dst, XMMRegister src) { 5103 NOT_LP64(assert(VM_Version::supports_sse2(), "")); 5104 InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); 5105 int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes); 5106 emit_int16(0x59, (unsigned char)(0xC0 | encode)); 5107 } 5108 5109 void Assembler::vmulpd(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { 5110 assert(VM_Version::supports_avx(), ""); 5111 InstructionAttr attributes(vector_len, /* vex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); 5112 attributes.set_rex_vex_w_reverted(); 5113 int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes); 5114 emit_int16(0x59, (unsigned char)(0xC0 | encode)); 5115 } 5116 5117 void Assembler::vmulps(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { 5118 assert(VM_Version::supports_avx(), ""); 5119 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); 5120 int encode = vex_prefix_and_encode(dst->encoding(), 
nds->encoding(), src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes); 5121 emit_int16(0x59, (unsigned char)(0xC0 | encode)); 5122 } 5123 5124 void Assembler::vmulpd(XMMRegister dst, XMMRegister nds, Address src, int vector_len) { 5125 assert(VM_Version::supports_avx(), ""); 5126 InstructionMark im(this); 5127 InstructionAttr attributes(vector_len, /* vex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); 5128 attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_64bit); 5129 attributes.set_rex_vex_w_reverted(); 5130 vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes); 5131 emit_int8(0x59); 5132 emit_operand(dst, src); 5133 } 5134 5135 void Assembler::vmulps(XMMRegister dst, XMMRegister nds, Address src, int vector_len) { 5136 assert(VM_Version::supports_avx(), ""); 5137 InstructionMark im(this); 5138 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); 5139 attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_32bit); 5140 vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes); 5141 emit_int8(0x59); 5142 emit_operand(dst, src); 5143 } 5144 5145 void Assembler::vfmadd231pd(XMMRegister dst, XMMRegister src1, XMMRegister src2, int vector_len) { 5146 assert(VM_Version::supports_fma(), ""); 5147 InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); 5148 int encode = vex_prefix_and_encode(dst->encoding(), src1->encoding(), src2->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes); 5149 emit_int16((unsigned char)0xB8, (unsigned char)(0xC0 | encode)); 5150 } 5151 5152 void Assembler::vfmadd231ps(XMMRegister dst, XMMRegister src1, XMMRegister src2, int vector_len) { 5153 assert(VM_Version::supports_fma(), ""); 5154 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); 5155 int encode = vex_prefix_and_encode(dst->encoding(), src1->encoding(), src2->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes); 5156 emit_int16((unsigned char)0xB8, (unsigned char)(0xC0 | encode)); 5157 } 5158 5159 void Assembler::vfmadd231pd(XMMRegister dst, XMMRegister src1, Address src2, int vector_len) { 5160 assert(VM_Version::supports_fma(), ""); 5161 InstructionMark im(this); 5162 InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); 5163 attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_64bit); 5164 vex_prefix(src2, src1->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes); 5165 emit_int8((unsigned char)0xB8); 5166 emit_operand(dst, src2); 5167 } 5168 5169 void Assembler::vfmadd231ps(XMMRegister dst, XMMRegister src1, Address src2, int vector_len) { 5170 assert(VM_Version::supports_fma(), ""); 5171 InstructionMark im(this); 5172 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); 5173 attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_32bit); 5174 vex_prefix(src2, src1->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes); 5175 emit_int8((unsigned char)0xB8); 5176 emit_operand(dst, src2); 5177 } 5178 5179 void 
Assembler::divpd(XMMRegister dst, XMMRegister src) { 5180 NOT_LP64(assert(VM_Version::supports_sse2(), "")); 5181 InstructionAttr attributes(AVX_128bit, /* rex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); 5182 attributes.set_rex_vex_w_reverted(); 5183 int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes); 5184 emit_int16(0x5E, (unsigned char)(0xC0 | encode)); 5185 } 5186 5187 void Assembler::divps(XMMRegister dst, XMMRegister src) { 5188 NOT_LP64(assert(VM_Version::supports_sse2(), "")); 5189 InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); 5190 int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes); 5191 emit_int16(0x5E, (unsigned char)(0xC0 | encode)); 5192 } 5193 5194 void Assembler::vdivpd(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { 5195 assert(VM_Version::supports_avx(), ""); 5196 InstructionAttr attributes(vector_len, /* vex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); 5197 attributes.set_rex_vex_w_reverted(); 5198 int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes); 5199 emit_int16(0x5E, (unsigned char)(0xC0 | encode)); 5200 } 5201 5202 void Assembler::vdivps(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { 5203 assert(VM_Version::supports_avx(), ""); 5204 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); 5205 int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes); 5206 emit_int16(0x5E, (unsigned char)(0xC0 | encode)); 5207 } 5208 5209 void Assembler::vdivpd(XMMRegister dst, XMMRegister nds, Address src, int vector_len) { 5210 assert(VM_Version::supports_avx(), ""); 5211 InstructionMark im(this); 5212 InstructionAttr attributes(vector_len, /* vex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); 5213 attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_64bit); 5214 attributes.set_rex_vex_w_reverted(); 5215 vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes); 5216 emit_int8(0x5E); 5217 emit_operand(dst, src); 5218 } 5219 5220 void Assembler::vdivps(XMMRegister dst, XMMRegister nds, Address src, int vector_len) { 5221 assert(VM_Version::supports_avx(), ""); 5222 InstructionMark im(this); 5223 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); 5224 attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_32bit); 5225 vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes); 5226 emit_int8(0x5E); 5227 emit_operand(dst, src); 5228 } 5229 5230 void Assembler::vroundpd(XMMRegister dst, XMMRegister src, int32_t rmode, int vector_len) { 5231 assert(VM_Version::supports_avx(), ""); 5232 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false); 5233 int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes); 5234 emit_int24(0x09, (unsigned char)(0xC0 | encode), 
(unsigned char)(rmode)); 5235 } 5236 5237 void Assembler::vroundpd(XMMRegister dst, Address src, int32_t rmode, int vector_len) { 5238 assert(VM_Version::supports_avx(), ""); 5239 InstructionMark im(this); 5240 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false); 5241 vex_prefix(src, 0, dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes); 5242 emit_int8(0x09); 5243 emit_operand(dst, src); 5244 emit_int8((unsigned char)(rmode)); 5245 } 5246 5247 void Assembler::vrndscalepd(XMMRegister dst, XMMRegister src, int32_t rmode, int vector_len) { 5248 assert(VM_Version::supports_evex(), "requires EVEX support"); 5249 InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); 5250 attributes.set_is_evex_instruction(); 5251 int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes); 5252 emit_int24((unsigned char)0x09, (unsigned char)(0xC0 | encode), (unsigned char)(rmode)); 5253 } 5254 5255 void Assembler::vrndscalepd(XMMRegister dst, Address src, int32_t rmode, int vector_len) { 5256 assert(VM_Version::supports_evex(), "requires EVEX support"); 5257 assert(dst != xnoreg, "sanity"); 5258 InstructionMark im(this); 5259 InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); 5260 attributes.set_is_evex_instruction(); 5261 attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_64bit); 5262 vex_prefix(src, 0, dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes); 5263 emit_int8((unsigned char)0x09); 5264 emit_operand(dst, src); 5265 emit_int8((unsigned char)(rmode)); 5266 } 5267 5268 5269 void Assembler::vsqrtpd(XMMRegister dst, XMMRegister src, int vector_len) { 5270 assert(VM_Version::supports_avx(), ""); 5271 InstructionAttr attributes(vector_len, /* vex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); 5272 attributes.set_rex_vex_w_reverted(); 5273 int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes); 5274 emit_int16(0x51, (unsigned char)(0xC0 | encode)); 5275 } 5276 5277 void Assembler::vsqrtpd(XMMRegister dst, Address src, int vector_len) { 5278 assert(VM_Version::supports_avx(), ""); 5279 InstructionMark im(this); 5280 InstructionAttr attributes(vector_len, /* vex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); 5281 attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_64bit); 5282 attributes.set_rex_vex_w_reverted(); 5283 vex_prefix(src, 0, dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes); 5284 emit_int8(0x51); 5285 emit_operand(dst, src); 5286 } 5287 5288 void Assembler::vsqrtps(XMMRegister dst, XMMRegister src, int vector_len) { 5289 assert(VM_Version::supports_avx(), ""); 5290 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); 5291 int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes); 5292 emit_int16(0x51, (unsigned char)(0xC0 | encode)); 5293 } 5294 5295 void Assembler::vsqrtps(XMMRegister dst, Address src, int vector_len) { 5296 assert(VM_Version::supports_avx(), ""); 5297 InstructionMark im(this); 5298 InstructionAttr 

void Assembler::andpd(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionAttr attributes(AVX_128bit, /* rex_w */ !_legacy_mode_dq, /* legacy_mode */ _legacy_mode_dq, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_rex_vex_w_reverted();
  int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int16(0x54, (unsigned char)(0xC0 | encode));
}

void Assembler::andps(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse(), ""));
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_dq, /* no_mask_reg */ true, /* uses_vl */ true);
  int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes);
  emit_int16(0x54, (unsigned char)(0xC0 | encode));
}

void Assembler::andps(XMMRegister dst, Address src) {
  NOT_LP64(assert(VM_Version::supports_sse(), ""));
  InstructionMark im(this);
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_dq, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_32bit);
  simd_prefix(dst, dst, src, VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes);
  emit_int8(0x54);
  emit_operand(dst, src);
}

void Assembler::andpd(XMMRegister dst, Address src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionMark im(this);
  InstructionAttr attributes(AVX_128bit, /* rex_w */ !_legacy_mode_dq, /* legacy_mode */ _legacy_mode_dq, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_64bit);
  attributes.set_rex_vex_w_reverted();
  simd_prefix(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int8(0x54);
  emit_operand(dst, src);
}

void Assembler::vandpd(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
  assert(VM_Version::supports_avx(), "");
  InstructionAttr attributes(vector_len, /* vex_w */ !_legacy_mode_dq, /* legacy_mode */ _legacy_mode_dq, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_rex_vex_w_reverted();
  int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int16(0x54, (unsigned char)(0xC0 | encode));
}

void Assembler::vandps(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
  assert(VM_Version::supports_avx(), "");
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ _legacy_mode_dq, /* no_mask_reg */ true, /* uses_vl */ true);
  int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes);
  emit_int16(0x54, (unsigned char)(0xC0 | encode));
}

void Assembler::vandpd(XMMRegister dst, XMMRegister nds, Address src, int vector_len) {
  assert(VM_Version::supports_avx(), "");
  InstructionMark im(this);
  InstructionAttr attributes(vector_len, /* vex_w */ !_legacy_mode_dq, /* legacy_mode */ _legacy_mode_dq, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_64bit);
  attributes.set_rex_vex_w_reverted();
  vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int8(0x54);
  emit_operand(dst, src);
}

void Assembler::vandps(XMMRegister dst, XMMRegister nds, Address src, int vector_len) {
  assert(VM_Version::supports_avx(), "");
  InstructionMark im(this);
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ _legacy_mode_dq, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_32bit);
  vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes);
  emit_int8(0x54);
  emit_operand(dst, src);
}

void Assembler::unpckhpd(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionAttr attributes(AVX_128bit, /* rex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_rex_vex_w_reverted();
  int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int16(0x15, (unsigned char)(0xC0 | encode));
}

void Assembler::unpcklpd(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionAttr attributes(AVX_128bit, /* rex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_rex_vex_w_reverted();
  int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int16(0x14, (unsigned char)(0xC0 | encode));
}

void Assembler::xorpd(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionAttr attributes(AVX_128bit, /* rex_w */ !_legacy_mode_dq, /* legacy_mode */ _legacy_mode_dq, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_rex_vex_w_reverted();
  int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int16(0x57, (unsigned char)(0xC0 | encode));
}

void Assembler::xorps(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse(), ""));
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_dq, /* no_mask_reg */ true, /* uses_vl */ true);
  int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes);
  emit_int16(0x57, (unsigned char)(0xC0 | encode));
}

void Assembler::xorpd(XMMRegister dst, Address src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionMark im(this);
  InstructionAttr attributes(AVX_128bit, /* rex_w */ !_legacy_mode_dq, /* legacy_mode */ _legacy_mode_dq, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_64bit);
  attributes.set_rex_vex_w_reverted();
  simd_prefix(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int8(0x57);
  emit_operand(dst, src);
}

void Assembler::xorps(XMMRegister dst, Address src) {
  NOT_LP64(assert(VM_Version::supports_sse(), ""));
  InstructionMark im(this);
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_dq, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_32bit);
  simd_prefix(dst, dst, src, VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes);
  emit_int8(0x57);
  emit_operand(dst, src);
}

void Assembler::vxorpd(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
  assert(VM_Version::supports_avx(), "");
  InstructionAttr attributes(vector_len, /* vex_w */ !_legacy_mode_dq, /* legacy_mode */ _legacy_mode_dq, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_rex_vex_w_reverted();
  int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int16(0x57, (unsigned char)(0xC0 | encode));
}

void Assembler::vxorps(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
  assert(VM_Version::supports_avx(), "");
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ _legacy_mode_dq, /* no_mask_reg */ true, /* uses_vl */ true);
  int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes);
  emit_int16(0x57, (unsigned char)(0xC0 | encode));
}

void Assembler::vxorpd(XMMRegister dst, XMMRegister nds, Address src, int vector_len) {
  assert(VM_Version::supports_avx(), "");
  InstructionMark im(this);
  InstructionAttr attributes(vector_len, /* vex_w */ !_legacy_mode_dq, /* legacy_mode */ _legacy_mode_dq, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_64bit);
  attributes.set_rex_vex_w_reverted();
  vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int8(0x57);
  emit_operand(dst, src);
}

void Assembler::vxorps(XMMRegister dst, XMMRegister nds, Address src, int vector_len) {
  assert(VM_Version::supports_avx(), "");
  InstructionMark im(this);
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ _legacy_mode_dq, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_32bit);
  vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes);
  emit_int8(0x57);
  emit_operand(dst, src);
}

// Integer vector arithmetic
void Assembler::vphaddw(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
  assert((VM_Version::supports_avx() && (vector_len == 0)) ||
         VM_Version::supports_avx2(), "256-bit integer vectors require AVX2");
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ true);
  int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int16(0x01, (unsigned char)(0xC0 | encode));
}
"256 bit integer vectors requires AVX2"); 5478 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ true); 5479 int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes); 5480 emit_int16(0x02, (unsigned char)(0xC0 | encode)); 5481 } 5482 5483 void Assembler::paddb(XMMRegister dst, XMMRegister src) { 5484 NOT_LP64(assert(VM_Version::supports_sse2(), "")); 5485 InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true); 5486 int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes); 5487 emit_int16((unsigned char)0xFC, (unsigned char)(0xC0 | encode)); 5488 } 5489 5490 void Assembler::paddw(XMMRegister dst, XMMRegister src) { 5491 NOT_LP64(assert(VM_Version::supports_sse2(), "")); 5492 InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true); 5493 int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes); 5494 emit_int16((unsigned char)0xFD, (unsigned char)(0xC0 | encode)); 5495 } 5496 5497 void Assembler::paddd(XMMRegister dst, XMMRegister src) { 5498 NOT_LP64(assert(VM_Version::supports_sse2(), "")); 5499 InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); 5500 int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes); 5501 emit_int16((unsigned char)0xFE, (unsigned char)(0xC0 | encode)); 5502 } 5503 5504 void Assembler::paddd(XMMRegister dst, Address src) { 5505 NOT_LP64(assert(VM_Version::supports_sse2(), "")); 5506 InstructionMark im(this); 5507 InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); 5508 simd_prefix(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes); 5509 emit_int8((unsigned char)0xFE); 5510 emit_operand(dst, src); 5511 } 5512 5513 void Assembler::paddq(XMMRegister dst, XMMRegister src) { 5514 NOT_LP64(assert(VM_Version::supports_sse2(), "")); 5515 InstructionAttr attributes(AVX_128bit, /* rex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); 5516 attributes.set_rex_vex_w_reverted(); 5517 int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes); 5518 emit_int16((unsigned char)0xD4, (unsigned char)(0xC0 | encode)); 5519 } 5520 5521 void Assembler::phaddw(XMMRegister dst, XMMRegister src) { 5522 assert(VM_Version::supports_sse3(), ""); 5523 InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ true); 5524 int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes); 5525 emit_int16(0x01, (unsigned char)(0xC0 | encode)); 5526 } 5527 5528 void Assembler::phaddd(XMMRegister dst, XMMRegister src) { 5529 assert(VM_Version::supports_sse3(), ""); 5530 InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ true); 5531 int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes); 5532 emit_int16(0x02, (unsigned char)(0xC0 | encode)); 5533 } 5534 5535 void Assembler::vpaddb(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { 5536 

void Assembler::vpaddb(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
  assert(UseAVX > 0, "requires some form of AVX");
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
  int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int16((unsigned char)0xFC, (unsigned char)(0xC0 | encode));
}

void Assembler::vpaddw(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
  assert(UseAVX > 0, "requires some form of AVX");
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
  int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int16((unsigned char)0xFD, (unsigned char)(0xC0 | encode));
}

void Assembler::vpaddd(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
  assert(UseAVX > 0, "requires some form of AVX");
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int16((unsigned char)0xFE, (unsigned char)(0xC0 | encode));
}

void Assembler::vpaddq(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
  assert(UseAVX > 0, "requires some form of AVX");
  InstructionAttr attributes(vector_len, /* vex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_rex_vex_w_reverted();
  int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int16((unsigned char)0xD4, (unsigned char)(0xC0 | encode));
}

void Assembler::vpaddb(XMMRegister dst, XMMRegister nds, Address src, int vector_len) {
  assert(UseAVX > 0, "requires some form of AVX");
  InstructionMark im(this);
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_address_attributes(/* tuple_type */ EVEX_FVM, /* input_size_in_bits */ EVEX_NObit);
  vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int8((unsigned char)0xFC);
  emit_operand(dst, src);
}

void Assembler::vpaddw(XMMRegister dst, XMMRegister nds, Address src, int vector_len) {
  assert(UseAVX > 0, "requires some form of AVX");
  InstructionMark im(this);
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_address_attributes(/* tuple_type */ EVEX_FVM, /* input_size_in_bits */ EVEX_NObit);
  vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int8((unsigned char)0xFD);
  emit_operand(dst, src);
}

void Assembler::vpaddd(XMMRegister dst, XMMRegister nds, Address src, int vector_len) {
  assert(UseAVX > 0, "requires some form of AVX");
  InstructionMark im(this);
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_32bit);
  vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int8((unsigned char)0xFE);
  emit_operand(dst, src);
}

void Assembler::vpaddq(XMMRegister dst, XMMRegister nds, Address src, int vector_len) {
  assert(UseAVX > 0, "requires some form of AVX");
  InstructionMark im(this);
  InstructionAttr attributes(vector_len, /* vex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_64bit);
  attributes.set_rex_vex_w_reverted();
  vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int8((unsigned char)0xD4);
  emit_operand(dst, src);
}

void Assembler::psubb(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
  int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int16((unsigned char)0xF8, (unsigned char)(0xC0 | encode));
}

void Assembler::psubw(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
  int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int16((unsigned char)0xF9, (unsigned char)(0xC0 | encode));
}

void Assembler::psubd(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int16((unsigned char)0xFA, (unsigned char)(0xC0 | encode));
}

void Assembler::psubq(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionAttr attributes(AVX_128bit, /* rex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_rex_vex_w_reverted();
  int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int16((unsigned char)0xFB, (unsigned char)(0xC0 | encode));
}

void Assembler::vpsubb(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
  assert(UseAVX > 0, "requires some form of AVX");
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
  int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int16((unsigned char)0xF8, (unsigned char)(0xC0 | encode));
}

void Assembler::vpsubw(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
  assert(UseAVX > 0, "requires some form of AVX");
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
  int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int16((unsigned char)0xF9, (unsigned char)(0xC0 | encode));
}

void Assembler::vpsubd(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
  assert(UseAVX > 0, "requires some form of AVX");
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int16((unsigned char)0xFA, (unsigned char)(0xC0 | encode));
}

void Assembler::vpsubq(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
  assert(UseAVX > 0, "requires some form of AVX");
  InstructionAttr attributes(vector_len, /* vex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_rex_vex_w_reverted();
  int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int16((unsigned char)0xFB, (unsigned char)(0xC0 | encode));
}

void Assembler::vpsubb(XMMRegister dst, XMMRegister nds, Address src, int vector_len) {
  assert(UseAVX > 0, "requires some form of AVX");
  InstructionMark im(this);
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_address_attributes(/* tuple_type */ EVEX_FVM, /* input_size_in_bits */ EVEX_NObit);
  vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int8((unsigned char)0xF8);
  emit_operand(dst, src);
}

void Assembler::vpsubw(XMMRegister dst, XMMRegister nds, Address src, int vector_len) {
  assert(UseAVX > 0, "requires some form of AVX");
  InstructionMark im(this);
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_address_attributes(/* tuple_type */ EVEX_FVM, /* input_size_in_bits */ EVEX_NObit);
  vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int8((unsigned char)0xF9);
  emit_operand(dst, src);
}

void Assembler::vpsubd(XMMRegister dst, XMMRegister nds, Address src, int vector_len) {
  assert(UseAVX > 0, "requires some form of AVX");
  InstructionMark im(this);
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_32bit);
  vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int8((unsigned char)0xFA);
  emit_operand(dst, src);
}

void Assembler::vpsubq(XMMRegister dst, XMMRegister nds, Address src, int vector_len) {
  assert(UseAVX > 0, "requires some form of AVX");
  InstructionMark im(this);
  InstructionAttr attributes(vector_len, /* vex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_64bit);
  attributes.set_rex_vex_w_reverted();
  vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int8((unsigned char)0xFB);
  emit_operand(dst, src);
}

void Assembler::pmullw(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
  int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int16((unsigned char)0xD5, (unsigned char)(0xC0 | encode));
}

void Assembler::pmulld(XMMRegister dst, XMMRegister src) {
  assert(VM_Version::supports_sse4_1(), "");
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int16(0x40, (unsigned char)(0xC0 | encode));
}

void Assembler::vpmullw(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
  assert(UseAVX > 0, "requires some form of AVX");
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
  int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int16((unsigned char)0xD5, (unsigned char)(0xC0 | encode));
}

void Assembler::vpmulld(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
  assert(UseAVX > 0, "requires some form of AVX");
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int16(0x40, (unsigned char)(0xC0 | encode));
}

void Assembler::vpmullq(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
  assert(UseAVX > 2, "requires some form of EVEX");
  InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ _legacy_mode_dq, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_is_evex_instruction();
  int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int16(0x40, (unsigned char)(0xC0 | encode));
}

void Assembler::vpmullw(XMMRegister dst, XMMRegister nds, Address src, int vector_len) {
  assert(UseAVX > 0, "requires some form of AVX");
  InstructionMark im(this);
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_address_attributes(/* tuple_type */ EVEX_FVM, /* input_size_in_bits */ EVEX_NObit);
  vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int8((unsigned char)0xD5);
  emit_operand(dst, src);
}

void Assembler::vpmulld(XMMRegister dst, XMMRegister nds, Address src, int vector_len) {
  assert(UseAVX > 0, "requires some form of AVX");
  InstructionMark im(this);
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_32bit);
  vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int8(0x40);
  emit_operand(dst, src);
}

void Assembler::vpmullq(XMMRegister dst, XMMRegister nds, Address src, int vector_len) {
  assert(UseAVX > 2, "requires some form of EVEX");
  InstructionMark im(this);
  InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ _legacy_mode_dq, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_64bit);
  attributes.set_is_evex_instruction();
  vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int8(0x40);
  emit_operand(dst, src);
}

// Shift packed integers left by specified number of bits.
void Assembler::psllw(XMMRegister dst, int shift) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
  // XMM6 is for /6 encoding: 66 0F 71 /6 ib
  int encode = simd_prefix_and_encode(xmm6, dst, dst, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int24(0x71, (unsigned char)(0xC0 | encode), shift & 0xFF);
}

void Assembler::pslld(XMMRegister dst, int shift) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  // XMM6 is for /6 encoding: 66 0F 72 /6 ib
  int encode = simd_prefix_and_encode(xmm6, dst, dst, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int24(0x72, (unsigned char)(0xC0 | encode), shift & 0xFF);
}

void Assembler::psllq(XMMRegister dst, int shift) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionAttr attributes(AVX_128bit, /* rex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  // XMM6 is for /6 encoding: 66 0F 73 /6 ib
  int encode = simd_prefix_and_encode(xmm6, dst, dst, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int24(0x73, (unsigned char)(0xC0 | encode), shift & 0xFF);
}
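
// Note: the immediate-count shifts above have no register source operand;
// the ModRM reg field is an opcode extension instead (/2, /4, /6 in the
// Intel manuals). Passing a fixed register with that encoding -- xmm6 for
// /6, xmm4 for /4, xmm2 for /2 -- is simply how the right bits end up in
// the ModRM byte, e.g.:
//
//   // 66 0F 71 /6 ib  ==  psllw xmm, imm8
//   int encode = simd_prefix_and_encode(xmm6, dst, dst, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
//   emit_int24(0x71, (unsigned char)(0xC0 | encode), shift & 0xFF);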

void Assembler::psllw(XMMRegister dst, XMMRegister shift) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
  int encode = simd_prefix_and_encode(dst, dst, shift, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int16((unsigned char)0xF1, (unsigned char)(0xC0 | encode));
}

void Assembler::pslld(XMMRegister dst, XMMRegister shift) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  int encode = simd_prefix_and_encode(dst, dst, shift, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int16((unsigned char)0xF2, (unsigned char)(0xC0 | encode));
}

void Assembler::psllq(XMMRegister dst, XMMRegister shift) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionAttr attributes(AVX_128bit, /* rex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_rex_vex_w_reverted();
  int encode = simd_prefix_and_encode(dst, dst, shift, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int16((unsigned char)0xF3, (unsigned char)(0xC0 | encode));
}

void Assembler::vpsllw(XMMRegister dst, XMMRegister src, int shift, int vector_len) {
  assert(UseAVX > 0, "requires some form of AVX");
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
  // XMM6 is for /6 encoding: 66 0F 71 /6 ib
  int encode = vex_prefix_and_encode(xmm6->encoding(), dst->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int24(0x71, (unsigned char)(0xC0 | encode), shift & 0xFF);
}

void Assembler::vpslld(XMMRegister dst, XMMRegister src, int shift, int vector_len) {
  assert(UseAVX > 0, "requires some form of AVX");
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  // XMM6 is for /6 encoding: 66 0F 72 /6 ib
  int encode = vex_prefix_and_encode(xmm6->encoding(), dst->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int24(0x72, (unsigned char)(0xC0 | encode), shift & 0xFF);
}

void Assembler::vpsllq(XMMRegister dst, XMMRegister src, int shift, int vector_len) {
  assert(UseAVX > 0, "requires some form of AVX");
  InstructionAttr attributes(vector_len, /* vex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_rex_vex_w_reverted();
  // XMM6 is for /6 encoding: 66 0F 73 /6 ib
  int encode = vex_prefix_and_encode(xmm6->encoding(), dst->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int24(0x73, (unsigned char)(0xC0 | encode), shift & 0xFF);
}

void Assembler::vpsllw(XMMRegister dst, XMMRegister src, XMMRegister shift, int vector_len) {
  assert(UseAVX > 0, "requires some form of AVX");
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
  int encode = vex_prefix_and_encode(dst->encoding(), src->encoding(), shift->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int16((unsigned char)0xF1, (unsigned char)(0xC0 | encode));
}

void Assembler::vpslld(XMMRegister dst, XMMRegister src, XMMRegister shift, int vector_len) {
  assert(UseAVX > 0, "requires some form of AVX");
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  int encode = vex_prefix_and_encode(dst->encoding(), src->encoding(), shift->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int16((unsigned char)0xF2, (unsigned char)(0xC0 | encode));
}

void Assembler::vpsllq(XMMRegister dst, XMMRegister src, XMMRegister shift, int vector_len) {
  assert(UseAVX > 0, "requires some form of AVX");
  InstructionAttr attributes(vector_len, /* vex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_rex_vex_w_reverted();
  int encode = vex_prefix_and_encode(dst->encoding(), src->encoding(), shift->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int16((unsigned char)0xF3, (unsigned char)(0xC0 | encode));
}

// Shift packed integers logically right by specified number of bits.
void Assembler::psrlw(XMMRegister dst, int shift) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
  // XMM2 is for /2 encoding: 66 0F 71 /2 ib
  int encode = simd_prefix_and_encode(xmm2, dst, dst, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int24(0x71, (unsigned char)(0xC0 | encode), shift & 0xFF);
}

void Assembler::psrld(XMMRegister dst, int shift) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  // XMM2 is for /2 encoding: 66 0F 72 /2 ib
  int encode = simd_prefix_and_encode(xmm2, dst, dst, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int24(0x72, (unsigned char)(0xC0 | encode), shift & 0xFF);
}

void Assembler::psrlq(XMMRegister dst, int shift) {
  // Do not confuse it with psrldq SSE2 instruction which
  // shifts 128 bit value in xmm register by number of bytes.
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionAttr attributes(AVX_128bit, /* rex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_rex_vex_w_reverted();
  // XMM2 is for /2 encoding: 66 0F 73 /2 ib
  int encode = simd_prefix_and_encode(xmm2, dst, dst, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int24(0x73, (unsigned char)(0xC0 | encode), shift & 0xFF);
}

void Assembler::psrlw(XMMRegister dst, XMMRegister shift) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
  int encode = simd_prefix_and_encode(dst, dst, shift, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int16((unsigned char)0xD1, (unsigned char)(0xC0 | encode));
}

void Assembler::psrld(XMMRegister dst, XMMRegister shift) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  int encode = simd_prefix_and_encode(dst, dst, shift, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int16((unsigned char)0xD2, (unsigned char)(0xC0 | encode));
}

void Assembler::psrlq(XMMRegister dst, XMMRegister shift) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionAttr attributes(AVX_128bit, /* rex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_rex_vex_w_reverted();
  int encode = simd_prefix_and_encode(dst, dst, shift, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int16((unsigned char)0xD3, (unsigned char)(0xC0 | encode));
}

void Assembler::vpsrlw(XMMRegister dst, XMMRegister src, int shift, int vector_len) {
  assert(UseAVX > 0, "requires some form of AVX");
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
  // XMM2 is for /2 encoding: 66 0F 71 /2 ib
  int encode = vex_prefix_and_encode(xmm2->encoding(), dst->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int24(0x71, (unsigned char)(0xC0 | encode), shift & 0xFF);
}

void Assembler::vpsrld(XMMRegister dst, XMMRegister src, int shift, int vector_len) {
  assert(UseAVX > 0, "requires some form of AVX");
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  // XMM2 is for /2 encoding: 66 0F 72 /2 ib
  int encode = vex_prefix_and_encode(xmm2->encoding(), dst->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int24(0x72, (unsigned char)(0xC0 | encode), shift & 0xFF);
}

void Assembler::vpsrlq(XMMRegister dst, XMMRegister src, int shift, int vector_len) {
  assert(UseAVX > 0, "requires some form of AVX");
  InstructionAttr attributes(vector_len, /* vex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_rex_vex_w_reverted();
  // XMM2 is for /2 encoding: 66 0F 73 /2 ib
  int encode = vex_prefix_and_encode(xmm2->encoding(), dst->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int24(0x73, (unsigned char)(0xC0 | encode), shift & 0xFF);
}

void Assembler::vpsrlw(XMMRegister dst, XMMRegister src, XMMRegister shift, int vector_len) {
  assert(UseAVX > 0, "requires some form of AVX");
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
  int encode = vex_prefix_and_encode(dst->encoding(), src->encoding(), shift->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int16((unsigned char)0xD1, (unsigned char)(0xC0 | encode));
}

void Assembler::vpsrld(XMMRegister dst, XMMRegister src, XMMRegister shift, int vector_len) {
  assert(UseAVX > 0, "requires some form of AVX");
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  int encode = vex_prefix_and_encode(dst->encoding(), src->encoding(), shift->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int16((unsigned char)0xD2, (unsigned char)(0xC0 | encode));
}

void Assembler::vpsrlq(XMMRegister dst, XMMRegister src, XMMRegister shift, int vector_len) {
  assert(UseAVX > 0, "requires some form of AVX");
  InstructionAttr attributes(vector_len, /* vex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_rex_vex_w_reverted();
  int encode = vex_prefix_and_encode(dst->encoding(), src->encoding(), shift->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int16((unsigned char)0xD3, (unsigned char)(0xC0 | encode));
}
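
// Note: unlike the fixed-count and whole-register-count shifts above,
// evpsrlvw/evpsllvw below are per-element variable shifts: each 16-bit
// lane of nds is shifted by the count held in the corresponding lane of
// src. They exist only as EVEX encodings (AVX512BW), hence
// set_is_evex_instruction().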

void Assembler::evpsrlvw(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
  assert(VM_Version::supports_avx512bw(), "");
  InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_is_evex_instruction();
  int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int16(0x10, (unsigned char)(0xC0 | encode));
}

void Assembler::evpsllvw(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
  assert(VM_Version::supports_avx512bw(), "");
  InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_is_evex_instruction();
  int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int16(0x12, (unsigned char)(0xC0 | encode));
}

// Shift packed integers arithmetically right by specified number of bits.
void Assembler::psraw(XMMRegister dst, int shift) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
  // XMM4 is for /4 encoding: 66 0F 71 /4 ib
  int encode = simd_prefix_and_encode(xmm4, dst, dst, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int24(0x71, (unsigned char)(0xC0 | encode), shift & 0xFF);
}

void Assembler::psrad(XMMRegister dst, int shift) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  // XMM4 is for /4 encoding: 66 0F 72 /4 ib
  int encode = simd_prefix_and_encode(xmm4, dst, dst, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int24(0x72, (unsigned char)(0xC0 | encode), shift & 0xFF);
}

void Assembler::psraw(XMMRegister dst, XMMRegister shift) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
  int encode = simd_prefix_and_encode(dst, dst, shift, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int16((unsigned char)0xE1, (unsigned char)(0xC0 | encode));
}

void Assembler::psrad(XMMRegister dst, XMMRegister shift) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  int encode = simd_prefix_and_encode(dst, dst, shift, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int16((unsigned char)0xE2, (unsigned char)(0xC0 | encode));
}

void Assembler::vpsraw(XMMRegister dst, XMMRegister src, int shift, int vector_len) {
  assert(UseAVX > 0, "requires some form of AVX");
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
  // XMM4 is for /4 encoding: 66 0F 71 /4 ib
  int encode = vex_prefix_and_encode(xmm4->encoding(), dst->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int24(0x71, (unsigned char)(0xC0 | encode), shift & 0xFF);
}

void Assembler::vpsrad(XMMRegister dst, XMMRegister src, int shift, int vector_len) {
  assert(UseAVX > 0, "requires some form of AVX");
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  // XMM4 is for /4 encoding: 66 0F 72 /4 ib
  int encode = vex_prefix_and_encode(xmm4->encoding(), dst->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int24(0x72, (unsigned char)(0xC0 | encode), shift & 0xFF);
}

void Assembler::vpsraw(XMMRegister dst, XMMRegister src, XMMRegister shift, int vector_len) {
  assert(UseAVX > 0, "requires some form of AVX");
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
  int encode = vex_prefix_and_encode(dst->encoding(), src->encoding(), shift->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int16((unsigned char)0xE1, (unsigned char)(0xC0 | encode));
}

void Assembler::vpsrad(XMMRegister dst, XMMRegister src, XMMRegister shift, int vector_len) {
  assert(UseAVX > 0, "requires some form of AVX");
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  int encode = vex_prefix_and_encode(dst->encoding(), src->encoding(), shift->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int16((unsigned char)0xE2, (unsigned char)(0xC0 | encode));
}

void Assembler::evpsraq(XMMRegister dst, XMMRegister src, int shift, int vector_len) {
  assert(UseAVX > 2, "requires AVX512");
  assert((VM_Version::supports_avx512vl() || vector_len == 2), "requires AVX512vl");
  InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_is_evex_instruction();
  // XMM4 is for /4 encoding: 66 0F 72 /4 ib
  int encode = vex_prefix_and_encode(xmm4->encoding(), dst->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int24((unsigned char)0x72, (unsigned char)(0xC0 | encode), shift & 0xFF);
}

void Assembler::evpsraq(XMMRegister dst, XMMRegister src, XMMRegister shift, int vector_len) {
  assert(UseAVX > 2, "requires AVX512");
  assert((VM_Version::supports_avx512vl() || vector_len == 2), "requires AVX512vl");
  InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_is_evex_instruction();
  int encode = vex_prefix_and_encode(dst->encoding(), src->encoding(), shift->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int16((unsigned char)0xE2, (unsigned char)(0xC0 | encode));
}

// Logical operations on packed integers
void Assembler::pand(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int16((unsigned char)0xDB, (unsigned char)(0xC0 | encode));
}

void Assembler::vpand(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
  assert(UseAVX > 0, "requires some form of AVX");
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int16((unsigned char)0xDB, (unsigned char)(0xC0 | encode));
}

void Assembler::vpand(XMMRegister dst, XMMRegister nds, Address src, int vector_len) {
  assert(UseAVX > 0, "requires some form of AVX");
  InstructionMark im(this);
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_32bit);
  vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int8((unsigned char)0xDB);
  emit_operand(dst, src);
}
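
// Note: vpand above is the VEX-encoded form, which tops out at 256 bits;
// AVX512 replaces it with the element-typed vpandd/vpandq. The vpandq
// below is the EVEX.W1 form used when 512-bit vectors or 64-bit-element
// masked semantics are needed.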

void Assembler::vpandq(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
  assert(VM_Version::supports_evex(), "");
  InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int16((unsigned char)0xDB, (unsigned char)(0xC0 | encode));
}

void Assembler::vpshldvd(XMMRegister dst, XMMRegister src, XMMRegister shift, int vector_len) {
  assert(VM_Version::supports_avx512_vbmi2(), "requires vbmi2");
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_is_evex_instruction();
  int encode = vex_prefix_and_encode(dst->encoding(), src->encoding(), shift->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int16(0x71, (unsigned char)(0xC0 | encode));
}

void Assembler::vpshrdvd(XMMRegister dst, XMMRegister src, XMMRegister shift, int vector_len) {
  assert(VM_Version::supports_avx512_vbmi2(), "requires vbmi2");
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_is_evex_instruction();
  int encode = vex_prefix_and_encode(dst->encoding(), src->encoding(), shift->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int16(0x73, (unsigned char)(0xC0 | encode));
}

void Assembler::pandn(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionAttr attributes(AVX_128bit, /* rex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_rex_vex_w_reverted();
  int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int16((unsigned char)0xDF, (unsigned char)(0xC0 | encode));
}

void Assembler::vpandn(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
  assert(UseAVX > 0, "requires some form of AVX");
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int16((unsigned char)0xDF, (unsigned char)(0xC0 | encode));
}

void Assembler::por(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int16((unsigned char)0xEB, (unsigned char)(0xC0 | encode));
}

void Assembler::vpor(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
  assert(UseAVX > 0, "requires some form of AVX");
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int16((unsigned char)0xEB, (unsigned char)(0xC0 | encode));
}

void Assembler::vpor(XMMRegister dst, XMMRegister nds, Address src, int vector_len) {
  assert(UseAVX > 0, "requires some form of AVX");
  InstructionMark im(this);
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_32bit);
  vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int8((unsigned char)0xEB);
  emit_operand(dst, src);
}

void Assembler::vporq(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
  assert(VM_Version::supports_evex(), "");
  InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int16((unsigned char)0xEB, (unsigned char)(0xC0 | encode));
}

void Assembler::pxor(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int16((unsigned char)0xEF, (unsigned char)(0xC0 | encode));
}

void Assembler::vpxor(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
  assert(UseAVX > 0, "requires some form of AVX");
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int16((unsigned char)0xEF, (unsigned char)(0xC0 | encode));
}

void Assembler::vpxor(XMMRegister dst, XMMRegister nds, Address src, int vector_len) {
  assert(UseAVX > 0, "requires some form of AVX");
  InstructionMark im(this);
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_32bit);
  vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int8((unsigned char)0xEF);
  emit_operand(dst, src);
}

void Assembler::evpxorq(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
  assert(VM_Version::supports_evex(), "requires EVEX support");
  InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_is_evex_instruction();
  int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int16((unsigned char)0xEF, (unsigned char)(0xC0 | encode));
}

void Assembler::evpxorq(XMMRegister dst, XMMRegister nds, Address src, int vector_len) {
  assert(VM_Version::supports_evex(), "requires EVEX support");
  assert(dst != xnoreg, "sanity");
  InstructionMark im(this);
  InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_is_evex_instruction();
  attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_64bit);
  vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int8((unsigned char)0xEF);
  emit_operand(dst, src);
}

// vinserti forms

void Assembler::vinserti128(XMMRegister dst, XMMRegister nds, XMMRegister src, uint8_t imm8) {
  assert(VM_Version::supports_avx2(), "");
  assert(imm8 <= 0x01, "imm8: %u", imm8);
  InstructionAttr attributes(AVX_256bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
  // last byte:
  // 0x00 - insert into lower 128 bits
  // 0x01 - insert into upper 128 bits
  emit_int24(0x38, (unsigned char)(0xC0 | encode), imm8 & 0x01);
}

void Assembler::vinserti128(XMMRegister dst, XMMRegister nds, Address src, uint8_t imm8) {
  assert(VM_Version::supports_avx2(), "");
  assert(dst != xnoreg, "sanity");
  assert(imm8 <= 0x01, "imm8: %u", imm8);
  InstructionMark im(this);
  InstructionAttr attributes(AVX_256bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_address_attributes(/* tuple_type */ EVEX_T4, /* input_size_in_bits */ EVEX_32bit);
  vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
  emit_int8(0x38);
  emit_operand(dst, src);
  // 0x00 - insert into lower 128 bits
  // 0x01 - insert into upper 128 bits
  emit_int8(imm8 & 0x01);
}

void Assembler::vinserti32x4(XMMRegister dst, XMMRegister nds, XMMRegister src, uint8_t imm8) {
  assert(VM_Version::supports_evex(), "");
  assert(imm8 <= 0x03, "imm8: %u", imm8);
  InstructionAttr attributes(AVX_512bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_is_evex_instruction();
  int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
  // imm8:
  // 0x00 - insert into q0 128 bits (0..127)
  // 0x01 - insert into q1 128 bits (128..255)
  // 0x02 - insert into q2 128 bits (256..383)
  // 0x03 - insert into q3 128 bits (384..511)
  emit_int24(0x38, (unsigned char)(0xC0 | encode), imm8 & 0x03);
}

void Assembler::vinserti32x4(XMMRegister dst, XMMRegister nds, Address src, uint8_t imm8) {
  assert(VM_Version::supports_evex(), "");
  assert(dst != xnoreg, "sanity");
  assert(imm8 <= 0x03, "imm8: %u", imm8);
  InstructionMark im(this);
  InstructionAttr attributes(AVX_512bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_address_attributes(/* tuple_type */ EVEX_T4, /* input_size_in_bits */ EVEX_32bit);
  attributes.set_is_evex_instruction();
  vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
  emit_int8(0x38);
  emit_operand(dst, src);
  // 0x00 - insert into q0 128 bits (0..127)
  // 0x01 - insert into q1 128 bits (128..255)
  // 0x02 - insert into q2 128 bits (256..383)
  // 0x03 - insert into q3 128 bits (384..511)
  emit_int8(imm8 & 0x03);
}

void Assembler::vinserti64x4(XMMRegister dst, XMMRegister nds, XMMRegister src, uint8_t imm8) {
  assert(VM_Version::supports_evex(), "");
  assert(imm8 <= 0x01, "imm8: %u", imm8);
  InstructionAttr attributes(AVX_512bit, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_is_evex_instruction();
  int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
  // imm8:
  // 0x00 - insert into lower 256 bits
  // 0x01 - insert into upper 256 bits
  emit_int24(0x3A, (unsigned char)(0xC0 | encode), imm8 & 0x01);
}
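
// Note on the imm8 in the vinsert*/vextract* families: it is a 128-bit
// lane index (256-bit for the *64x4 forms) within the destination, as the
// comments on each emitter spell out. Roughly:
//
//   vinserti128(xmm0, xmm1, xmm2, 1);  // treating dst/nds as 256-bit:
//                                      // dst[255:128] = xmm2,
//                                      // dst[127:0]   = xmm1[127:0]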
void Assembler::vinserti64x4(XMMRegister dst, XMMRegister nds, XMMRegister src, uint8_t imm8) {
  assert(VM_Version::supports_evex(), "");
  assert(imm8 <= 0x01, "imm8: %u", imm8);
  InstructionAttr attributes(AVX_512bit, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_is_evex_instruction();
  int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
  // imm8:
  // 0x00 - insert into lower 256 bits
  // 0x01 - insert into upper 256 bits
  emit_int24(0x3A, (unsigned char)(0xC0 | encode), imm8 & 0x01);
}


// vinsertf forms

void Assembler::vinsertf128(XMMRegister dst, XMMRegister nds, XMMRegister src, uint8_t imm8) {
  assert(VM_Version::supports_avx(), "");
  assert(imm8 <= 0x01, "imm8: %u", imm8);
  InstructionAttr attributes(AVX_256bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
  // imm8:
  // 0x00 - insert into lower 128 bits
  // 0x01 - insert into upper 128 bits
  emit_int24(0x18, (unsigned char)(0xC0 | encode), imm8 & 0x01);
}

void Assembler::vinsertf128(XMMRegister dst, XMMRegister nds, Address src, uint8_t imm8) {
  assert(VM_Version::supports_avx(), "");
  assert(dst != xnoreg, "sanity");
  assert(imm8 <= 0x01, "imm8: %u", imm8);
  InstructionMark im(this);
  InstructionAttr attributes(AVX_256bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_address_attributes(/* tuple_type */ EVEX_T4, /* input_size_in_bits */ EVEX_32bit);
  vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
  emit_int8(0x18);
  emit_operand(dst, src);
  // 0x00 - insert into lower 128 bits
  // 0x01 - insert into upper 128 bits
  emit_int8(imm8 & 0x01);
}

void Assembler::vinsertf32x4(XMMRegister dst, XMMRegister nds, XMMRegister src, uint8_t imm8) {
  assert(VM_Version::supports_avx2(), "");
  assert(imm8 <= 0x03, "imm8: %u", imm8);
  InstructionAttr attributes(AVX_512bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
  // imm8:
  // 0x00 - insert into q0 128 bits (0..127)
  // 0x01 - insert into q1 128 bits (128..255)
  // 0x02 - insert into q2 128 bits (256..383)
  // 0x03 - insert into q3 128 bits (384..511)
  emit_int24(0x18, (unsigned char)(0xC0 | encode), imm8 & 0x03);
}

void Assembler::vinsertf32x4(XMMRegister dst, XMMRegister nds, Address src, uint8_t imm8) {
  assert(VM_Version::supports_avx(), "");
  assert(dst != xnoreg, "sanity");
  assert(imm8 <= 0x03, "imm8: %u", imm8);
  InstructionMark im(this);
  InstructionAttr attributes(AVX_512bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_address_attributes(/* tuple_type */ EVEX_T4, /* input_size_in_bits */ EVEX_32bit);
  vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
  emit_int8(0x18);
  emit_operand(dst, src);
  // 0x00 - insert into q0 128 bits (0..127)
  // 0x01 - insert into q1 128 bits (128..255)
  // 0x02 - insert into q2 128 bits (256..383)
  // 0x03 - insert into q3 128 bits (384..511)
  emit_int8(imm8 & 0x03);
}

void Assembler::vinsertf64x4(XMMRegister dst, XMMRegister nds, XMMRegister src, uint8_t imm8) {
  assert(VM_Version::supports_evex(), "");
  assert(imm8 <= 0x01, "imm8: %u", imm8);
  InstructionAttr attributes(AVX_512bit, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_is_evex_instruction();
  int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
  // imm8:
  // 0x00 - insert into lower 256 bits
  // 0x01 - insert into upper 256 bits
  emit_int24(0x1A, (unsigned char)(0xC0 | encode), imm8 & 0x01);
}

void Assembler::vinsertf64x4(XMMRegister dst, XMMRegister nds, Address src, uint8_t imm8) {
  assert(VM_Version::supports_evex(), "");
  assert(dst != xnoreg, "sanity");
  assert(imm8 <= 0x01, "imm8: %u", imm8);
  InstructionMark im(this);
  InstructionAttr attributes(AVX_512bit, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_address_attributes(/* tuple_type */ EVEX_T4, /* input_size_in_bits */ EVEX_64bit);
  attributes.set_is_evex_instruction();
  vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
  emit_int8(0x1A);
  emit_operand(dst, src);
  // 0x00 - insert into lower 256 bits
  // 0x01 - insert into upper 256 bits
  emit_int8(imm8 & 0x01);
}


// vextracti forms

void Assembler::vextracti128(XMMRegister dst, XMMRegister src, uint8_t imm8) {
  assert(VM_Version::supports_avx2(), "");
  assert(imm8 <= 0x01, "imm8: %u", imm8);
  InstructionAttr attributes(AVX_256bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  int encode = vex_prefix_and_encode(src->encoding(), 0, dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
  // imm8:
  // 0x00 - extract from lower 128 bits
  // 0x01 - extract from upper 128 bits
  emit_int24(0x39, (unsigned char)(0xC0 | encode), imm8 & 0x01);
}

void Assembler::vextracti128(Address dst, XMMRegister src, uint8_t imm8) {
  assert(VM_Version::supports_avx2(), "");
  assert(src != xnoreg, "sanity");
  assert(imm8 <= 0x01, "imm8: %u", imm8);
  InstructionMark im(this);
  InstructionAttr attributes(AVX_256bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_address_attributes(/* tuple_type */ EVEX_T4, /* input_size_in_bits */ EVEX_32bit);
  attributes.reset_is_clear_context();
  vex_prefix(dst, 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
  emit_int8(0x39);
  emit_operand(src, dst);
  // 0x00 - extract from lower 128 bits
  // 0x01 - extract from upper 128 bits
  emit_int8(imm8 & 0x01);
}

void Assembler::vextracti32x4(XMMRegister dst, XMMRegister src, uint8_t imm8) {
  assert(VM_Version::supports_evex(), "");
  assert(imm8 <= 0x03, "imm8: %u", imm8);
  InstructionAttr attributes(AVX_512bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_is_evex_instruction();
  int encode = vex_prefix_and_encode(src->encoding(), 0, dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
  // imm8:
  // 0x00 - extract from bits 127:0
  // 0x01 - extract from bits 255:128
  // 0x02 - extract from bits 383:256
  // 0x03 - extract from bits 511:384
  emit_int24(0x39, (unsigned char)(0xC0 | encode), imm8 & 0x03);
}

void Assembler::vextracti32x4(Address dst, XMMRegister src, uint8_t imm8) {
  assert(VM_Version::supports_evex(), "");
  assert(src != xnoreg, "sanity");
  assert(imm8 <= 0x03, "imm8: %u", imm8);
  InstructionMark im(this);
  InstructionAttr attributes(AVX_512bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_address_attributes(/* tuple_type */ EVEX_T4, /* input_size_in_bits */ EVEX_32bit);
  attributes.reset_is_clear_context();
  attributes.set_is_evex_instruction();
  vex_prefix(dst, 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
  emit_int8(0x39);
  emit_operand(src, dst);
  // 0x00 - extract from bits 127:0
  // 0x01 - extract from bits 255:128
  // 0x02 - extract from bits 383:256
  // 0x03 - extract from bits 511:384
  emit_int8(imm8 & 0x03);
}

void Assembler::vextracti64x2(XMMRegister dst, XMMRegister src, uint8_t imm8) {
  assert(VM_Version::supports_avx512dq(), "");
  assert(imm8 <= 0x03, "imm8: %u", imm8);
  InstructionAttr attributes(AVX_512bit, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_is_evex_instruction();
  int encode = vex_prefix_and_encode(src->encoding(), 0, dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
  // imm8:
  // 0x00 - extract from bits 127:0
  // 0x01 - extract from bits 255:128
  // 0x02 - extract from bits 383:256
  // 0x03 - extract from bits 511:384
  emit_int24(0x39, (unsigned char)(0xC0 | encode), imm8 & 0x03);
}

void Assembler::vextracti64x4(XMMRegister dst, XMMRegister src, uint8_t imm8) {
  assert(VM_Version::supports_evex(), "");
  assert(imm8 <= 0x01, "imm8: %u", imm8);
  InstructionAttr attributes(AVX_512bit, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_is_evex_instruction();
  int encode = vex_prefix_and_encode(src->encoding(), 0, dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
  // imm8:
  // 0x00 - extract from lower 256 bits
  // 0x01 - extract from upper 256 bits
  emit_int24(0x3B, (unsigned char)(0xC0 | encode), imm8 & 0x01);
}

void Assembler::vextracti64x4(Address dst, XMMRegister src, uint8_t imm8) {
  assert(VM_Version::supports_evex(), "");
  assert(src != xnoreg, "sanity");
  assert(imm8 <= 0x01, "imm8: %u", imm8);
  InstructionMark im(this);
  InstructionAttr attributes(AVX_512bit, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_address_attributes(/* tuple_type */ EVEX_T4, /* input_size_in_bits */ EVEX_64bit);
  attributes.reset_is_clear_context();
  attributes.set_is_evex_instruction();
  vex_prefix(dst, 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
  emit_int8(0x3B);
  emit_operand(src, dst);
  // 0x00 - extract from lower 256 bits
  // 0x01 - extract from upper 256 bits
  emit_int8(imm8 & 0x01);
}

// vextractf forms

void Assembler::vextractf128(XMMRegister dst, XMMRegister src, uint8_t imm8) {
  assert(VM_Version::supports_avx(), "");
  assert(imm8 <= 0x01, "imm8: %u", imm8);
  InstructionAttr attributes(AVX_256bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  int encode = vex_prefix_and_encode(src->encoding(), 0, dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
  // imm8:
  // 0x00 - extract from lower 128 bits
  // 0x01 - extract from upper 128 bits
  emit_int24(0x19, (unsigned char)(0xC0 | encode), imm8 & 0x01);
}

void Assembler::vextractf128(Address dst, XMMRegister src, uint8_t imm8) {
  assert(VM_Version::supports_avx(), "");
  assert(src != xnoreg, "sanity");
  assert(imm8 <= 0x01, "imm8: %u", imm8);
  InstructionMark im(this);
  InstructionAttr attributes(AVX_256bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_address_attributes(/* tuple_type */ EVEX_T4, /* input_size_in_bits */ EVEX_32bit);
  attributes.reset_is_clear_context();
  vex_prefix(dst, 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
  emit_int8(0x19);
  emit_operand(src, dst);
  // 0x00 - extract from lower 128 bits
  // 0x01 - extract from upper 128 bits
  emit_int8(imm8 & 0x01);
}

void Assembler::vextractf32x4(XMMRegister dst, XMMRegister src, uint8_t imm8) {
  assert(VM_Version::supports_evex(), "");
  assert(imm8 <= 0x03, "imm8: %u", imm8);
  InstructionAttr attributes(AVX_512bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_is_evex_instruction();
  int encode = vex_prefix_and_encode(src->encoding(), 0, dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
  // imm8:
  // 0x00 - extract from bits 127:0
  // 0x01 - extract from bits 255:128
  // 0x02 - extract from bits 383:256
  // 0x03 - extract from bits 511:384
  emit_int24(0x19, (unsigned char)(0xC0 | encode), imm8 & 0x03);
}

void Assembler::vextractf32x4(Address dst, XMMRegister src, uint8_t imm8) {
  assert(VM_Version::supports_evex(), "");
  assert(src != xnoreg, "sanity");
  assert(imm8 <= 0x03, "imm8: %u", imm8);
  InstructionMark im(this);
  InstructionAttr attributes(AVX_512bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_address_attributes(/* tuple_type */ EVEX_T4, /* input_size_in_bits */ EVEX_32bit);
  attributes.reset_is_clear_context();
  attributes.set_is_evex_instruction();
  vex_prefix(dst, 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
  emit_int8(0x19);
  emit_operand(src, dst);
  // 0x00 - extract from bits 127:0
  // 0x01 - extract from bits 255:128
  // 0x02 - extract from bits 383:256
  // 0x03 - extract from bits 511:384
  emit_int8(imm8 & 0x03);
}
void Assembler::vextractf64x2(XMMRegister dst, XMMRegister src, uint8_t imm8) {
  assert(VM_Version::supports_avx512dq(), "");
  assert(imm8 <= 0x03, "imm8: %u", imm8);
  InstructionAttr attributes(AVX_512bit, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_is_evex_instruction();
  int encode = vex_prefix_and_encode(src->encoding(), 0, dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
  // imm8:
  // 0x00 - extract from bits 127:0
  // 0x01 - extract from bits 255:128
  // 0x02 - extract from bits 383:256
  // 0x03 - extract from bits 511:384
  emit_int24(0x19, (unsigned char)(0xC0 | encode), imm8 & 0x03);
}

void Assembler::vextractf64x4(XMMRegister dst, XMMRegister src, uint8_t imm8) {
  assert(VM_Version::supports_evex(), "");
  assert(imm8 <= 0x01, "imm8: %u", imm8);
  InstructionAttr attributes(AVX_512bit, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_is_evex_instruction();
  int encode = vex_prefix_and_encode(src->encoding(), 0, dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
  // imm8:
  // 0x00 - extract from lower 256 bits
  // 0x01 - extract from upper 256 bits
  emit_int24(0x1B, (unsigned char)(0xC0 | encode), imm8 & 0x01);
}

void Assembler::vextractf64x4(Address dst, XMMRegister src, uint8_t imm8) {
  assert(VM_Version::supports_evex(), "");
  assert(src != xnoreg, "sanity");
  assert(imm8 <= 0x01, "imm8: %u", imm8);
  InstructionMark im(this);
  InstructionAttr attributes(AVX_512bit, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_address_attributes(/* tuple_type */ EVEX_T4, /* input_size_in_bits */ EVEX_64bit);
  attributes.reset_is_clear_context();
  attributes.set_is_evex_instruction();
  vex_prefix(dst, 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
  emit_int8(0x1B);
  emit_operand(src, dst);
  // 0x00 - extract from lower 256 bits
  // 0x01 - extract from upper 256 bits
  emit_int8(imm8 & 0x01);
}

// xmm/mem sourced byte/word/dword/qword replicate

// duplicate 1-byte integer data from src into programmed locations in dest : requires AVX512BW and AVX512VL
void Assembler::vpbroadcastb(XMMRegister dst, XMMRegister src, int vector_len) {
  assert(VM_Version::supports_avx2(), "");
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
  int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int16(0x78, (unsigned char)(0xC0 | encode));
}

void Assembler::vpbroadcastb(XMMRegister dst, Address src, int vector_len) {
  assert(VM_Version::supports_avx2(), "");
  assert(dst != xnoreg, "sanity");
  InstructionMark im(this);
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_8bit);
  // swap src<->dst for encoding
  vex_prefix(src, 0, dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int8(0x78);
  emit_operand(dst, src);
}

// duplicate 2-byte integer data from src into programmed locations in dest : requires AVX512BW and AVX512VL
void Assembler::vpbroadcastw(XMMRegister dst, XMMRegister src, int vector_len) {
  assert(VM_Version::supports_avx2(), "");
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
  int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int16(0x79, (unsigned char)(0xC0 | encode));
}
void Assembler::vpbroadcastw(XMMRegister dst, Address src, int vector_len) {
  assert(VM_Version::supports_avx2(), "");
  assert(dst != xnoreg, "sanity");
  InstructionMark im(this);
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_16bit);
  // swap src<->dst for encoding
  vex_prefix(src, 0, dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int8(0x79);
  emit_operand(dst, src);
}

// duplicate 4-byte integer data from src into programmed locations in dest : requires AVX512VL
void Assembler::vpbroadcastd(XMMRegister dst, XMMRegister src, int vector_len) {
  assert(UseAVX >= 2, "");
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int16(0x58, (unsigned char)(0xC0 | encode));
}

void Assembler::vpbroadcastd(XMMRegister dst, Address src, int vector_len) {
  assert(VM_Version::supports_avx2(), "");
  assert(dst != xnoreg, "sanity");
  InstructionMark im(this);
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_32bit);
  // swap src<->dst for encoding
  vex_prefix(src, 0, dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int8(0x58);
  emit_operand(dst, src);
}

// duplicate 8-byte integer data from src into programmed locations in dest : requires AVX512VL
void Assembler::vpbroadcastq(XMMRegister dst, XMMRegister src, int vector_len) {
  assert(VM_Version::supports_avx2(), "");
  InstructionAttr attributes(vector_len, /* vex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_rex_vex_w_reverted();
  int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int16(0x59, (unsigned char)(0xC0 | encode));
}

void Assembler::vpbroadcastq(XMMRegister dst, Address src, int vector_len) {
  assert(VM_Version::supports_avx2(), "");
  assert(dst != xnoreg, "sanity");
  InstructionMark im(this);
  InstructionAttr attributes(vector_len, /* vex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_rex_vex_w_reverted();
  attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_64bit);
  // swap src<->dst for encoding
  vex_prefix(src, 0, dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int8(0x59);
  emit_operand(dst, src);
}
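// Note on the address attributes above (per the Intel SDM disp8*N compression
// rules): when one of these broadcasts is EVEX-encoded, a one-byte memory
// displacement is implicitly scaled by the tuple size, which for a T1S tuple
// is the element width. So for a (hypothetical) call
//   vpbroadcastd(xmm0, Address(rsi, 64), Assembler::AVX_512bit);
// the +64 displacement can be encoded as disp8 = 16 rather than needing a
// 4-byte disp32.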
void Assembler::evbroadcasti64x2(XMMRegister dst, XMMRegister src, int vector_len) {
  assert(vector_len != Assembler::AVX_128bit, "");
  assert(VM_Version::supports_avx512dq(), "");
  InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_rex_vex_w_reverted();
  int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int16(0x5A, (unsigned char)(0xC0 | encode));
}

void Assembler::evbroadcasti64x2(XMMRegister dst, Address src, int vector_len) {
  assert(vector_len != Assembler::AVX_128bit, "");
  assert(VM_Version::supports_avx512dq(), "");
  assert(dst != xnoreg, "sanity");
  InstructionMark im(this);
  InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_rex_vex_w_reverted();
  attributes.set_address_attributes(/* tuple_type */ EVEX_T2, /* input_size_in_bits */ EVEX_64bit);
  // swap src<->dst for encoding
  vex_prefix(src, 0, dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int8(0x5A);
  emit_operand(dst, src);
}

// scalar single/double precision replicate

// duplicate single precision data from src into programmed locations in dest : requires AVX512VL
void Assembler::vbroadcastss(XMMRegister dst, XMMRegister src, int vector_len) {
  assert(VM_Version::supports_avx2(), "");
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int16(0x18, (unsigned char)(0xC0 | encode));
}

void Assembler::vbroadcastss(XMMRegister dst, Address src, int vector_len) {
  assert(VM_Version::supports_avx(), "");
  assert(dst != xnoreg, "sanity");
  InstructionMark im(this);
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_32bit);
  // swap src<->dst for encoding
  vex_prefix(src, 0, dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int8(0x18);
  emit_operand(dst, src);
}

// duplicate double precision data from src into programmed locations in dest : requires AVX512VL
void Assembler::vbroadcastsd(XMMRegister dst, XMMRegister src, int vector_len) {
  assert(VM_Version::supports_avx2(), "");
  assert(vector_len == AVX_256bit || vector_len == AVX_512bit, "");
  InstructionAttr attributes(vector_len, /* vex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_rex_vex_w_reverted();
  int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int16(0x19, (unsigned char)(0xC0 | encode));
}

void Assembler::vbroadcastsd(XMMRegister dst, Address src, int vector_len) {
  assert(VM_Version::supports_avx(), "");
  assert(vector_len == AVX_256bit || vector_len == AVX_512bit, "");
  assert(dst != xnoreg, "sanity");
  InstructionMark im(this);
  InstructionAttr attributes(vector_len, /* vex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_64bit);
  attributes.set_rex_vex_w_reverted();
  // swap src<->dst for encoding
  vex_prefix(src, 0, dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int8(0x19);
  emit_operand(dst, src);
}


// gpr source broadcast forms
// duplicate 1-byte integer data from src into programmed locations in dest : requires AVX512BW and AVX512VL
void Assembler::evpbroadcastb(XMMRegister dst, Register src, int vector_len) {
  assert(VM_Version::supports_avx512bw(), "");
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_is_evex_instruction();
  int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int16(0x7A, (unsigned char)(0xC0 | encode));
}

// duplicate 2-byte integer data from src into programmed locations in dest : requires AVX512BW and AVX512VL
void Assembler::evpbroadcastw(XMMRegister dst, Register src, int vector_len) {
  assert(VM_Version::supports_avx512bw(), "");
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_is_evex_instruction();
  int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int16(0x7B, (unsigned char)(0xC0 | encode));
}

// duplicate 4-byte integer data from src into programmed locations in dest : requires AVX512VL
void Assembler::evpbroadcastd(XMMRegister dst, Register src, int vector_len) {
  assert(VM_Version::supports_evex(), "");
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_is_evex_instruction();
  int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int16(0x7C, (unsigned char)(0xC0 | encode));
}

// duplicate 8-byte integer data from src into programmed locations in dest : requires AVX512VL
void Assembler::evpbroadcastq(XMMRegister dst, Register src, int vector_len) {
  assert(VM_Version::supports_evex(), "");
  InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_is_evex_instruction();
  int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int16(0x7C, (unsigned char)(0xC0 | encode));
}

void Assembler::evpgatherdd(XMMRegister dst, KRegister mask, Address src, int vector_len) {
  assert(VM_Version::supports_evex(), "");
  assert(dst != xnoreg, "sanity");
  InstructionMark im(this);
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
  attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_64bit);
  attributes.reset_is_clear_context();
  attributes.set_embedded_opmask_register_specifier(mask);
  attributes.set_is_evex_instruction();
  // swap src<->dst for encoding
  vex_prefix(src, 0, dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int8((unsigned char)0x90);
  emit_operand(dst, src);
}

// Carry-Less Multiplication Quadword
void Assembler::pclmulqdq(XMMRegister dst, XMMRegister src, int mask) {
  assert(VM_Version::supports_clmul(), "");
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ true);
  int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
  emit_int24(0x44, (unsigned char)(0xC0 | encode), (unsigned char)mask);
}
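// Note on the mask operand (per the Intel SDM definition of PCLMULQDQ):
// imm8 bit 0 selects the low or high quadword of the first source and bit 4
// selects the low or high quadword of the second, so the common values are
// 0x00 (low*low), 0x11 (high*high) and 0x01/0x10 for the cross products, as
// used by carry-less-multiply based CRC32 and GHASH kernels.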
// Carry-Less Multiplication Quadword
void Assembler::vpclmulqdq(XMMRegister dst, XMMRegister nds, XMMRegister src, int mask) {
  assert(VM_Version::supports_avx() && VM_Version::supports_clmul(), "");
  InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ true);
  int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
  emit_int24(0x44, (unsigned char)(0xC0 | encode), (unsigned char)mask);
}

void Assembler::evpclmulqdq(XMMRegister dst, XMMRegister nds, XMMRegister src, int mask, int vector_len) {
  assert(VM_Version::supports_avx512_vpclmulqdq(), "Requires vector carryless multiplication support");
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_is_evex_instruction();
  int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
  emit_int24(0x44, (unsigned char)(0xC0 | encode), (unsigned char)mask);
}

void Assembler::vzeroupper_uncached() {
  if (VM_Version::supports_vzeroupper()) {
    InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
    (void)vex_prefix_and_encode(0, 0, 0, VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes);
    emit_int8(0x77);
  }
}

#ifndef _LP64
// 32bit only pieces of the assembler

void Assembler::vzeroupper() {
  vzeroupper_uncached();
}

void Assembler::cmp_literal32(Register src1, int32_t imm32, RelocationHolder const& rspec) {
  // NO PREFIX AS NEVER 64BIT
  InstructionMark im(this);
  emit_int16((unsigned char)0x81, (unsigned char)(0xF8 | src1->encoding()));
  emit_data(imm32, rspec, 0);
}

void Assembler::cmp_literal32(Address src1, int32_t imm32, RelocationHolder const& rspec) {
  // NO PREFIX AS NEVER 64BIT (not even 32bit versions of 64bit regs)
  InstructionMark im(this);
  emit_int8((unsigned char)0x81);
  emit_operand(rdi, src1);
  emit_data(imm32, rspec, 0);
}

// The 64-bit cmpxchg (on a 32-bit platform) compares the value at adr with the contents
// of rdx:rax, and stores rcx:rbx into adr if so; otherwise, the value at adr is loaded
// into rdx:rax. The ZF is set if the compared values were equal, and cleared otherwise.
void Assembler::cmpxchg8(Address adr) {
  InstructionMark im(this);
  emit_int16(0x0F, (unsigned char)0xC7);
  emit_operand(rcx, adr);
}
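// Usage sketch (hypothetical caller, for illustration): cmpxchg8 emits only
// the CMPXCHG8B opcode itself; an atomic 64-bit compare-and-exchange on a
// 32-bit VM is assumed to add the LOCK prefix separately, e.g.
//   // expected value in edx:eax, replacement in ecx:ebx
//   lock();
//   cmpxchg8(field_addr);  // ZF set on success; old value in edx:eax if not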
void Assembler::decl(Register dst) {
  // Don't use it directly. Use MacroAssembler::decrementl() instead.
  emit_int8(0x48 | dst->encoding());
}

// 64bit doesn't use the x87

void Assembler::fabs() {
  emit_int16((unsigned char)0xD9, (unsigned char)0xE1);
}

void Assembler::fadd(int i) {
  emit_farith(0xD8, 0xC0, i);
}

void Assembler::fadd_d(Address src) {
  InstructionMark im(this);
  emit_int8((unsigned char)0xDC);
  emit_operand32(rax, src);
}

void Assembler::fadd_s(Address src) {
  InstructionMark im(this);
  emit_int8((unsigned char)0xD8);
  emit_operand32(rax, src);
}

void Assembler::fadda(int i) {
  emit_farith(0xDC, 0xC0, i);
}

void Assembler::faddp(int i) {
  emit_farith(0xDE, 0xC0, i);
}

void Assembler::fchs() {
  emit_int16((unsigned char)0xD9, (unsigned char)0xE0);
}

void Assembler::fcom(int i) {
  emit_farith(0xD8, 0xD0, i);
}

void Assembler::fcomp(int i) {
  emit_farith(0xD8, 0xD8, i);
}

void Assembler::fcomp_d(Address src) {
  InstructionMark im(this);
  emit_int8((unsigned char)0xDC);
  emit_operand32(rbx, src);
}

void Assembler::fcomp_s(Address src) {
  InstructionMark im(this);
  emit_int8((unsigned char)0xD8);
  emit_operand32(rbx, src);
}

void Assembler::fcompp() {
  emit_int16((unsigned char)0xDE, (unsigned char)0xD9);
}

void Assembler::fcos() {
  emit_int16((unsigned char)0xD9, (unsigned char)0xFF);
}

void Assembler::fdecstp() {
  emit_int16((unsigned char)0xD9, (unsigned char)0xF6);
}

void Assembler::fdiv(int i) {
  emit_farith(0xD8, 0xF0, i);
}

void Assembler::fdiv_d(Address src) {
  InstructionMark im(this);
  emit_int8((unsigned char)0xDC);
  emit_operand32(rsi, src);
}

void Assembler::fdiv_s(Address src) {
  InstructionMark im(this);
  emit_int8((unsigned char)0xD8);
  emit_operand32(rsi, src);
}

void Assembler::fdiva(int i) {
  emit_farith(0xDC, 0xF8, i);
}
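// Worked example (follows directly from the x87 encoding used by
// emit_farith, which adds the stack index into the second opcode byte):
// fadd(3) emits the two bytes D8 C3, i.e. "fadd st(0), st(3)". The same
// base-plus-index pattern applies to all the farith users in this section.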
// Note: The Intel manual (Pentium Processor User's Manual, Vol.3, 1994)
// is erroneous for some of the floating-point instructions below.

void Assembler::fdivp(int i) {
  emit_farith(0xDE, 0xF8, i); // ST(0) <- ST(0) / ST(1) and pop (Intel manual wrong)
}

void Assembler::fdivr(int i) {
  emit_farith(0xD8, 0xF8, i);
}

void Assembler::fdivr_d(Address src) {
  InstructionMark im(this);
  emit_int8((unsigned char)0xDC);
  emit_operand32(rdi, src);
}

void Assembler::fdivr_s(Address src) {
  InstructionMark im(this);
  emit_int8((unsigned char)0xD8);
  emit_operand32(rdi, src);
}

void Assembler::fdivra(int i) {
  emit_farith(0xDC, 0xF0, i);
}

void Assembler::fdivrp(int i) {
  emit_farith(0xDE, 0xF0, i); // ST(0) <- ST(1) / ST(0) and pop (Intel manual wrong)
}

void Assembler::ffree(int i) {
  emit_farith(0xDD, 0xC0, i);
}

void Assembler::fild_d(Address adr) {
  InstructionMark im(this);
  emit_int8((unsigned char)0xDF);
  emit_operand32(rbp, adr);
}

void Assembler::fild_s(Address adr) {
  InstructionMark im(this);
  emit_int8((unsigned char)0xDB);
  emit_operand32(rax, adr);
}

void Assembler::fincstp() {
  emit_int16((unsigned char)0xD9, (unsigned char)0xF7);
}

void Assembler::finit() {
  emit_int24((unsigned char)0x9B, (unsigned char)0xDB, (unsigned char)0xE3);
}

void Assembler::fist_s(Address adr) {
  InstructionMark im(this);
  emit_int8((unsigned char)0xDB);
  emit_operand32(rdx, adr);
}

void Assembler::fistp_d(Address adr) {
  InstructionMark im(this);
  emit_int8((unsigned char)0xDF);
  emit_operand32(rdi, adr);
}

void Assembler::fistp_s(Address adr) {
  InstructionMark im(this);
  emit_int8((unsigned char)0xDB);
  emit_operand32(rbx, adr);
}

void Assembler::fld1() {
  emit_int16((unsigned char)0xD9, (unsigned char)0xE8);
}

void Assembler::fld_d(Address adr) {
  InstructionMark im(this);
  emit_int8((unsigned char)0xDD);
  emit_operand32(rax, adr);
}

void Assembler::fld_s(Address adr) {
  InstructionMark im(this);
  emit_int8((unsigned char)0xD9);
  emit_operand32(rax, adr);
}


void Assembler::fld_s(int index) {
  emit_farith(0xD9, 0xC0, index);
}

void Assembler::fld_x(Address adr) {
  InstructionMark im(this);
  emit_int8((unsigned char)0xDB);
  emit_operand32(rbp, adr);
}

void Assembler::fldcw(Address src) {
  InstructionMark im(this);
  emit_int8((unsigned char)0xD9);
  emit_operand32(rbp, src);
}

void Assembler::fldenv(Address src) {
  InstructionMark im(this);
  emit_int8((unsigned char)0xD9);
  emit_operand32(rsp, src);
}

void Assembler::fldlg2() {
  emit_int16((unsigned char)0xD9, (unsigned char)0xEC);
}

void Assembler::fldln2() {
  emit_int16((unsigned char)0xD9, (unsigned char)0xED);
}

void Assembler::fldz() {
  emit_int16((unsigned char)0xD9, (unsigned char)0xEE);
}

void Assembler::flog() {
  fldln2();
  fxch();
  fyl2x();
}

void Assembler::flog10() {
  fldlg2();
  fxch();
  fyl2x();
}

void Assembler::fmul(int i) {
  emit_farith(0xD8, 0xC8, i);
}

void Assembler::fmul_d(Address src) {
  InstructionMark im(this);
  emit_int8((unsigned char)0xDC);
  emit_operand32(rcx, src);
}
void Assembler::fmul_s(Address src) {
  InstructionMark im(this);
  emit_int8((unsigned char)0xD8);
  emit_operand32(rcx, src);
}

void Assembler::fmula(int i) {
  emit_farith(0xDC, 0xC8, i);
}

void Assembler::fmulp(int i) {
  emit_farith(0xDE, 0xC8, i);
}

void Assembler::fnsave(Address dst) {
  InstructionMark im(this);
  emit_int8((unsigned char)0xDD);
  emit_operand32(rsi, dst);
}

void Assembler::fnstcw(Address src) {
  InstructionMark im(this);
  emit_int16((unsigned char)0x9B, (unsigned char)0xD9);
  emit_operand32(rdi, src);
}

void Assembler::fnstsw_ax() {
  emit_int16((unsigned char)0xDF, (unsigned char)0xE0);
}

void Assembler::fprem() {
  emit_int16((unsigned char)0xD9, (unsigned char)0xF8);
}

void Assembler::fprem1() {
  emit_int16((unsigned char)0xD9, (unsigned char)0xF5);
}

void Assembler::frstor(Address src) {
  InstructionMark im(this);
  emit_int8((unsigned char)0xDD);
  emit_operand32(rsp, src);
}

void Assembler::fsin() {
  emit_int16((unsigned char)0xD9, (unsigned char)0xFE);
}

void Assembler::fsqrt() {
  emit_int16((unsigned char)0xD9, (unsigned char)0xFA);
}

void Assembler::fst_d(Address adr) {
  InstructionMark im(this);
  emit_int8((unsigned char)0xDD);
  emit_operand32(rdx, adr);
}

void Assembler::fst_s(Address adr) {
  InstructionMark im(this);
  emit_int8((unsigned char)0xD9);
  emit_operand32(rdx, adr);
}

void Assembler::fstp_d(Address adr) {
  InstructionMark im(this);
  emit_int8((unsigned char)0xDD);
  emit_operand32(rbx, adr);
}

void Assembler::fstp_d(int index) {
  emit_farith(0xDD, 0xD8, index);
}

void Assembler::fstp_s(Address adr) {
  InstructionMark im(this);
  emit_int8((unsigned char)0xD9);
  emit_operand32(rbx, adr);
}

void Assembler::fstp_x(Address adr) {
  InstructionMark im(this);
  emit_int8((unsigned char)0xDB);
  emit_operand32(rdi, adr);
}

void Assembler::fsub(int i) {
  emit_farith(0xD8, 0xE0, i);
}

void Assembler::fsub_d(Address src) {
  InstructionMark im(this);
  emit_int8((unsigned char)0xDC);
  emit_operand32(rsp, src);
}

void Assembler::fsub_s(Address src) {
  InstructionMark im(this);
  emit_int8((unsigned char)0xD8);
  emit_operand32(rsp, src);
}

void Assembler::fsuba(int i) {
  emit_farith(0xDC, 0xE8, i);
}

void Assembler::fsubp(int i) {
  emit_farith(0xDE, 0xE8, i); // ST(0) <- ST(0) - ST(1) and pop (Intel manual wrong)
}

void Assembler::fsubr(int i) {
  emit_farith(0xD8, 0xE8, i);
}

void Assembler::fsubr_d(Address src) {
  InstructionMark im(this);
  emit_int8((unsigned char)0xDC);
  emit_operand32(rbp, src);
}

void Assembler::fsubr_s(Address src) {
  InstructionMark im(this);
  emit_int8((unsigned char)0xD8);
  emit_operand32(rbp, src);
}

void Assembler::fsubra(int i) {
  emit_farith(0xDC, 0xE0, i);
}

void Assembler::fsubrp(int i) {
  emit_farith(0xDE, 0xE0, i); // ST(0) <- ST(1) - ST(0) and pop (Intel manual wrong)
}

void Assembler::ftan() {
  // FPTAN (D9 F2) pushes 1.0 after the result; the trailing FSTP ST(0) (DD D8) pops it.
  emit_int32((unsigned char)0xD9, (unsigned char)0xF2, (unsigned char)0xDD, (unsigned char)0xD8);
}
void Assembler::ftst() {
  emit_int16((unsigned char)0xD9, (unsigned char)0xE4);
}

void Assembler::fucomi(int i) {
  // make sure the instruction is supported (introduced for P6, together with cmov)
  guarantee(VM_Version::supports_cmov(), "illegal instruction");
  emit_farith(0xDB, 0xE8, i);
}

void Assembler::fucomip(int i) {
  // make sure the instruction is supported (introduced for P6, together with cmov)
  guarantee(VM_Version::supports_cmov(), "illegal instruction");
  emit_farith(0xDF, 0xE8, i);
}

void Assembler::fwait() {
  emit_int8((unsigned char)0x9B);
}

void Assembler::fxch(int i) {
  emit_farith(0xD9, 0xC8, i);
}

void Assembler::fyl2x() {
  emit_int16((unsigned char)0xD9, (unsigned char)0xF1);
}

void Assembler::frndint() {
  emit_int16((unsigned char)0xD9, (unsigned char)0xFC);
}

void Assembler::f2xm1() {
  emit_int16((unsigned char)0xD9, (unsigned char)0xF0);
}

void Assembler::fldl2e() {
  emit_int16((unsigned char)0xD9, (unsigned char)0xEA);
}
#endif // !_LP64

// SSE SIMD prefix byte values corresponding to VexSimdPrefix encoding.
static int simd_pre[4] = { 0, 0x66, 0xF3, 0xF2 };
// SSE opcode second byte values (first is 0x0F) corresponding to VexOpcode encoding.
static int simd_opc[4] = { 0, 0, 0x38, 0x3A };

// Generate SSE legacy REX prefix and SIMD opcode based on VEX encoding.
void Assembler::rex_prefix(Address adr, XMMRegister xreg, VexSimdPrefix pre, VexOpcode opc, bool rex_w) {
  if (pre > 0) {
    emit_int8(simd_pre[pre]);
  }
  if (rex_w) {
    prefixq(adr, xreg);
  } else {
    prefix(adr, xreg);
  }
  if (opc > 0) {
    emit_int8(0x0F);
    int opc2 = simd_opc[opc];
    if (opc2 > 0) {
      emit_int8(opc2);
    }
  }
}

int Assembler::rex_prefix_and_encode(int dst_enc, int src_enc, VexSimdPrefix pre, VexOpcode opc, bool rex_w) {
  if (pre > 0) {
    emit_int8(simd_pre[pre]);
  }
  int encode = (rex_w) ? prefixq_and_encode(dst_enc, src_enc) : prefix_and_encode(dst_enc, src_enc);
  if (opc > 0) {
    emit_int8(0x0F);
    int opc2 = simd_opc[opc];
    if (opc2 > 0) {
      emit_int8(opc2);
    }
  }
  return encode;
}


void Assembler::vex_prefix(bool vex_r, bool vex_b, bool vex_x, int nds_enc, VexSimdPrefix pre, VexOpcode opc) {
  int vector_len = _attributes->get_vector_len();
  bool vex_w = _attributes->is_rex_vex_w();
  if (vex_b || vex_x || vex_w || (opc == VEX_OPCODE_0F_38) || (opc == VEX_OPCODE_0F_3A)) {
    int byte1 = (vex_r ? VEX_R : 0) | (vex_x ? VEX_X : 0) | (vex_b ? VEX_B : 0);
    byte1 = (~byte1) & 0xE0;
    byte1 |= opc;

    int byte2 = ((~nds_enc) & 0xf) << 3;
    byte2 |= (vex_w ? VEX_W : 0) | ((vector_len > 0) ? 4 : 0) | pre;

    emit_int24((unsigned char)VEX_3bytes, byte1, byte2);
  } else {
    int byte1 = vex_r ? VEX_R : 0;
    byte1 = (~byte1) & 0x80;
    byte1 |= ((~nds_enc) & 0xf) << 3;
    byte1 |= ((vector_len > 0) ? 4 : 0) | pre;
    emit_int16((unsigned char)VEX_2bytes, byte1);
  }
}
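// Worked example (follows from the branch above): the compact two-byte 0xC5
// prefix can only express R, vvvv, L and pp, so it is chosen only when
// W = X = B = 0 and the opcode lives in the 0F map. "vpxor xmm0, xmm1, xmm2"
// (low registers, 0F map, W0) therefore gets the C5 form, while any
// 0F38/0F3A-map instruction, or one needing B/X for registers 8-15, or one
// setting W, falls back to the three-byte 0xC4 form.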
// This is a 4 byte encoding
void Assembler::evex_prefix(bool vex_r, bool vex_b, bool vex_x, bool evex_r, bool evex_v, int nds_enc, VexSimdPrefix pre, VexOpcode opc){
  // EVEX 0x62 prefix
  // byte1 = EVEX_4bytes;

  bool vex_w = _attributes->is_rex_vex_w();
  int evex_encoding = (vex_w ? VEX_W : 0);
  // EVEX.b is not currently used for broadcast of single element or data rounding modes
  _attributes->set_evex_encoding(evex_encoding);

  // P0: byte 2, initialized to RXBR`00mm
  // instead of not'd
  int byte2 = (vex_r ? VEX_R : 0) | (vex_x ? VEX_X : 0) | (vex_b ? VEX_B : 0) | (evex_r ? EVEX_Rb : 0);
  byte2 = (~byte2) & 0xF0;
  // confine opc opcode extensions in mm bits to lower two bits
  // of form {0F, 0F_38, 0F_3A}
  byte2 |= opc;

  // P1: byte 3 as Wvvvv1pp
  int byte3 = ((~nds_enc) & 0xf) << 3;
  // p[10] is always 1
  byte3 |= EVEX_F;
  byte3 |= (vex_w & 1) << 7;
  // confine pre opcode extensions in pp bits to lower two bits
  // of form {66, F3, F2}
  byte3 |= pre;

  // P2: byte 4 as zL'Lbv'aaa
  // kregs are implemented in the low 3 bits as aaa
  int byte4 = (_attributes->is_no_reg_mask()) ?
              0 :
              _attributes->get_embedded_opmask_register_specifier();
  // EVEX.v` for extending EVEX.vvvv or VIDX
  byte4 |= (evex_v ? 0 : EVEX_V);
  // third EVEX.b for broadcast actions
  byte4 |= (_attributes->is_extended_context() ? EVEX_Rb : 0);
  // fourth EVEX.L'L for vector length : 0 is 128, 1 is 256, 2 is 512, currently we do not support 1024
  byte4 |= ((_attributes->get_vector_len()) & 0x3) << 5;
  // last is EVEX.z for zero/merge actions
  if (_attributes->is_no_reg_mask() == false) {
    byte4 |= (_attributes->is_clear_context() ? EVEX_Z : 0);
  }

  emit_int32(EVEX_4bytes, byte2, byte3, byte4);
}
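// Worked example (hand-assembled from the byte construction above) for
// "evpxorq zmm0, zmm1, zmm2" (pre = 66, 0F map, W1, 512-bit, no mask):
//   byte2: no R/X/B/R' extensions -> (~0 & 0xF0) = 0xF0, | mm = 1      -> 0xF1
//   byte3: vvvv = (~1 & 0xF) << 3 = 0x70, | EVEX_F | W << 7 | pp = 1   -> 0xF5
//   byte4: aaa = 0, | EVEX_V (V' for nds < 16), | L'L = 2 << 5         -> 0x48
// giving the prefix 62 F1 F5 48, followed by the opcode EF and modrm C2.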
void Assembler::vex_prefix(Address adr, int nds_enc, int xreg_enc, VexSimdPrefix pre, VexOpcode opc, InstructionAttr *attributes) {
  bool vex_r = (xreg_enc & 8) == 8;
  bool vex_b = adr.base_needs_rex();
  bool vex_x;
  if (adr.isxmmindex()) {
    vex_x = adr.xmmindex_needs_rex();
  } else {
    vex_x = adr.index_needs_rex();
  }
  set_attributes(attributes);
  attributes->set_current_assembler(this);

  // For an EVEX-capable instruction that is not marked as pure EVEX, check whether
  // it is allowed in legacy mode and has resources which will fit in it.
  // Pure EVEX instructions will have is_evex_instruction set in their definition.
  if (!attributes->is_legacy_mode()) {
    if (UseAVX > 2 && !attributes->is_evex_instruction() && !is_managed()) {
      if ((attributes->get_vector_len() != AVX_512bit) && (nds_enc < 16) && (xreg_enc < 16)) {
        attributes->set_is_legacy_mode();
      }
    }
  }

  if (UseAVX > 2) {
    assert(((!attributes->uses_vl()) ||
            (attributes->get_vector_len() == AVX_512bit) ||
            (!_legacy_mode_vl) ||
            (attributes->is_legacy_mode())), "XMM register should be 0-15");
    assert(((nds_enc < 16 && xreg_enc < 16) || (!attributes->is_legacy_mode())), "XMM register should be 0-15");
  }

  clear_managed();
  if (UseAVX > 2 && !attributes->is_legacy_mode())
  {
    bool evex_r = (xreg_enc >= 16);
    bool evex_v;
    // EVEX.V' is set to true when VSIB is used as we may need to use higher order XMM registers (16-31)
    if (adr.isxmmindex()) {
      evex_v = ((adr._xmmindex->encoding() > 15) ? true : false);
    } else {
      evex_v = (nds_enc >= 16);
    }
    attributes->set_is_evex_instruction();
    evex_prefix(vex_r, vex_b, vex_x, evex_r, evex_v, nds_enc, pre, opc);
  } else {
    if (UseAVX > 2 && attributes->is_rex_vex_w_reverted()) {
      attributes->set_rex_vex_w(false);
    }
    vex_prefix(vex_r, vex_b, vex_x, nds_enc, pre, opc);
  }
}

int Assembler::vex_prefix_and_encode(int dst_enc, int nds_enc, int src_enc, VexSimdPrefix pre, VexOpcode opc, InstructionAttr *attributes) {
  bool vex_r = (dst_enc & 8) == 8;
  bool vex_b = (src_enc & 8) == 8;
  bool vex_x = false;
  set_attributes(attributes);
  attributes->set_current_assembler(this);

  // For an EVEX-capable instruction that is not marked as pure EVEX, check whether
  // it is allowed in legacy mode and has resources which will fit in it.
  // Pure EVEX instructions will have is_evex_instruction set in their definition.
  if (!attributes->is_legacy_mode()) {
    if (UseAVX > 2 && !attributes->is_evex_instruction() && !is_managed()) {
      if ((!attributes->uses_vl() || (attributes->get_vector_len() != AVX_512bit)) &&
          (dst_enc < 16) && (nds_enc < 16) && (src_enc < 16)) {
        attributes->set_is_legacy_mode();
      }
    }
  }

  if (UseAVX > 2) {
    // All the scalar fp instructions (with uses_vl as false) can have legacy_mode as false
    // Instructions with uses_vl true are vector instructions
    // All the vector instructions with AVX_512bit length can have legacy_mode as false
    // All the vector instructions with < AVX_512bit length can have legacy_mode as false if AVX512vl() is supported
    // The rest should have legacy_mode set as true
    assert(((!attributes->uses_vl()) ||
            (attributes->get_vector_len() == AVX_512bit) ||
            (!_legacy_mode_vl) ||
            (attributes->is_legacy_mode())), "XMM register should be 0-15");
    // Instructions with legacy_mode true should have dst, nds and src < 15
    assert(((dst_enc < 16 && nds_enc < 16 && src_enc < 16) || (!attributes->is_legacy_mode())), "XMM register should be 0-15");
  }

  clear_managed();
  if (UseAVX > 2 && !attributes->is_legacy_mode())
  {
    bool evex_r = (dst_enc >= 16);
    bool evex_v = (nds_enc >= 16);
    // can use vex_x as bank extender on rm encoding
    vex_x = (src_enc >= 16);
    attributes->set_is_evex_instruction();
    evex_prefix(vex_r, vex_b, vex_x, evex_r, evex_v, nds_enc, pre, opc);
  } else {
    if (UseAVX > 2 && attributes->is_rex_vex_w_reverted()) {
      attributes->set_rex_vex_w(false);
    }
    vex_prefix(vex_r, vex_b, vex_x, nds_enc, pre, opc);
  }

  // return modrm byte components for operands
  return (((dst_enc & 7) << 3) | (src_enc & 7));
}
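// Note (follows from the return statement above): only the reg/rm fields,
// ((dst & 7) << 3) | (src & 7), are returned; callers OR in 0xC0 to form a
// register-direct modrm byte. E.g. dst = xmm1, src = xmm2 yields 0x0A here
// and an emitted modrm of 0xC0 | 0x0A = 0xCA.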
void Assembler::simd_prefix(XMMRegister xreg, XMMRegister nds, Address adr, VexSimdPrefix pre,
                            VexOpcode opc, InstructionAttr *attributes) {
  if (UseAVX > 0) {
    int xreg_enc = xreg->encoding();
    int nds_enc = nds->is_valid() ? nds->encoding() : 0;
    vex_prefix(adr, nds_enc, xreg_enc, pre, opc, attributes);
  } else {
    assert((nds == xreg) || (nds == xnoreg), "wrong sse encoding");
    rex_prefix(adr, xreg, pre, opc, attributes->is_rex_vex_w());
  }
}

int Assembler::simd_prefix_and_encode(XMMRegister dst, XMMRegister nds, XMMRegister src, VexSimdPrefix pre,
                                      VexOpcode opc, InstructionAttr *attributes) {
  int dst_enc = dst->encoding();
  int src_enc = src->encoding();
  if (UseAVX > 0) {
    int nds_enc = nds->is_valid() ? nds->encoding() : 0;
    return vex_prefix_and_encode(dst_enc, nds_enc, src_enc, pre, opc, attributes);
  } else {
    assert((nds == dst) || (nds == src) || (nds == xnoreg), "wrong sse encoding");
    return rex_prefix_and_encode(dst_enc, src_enc, pre, opc, attributes->is_rex_vex_w());
  }
}

void Assembler::vmaxss(XMMRegister dst, XMMRegister nds, XMMRegister src) {
  assert(VM_Version::supports_avx(), "");
  InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
  int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_F3, VEX_OPCODE_0F, &attributes);
  emit_int16(0x5F, (unsigned char)(0xC0 | encode));
}

void Assembler::vmaxsd(XMMRegister dst, XMMRegister nds, XMMRegister src) {
  assert(VM_Version::supports_avx(), "");
  InstructionAttr attributes(AVX_128bit, /* vex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
  attributes.set_rex_vex_w_reverted();
  int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_F2, VEX_OPCODE_0F, &attributes);
  emit_int16(0x5F, (unsigned char)(0xC0 | encode));
}

void Assembler::vminss(XMMRegister dst, XMMRegister nds, XMMRegister src) {
  assert(VM_Version::supports_avx(), "");
  InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
  int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_F3, VEX_OPCODE_0F, &attributes);
  emit_int16(0x5D, (unsigned char)(0xC0 | encode));
}

void Assembler::vminsd(XMMRegister dst, XMMRegister nds, XMMRegister src) {
  assert(VM_Version::supports_avx(), "");
  InstructionAttr attributes(AVX_128bit, /* vex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
  attributes.set_rex_vex_w_reverted();
  int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_F2, VEX_OPCODE_0F, &attributes);
  emit_int16(0x5D, (unsigned char)(0xC0 | encode));
}

void Assembler::cmppd(XMMRegister dst, XMMRegister nds, XMMRegister src, int cop, int vector_len) {
  assert(VM_Version::supports_avx(), "");
  assert(vector_len <= AVX_256bit, "");
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ true);
  int encode = simd_prefix_and_encode(dst, nds, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int24((unsigned char)0xC2, (unsigned char)(0xC0 | encode), (unsigned char)(0xF & cop));
}
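// Note on the cop operand (per the Intel SDM predicate table for the
// non-AVX-512 forms of CMPPD/CMPPS): 0 = EQ, 1 = LT, 2 = LE, 3 = UNORD,
// 4 = NEQ, 5 = NLT, 6 = NLE, 7 = ORD; only the low four bits survive the
// "0xF & cop" mask applied when the immediate is emitted.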
AVX_256bit, ""); 7534 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ true); 7535 int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src1->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes); 7536 int src2_enc = src2->encoding(); 7537 emit_int24((unsigned char)0x4B, (unsigned char)(0xC0 | encode), (unsigned char)(0xF0 & src2_enc << 4)); 7538 } 7539 7540 void Assembler::cmpps(XMMRegister dst, XMMRegister nds, XMMRegister src, int cop, int vector_len) { 7541 assert(VM_Version::supports_avx(), ""); 7542 assert(vector_len <= AVX_256bit, ""); 7543 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ true); 7544 int encode = simd_prefix_and_encode(dst, nds, src, VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes); 7545 emit_int24((unsigned char)0xC2, (unsigned char)(0xC0 | encode), (unsigned char)(0xF & cop)); 7546 } 7547 7548 void Assembler::blendvps(XMMRegister dst, XMMRegister nds, XMMRegister src1, XMMRegister src2, int vector_len) { 7549 assert(VM_Version::supports_avx(), ""); 7550 assert(vector_len <= AVX_256bit, ""); 7551 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ true); 7552 int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src1->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes); 7553 int src2_enc = src2->encoding(); 7554 emit_int24((unsigned char)0x4A, (unsigned char)(0xC0 | encode), (unsigned char)(0xF0 & src2_enc << 4)); 7555 } 7556 7557 void Assembler::vpblendd(XMMRegister dst, XMMRegister nds, XMMRegister src, int imm8, int vector_len) { 7558 assert(VM_Version::supports_avx2(), ""); 7559 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ true); 7560 int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes); 7561 emit_int24((unsigned char)0x02, (unsigned char)(0xC0 | encode), (unsigned char)imm8); 7562 } 7563 7564 void Assembler::shlxl(Register dst, Register src1, Register src2) { 7565 assert(VM_Version::supports_bmi2(), ""); 7566 InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ true); 7567 int encode = vex_prefix_and_encode(dst->encoding(), src2->encoding(), src1->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes); 7568 emit_int16((unsigned char)0xF7, (unsigned char)(0xC0 | encode)); 7569 } 7570 7571 void Assembler::shlxq(Register dst, Register src1, Register src2) { 7572 assert(VM_Version::supports_bmi2(), ""); 7573 InstructionAttr attributes(AVX_128bit, /* vex_w */ true, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ true); 7574 int encode = vex_prefix_and_encode(dst->encoding(), src2->encoding(), src1->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes); 7575 emit_int16((unsigned char)0xF7, (unsigned char)(0xC0 | encode)); 7576 } 7577 7578 #ifndef _LP64 7579 7580 void Assembler::incl(Register dst) { 7581 // Don't use it directly. Use MacroAssembler::incrementl() instead. 
#ifndef _LP64

void Assembler::incl(Register dst) {
  // Don't use it directly. Use MacroAssembler::incrementl() instead.
  emit_int8(0x40 | dst->encoding());
}

void Assembler::lea(Register dst, Address src) {
  leal(dst, src);
}

void Assembler::mov_literal32(Address dst, int32_t imm32, RelocationHolder const& rspec) {
  InstructionMark im(this);
  emit_int8((unsigned char)0xC7);
  emit_operand(rax, dst);
  emit_data((int)imm32, rspec, 0);
}

void Assembler::mov_literal32(Register dst, int32_t imm32, RelocationHolder const& rspec) {
  InstructionMark im(this);
  int encode = prefix_and_encode(dst->encoding());
  emit_int8((unsigned char)(0xB8 | encode));
  emit_data((int)imm32, rspec, 0);
}

void Assembler::popa() { // 32bit
  emit_int8(0x61);
}

void Assembler::push_literal32(int32_t imm32, RelocationHolder const& rspec) {
  InstructionMark im(this);
  emit_int8(0x68);
  emit_data(imm32, rspec, 0);
}

void Assembler::pusha() { // 32bit
  emit_int8(0x60);
}

void Assembler::set_byte_if_not_zero(Register dst) {
  emit_int24(0x0F, (unsigned char)0x95, (unsigned char)(0xE0 | dst->encoding()));
}

#else // LP64

void Assembler::set_byte_if_not_zero(Register dst) {
  int enc = prefix_and_encode(dst->encoding(), true);
  emit_int24(0x0F, (unsigned char)0x95, (unsigned char)(0xE0 | enc));
}

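// setcc writes a byte register, so the 64-bit variant above passes
// byteinst = true to prefix_and_encode(): encodings 4..7 then get a plain
// REX prefix, selecting spl/bpl/sil/dil rather than the legacy ah/ch/dh/bh.
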
// 64-bit only pieces of the assembler.
// These should only be used by 64-bit instructions that can use rip-relative
// addressing; they cannot be used by instructions that want an immediate
// value.

bool Assembler::reachable(AddressLiteral adr) {
  int64_t disp;
  relocInfo::relocType relocType = adr.reloc();

  // A relocation of type none will force a 64-bit literal into the code
  // stream. It is likely a placeholder for something that will be patched
  // later, so we need to be certain it will always be reachable.
  if (relocType == relocInfo::none) {
    return false;
  }
  if (relocType == relocInfo::internal_word_type) {
    // This should be rip relative and easily reachable.
    return true;
  }
  if (relocType == relocInfo::virtual_call_type ||
      relocType == relocInfo::opt_virtual_call_type ||
      relocType == relocInfo::static_call_type ||
      relocType == relocInfo::static_stub_type) {
    // This should be rip relative within the code cache and easily
    // reachable until we get huge code caches. (At which point
    // ic code is going to have issues).
    return true;
  }
  if (relocType != relocInfo::external_word_type &&
      relocType != relocInfo::poll_return_type &&  // these are really external_word but need special
      relocType != relocInfo::poll_type &&         // relocs to identify them
      relocType != relocInfo::runtime_call_type) {
    return false;
  }

  // Stress the correction code.
  if (ForceUnreachable) {
    // Must be a runtime call reloc; see if it is in the code cache.
    // Flipping stuff in the code cache to be unreachable causes issues
    // with things like inline caches where the additional instructions
    // are not handled.
    if (CodeCache::find_blob(adr._target) == NULL) {
      return false;
    }
  }
  // For external_word_type/runtime_call_type, if the target is reachable both
  // from where we are now (possibly a temp buffer) and from anywhere we might
  // end up in the code cache, then we are always reachable. This would have
  // to become more pessimistic if we ever save/restore shared code.
  disp = (int64_t)adr._target - ((int64_t)CodeCache::low_bound() + sizeof(int));
  if (!is_simm32(disp)) return false;
  disp = (int64_t)adr._target - ((int64_t)CodeCache::high_bound() + sizeof(int));
  if (!is_simm32(disp)) return false;

  disp = (int64_t)adr._target - ((int64_t)pc() + sizeof(int));

  // Because rip-relative addressing is disp + address_of_next_instruction and
  // we don't know the value of address_of_next_instruction, we apply a fudge
  // factor to make sure we will be ok no matter the size of the instruction
  // we get placed into. We don't have to fudge the checks above because they
  // are already worst case.

  // 12 == override/rex byte, opcode byte, rm byte, sib byte, a 4-byte disp, a 4-byte literal,
  // + 4 because better safe than sorry.
  const int fudge = 12 + 4;
  if (disp < 0) {
    disp -= fudge;
  } else {
    disp += fudge;
  }
  return is_simm32(disp);
}

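// Worked example with hypothetical addresses: if the code cache spans
// [0x7f0000000000, 0x7f0040000000) then a target at 0x7f0080000000 lies
// 0x80000000 bytes past the low bound, which no longer fits in a signed
// 32-bit displacement, so reachable() returns false and callers must
// materialize the address as a 64-bit literal instead of a rip-relative form.
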
// Check if the polling page is not reachable from the code cache using
// rip-relative addressing.
bool Assembler::is_polling_page_far() {
  intptr_t addr = (intptr_t)os::get_polling_page();
  return ForceUnreachable ||
         !is_simm32(addr - (intptr_t)CodeCache::low_bound()) ||
         !is_simm32(addr - (intptr_t)CodeCache::high_bound());
}

void Assembler::emit_data64(jlong data,
                            relocInfo::relocType rtype,
                            int format) {
  if (rtype == relocInfo::none) {
    emit_int64(data);
  } else {
    emit_data64(data, Relocation::spec_simple(rtype), format);
  }
}

void Assembler::emit_data64(jlong data,
                            RelocationHolder const& rspec,
                            int format) {
  assert(imm_operand == 0, "default format must be immediate in this file");
  assert(imm_operand == format, "must be immediate");
  assert(inst_mark() != NULL, "must be inside InstructionMark");
  // Do not use AbstractAssembler::relocate, which is not intended for
  // embedded words. Instead, relocate to the enclosing instruction.
  code_section()->relocate(inst_mark(), rspec, format);
#ifdef ASSERT
  check_relocation(rspec, format);
#endif
  emit_int64(data);
}

int Assembler::prefix_and_encode(int reg_enc, bool byteinst) {
  if (reg_enc >= 8) {
    prefix(REX_B);
    reg_enc -= 8;
  } else if (byteinst && reg_enc >= 4) {
    prefix(REX);
  }
  return reg_enc;
}

int Assembler::prefixq_and_encode(int reg_enc) {
  if (reg_enc < 8) {
    prefix(REX_W);
  } else {
    prefix(REX_WB);
    reg_enc -= 8;
  }
  return reg_enc;
}

int Assembler::prefix_and_encode(int dst_enc, bool dst_is_byte, int src_enc, bool src_is_byte) {
  if (dst_enc < 8) {
    if (src_enc >= 8) {
      prefix(REX_B);
      src_enc -= 8;
    } else if ((src_is_byte && src_enc >= 4) || (dst_is_byte && dst_enc >= 4)) {
      prefix(REX);
    }
  } else {
    if (src_enc < 8) {
      prefix(REX_R);
    } else {
      prefix(REX_RB);
      src_enc -= 8;
    }
    dst_enc -= 8;
  }
  return dst_enc << 3 | src_enc;
}

int Assembler::prefixq_and_encode(int dst_enc, int src_enc) {
  static const int8_t prefixes[] = {
    REX_W,
    REX_WB,
    REX_WR,
    REX_WRB
  };
  int idx = 0;
  if (dst_enc >= 8) {
    idx |= 2;
    dst_enc -= 8;
  }
  if (src_enc >= 8) {
    src_enc -= 8;
    idx |= 1;
  }
  emit_int8(prefixes[idx]);
  return dst_enc << 3 | src_enc;
}

void Assembler::prefix(Register reg) {
  if (reg->encoding() >= 8) {
    prefix(REX_B);
  }
}

void Assembler::prefix(Register dst, Register src, Prefix p) {
  if (src->encoding() >= 8) {
    p = (Prefix)(p | REX_B);
  }
  if (dst->encoding() >= 8) {
    p = (Prefix)(p | REX_R);
  }
  if (p != Prefix_EMPTY) {
    // do not generate an empty prefix
    prefix(p);
  }
}

void Assembler::prefix(Register dst, Address adr, Prefix p) {
  if (adr.base_needs_rex()) {
    if (adr.index_needs_rex()) {
      assert(false, "prefix(Register dst, Address adr, Prefix p) does not support handling of an X");
    } else {
      prefix(REX_B);
    }
  } else {
    if (adr.index_needs_rex()) {
      assert(false, "prefix(Register dst, Address adr, Prefix p) does not support handling of an X");
    }
  }
  if (dst->encoding() >= 8) {
    p = (Prefix)(p | REX_R);
  }
  if (p != Prefix_EMPTY) {
    // do not generate an empty prefix
    prefix(p);
  }
}

void Assembler::prefix(Address adr) {
  if (adr.base_needs_rex()) {
    if (adr.index_needs_rex()) {
      prefix(REX_XB);
    } else {
      prefix(REX_B);
    }
  } else {
    if (adr.index_needs_rex()) {
      prefix(REX_X);
    }
  }
}

int8_t Assembler::get_prefixq(Address adr) {
  static const Assembler::Prefix prefixes[] = {
    REX_W,
    REX_WX,
    REX_WB,
    REX_WXB
  };
  int idx = (int)adr.index_needs_rex() | ((int)adr.base_needs_rex() << 1);
  Assembler::Prefix prfx = prefixes[idx];
#ifdef ASSERT
  if (adr.base_needs_rex()) {
    if (adr.index_needs_rex()) {
      assert(prfx == REX_WXB, "must be");
    } else {
      assert(prfx == REX_WB, "must be");
    }
  } else {
    if (adr.index_needs_rex()) {
      assert(prfx == REX_WX, "must be");
    } else {
      assert(prfx == REX_W, "must be");
    }
  }
#endif
  return (int8_t)prfx;
}

void Assembler::prefixq(Address adr) {
  emit_int8(get_prefixq(adr));
}

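// Worked example: for Address(rsp, r9, Address::times_8) only the index
// register needs REX, so idx == 1 and get_prefixq() returns REX_WX (0x4A).
// Similarly, movq(r10, rcx) goes through prefixq_and_encode(10, 1), which
// emits REX_WR and returns 0x11, so the instruction encodes as 4C 8B D1.
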
void Assembler::prefix(Address adr, Register reg, bool byteinst) {
  if (reg->encoding() < 8) {
    if (adr.base_needs_rex()) {
      if (adr.index_needs_rex()) {
        prefix(REX_XB);
      } else {
        prefix(REX_B);
      }
    } else {
      if (adr.index_needs_rex()) {
        prefix(REX_X);
      } else if (byteinst && reg->encoding() >= 4) {
        prefix(REX);
      }
    }
  } else {
    if (adr.base_needs_rex()) {
      if (adr.index_needs_rex()) {
        prefix(REX_RXB);
      } else {
        prefix(REX_RB);
      }
    } else {
      if (adr.index_needs_rex()) {
        prefix(REX_RX);
      } else {
        prefix(REX_R);
      }
    }
  }
}

int8_t Assembler::get_prefixq(Address adr, Register src) {
  static const int8_t prefixes[] = {
    REX_WR,
    REX_WRX,
    REX_WRB,
    REX_WRXB,
    REX_W,
    REX_WX,
    REX_WB,
    REX_WXB,
  };
  int idx = (int)adr.index_needs_rex() | ((int)adr.base_needs_rex() << 1) | ((int)(src->encoding() < 8) << 2);
  return prefixes[idx];
}

void Assembler::prefixq(Address adr, Register src) {
  emit_int8(get_prefixq(adr, src));
}

void Assembler::prefix(Address adr, XMMRegister reg) {
  if (reg->encoding() < 8) {
    if (adr.base_needs_rex()) {
      if (adr.index_needs_rex()) {
        prefix(REX_XB);
      } else {
        prefix(REX_B);
      }
    } else {
      if (adr.index_needs_rex()) {
        prefix(REX_X);
      }
    }
  } else {
    if (adr.base_needs_rex()) {
      if (adr.index_needs_rex()) {
        prefix(REX_RXB);
      } else {
        prefix(REX_RB);
      }
    } else {
      if (adr.index_needs_rex()) {
        prefix(REX_RX);
      } else {
        prefix(REX_R);
      }
    }
  }
}

void Assembler::prefixq(Address adr, XMMRegister src) {
  if (src->encoding() < 8) {
    if (adr.base_needs_rex()) {
      if (adr.index_needs_rex()) {
        prefix(REX_WXB);
      } else {
        prefix(REX_WB);
      }
    } else {
      if (adr.index_needs_rex()) {
        prefix(REX_WX);
      } else {
        prefix(REX_W);
      }
    }
  } else {
    if (adr.base_needs_rex()) {
      if (adr.index_needs_rex()) {
        prefix(REX_WRXB);
      } else {
        prefix(REX_WRB);
      }
    } else {
      if (adr.index_needs_rex()) {
        prefix(REX_WRX);
      } else {
        prefix(REX_WR);
      }
    }
  }
}

void Assembler::adcq(Register dst, int32_t imm32) {
  (void) prefixq_and_encode(dst->encoding());
  emit_arith(0x81, 0xD0, dst, imm32);
}

void Assembler::adcq(Register dst, Address src) {
  InstructionMark im(this);
  emit_int16(get_prefixq(src, dst), 0x13);
  emit_operand(dst, src);
}

void Assembler::adcq(Register dst, Register src) {
  (void) prefixq_and_encode(dst->encoding(), src->encoding());
  emit_arith(0x13, 0xC0, dst, src);
}

void Assembler::addq(Address dst, int32_t imm32) {
  InstructionMark im(this);
  prefixq(dst);
  emit_arith_operand(0x81, rax, dst, imm32);
}

void Assembler::addq(Address dst, Register src) {
  InstructionMark im(this);
  emit_int16(get_prefixq(dst, src), 0x01);
  emit_operand(src, dst);
}

void Assembler::addq(Register dst, int32_t imm32) {
  (void) prefixq_and_encode(dst->encoding());
  emit_arith(0x81, 0xC0, dst, imm32);
}

void Assembler::addq(Register dst, Address src) {
  InstructionMark im(this);
  emit_int16(get_prefixq(src, dst), 0x03);
  emit_operand(dst, src);
}

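// The ALU opcodes used here come in direction-bit pairs: 0x01 is
// add r/m64, r64 (register stored to memory) while 0x03 is add r64, r/m64
// (memory operand read into the register). The same pattern holds for
// adc (0x11/0x13), sub (0x29/0x2B), and the other arithmetic groups in
// this file.
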
void Assembler::addq(Register dst, Register src) {
  (void) prefixq_and_encode(dst->encoding(), src->encoding());
  emit_arith(0x03, 0xC0, dst, src);
}

void Assembler::adcxq(Register dst, Register src) {
  //assert(VM_Version::supports_adx(), "adx instructions not supported");
  emit_int8((unsigned char)0x66);
  int encode = prefixq_and_encode(dst->encoding(), src->encoding());
  emit_int32(0x0F, 0x38, (unsigned char)0xF6, (unsigned char)(0xC0 | encode));
}

void Assembler::adoxq(Register dst, Register src) {
  //assert(VM_Version::supports_adx(), "adx instructions not supported");
  emit_int8((unsigned char)0xF3);
  int encode = prefixq_and_encode(dst->encoding(), src->encoding());
  emit_int32(0x0F, 0x38, (unsigned char)0xF6, (unsigned char)(0xC0 | encode));
}

void Assembler::andq(Address dst, int32_t imm32) {
  InstructionMark im(this);
  emit_int16(get_prefixq(dst), (unsigned char)0x81);
  emit_operand(rsp, dst, 4);
  emit_int32(imm32);
}

void Assembler::andq(Register dst, int32_t imm32) {
  (void) prefixq_and_encode(dst->encoding());
  emit_arith(0x81, 0xE0, dst, imm32);
}

void Assembler::andq(Register dst, Address src) {
  InstructionMark im(this);
  emit_int16(get_prefixq(src, dst), 0x23);
  emit_operand(dst, src);
}

void Assembler::andq(Register dst, Register src) {
  (void) prefixq_and_encode(dst->encoding(), src->encoding());
  emit_arith(0x23, 0xC0, dst, src);
}

void Assembler::andnq(Register dst, Register src1, Register src2) {
  assert(VM_Version::supports_bmi1(), "bit manipulation instructions not supported");
  InstructionAttr attributes(AVX_128bit, /* vex_w */ true, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
  int encode = vex_prefix_and_encode(dst->encoding(), src1->encoding(), src2->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_38, &attributes);
  emit_int16((unsigned char)0xF2, (unsigned char)(0xC0 | encode));
}

void Assembler::andnq(Register dst, Register src1, Address src2) {
  assert(VM_Version::supports_bmi1(), "bit manipulation instructions not supported");
  InstructionMark im(this);
  InstructionAttr attributes(AVX_128bit, /* vex_w */ true, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
  vex_prefix(src2, src1->encoding(), dst->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_38, &attributes);
  emit_int8((unsigned char)0xF2);
  emit_operand(dst, src2);
}

void Assembler::bsfq(Register dst, Register src) {
  int encode = prefixq_and_encode(dst->encoding(), src->encoding());
  emit_int24(0x0F, (unsigned char)0xBC, (unsigned char)(0xC0 | encode));
}

void Assembler::bsrq(Register dst, Register src) {
  int encode = prefixq_and_encode(dst->encoding(), src->encoding());
  emit_int24(0x0F, (unsigned char)0xBD, (unsigned char)(0xC0 | encode));
}

void Assembler::bswapq(Register reg) {
  int encode = prefixq_and_encode(reg->encoding());
  emit_int16(0x0F, (unsigned char)(0xC8 | encode));
}

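// The blsi/blsmsk/blsr encodings below all share VEX.0F38 opcode 0xF3 and
// are distinguished by the ModRM.reg opcode extension (/3, /2 and /1), which
// is why rbx, rdx and rcx are passed where a destination register would
// normally go in the prefix helpers.
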
void Assembler::blsiq(Register dst, Register src) {
  assert(VM_Version::supports_bmi1(), "bit manipulation instructions not supported");
  InstructionAttr attributes(AVX_128bit, /* vex_w */ true, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
  int encode = vex_prefix_and_encode(rbx->encoding(), dst->encoding(), src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_38, &attributes);
  emit_int16((unsigned char)0xF3, (unsigned char)(0xC0 | encode));
}

void Assembler::blsiq(Register dst, Address src) {
  assert(VM_Version::supports_bmi1(), "bit manipulation instructions not supported");
  InstructionMark im(this);
  InstructionAttr attributes(AVX_128bit, /* vex_w */ true, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
  vex_prefix(src, dst->encoding(), rbx->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_38, &attributes);
  emit_int8((unsigned char)0xF3);
  emit_operand(rbx, src);
}

void Assembler::blsmskq(Register dst, Register src) {
  assert(VM_Version::supports_bmi1(), "bit manipulation instructions not supported");
  InstructionAttr attributes(AVX_128bit, /* vex_w */ true, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
  int encode = vex_prefix_and_encode(rdx->encoding(), dst->encoding(), src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_38, &attributes);
  emit_int16((unsigned char)0xF3, (unsigned char)(0xC0 | encode));
}

void Assembler::blsmskq(Register dst, Address src) {
  assert(VM_Version::supports_bmi1(), "bit manipulation instructions not supported");
  InstructionMark im(this);
  InstructionAttr attributes(AVX_128bit, /* vex_w */ true, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
  vex_prefix(src, dst->encoding(), rdx->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_38, &attributes);
  emit_int8((unsigned char)0xF3);
  emit_operand(rdx, src);
}

void Assembler::blsrq(Register dst, Register src) {
  assert(VM_Version::supports_bmi1(), "bit manipulation instructions not supported");
  InstructionAttr attributes(AVX_128bit, /* vex_w */ true, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
  int encode = vex_prefix_and_encode(rcx->encoding(), dst->encoding(), src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_38, &attributes);
  emit_int16((unsigned char)0xF3, (unsigned char)(0xC0 | encode));
}

void Assembler::blsrq(Register dst, Address src) {
  assert(VM_Version::supports_bmi1(), "bit manipulation instructions not supported");
  InstructionMark im(this);
  InstructionAttr attributes(AVX_128bit, /* vex_w */ true, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
  vex_prefix(src, dst->encoding(), rcx->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_38, &attributes);
  emit_int8((unsigned char)0xF3);
  emit_operand(rcx, src);
}

void Assembler::cdqq() {
  emit_int16(REX_W, (unsigned char)0x99);
}

void Assembler::clflush(Address adr) {
  assert(VM_Version::supports_clflush(), "should do");
  prefix(adr);
  emit_int16(0x0F, (unsigned char)0xAE);
  emit_operand(rdi, adr);
}

void Assembler::clflushopt(Address adr) {
  assert(VM_Version::supports_clflushopt(), "should do!");
  // adr should be base reg only with no index or offset
  assert(adr.index() == noreg, "index should be noreg");
  assert(adr.scale() == Address::no_scale, "scale should be no_scale");
  assert(adr.disp() == 0, "displacement should be 0");
  // instruction prefix is 0x66
  emit_int8(0x66);
  prefix(adr);
  // opcode family is 0x0F 0xAE
  emit_int16(0x0F, (unsigned char)0xAE);
  // extended opcode byte is 7 == rdi
  emit_operand(rdi, adr);
}

void Assembler::clwb(Address adr) {
  assert(VM_Version::supports_clwb(), "should do!");
  // adr should be base reg only with no index or offset
  assert(adr.index() == noreg, "index should be noreg");
  assert(adr.scale() == Address::no_scale, "scale should be no_scale");
  assert(adr.disp() == 0, "displacement should be 0");
  // instruction prefix is 0x66
  emit_int8(0x66);
  prefix(adr);
  // opcode family is 0x0F 0xAE
  emit_int16(0x0F, (unsigned char)0xAE);
  // extended opcode byte is 6 == rsi
  emit_operand(rsi, adr);
}

void Assembler::cmovq(Condition cc, Register dst, Register src) {
  int encode = prefixq_and_encode(dst->encoding(), src->encoding());
  emit_int24(0x0F, 0x40 | cc, (unsigned char)(0xC0 | encode));
}

void Assembler::cmovq(Condition cc, Register dst, Address src) {
  InstructionMark im(this);
  emit_int24(get_prefixq(src, dst), 0x0F, 0x40 | cc);
  emit_operand(dst, src);
}

void Assembler::cmpq(Address dst, int32_t imm32) {
  InstructionMark im(this);
  emit_int16(get_prefixq(dst), (unsigned char)0x81);
  emit_operand(rdi, dst, 4);
  emit_int32(imm32);
}

void Assembler::cmpq(Register dst, int32_t imm32) {
  (void) prefixq_and_encode(dst->encoding());
  emit_arith(0x81, 0xF8, dst, imm32);
}

void Assembler::cmpq(Address dst, Register src) {
  InstructionMark im(this);
  // 0x39 is cmp r/m64, r64, which matches the (Address, Register) operand
  // order here; 0x3B would compute the comparison with the operands swapped.
  emit_int16(get_prefixq(dst, src), 0x39);
  emit_operand(src, dst);
}

void Assembler::cmpq(Register dst, Register src) {
  (void) prefixq_and_encode(dst->encoding(), src->encoding());
  emit_arith(0x3B, 0xC0, dst, src);
}

void Assembler::cmpq(Register dst, Address src) {
  InstructionMark im(this);
  emit_int16(get_prefixq(src, dst), 0x3B);
  emit_operand(dst, src);
}

void Assembler::cmpxchgq(Register reg, Address adr) {
  InstructionMark im(this);
  emit_int24(get_prefixq(adr, reg), 0x0F, (unsigned char)0xB1);
  emit_operand(reg, adr);
}

void Assembler::cvtsi2sdq(XMMRegister dst, Register src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionAttr attributes(AVX_128bit, /* rex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
  int encode = simd_prefix_and_encode(dst, dst, as_XMMRegister(src->encoding()), VEX_SIMD_F2, VEX_OPCODE_0F, &attributes);
  emit_int16(0x2A, (unsigned char)(0xC0 | encode));
}

void Assembler::cvtsi2sdq(XMMRegister dst, Address src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionMark im(this);
  InstructionAttr attributes(AVX_128bit, /* rex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
  attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_64bit);
  simd_prefix(dst, dst, src, VEX_SIMD_F2, VEX_OPCODE_0F, &attributes);
  emit_int8(0x2A);
  emit_operand(dst, src);
}

void Assembler::cvtsi2ssq(XMMRegister dst, Address src) {
  NOT_LP64(assert(VM_Version::supports_sse(), ""));
  InstructionMark im(this);
  InstructionAttr attributes(AVX_128bit, /* rex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
  attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_64bit);
  simd_prefix(dst, dst, src, VEX_SIMD_F3, VEX_OPCODE_0F, &attributes);
  emit_int8(0x2A);
  emit_operand(dst, src);
}

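// Note: the scalar converts above pass dst as the nds operand, so in the
// VEX forms (vcvtsi2sd/vcvtsi2ss xmm1, xmm2, r/m64) the upper bits of the
// destination are merged from dst itself rather than from a third register.
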
void Assembler::cvttsd2siq(Register dst, Address src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  // F2 REX.W 0F 2C /r
  // CVTTSD2SI r64, xmm1/m64
  InstructionMark im(this);
  emit_int32((unsigned char)0xF2, REX_W, 0x0F, 0x2C);
  emit_operand(dst, src);
}

void Assembler::cvttsd2siq(Register dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionAttr attributes(AVX_128bit, /* rex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
  int encode = simd_prefix_and_encode(as_XMMRegister(dst->encoding()), xnoreg, src, VEX_SIMD_F2, VEX_OPCODE_0F, &attributes);
  emit_int16(0x2C, (unsigned char)(0xC0 | encode));
}

void Assembler::cvttss2siq(Register dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse(), ""));
  InstructionAttr attributes(AVX_128bit, /* rex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
  int encode = simd_prefix_and_encode(as_XMMRegister(dst->encoding()), xnoreg, src, VEX_SIMD_F3, VEX_OPCODE_0F, &attributes);
  emit_int16(0x2C, (unsigned char)(0xC0 | encode));
}

void Assembler::decl(Register dst) {
  // Don't use it directly. Use MacroAssembler::decrementl() instead.
  // Use two-byte form (one-byte form is a REX prefix in 64-bit mode)
  int encode = prefix_and_encode(dst->encoding());
  emit_int16((unsigned char)0xFF, (unsigned char)(0xC8 | encode));
}

void Assembler::decq(Register dst) {
  // Don't use it directly. Use MacroAssembler::decrementq() instead.
  // Use two-byte form (one-byte form is a REX prefix in 64-bit mode)
  int encode = prefixq_and_encode(dst->encoding());
  emit_int16((unsigned char)0xFF, (unsigned char)(0xC8 | encode));
}

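// Example: decq(rax) above goes through prefixq_and_encode(0), which emits
// REX_W and returns 0, so the bytes are 48 FF C8. The one-byte dec form from
// 32-bit mode is unavailable here because 0x48 is itself a REX prefix.
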
void Assembler::decq(Address dst) {
  // Don't use it directly. Use MacroAssembler::decrementq() instead.
  InstructionMark im(this);
  emit_int16(get_prefixq(dst), (unsigned char)0xFF);
  emit_operand(rcx, dst);
}

// fxsave/fxrstor/xsave/xrstor share the 0F AE opcode and are distinguished
// by the ModRM.reg opcode extension: /0, /1, /4 and /5 respectively.

void Assembler::fxrstor(Address src) {
  emit_int24(get_prefixq(src), 0x0F, (unsigned char)0xAE);
  emit_operand(as_Register(1), src);
}

void Assembler::xrstor(Address src) {
  emit_int24(get_prefixq(src), 0x0F, (unsigned char)0xAE);
  emit_operand(as_Register(5), src);
}

void Assembler::fxsave(Address dst) {
  emit_int24(get_prefixq(dst), 0x0F, (unsigned char)0xAE);
  emit_operand(as_Register(0), dst);
}

void Assembler::xsave(Address dst) {
  emit_int24(get_prefixq(dst), 0x0F, (unsigned char)0xAE);
  emit_operand(as_Register(4), dst);
}

void Assembler::idivq(Register src) {
  int encode = prefixq_and_encode(src->encoding());
  emit_int16((unsigned char)0xF7, (unsigned char)(0xF8 | encode));
}

void Assembler::imulq(Register dst, Register src) {
  int encode = prefixq_and_encode(dst->encoding(), src->encoding());
  emit_int24(0x0F, (unsigned char)0xAF, (unsigned char)(0xC0 | encode));
}

void Assembler::imulq(Register dst, Register src, int value) {
  int encode = prefixq_and_encode(dst->encoding(), src->encoding());
  if (is8bit(value)) {
    emit_int24(0x6B, (unsigned char)(0xC0 | encode), value & 0xFF);
  } else {
    emit_int16(0x69, (unsigned char)(0xC0 | encode));
    emit_int32(value);
  }
}

void Assembler::imulq(Register dst, Address src) {
  InstructionMark im(this);
  emit_int24(get_prefixq(src, dst), 0x0F, (unsigned char)0xAF);
  emit_operand(dst, src);
}

void Assembler::incl(Register dst) {
  // Don't use it directly. Use MacroAssembler::incrementl() instead.
  // Use two-byte form (one-byte form is a REX prefix in 64-bit mode)
  int encode = prefix_and_encode(dst->encoding());
  emit_int16((unsigned char)0xFF, (unsigned char)(0xC0 | encode));
}

void Assembler::incq(Register dst) {
  // Don't use it directly. Use MacroAssembler::incrementq() instead.
  // Use two-byte form (one-byte form is a REX prefix in 64-bit mode)
  int encode = prefixq_and_encode(dst->encoding());
  emit_int16((unsigned char)0xFF, (unsigned char)(0xC0 | encode));
}

void Assembler::incq(Address dst) {
  // Don't use it directly. Use MacroAssembler::incrementq() instead.
  InstructionMark im(this);
  emit_int16(get_prefixq(dst), (unsigned char)0xFF);
  emit_operand(rax, dst);
}

void Assembler::lea(Register dst, Address src) {
  leaq(dst, src);
}

void Assembler::leaq(Register dst, Address src) {
  InstructionMark im(this);
  emit_int16(get_prefixq(src, dst), (unsigned char)0x8D);
  emit_operand(dst, src);
}

void Assembler::mov64(Register dst, int64_t imm64) {
  InstructionMark im(this);
  int encode = prefixq_and_encode(dst->encoding());
  emit_int8((unsigned char)(0xB8 | encode));
  emit_int64(imm64);
}

void Assembler::mov_literal64(Register dst, intptr_t imm64, RelocationHolder const& rspec) {
  InstructionMark im(this);
  int encode = prefixq_and_encode(dst->encoding());
  emit_int8(0xB8 | encode);
  emit_data64(imm64, rspec);
}

void Assembler::mov_narrow_oop(Register dst, int32_t imm32, RelocationHolder const& rspec) {
  InstructionMark im(this);
  int encode = prefix_and_encode(dst->encoding());
  emit_int8((unsigned char)(0xB8 | encode));
  emit_data((int)imm32, rspec, narrow_oop_operand);
}

void Assembler::mov_narrow_oop(Address dst, int32_t imm32, RelocationHolder const& rspec) {
  InstructionMark im(this);
  prefix(dst);
  emit_int8((unsigned char)0xC7);
  emit_operand(rax, dst, 4);
  emit_data((int)imm32, rspec, narrow_oop_operand);
}

void Assembler::cmp_narrow_oop(Register src1, int32_t imm32, RelocationHolder const& rspec) {
  InstructionMark im(this);
  int encode = prefix_and_encode(src1->encoding());
  emit_int16((unsigned char)0x81, (unsigned char)(0xF8 | encode));
  emit_data((int)imm32, rspec, narrow_oop_operand);
}

void Assembler::cmp_narrow_oop(Address src1, int32_t imm32, RelocationHolder const& rspec) {
  InstructionMark im(this);
  prefix(src1);
  emit_int8((unsigned char)0x81);
  emit_operand(rax, src1, 4);
  emit_data((int)imm32, rspec, narrow_oop_operand);
}

void Assembler::lzcntq(Register dst, Register src) {
  assert(VM_Version::supports_lzcnt(), "encoding is treated as BSR");
  emit_int8((unsigned char)0xF3);
  int encode = prefixq_and_encode(dst->encoding(), src->encoding());
  emit_int24(0x0F, (unsigned char)0xBD, (unsigned char)(0xC0 | encode));
}

void Assembler::movdq(XMMRegister dst, Register src) {
  // table D-1 says MMX/SSE2
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionAttr attributes(AVX_128bit, /* rex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
  int encode = simd_prefix_and_encode(dst, xnoreg, as_XMMRegister(src->encoding()), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int16(0x6E, (unsigned char)(0xC0 | encode));
}

void Assembler::movdq(Register dst, XMMRegister src) {
  // table D-1 says MMX/SSE2
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionAttr attributes(AVX_128bit, /* rex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
  // swap src/dst to get correct prefix
  int encode = simd_prefix_and_encode(src, xnoreg, as_XMMRegister(dst->encoding()), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int16(0x7E, (unsigned char)(0xC0 | encode));
}

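// movdq uses the load form 0F 6E (xmm <- r/m64) and the store form 0F 7E
// (r/m64 <- xmm); in both forms the XMM register lives in ModRM.reg, which
// is why the store variant above swaps src and dst when building the prefix.
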
void Assembler::movq(Register dst, Register src) {
  int encode = prefixq_and_encode(dst->encoding(), src->encoding());
  emit_int16((unsigned char)0x8B, (unsigned char)(0xC0 | encode));
}

void Assembler::movq(Register dst, Address src) {
  InstructionMark im(this);
  emit_int16(get_prefixq(src, dst), (unsigned char)0x8B);
  emit_operand(dst, src);
}

void Assembler::movq(Address dst, Register src) {
  InstructionMark im(this);
  emit_int16(get_prefixq(dst, src), (unsigned char)0x89);
  emit_operand(src, dst);
}

void Assembler::movsbq(Register dst, Address src) {
  InstructionMark im(this);
  emit_int24(get_prefixq(src, dst), 0x0F, (unsigned char)0xBE);
  emit_operand(dst, src);
}

void Assembler::movsbq(Register dst, Register src) {
  int encode = prefixq_and_encode(dst->encoding(), src->encoding());
  emit_int24(0x0F, (unsigned char)0xBE, (unsigned char)(0xC0 | encode));
}

void Assembler::movslq(Register dst, int32_t imm32) {
  // dbx shows movslq(rcx, 3) as movq $0x0000000049000000,(%rbx)
  // and movslq(r8, 3) as movl $0x0000000048000000,(%rbx),
  // so we should not use this form until it has been tested at runtime...
  ShouldNotReachHere();
  InstructionMark im(this);
  int encode = prefixq_and_encode(dst->encoding());
  emit_int8((unsigned char)(0xC7 | encode));
  emit_int32(imm32);
}

void Assembler::movslq(Address dst, int32_t imm32) {
  assert(is_simm32(imm32), "lost bits");
  InstructionMark im(this);
  emit_int16(get_prefixq(dst), (unsigned char)0xC7);
  emit_operand(rax, dst, 4);
  emit_int32(imm32);
}

void Assembler::movslq(Register dst, Address src) {
  InstructionMark im(this);
  emit_int16(get_prefixq(src, dst), 0x63);
  emit_operand(dst, src);
}

void Assembler::movslq(Register dst, Register src) {
  int encode = prefixq_and_encode(dst->encoding(), src->encoding());
  emit_int16(0x63, (unsigned char)(0xC0 | encode));
}

void Assembler::movswq(Register dst, Address src) {
  InstructionMark im(this);
  emit_int24(get_prefixq(src, dst), 0x0F, (unsigned char)0xBF);
  emit_operand(dst, src);
}

void Assembler::movswq(Register dst, Register src) {
  int encode = prefixq_and_encode(dst->encoding(), src->encoding());
  emit_int24(0x0F, (unsigned char)0xBF, (unsigned char)(0xC0 | encode));
}

void Assembler::movzbq(Register dst, Address src) {
  InstructionMark im(this);
  emit_int24(get_prefixq(src, dst), 0x0F, (unsigned char)0xB6);
  emit_operand(dst, src);
}

void Assembler::movzbq(Register dst, Register src) {
  int encode = prefixq_and_encode(dst->encoding(), src->encoding());
  emit_int24(0x0F, (unsigned char)0xB6, (unsigned char)(0xC0 | encode));
}

void Assembler::movzwq(Register dst, Address src) {
  InstructionMark im(this);
  emit_int24(get_prefixq(src, dst), 0x0F, (unsigned char)0xB7);
  emit_operand(dst, src);
}

void Assembler::movzwq(Register dst, Register src) {
  int encode = prefixq_and_encode(dst->encoding(), src->encoding());
  emit_int24(0x0F, (unsigned char)0xB7, (unsigned char)(0xC0 | encode));
}

void Assembler::mulq(Address src) {
  InstructionMark im(this);
  emit_int16(get_prefixq(src), (unsigned char)0xF7);
  emit_operand(rsp, src);
}

void Assembler::mulq(Register src) {
  int encode = prefixq_and_encode(src->encoding());
  emit_int16((unsigned char)0xF7, (unsigned char)(0xE0 | encode));
}

void Assembler::mulxq(Register dst1, Register dst2, Register src) {
  assert(VM_Version::supports_bmi2(), "bit manipulation instructions not supported");
  InstructionAttr attributes(AVX_128bit, /* vex_w */ true, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
  int encode = vex_prefix_and_encode(dst1->encoding(), dst2->encoding(), src->encoding(), VEX_SIMD_F2, VEX_OPCODE_0F_38, &attributes);
  emit_int16((unsigned char)0xF6, (unsigned char)(0xC0 | encode));
}

void Assembler::negq(Register dst) {
  int encode = prefixq_and_encode(dst->encoding());
  emit_int16((unsigned char)0xF7, (unsigned char)(0xD8 | encode));
}

void Assembler::notq(Register dst) {
  int encode = prefixq_and_encode(dst->encoding());
  emit_int16((unsigned char)0xF7, (unsigned char)(0xD0 | encode));
}

void Assembler::btsq(Address dst, int imm8) {
  assert(isByte(imm8), "not a byte");
  InstructionMark im(this);
  emit_int24(get_prefixq(dst), 0x0F, (unsigned char)0xBA);
  emit_operand(rbp /* 5 */, dst, 1);
  emit_int8(imm8);
}

void Assembler::btrq(Address dst, int imm8) {
  assert(isByte(imm8), "not a byte");
  InstructionMark im(this);
  emit_int24(get_prefixq(dst), 0x0F, (unsigned char)0xBA);
  emit_operand(rsi /* 6 */, dst, 1);
  emit_int8(imm8);
}

void Assembler::orq(Address dst, int32_t imm32) {
  InstructionMark im(this);
  emit_int16(get_prefixq(dst), (unsigned char)0x81);
  emit_operand(rcx, dst, 4);
  emit_int32(imm32);
}

void Assembler::orq(Register dst, int32_t imm32) {
  (void) prefixq_and_encode(dst->encoding());
  emit_arith(0x81, 0xC8, dst, imm32);
}

void Assembler::orq(Register dst, Address src) {
  InstructionMark im(this);
  emit_int16(get_prefixq(src, dst), 0x0B);
  emit_operand(dst, src);
}

void Assembler::orq(Register dst, Register src) {
  (void) prefixq_and_encode(dst->encoding(), src->encoding());
  emit_arith(0x0B, 0xC0, dst, src);
}

void Assembler::popcntq(Register dst, Address src) {
  assert(VM_Version::supports_popcnt(), "must support");
  InstructionMark im(this);
  emit_int32((unsigned char)0xF3, get_prefixq(src, dst), 0x0F, (unsigned char)0xB8);
  emit_operand(dst, src);
}

void Assembler::popcntq(Register dst, Register src) {
  assert(VM_Version::supports_popcnt(), "must support");
  emit_int8((unsigned char)0xF3);
  int encode = prefixq_and_encode(dst->encoding(), src->encoding());
  emit_int24(0x0F, (unsigned char)0xB8, (unsigned char)(0xC0 | encode));
}

void Assembler::popq(Address dst) {
  InstructionMark im(this);
  emit_int16(get_prefixq(dst), (unsigned char)0x8F);
  emit_operand(rax, dst);
}

// Precomputable: popa, pusha, vzeroupper
//
// The results of these routines are invariant from one invocation to the
// next for the duration of a run. Caching the result at bootstrap and
// copying it out on subsequent invocations can thus be beneficial.
static bool precomputed = false;

static u_char* popa_code = NULL;
static int popa_len = 0;

static u_char* pusha_code = NULL;
static int pusha_len = 0;

static u_char* vzup_code = NULL;
static int vzup_len = 0;

void Assembler::precompute_instructions() {
  assert(!Universe::is_fully_initialized(), "must still be single threaded");
  guarantee(!precomputed, "only once");
  precomputed = true;
  ResourceMark rm;

  // Make a temporary buffer big enough for the routines we're capturing
  int size = 256;
  char* tmp_code = NEW_RESOURCE_ARRAY(char, size);
  CodeBuffer buffer((address)tmp_code, size);
  MacroAssembler masm(&buffer);

  address begin_popa = masm.code_section()->end();
  masm.popa_uncached();
  address end_popa = masm.code_section()->end();
  masm.pusha_uncached();
  address end_pusha = masm.code_section()->end();
  masm.vzeroupper_uncached();
  address end_vzup = masm.code_section()->end();

  // Save the instructions to permanent buffers.
  popa_len = (int)(end_popa - begin_popa);
  popa_code = NEW_C_HEAP_ARRAY(u_char, popa_len, mtInternal);
  memcpy(popa_code, begin_popa, popa_len);

  pusha_len = (int)(end_pusha - end_popa);
  pusha_code = NEW_C_HEAP_ARRAY(u_char, pusha_len, mtInternal);
  memcpy(pusha_code, end_popa, pusha_len);

  vzup_len = (int)(end_vzup - end_pusha);
  if (vzup_len > 0) {
    vzup_code = NEW_C_HEAP_ARRAY(u_char, vzup_len, mtInternal);
    memcpy(vzup_code, end_pusha, vzup_len);
  } else {
    vzup_code = pusha_code; // dummy
  }

  assert(masm.code()->total_oop_size() == 0 &&
         masm.code()->total_metadata_size() == 0 &&
         masm.code()->total_relocation_size() == 0,
         "pre-computed code can't reference oops, metadata or contain relocations");
}

static void emit_copy(CodeSection* code_section, u_char* src, int src_len) {
  assert(src != NULL, "code to copy must have been pre-computed");
  assert(code_section->limit() - code_section->end() > src_len, "code buffer not large enough");
  address end = code_section->end();
  memcpy(end, src, src_len);
  code_section->set_end(end + src_len);
}

void Assembler::popa() { // 64bit
  emit_copy(code_section(), popa_code, popa_len);
}

void Assembler::popa_uncached() { // 64bit
  movq(r15, Address(rsp, 0));
  movq(r14, Address(rsp, wordSize));
  movq(r13, Address(rsp, 2 * wordSize));
  movq(r12, Address(rsp, 3 * wordSize));
  movq(r11, Address(rsp, 4 * wordSize));
  movq(r10, Address(rsp, 5 * wordSize));
  movq(r9,  Address(rsp, 6 * wordSize));
  movq(r8,  Address(rsp, 7 * wordSize));
  movq(rdi, Address(rsp, 8 * wordSize));
  movq(rsi, Address(rsp, 9 * wordSize));
  movq(rbp, Address(rsp, 10 * wordSize));
  // skip rsp
  movq(rbx, Address(rsp, 12 * wordSize));
  movq(rdx, Address(rsp, 13 * wordSize));
  movq(rcx, Address(rsp, 14 * wordSize));
  movq(rax, Address(rsp, 15 * wordSize));

  addq(rsp, 16 * wordSize);
}

void Assembler::pusha() { // 64bit
  emit_copy(code_section(), pusha_code, pusha_len);
}

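// pusha_uncached() and popa_uncached() share a fixed 16-slot frame indexed
// off rsp. Slot 11 holds the original rsp: it is written at -5 * wordSize
// while that memory is still red-zone scratch (-5 == 11 - 16), and the
// restore path above deliberately skips it.
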
void Assembler::pusha_uncached() { // 64bit
  // We have to store the original rsp. The ABI says that the 128 bytes
  // below rsp are local scratch.
  movq(Address(rsp, -5 * wordSize), rsp);

  subq(rsp, 16 * wordSize);

  movq(Address(rsp, 15 * wordSize), rax);
  movq(Address(rsp, 14 * wordSize), rcx);
  movq(Address(rsp, 13 * wordSize), rdx);
  movq(Address(rsp, 12 * wordSize), rbx);
  // skip rsp
  movq(Address(rsp, 10 * wordSize), rbp);
  movq(Address(rsp, 9 * wordSize), rsi);
  movq(Address(rsp, 8 * wordSize), rdi);
  movq(Address(rsp, 7 * wordSize), r8);
  movq(Address(rsp, 6 * wordSize), r9);
  movq(Address(rsp, 5 * wordSize), r10);
  movq(Address(rsp, 4 * wordSize), r11);
  movq(Address(rsp, 3 * wordSize), r12);
  movq(Address(rsp, 2 * wordSize), r13);
  movq(Address(rsp, wordSize), r14);
  movq(Address(rsp, 0), r15);
}

void Assembler::vzeroupper() {
  emit_copy(code_section(), vzup_code, vzup_len);
}

void Assembler::pushq(Address src) {
  InstructionMark im(this);
  emit_int16(get_prefixq(src), (unsigned char)0xFF);
  emit_operand(rsi, src);
}

void Assembler::rclq(Register dst, int imm8) {
  assert(isShiftCount(imm8 >> 1), "illegal shift count");
  int encode = prefixq_and_encode(dst->encoding());
  if (imm8 == 1) {
    emit_int16((unsigned char)0xD1, (unsigned char)(0xD0 | encode));
  } else {
    emit_int24((unsigned char)0xC1, (unsigned char)(0xD0 | encode), imm8);
  }
}

void Assembler::rcrq(Register dst, int imm8) {
  assert(isShiftCount(imm8 >> 1), "illegal shift count");
  int encode = prefixq_and_encode(dst->encoding());
  if (imm8 == 1) {
    emit_int16((unsigned char)0xD1, (unsigned char)(0xD8 | encode));
  } else {
    emit_int24((unsigned char)0xC1, (unsigned char)(0xD8 | encode), imm8);
  }
}

void Assembler::rorq(Register dst, int imm8) {
  assert(isShiftCount(imm8 >> 1), "illegal shift count");
  int encode = prefixq_and_encode(dst->encoding());
  if (imm8 == 1) {
    emit_int16((unsigned char)0xD1, (unsigned char)(0xC8 | encode));
  } else {
    emit_int24((unsigned char)0xC1, (unsigned char)(0xC8 | encode), imm8);
  }
}

void Assembler::rorxq(Register dst, Register src, int imm8) {
  assert(VM_Version::supports_bmi2(), "bit manipulation instructions not supported");
  InstructionAttr attributes(AVX_128bit, /* vex_w */ true, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
  int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_F2, VEX_OPCODE_0F_3A, &attributes);
  emit_int24((unsigned char)0xF0, (unsigned char)(0xC0 | encode), imm8);
}

void Assembler::rorxd(Register dst, Register src, int imm8) {
  assert(VM_Version::supports_bmi2(), "bit manipulation instructions not supported");
  InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
  int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_F2, VEX_OPCODE_0F_3A, &attributes);
  emit_int24((unsigned char)0xF0, (unsigned char)(0xC0 | encode), imm8);
}

void Assembler::sarq(Register dst, int imm8) {
  assert(isShiftCount(imm8 >> 1), "illegal shift count");
  int encode = prefixq_and_encode(dst->encoding());
  if (imm8 == 1) {
    emit_int16((unsigned char)0xD1, (unsigned char)(0xF8 | encode));
  } else {
    emit_int24((unsigned char)0xC1, (unsigned char)(0xF8 | encode), imm8);
  }
}

void Assembler::sarq(Register dst) {
  int encode = prefixq_and_encode(dst->encoding());
  emit_int16((unsigned char)0xD3, (unsigned char)(0xF8 | encode));
}

void Assembler::sbbq(Address dst, int32_t imm32) {
  InstructionMark im(this);
  prefixq(dst);
  emit_arith_operand(0x81, rbx, dst, imm32);
}

void Assembler::sbbq(Register dst, int32_t imm32) {
  (void) prefixq_and_encode(dst->encoding());
  emit_arith(0x81, 0xD8, dst, imm32);
}

void Assembler::sbbq(Register dst, Address src) {
  InstructionMark im(this);
  emit_int16(get_prefixq(src, dst), 0x1B);
  emit_operand(dst, src);
}

void Assembler::sbbq(Register dst, Register src) {
  (void) prefixq_and_encode(dst->encoding(), src->encoding());
  emit_arith(0x1B, 0xC0, dst, src);
}

void Assembler::shlq(Register dst, int imm8) {
  assert(isShiftCount(imm8 >> 1), "illegal shift count");
  int encode = prefixq_and_encode(dst->encoding());
  if (imm8 == 1) {
    emit_int16((unsigned char)0xD1, (unsigned char)(0xE0 | encode));
  } else {
    emit_int24((unsigned char)0xC1, (unsigned char)(0xE0 | encode), imm8);
  }
}

void Assembler::shlq(Register dst) {
  int encode = prefixq_and_encode(dst->encoding());
  emit_int16((unsigned char)0xD3, (unsigned char)(0xE0 | encode));
}

void Assembler::shrq(Register dst, int imm8) {
  assert(isShiftCount(imm8 >> 1), "illegal shift count");
  int encode = prefixq_and_encode(dst->encoding());
  emit_int24((unsigned char)0xC1, (unsigned char)(0xE8 | encode), imm8);
}

void Assembler::shrq(Register dst) {
  int encode = prefixq_and_encode(dst->encoding());
  emit_int16((unsigned char)0xD3, (unsigned char)(0xE8 | encode));
}

void Assembler::subq(Address dst, int32_t imm32) {
  InstructionMark im(this);
  prefixq(dst);
  emit_arith_operand(0x81, rbp, dst, imm32);
}

void Assembler::subq(Address dst, Register src) {
  InstructionMark im(this);
  emit_int16(get_prefixq(dst, src), 0x29);
  emit_operand(src, dst);
}

void Assembler::subq(Register dst, int32_t imm32) {
  (void) prefixq_and_encode(dst->encoding());
  emit_arith(0x81, 0xE8, dst, imm32);
}

// Force generation of a 4 byte immediate value even if it fits into 8bit
void Assembler::subq_imm32(Register dst, int32_t imm32) {
  (void) prefixq_and_encode(dst->encoding());
  emit_arith_imm32(0x81, 0xE8, dst, imm32);
}

void Assembler::subq(Register dst, Address src) {
  InstructionMark im(this);
  emit_int16(get_prefixq(src, dst), 0x2B);
  emit_operand(dst, src);
}

void Assembler::subq(Register dst, Register src) {
  (void) prefixq_and_encode(dst->encoding(), src->encoding());
  emit_arith(0x2B, 0xC0, dst, src);
}

void Assembler::testq(Register dst, int32_t imm32) {
  // not using emit_arith because test
  // doesn't support sign-extension of
  // 8bit operands
  int encode = dst->encoding();
  if (encode == 0) {
    emit_int16(REX_W, (unsigned char)0xA9);
  } else {
    encode = prefixq_and_encode(encode);
    emit_int16((unsigned char)0xF7, (unsigned char)(0xC0 | encode));
  }
  emit_int32(imm32);
}

void Assembler::testq(Register dst, Register src) {
  (void) prefixq_and_encode(dst->encoding(), src->encoding());
  emit_arith(0x85, 0xC0, dst, src);
}

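// testq(Register, int32_t) above special-cases rax: TEST RAX, imm32 has the
// short form REX.W A9 id with no ModRM byte, while every other register
// takes the general F7 /0 encoding.
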
void Assembler::testq(Register dst, Address src) {
  InstructionMark im(this);
  emit_int16(get_prefixq(src, dst), (unsigned char)0x85);
  emit_operand(dst, src);
}

void Assembler::xaddq(Address dst, Register src) {
  InstructionMark im(this);
  emit_int24(get_prefixq(dst, src), 0x0F, (unsigned char)0xC1);
  emit_operand(src, dst);
}

void Assembler::xchgq(Register dst, Address src) {
  InstructionMark im(this);
  emit_int16(get_prefixq(src, dst), (unsigned char)0x87);
  emit_operand(dst, src);
}

void Assembler::xchgq(Register dst, Register src) {
  int encode = prefixq_and_encode(dst->encoding(), src->encoding());
  emit_int16((unsigned char)0x87, (unsigned char)(0xC0 | encode));
}

void Assembler::xorq(Register dst, Register src) {
  (void) prefixq_and_encode(dst->encoding(), src->encoding());
  emit_arith(0x33, 0xC0, dst, src);
}

void Assembler::xorq(Register dst, Address src) {
  InstructionMark im(this);
  emit_int16(get_prefixq(src, dst), 0x33);
  emit_operand(dst, src);
}

#endif // !LP64