1 /*
2 * Copyright (c) 1997, 2019, Oracle and/or its affiliates. All rights reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 #ifndef CPU_X86_ASSEMBLER_X86_HPP
26 #define CPU_X86_ASSEMBLER_X86_HPP
27
28 #include "asm/register.hpp"
29 #include "runtime/vm_version.hpp"
30
31 class BiasedLockingCounters;
32
33 // Contains all the definitions needed for x86 assembly code generation.
34
35 // Calling convention
36 class Argument {
37 public:
38 enum {
39 #ifdef _LP64
40 #ifdef _WIN64
41 n_int_register_parameters_c = 4, // rcx, rdx, r8, r9 (c_rarg0, c_rarg1, ...)
42 n_float_register_parameters_c = 4, // xmm0 - xmm3 (c_farg0, c_farg1, ... )
43 #else
44 n_int_register_parameters_c = 6, // rdi, rsi, rdx, rcx, r8, r9 (c_rarg0, c_rarg1, ...)
45 n_float_register_parameters_c = 8, // xmm0 - xmm7 (c_farg0, c_farg1, ... )
46 #endif // _WIN64
47 n_int_register_parameters_j = 6, // j_rarg0, j_rarg1, ...
48 n_float_register_parameters_j = 8 // j_farg0, j_farg1, ...
49 #else
50 n_register_parameters = 0 // 0 registers used to pass arguments
51 #endif // _LP64
52 };
53 };
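
// For example: a C call taking five integer arguments passes only the first four
// in registers on Win64 (rcx, rdx, r8, r9, since n_int_register_parameters_c == 4)
// with the fifth on the stack, while on linux/solaris all five travel in registers
// (rdi, rsi, rdx, rcx, r8, since n_int_register_parameters_c == 6 there).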
54
55
56 #ifdef _LP64
57 // Symbolically name the register arguments used by the c calling convention.
58 // Windows is different from linux/solaris. So much for standards...
59
60 #ifdef _WIN64
61
62 REGISTER_DECLARATION(Register, c_rarg0, rcx);
63 REGISTER_DECLARATION(Register, c_rarg1, rdx);
64 REGISTER_DECLARATION(Register, c_rarg2, r8);
65 REGISTER_DECLARATION(Register, c_rarg3, r9);
66
67 REGISTER_DECLARATION(XMMRegister, c_farg0, xmm0);
68 REGISTER_DECLARATION(XMMRegister, c_farg1, xmm1);
69 REGISTER_DECLARATION(XMMRegister, c_farg2, xmm2);
70 REGISTER_DECLARATION(XMMRegister, c_farg3, xmm3);
71
72 #else
73
74 REGISTER_DECLARATION(Register, c_rarg0, rdi);
75 REGISTER_DECLARATION(Register, c_rarg1, rsi);
76 REGISTER_DECLARATION(Register, c_rarg2, rdx);
77 REGISTER_DECLARATION(Register, c_rarg3, rcx);
78 REGISTER_DECLARATION(Register, c_rarg4, r8);
79 REGISTER_DECLARATION(Register, c_rarg5, r9);
80
81 REGISTER_DECLARATION(XMMRegister, c_farg0, xmm0);
82 REGISTER_DECLARATION(XMMRegister, c_farg1, xmm1);
83 REGISTER_DECLARATION(XMMRegister, c_farg2, xmm2);
84 REGISTER_DECLARATION(XMMRegister, c_farg3, xmm3);
85 REGISTER_DECLARATION(XMMRegister, c_farg4, xmm4);
86 REGISTER_DECLARATION(XMMRegister, c_farg5, xmm5);
87 REGISTER_DECLARATION(XMMRegister, c_farg6, xmm6);
88 REGISTER_DECLARATION(XMMRegister, c_farg7, xmm7);
89
90 #endif // _WIN64
91
92 // Symbolically name the register arguments used by the Java calling convention.
// We have control over the convention for java so we can do what we please.
// What pleases us is to offset the java calling convention so that when
// we call a suitable jni method the arguments are already lined up and we
// don't have to do any shuffling. A suitable jni method is non-static and
// takes a small number of arguments (two fewer args on windows).
98 //
//        |--------------------------------------------------------|
//        | c_rarg0  c_rarg1  c_rarg2  c_rarg3  c_rarg4  c_rarg5   |
//        |--------------------------------------------------------|
//        | rcx      rdx      r8       r9       rdi*     rsi*      | windows (* not a c_rarg)
//        | rdi      rsi      rdx      rcx      r8       r9        | solaris/linux
//        |--------------------------------------------------------|
//        | j_rarg5  j_rarg0  j_rarg1  j_rarg2  j_rarg3  j_rarg4   |
//        |--------------------------------------------------------|
107
108 REGISTER_DECLARATION(Register, j_rarg0, c_rarg1);
109 REGISTER_DECLARATION(Register, j_rarg1, c_rarg2);
110 REGISTER_DECLARATION(Register, j_rarg2, c_rarg3);
111 // Windows runs out of register args here
112 #ifdef _WIN64
113 REGISTER_DECLARATION(Register, j_rarg3, rdi);
114 REGISTER_DECLARATION(Register, j_rarg4, rsi);
115 #else
116 REGISTER_DECLARATION(Register, j_rarg3, c_rarg4);
117 REGISTER_DECLARATION(Register, j_rarg4, c_rarg5);
118 #endif /* _WIN64 */
119 REGISTER_DECLARATION(Register, j_rarg5, c_rarg0);
120
121 REGISTER_DECLARATION(XMMRegister, j_farg0, xmm0);
122 REGISTER_DECLARATION(XMMRegister, j_farg1, xmm1);
123 REGISTER_DECLARATION(XMMRegister, j_farg2, xmm2);
124 REGISTER_DECLARATION(XMMRegister, j_farg3, xmm3);
125 REGISTER_DECLARATION(XMMRegister, j_farg4, xmm4);
126 REGISTER_DECLARATION(XMMRegister, j_farg5, xmm5);
127 REGISTER_DECLARATION(XMMRegister, j_farg6, xmm6);
128 REGISTER_DECLARATION(XMMRegister, j_farg7, xmm7);
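
// Illustrative consequence of the offset above: for a non-static native call the
// JNIEnv* goes in c_rarg0, and the Java arguments, which the Java calling convention
// already placed in j_rarg0, j_rarg1, ... (i.e. c_rarg1, c_rarg2, ...), are sitting
// exactly where the C calling convention expects them, so no shuffling is needed
// (except for j_rarg5, which aliases c_rarg0, and the last two j_rargs on windows).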
129
130 REGISTER_DECLARATION(Register, rscratch1, r10); // volatile
131 REGISTER_DECLARATION(Register, rscratch2, r11); // volatile
132
133 REGISTER_DECLARATION(Register, r12_heapbase, r12); // callee-saved
134 REGISTER_DECLARATION(Register, r15_thread, r15); // callee-saved
135
136 #else
// rscratch1 will appear in 32bit code that is dead but of course must compile.
// Using noreg ensures that if the dead code is incorrectly live and executed it
// will cause an assertion failure.
140 #define rscratch1 noreg
141 #define rscratch2 noreg
142
143 #endif // _LP64
144
145 // JSR 292
146 // On x86, the SP does not have to be saved when invoking method handle intrinsics
147 // or compiled lambda forms. We indicate that by setting rbp_mh_SP_save to noreg.
148 REGISTER_DECLARATION(Register, rbp_mh_SP_save, noreg);
149
150 // Address is an abstraction used to represent a memory location
151 // using any of the amd64 addressing modes with one object.
152 //
153 // Note: A register location is represented via a Register, not
154 // via an address for efficiency & simplicity reasons.
155
156 class ArrayAddress;
157
158 class Address {
159 public:
160 enum ScaleFactor {
161 no_scale = -1,
162 times_1 = 0,
163 times_2 = 1,
164 times_4 = 2,
165 times_8 = 3,
166 times_ptr = LP64_ONLY(times_8) NOT_LP64(times_4)
167 };
168 static ScaleFactor times(int size) {
169 assert(size >= 1 && size <= 8 && is_power_of_2(size), "bad scale size");
170 if (size == 8) return times_8;
171 if (size == 4) return times_4;
172 if (size == 2) return times_2;
173 return times_1;
174 }
175 static int scale_size(ScaleFactor scale) {
176 assert(scale != no_scale, "");
177 assert(((1 << (int)times_1) == 1 &&
178 (1 << (int)times_2) == 2 &&
179 (1 << (int)times_4) == 4 &&
180 (1 << (int)times_8) == 8), "");
181 return (1 << (int)scale);
182 }
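
  // For example, times(8) == times_8 and scale_size(times_8) == 8, so an element
  // of size 8 indexed by rcx contributes rcx*8 to the effective address.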
183
184 private:
185 Register _base;
186 Register _index;
187 XMMRegister _xmmindex;
188 ScaleFactor _scale;
189 int _disp;
190 bool _isxmmindex;
191 RelocationHolder _rspec;
192
  // These constructors are easily misused, so keep them private.
  // %%% can we make these go away?
195 NOT_LP64(Address(address loc, RelocationHolder spec);)
196 Address(int disp, address loc, relocInfo::relocType rtype);
197 Address(int disp, address loc, RelocationHolder spec);
198
199 public:
200
201 int disp() { return _disp; }
202 // creation
203 Address()
204 : _base(noreg),
205 _index(noreg),
206 _xmmindex(xnoreg),
207 _scale(no_scale),
208 _disp(0),
209 _isxmmindex(false){
210 }
211
  // No default displacement, otherwise a Register can be implicitly
  // converted to 0(Register), which is quite a different animal.
214
215 Address(Register base, int disp)
216 : _base(base),
217 _index(noreg),
218 _xmmindex(xnoreg),
219 _scale(no_scale),
220 _disp(disp),
221 _isxmmindex(false){
222 }
223
224 Address(Register base, Register index, ScaleFactor scale, int disp = 0)
225 : _base (base),
226 _index(index),
227 _xmmindex(xnoreg),
228 _scale(scale),
229 _disp (disp),
230 _isxmmindex(false) {
231 assert(!index->is_valid() == (scale == Address::no_scale),
232 "inconsistent address");
233 }
234
235 Address(Register base, RegisterOrConstant index, ScaleFactor scale = times_1, int disp = 0)
236 : _base (base),
237 _index(index.register_or_noreg()),
238 _xmmindex(xnoreg),
239 _scale(scale),
240 _disp (disp + (index.constant_or_zero() * scale_size(scale))),
241 _isxmmindex(false){
242 if (!index.is_register()) scale = Address::no_scale;
243 assert(!_index->is_valid() == (scale == Address::no_scale),
244 "inconsistent address");
245 }
246
247 Address(Register base, XMMRegister index, ScaleFactor scale, int disp = 0)
248 : _base (base),
249 _index(noreg),
250 _xmmindex(index),
251 _scale(scale),
252 _disp(disp),
253 _isxmmindex(true) {
254 assert(!index->is_valid() == (scale == Address::no_scale),
255 "inconsistent address");
256 }
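
  // Illustrative uses of the constructors above (a sketch, not an exhaustive list):
  //   Address(rsp, 16)                         // [rsp + 16]
  //   Address(rbx, rcx, Address::times_8, 24)  // [rbx + rcx*8 + 24]
  //   Address(rax, xmm1, Address::times_4)     // [rax + xmm1*4], gather/scatter-style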
257
258 Address plus_disp(int disp) const {
259 Address a = (*this);
260 a._disp += disp;
261 return a;
262 }
263 Address plus_disp(RegisterOrConstant disp, ScaleFactor scale = times_1) const {
264 Address a = (*this);
265 a._disp += disp.constant_or_zero() * scale_size(scale);
266 if (disp.is_register()) {
267 assert(!a.index()->is_valid(), "competing indexes");
268 a._index = disp.as_register();
269 a._scale = scale;
270 }
271 return a;
272 }
273 bool is_same_address(Address a) const {
274 // disregard _rspec
275 return _base == a._base && _disp == a._disp && _index == a._index && _scale == a._scale;
276 }
277
278 // The following two overloads are used in connection with the
279 // ByteSize type (see sizes.hpp). They simplify the use of
280 // ByteSize'd arguments in assembly code. Note that their equivalent
281 // for the optimized build are the member functions with int disp
282 // argument since ByteSize is mapped to an int type in that case.
283 //
284 // Note: DO NOT introduce similar overloaded functions for WordSize
285 // arguments as in the optimized mode, both ByteSize and WordSize
286 // are mapped to the same type and thus the compiler cannot make a
287 // distinction anymore (=> compiler errors).
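
  // For example, given some ByteSize-returning offset accessor foo_offset() (a
  // hypothetical name), Address(rdx, foo_offset()) denotes
  // [rdx + in_bytes(foo_offset())] in a debug build; in an optimized build ByteSize
  // is mapped to a plain int and the int-disp constructors above are used instead.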
288
289 #ifdef ASSERT
290 Address(Register base, ByteSize disp)
291 : _base(base),
292 _index(noreg),
293 _xmmindex(xnoreg),
294 _scale(no_scale),
295 _disp(in_bytes(disp)),
296 _isxmmindex(false){
297 }
298
299 Address(Register base, Register index, ScaleFactor scale, ByteSize disp)
300 : _base(base),
301 _index(index),
302 _xmmindex(xnoreg),
303 _scale(scale),
304 _disp(in_bytes(disp)),
305 _isxmmindex(false){
306 assert(!index->is_valid() == (scale == Address::no_scale),
307 "inconsistent address");
308 }
309 Address(Register base, RegisterOrConstant index, ScaleFactor scale, ByteSize disp)
310 : _base (base),
311 _index(index.register_or_noreg()),
312 _xmmindex(xnoreg),
313 _scale(scale),
314 _disp (in_bytes(disp) + (index.constant_or_zero() * scale_size(scale))),
315 _isxmmindex(false) {
316 if (!index.is_register()) scale = Address::no_scale;
317 assert(!_index->is_valid() == (scale == Address::no_scale),
318 "inconsistent address");
319 }
320
321 #endif // ASSERT
322
323 // accessors
324 bool uses(Register reg) const { return _base == reg || _index == reg; }
325 Register base() const { return _base; }
326 Register index() const { return _index; }
327 XMMRegister xmmindex() const { return _xmmindex; }
328 ScaleFactor scale() const { return _scale; }
329 int disp() const { return _disp; }
330 bool isxmmindex() const { return _isxmmindex; }
331
332 // Convert the raw encoding form into the form expected by the constructor for
333 // Address. An index of 4 (rsp) corresponds to having no index, so convert
334 // that to noreg for the Address constructor.
335 static Address make_raw(int base, int index, int scale, int disp, relocInfo::relocType disp_reloc);
336
337 static Address make_array(ArrayAddress);
338
339 private:
340 bool base_needs_rex() const {
341 return _base != noreg && _base->encoding() >= 8;
342 }
343
344 bool index_needs_rex() const {
345 return _index != noreg &&_index->encoding() >= 8;
346 }
347
348 bool xmmindex_needs_rex() const {
349 return _xmmindex != xnoreg && _xmmindex->encoding() >= 8;
350 }
351
352 relocInfo::relocType reloc() const { return _rspec.type(); }
353
354 friend class Assembler;
355 friend class MacroAssembler;
356 friend class LIR_Assembler; // base/index/scale/disp
357 };
358
359 //
360 // AddressLiteral has been split out from Address because operands of this type
361 // need to be treated specially on 32bit vs. 64bit platforms. By splitting it out
362 // the few instructions that need to deal with address literals are unique and the
363 // MacroAssembler does not have to implement every instruction in the Assembler
364 // in order to search for address literals that may need special handling depending
// on the instruction and the platform. This is a small step on the way to merging the
// i486/amd64 directories.
367 //
368 class AddressLiteral {
369 friend class ArrayAddress;
370 RelocationHolder _rspec;
  // Typically when we use an AddressLiteral we want its rval.
  // However, in some situations we want the lval (effective address) of the item.
  // We provide a special factory for making those lvals.
374 bool _is_lval;
375
  // If the target is far we'll need to load the ea of this into
  // a register to reach it. Otherwise, if it is near, we can use
  // rip-relative addressing.
379
380 address _target;
381
382 protected:
383 // creation
384 AddressLiteral()
385 : _is_lval(false),
386 _target(NULL)
387 {}
388
389 public:
390
391
392 AddressLiteral(address target, relocInfo::relocType rtype);
393
394 AddressLiteral(address target, RelocationHolder const& rspec)
395 : _rspec(rspec),
396 _is_lval(false),
397 _target(target)
398 {}
399
400 AddressLiteral addr() {
401 AddressLiteral ret = *this;
402 ret._is_lval = true;
403 return ret;
404 }
405
406
407 private:
408
409 address target() { return _target; }
410 bool is_lval() { return _is_lval; }
411
412 relocInfo::relocType reloc() const { return _rspec.type(); }
413 const RelocationHolder& rspec() const { return _rspec; }
414
415 friend class Assembler;
416 friend class MacroAssembler;
417 friend class Address;
418 friend class LIR_Assembler;
419 };
420
// Convenience classes
422 class RuntimeAddress: public AddressLiteral {
423
424 public:
425
426 RuntimeAddress(address target) : AddressLiteral(target, relocInfo::runtime_call_type) {}
427
428 };
429
430 class ExternalAddress: public AddressLiteral {
431 private:
432 static relocInfo::relocType reloc_for_target(address target) {
433 // Sometimes ExternalAddress is used for values which aren't
434 // exactly addresses, like the card table base.
435 // external_word_type can't be used for values in the first page
436 // so just skip the reloc in that case.
437 return external_word_Relocation::can_be_relocated(target) ? relocInfo::external_word_type : relocInfo::none;
438 }
439
440 public:
441
442 ExternalAddress(address target) : AddressLiteral(target, reloc_for_target(target)) {}
443
444 };
445
446 class InternalAddress: public AddressLiteral {
447
448 public:
449
450 InternalAddress(address target) : AddressLiteral(target, relocInfo::internal_word_type) {}
451
452 };
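
// Illustrative uses of the convenience classes above, assuming the usual "__"
// shorthand for a MacroAssembler* and its AddressLiteral-taking lea/call variants
// (some_global and some_entry_point are hypothetical names):
//   __ lea(rscratch1, ExternalAddress((address)&some_global));
//   __ call(RuntimeAddress(some_entry_point));
// Whether rip-relative addressing can be used or the literal must be materialized
// in a register is decided by the MacroAssembler based on reachability.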
453
// x86 can do array addressing as a single operation since disp can be an absolute
// address; amd64 can't. We create a class that expresses the concept but does extra
// magic on amd64 to get the final result.
457
458 class ArrayAddress {
459 private:
460
461 AddressLiteral _base;
462 Address _index;
463
464 public:
465
466 ArrayAddress() {};
467 ArrayAddress(AddressLiteral base, Address index): _base(base), _index(index) {};
468 AddressLiteral base() { return _base; }
469 Address index() { return _index; }
470
471 };
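
// For example (an illustrative sketch, with a_table a hypothetical native array of
// 8-byte elements):
//   ArrayAddress(ExternalAddress((address)a_table), Address(noreg, rbx, Address::times_8))
// names the rbx-th element of a_table. On 32-bit the absolute base can be folded into
// the displacement; on amd64 the base must first be loaded into a register, which is
// the "extra magic" mentioned above.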
472
473 class InstructionAttr;
474
// The 64-bit value reflects the fxsave size, which is 512 bytes, plus the new xsave
// area used with EVEX, which is another 2176 bytes (512 + 2176 = 2688 bytes in total).
// See the fxsave and xsave (EVEX enabled) documentation for the layout.
const int FPUStateSizeInWords = NOT_LP64(27) LP64_ONLY(2688 / wordSize);
478
479 // The Intel x86/Amd64 Assembler: Pure assembler doing NO optimizations on the instruction
480 // level (e.g. mov rax, 0 is not translated into xor rax, rax!); i.e., what you write
481 // is what you get. The Assembler is generating code into a CodeBuffer.
482
483 class Assembler : public AbstractAssembler {
484 friend class AbstractAssembler; // for the non-virtual hack
485 friend class LIR_Assembler; // as_Address()
486 friend class StubGenerator;
487
488 public:
489 enum Condition { // The x86 condition codes used for conditional jumps/moves.
490 zero = 0x4,
491 notZero = 0x5,
492 equal = 0x4,
493 notEqual = 0x5,
494 less = 0xc,
495 lessEqual = 0xe,
496 greater = 0xf,
497 greaterEqual = 0xd,
498 below = 0x2,
499 belowEqual = 0x6,
500 above = 0x7,
501 aboveEqual = 0x3,
502 overflow = 0x0,
503 noOverflow = 0x1,
504 carrySet = 0x2,
505 carryClear = 0x3,
506 negative = 0x8,
507 positive = 0x9,
508 parity = 0xa,
509 noParity = 0xb
510 };
511
512 enum Prefix {
513 // segment overrides
514 CS_segment = 0x2e,
515 SS_segment = 0x36,
516 DS_segment = 0x3e,
517 ES_segment = 0x26,
518 FS_segment = 0x64,
519 GS_segment = 0x65,
520
521 REX = 0x40,
522
523 REX_B = 0x41,
524 REX_X = 0x42,
525 REX_XB = 0x43,
526 REX_R = 0x44,
527 REX_RB = 0x45,
528 REX_RX = 0x46,
529 REX_RXB = 0x47,
530
531 REX_W = 0x48,
532
533 REX_WB = 0x49,
534 REX_WX = 0x4A,
535 REX_WXB = 0x4B,
536 REX_WR = 0x4C,
537 REX_WRB = 0x4D,
538 REX_WRX = 0x4E,
539 REX_WRXB = 0x4F,
540
541 VEX_3bytes = 0xC4,
542 VEX_2bytes = 0xC5,
543 EVEX_4bytes = 0x62,
544 Prefix_EMPTY = 0x0
545 };
546
547 enum VexPrefix {
548 VEX_B = 0x20,
549 VEX_X = 0x40,
550 VEX_R = 0x80,
551 VEX_W = 0x80
552 };
553
554 enum ExexPrefix {
555 EVEX_F = 0x04,
556 EVEX_V = 0x08,
557 EVEX_Rb = 0x10,
558 EVEX_X = 0x40,
559 EVEX_Z = 0x80
560 };
561
562 enum VexSimdPrefix {
563 VEX_SIMD_NONE = 0x0,
564 VEX_SIMD_66 = 0x1,
565 VEX_SIMD_F3 = 0x2,
566 VEX_SIMD_F2 = 0x3
567 };
568
569 enum VexOpcode {
570 VEX_OPCODE_NONE = 0x0,
571 VEX_OPCODE_0F = 0x1,
572 VEX_OPCODE_0F_38 = 0x2,
573 VEX_OPCODE_0F_3A = 0x3,
574 VEX_OPCODE_MASK = 0x1F
575 };
576
577 enum AvxVectorLen {
578 AVX_128bit = 0x0,
579 AVX_256bit = 0x1,
580 AVX_512bit = 0x2,
581 AVX_NoVec = 0x4
582 };
583
584 enum EvexTupleType {
585 EVEX_FV = 0,
586 EVEX_HV = 4,
587 EVEX_FVM = 6,
588 EVEX_T1S = 7,
589 EVEX_T1F = 11,
590 EVEX_T2 = 13,
591 EVEX_T4 = 15,
592 EVEX_T8 = 17,
593 EVEX_HVM = 18,
594 EVEX_QVM = 19,
595 EVEX_OVM = 20,
596 EVEX_M128 = 21,
597 EVEX_DUP = 22,
598 EVEX_ETUP = 23
599 };
600
601 enum EvexInputSizeInBits {
602 EVEX_8bit = 0,
603 EVEX_16bit = 1,
604 EVEX_32bit = 2,
605 EVEX_64bit = 3,
606 EVEX_NObit = 4
607 };
608
609 enum WhichOperand {
610 // input to locate_operand, and format code for relocations
611 imm_operand = 0, // embedded 32-bit|64-bit immediate operand
612 disp32_operand = 1, // embedded 32-bit displacement or address
613 call32_operand = 2, // embedded 32-bit self-relative displacement
614 #ifndef _LP64
615 _WhichOperand_limit = 3
616 #else
617 narrow_oop_operand = 3, // embedded 32-bit immediate narrow oop
618 _WhichOperand_limit = 4
619 #endif
620 };
621
622 enum ComparisonPredicate {
623 eq = 0,
624 lt = 1,
625 le = 2,
626 _false = 3,
627 neq = 4,
628 nlt = 5,
629 nle = 6,
630 _true = 7
631 };
632
633 //---< calculate length of instruction >---
  // As the instruction size can't be determined easily on x86/x64,
  // we just use '4' for len and maxlen.
  // The instruction must start at the passed address.
  static unsigned int instr_len(unsigned char *instr) { return 4; }
638
639 //---< longest instructions >---
640 // Max instruction length is not specified in architecture documentation.
  // We could use a "safe enough" estimate (15), but just default to the
  // instruction length guess from above.
643 static unsigned int instr_maxlen() { return 4; }
644
  // NOTE: The general philosophy of the declarations here is that 64bit versions
  // of instructions are freely declared without the need for wrapping them in an ifdef.
  // (Some dangerous instructions are ifdef'd out of inappropriate jvms.)
  // In the .cpp file the implementations are wrapped so that they are dropped out
  // of the resulting jvm. This is done mostly to keep the footprint of MINIMAL
  // to the size it was prior to merging up the 32bit and 64bit assemblers.
651 //
652 // This does mean you'll get a linker/runtime error if you use a 64bit only instruction
653 // in a 32bit vm. This is somewhat unfortunate but keeps the ifdef noise down.
654
655 private:
656
657 bool _legacy_mode_bw;
658 bool _legacy_mode_dq;
659 bool _legacy_mode_vl;
660 bool _legacy_mode_vlbw;
661 bool _is_managed;
662 bool _vector_masking; // For stub code use only
663
664 class InstructionAttr *_attributes;
665
666 // 64bit prefixes
667 int prefix_and_encode(int reg_enc, bool byteinst = false);
668 int prefixq_and_encode(int reg_enc);
669
670 int prefix_and_encode(int dst_enc, int src_enc) {
671 return prefix_and_encode(dst_enc, false, src_enc, false);
672 }
673 int prefix_and_encode(int dst_enc, bool dst_is_byte, int src_enc, bool src_is_byte);
674 int prefixq_and_encode(int dst_enc, int src_enc);
675
676 void prefix(Register reg);
677 void prefix(Register dst, Register src, Prefix p);
678 void prefix(Register dst, Address adr, Prefix p);
679 void prefix(Address adr);
680 void prefixq(Address adr);
681
682 void prefix(Address adr, Register reg, bool byteinst = false);
683 void prefix(Address adr, XMMRegister reg);
684 void prefixq(Address adr, Register reg);
685 void prefixq(Address adr, XMMRegister reg);
686
687 void prefetch_prefix(Address src);
688
689 void rex_prefix(Address adr, XMMRegister xreg,
690 VexSimdPrefix pre, VexOpcode opc, bool rex_w);
691 int rex_prefix_and_encode(int dst_enc, int src_enc,
692 VexSimdPrefix pre, VexOpcode opc, bool rex_w);
693
694 void vex_prefix(bool vex_r, bool vex_b, bool vex_x, int nds_enc, VexSimdPrefix pre, VexOpcode opc);
695
696 void evex_prefix(bool vex_r, bool vex_b, bool vex_x, bool evex_r, bool evex_v,
697 int nds_enc, VexSimdPrefix pre, VexOpcode opc);
698
699 void vex_prefix(Address adr, int nds_enc, int xreg_enc,
700 VexSimdPrefix pre, VexOpcode opc,
701 InstructionAttr *attributes);
702
703 int vex_prefix_and_encode(int dst_enc, int nds_enc, int src_enc,
704 VexSimdPrefix pre, VexOpcode opc,
705 InstructionAttr *attributes);
706
707 void simd_prefix(XMMRegister xreg, XMMRegister nds, Address adr, VexSimdPrefix pre,
708 VexOpcode opc, InstructionAttr *attributes);
709
710 int simd_prefix_and_encode(XMMRegister dst, XMMRegister nds, XMMRegister src, VexSimdPrefix pre,
711 VexOpcode opc, InstructionAttr *attributes);
712
713 // Helper functions for groups of instructions
714 void emit_arith_b(int op1, int op2, Register dst, int imm8);
715
716 void emit_arith(int op1, int op2, Register dst, int32_t imm32);
  // Force generation of a 4 byte immediate value even if it fits into 8 bits
718 void emit_arith_imm32(int op1, int op2, Register dst, int32_t imm32);
719 void emit_arith(int op1, int op2, Register dst, Register src);
720
721 bool emit_compressed_disp_byte(int &disp);
722
723 void emit_operand(Register reg,
724 Register base, Register index, Address::ScaleFactor scale,
725 int disp,
726 RelocationHolder const& rspec,
727 int rip_relative_correction = 0);
728
729 void emit_operand(XMMRegister reg, Register base, XMMRegister index,
730 Address::ScaleFactor scale,
731 int disp, RelocationHolder const& rspec);
732
733 void emit_operand(Register reg, Address adr, int rip_relative_correction = 0);
734
735 // operands that only take the original 32bit registers
736 void emit_operand32(Register reg, Address adr);
737
738 void emit_operand(XMMRegister reg,
739 Register base, Register index, Address::ScaleFactor scale,
740 int disp,
741 RelocationHolder const& rspec);
742
743 void emit_operand(XMMRegister reg, Address adr);
744
745 void emit_operand(MMXRegister reg, Address adr);
746
747 // workaround gcc (3.2.1-7) bug
748 void emit_operand(Address adr, MMXRegister reg);
749
750
751 // Immediate-to-memory forms
752 void emit_arith_operand(int op1, Register rm, Address adr, int32_t imm32);
753
754 void emit_farith(int b1, int b2, int i);
755
756
757 protected:
758 #ifdef ASSERT
759 void check_relocation(RelocationHolder const& rspec, int format);
760 #endif
761
762 void emit_data(jint data, relocInfo::relocType rtype, int format);
763 void emit_data(jint data, RelocationHolder const& rspec, int format);
764 void emit_data64(jlong data, relocInfo::relocType rtype, int format = 0);
765 void emit_data64(jlong data, RelocationHolder const& rspec, int format = 0);
766
767 bool reachable(AddressLiteral adr) NOT_LP64({ return true;});
768
769 // These are all easily abused and hence protected
770
771 // 32BIT ONLY SECTION
772 #ifndef _LP64
773 // Make these disappear in 64bit mode since they would never be correct
774 void cmp_literal32(Register src1, int32_t imm32, RelocationHolder const& rspec); // 32BIT ONLY
775 void cmp_literal32(Address src1, int32_t imm32, RelocationHolder const& rspec); // 32BIT ONLY
776
777 void mov_literal32(Register dst, int32_t imm32, RelocationHolder const& rspec); // 32BIT ONLY
778 void mov_literal32(Address dst, int32_t imm32, RelocationHolder const& rspec); // 32BIT ONLY
779
780 void push_literal32(int32_t imm32, RelocationHolder const& rspec); // 32BIT ONLY
781 #else
782 // 64BIT ONLY SECTION
783 void mov_literal64(Register dst, intptr_t imm64, RelocationHolder const& rspec); // 64BIT ONLY
784
785 void cmp_narrow_oop(Register src1, int32_t imm32, RelocationHolder const& rspec);
786 void cmp_narrow_oop(Address src1, int32_t imm32, RelocationHolder const& rspec);
787
788 void mov_narrow_oop(Register dst, int32_t imm32, RelocationHolder const& rspec);
789 void mov_narrow_oop(Address dst, int32_t imm32, RelocationHolder const& rspec);
790 #endif // _LP64
791
  // These are unique in that the caller ensures that the 32bit relative
  // displacement in these instructions will always be able to reach the potentially
  // 64bit address described by entry. Since they can take a 64bit address they
  // don't have the 32 suffix like the other instructions in this class.
796
797 void call_literal(address entry, RelocationHolder const& rspec);
798 void jmp_literal(address entry, RelocationHolder const& rspec);
799
  // Avoid-using-directly section
  // Instructions in this section are actually usable by anyone without danger
  // of failure but have performance issues that are addressed by enhanced
  // instructions which will do the proper thing based on the particular cpu.
  // We protect them because we don't trust you...

  // Don't use the following inc() and dec() methods directly. The INC and DEC
  // instructions can cause a partial flag stall since they don't set the CF flag.
  // Use the MacroAssembler::decrement() & MacroAssembler::increment() methods
  // which call inc() & dec() or add() & sub() in accordance with
  // the product flag UseIncDec value.
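
  // Illustrative sketch, assuming the usual "__" shorthand for a MacroAssembler*:
  //   __ increment(rbx);   // expands to inc or add depending on UseIncDec
  // is preferred over calling incl(rbx) here directly.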
811
812 void decl(Register dst);
813 void decl(Address dst);
814 void decq(Register dst);
815 void decq(Address dst);
816
817 void incl(Register dst);
818 void incl(Address dst);
819 void incq(Register dst);
820 void incq(Address dst);
821
  // Newer cpus require the use of movsd and movss to avoid a partial register stall
  // when loading from memory. But for old Opterons use movlpd instead of movsd.
  // The selection is done in MacroAssembler::movdbl() and movflt().
825
826 // Move Scalar Single-Precision Floating-Point Values
827 void movss(XMMRegister dst, Address src);
828 void movss(XMMRegister dst, XMMRegister src);
829 void movss(Address dst, XMMRegister src);
830
831 // Move Scalar Double-Precision Floating-Point Values
832 void movsd(XMMRegister dst, Address src);
833 void movsd(XMMRegister dst, XMMRegister src);
834 void movsd(Address dst, XMMRegister src);
835 void movlpd(XMMRegister dst, Address src);
836
  // Newer cpus require the use of movaps and movapd to avoid a partial register stall
  // when moving between registers.
839 void movaps(XMMRegister dst, XMMRegister src);
840 void movapd(XMMRegister dst, XMMRegister src);
841
842 // End avoid using directly
843
844
845 // Instruction prefixes
846 void prefix(Prefix p);
847
848 public:
849
850 // Creation
851 Assembler(CodeBuffer* code) : AbstractAssembler(code) {
852 init_attributes();
853 }
854
855 // Decoding
856 static address locate_operand(address inst, WhichOperand which);
857 static address locate_next_instruction(address inst);
858
859 // Utilities
860 static bool is_polling_page_far() NOT_LP64({ return false;});
861 static bool query_compressed_disp_byte(int disp, bool is_evex_inst, int vector_len,
862 int cur_tuple_type, int in_size_in_bits, int cur_encoding);
863
864 // Generic instructions
  // Does 32bit or 64bit as needed for the platform. In some sense these
  // belong in the macro assembler, but there is no need for both varieties to exist.
867
868 void init_attributes(void) {
869 _legacy_mode_bw = (VM_Version::supports_avx512bw() == false);
870 _legacy_mode_dq = (VM_Version::supports_avx512dq() == false);
871 _legacy_mode_vl = (VM_Version::supports_avx512vl() == false);
872 _legacy_mode_vlbw = (VM_Version::supports_avx512vlbw() == false);
873 _is_managed = false;
874 _vector_masking = false;
875 _attributes = NULL;
876 }
877
878 void set_attributes(InstructionAttr *attributes) { _attributes = attributes; }
879 void clear_attributes(void) { _attributes = NULL; }
880
881 void set_managed(void) { _is_managed = true; }
882 void clear_managed(void) { _is_managed = false; }
883 bool is_managed(void) { return _is_managed; }
884
885 void lea(Register dst, Address src);
886
887 void mov(Register dst, Register src);
888
889 void pusha();
890 void popa();
891
892 void pushf();
893 void popf();
894
895 void push(int32_t imm32);
896
897 void push(Register src);
898
899 void pop(Register dst);
900
901 // These are dummies to prevent surprise implicit conversions to Register
902 void push(void* v);
903 void pop(void* v);
904
905 // These do register sized moves/scans
906 void rep_mov();
907 void rep_stos();
908 void rep_stosb();
909 void repne_scan();
910 #ifdef _LP64
911 void repne_scanl();
912 #endif
913
914 // Vanilla instructions in lexical order
915
916 void adcl(Address dst, int32_t imm32);
917 void adcl(Address dst, Register src);
918 void adcl(Register dst, int32_t imm32);
919 void adcl(Register dst, Address src);
920 void adcl(Register dst, Register src);
921
922 void adcq(Register dst, int32_t imm32);
923 void adcq(Register dst, Address src);
924 void adcq(Register dst, Register src);
925
926 void addb(Address dst, int imm8);
927 void addw(Address dst, int imm16);
928
929 void addl(Address dst, int32_t imm32);
930 void addl(Address dst, Register src);
931 void addl(Register dst, int32_t imm32);
932 void addl(Register dst, Address src);
933 void addl(Register dst, Register src);
934
935 void addq(Address dst, int32_t imm32);
936 void addq(Address dst, Register src);
937 void addq(Register dst, int32_t imm32);
938 void addq(Register dst, Address src);
939 void addq(Register dst, Register src);
940
941 #ifdef _LP64
  // Add Unsigned Integers with Carry Flag
943 void adcxq(Register dst, Register src);
944
  // Add Unsigned Integers with Overflow Flag
946 void adoxq(Register dst, Register src);
947 #endif
948
949 void addr_nop_4();
950 void addr_nop_5();
951 void addr_nop_7();
952 void addr_nop_8();
953
954 // Add Scalar Double-Precision Floating-Point Values
955 void addsd(XMMRegister dst, Address src);
956 void addsd(XMMRegister dst, XMMRegister src);
957
958 // Add Scalar Single-Precision Floating-Point Values
959 void addss(XMMRegister dst, Address src);
960 void addss(XMMRegister dst, XMMRegister src);
961
962 // AES instructions
963 void aesdec(XMMRegister dst, Address src);
964 void aesdec(XMMRegister dst, XMMRegister src);
965 void aesdeclast(XMMRegister dst, Address src);
966 void aesdeclast(XMMRegister dst, XMMRegister src);
967 void aesenc(XMMRegister dst, Address src);
968 void aesenc(XMMRegister dst, XMMRegister src);
969 void aesenclast(XMMRegister dst, Address src);
970 void aesenclast(XMMRegister dst, XMMRegister src);
971 // Vector AES instructions
972 void vaesenc(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
973 void vaesenclast(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
974 void vaesdec(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
975 void vaesdeclast(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
976
977 void andl(Address dst, int32_t imm32);
978 void andl(Register dst, int32_t imm32);
979 void andl(Register dst, Address src);
980 void andl(Register dst, Register src);
981
982 void andq(Address dst, int32_t imm32);
983 void andq(Register dst, int32_t imm32);
984 void andq(Register dst, Address src);
985 void andq(Register dst, Register src);
986
987 // BMI instructions
988 void andnl(Register dst, Register src1, Register src2);
989 void andnl(Register dst, Register src1, Address src2);
990 void andnq(Register dst, Register src1, Register src2);
991 void andnq(Register dst, Register src1, Address src2);
992
993 void blsil(Register dst, Register src);
994 void blsil(Register dst, Address src);
995 void blsiq(Register dst, Register src);
996 void blsiq(Register dst, Address src);
997
998 void blsmskl(Register dst, Register src);
999 void blsmskl(Register dst, Address src);
1000 void blsmskq(Register dst, Register src);
1001 void blsmskq(Register dst, Address src);
1002
1003 void blsrl(Register dst, Register src);
1004 void blsrl(Register dst, Address src);
1005 void blsrq(Register dst, Register src);
1006 void blsrq(Register dst, Address src);
1007
1008 void bsfl(Register dst, Register src);
1009 void bsrl(Register dst, Register src);
1010
1011 #ifdef _LP64
1012 void bsfq(Register dst, Register src);
1013 void bsrq(Register dst, Register src);
1014 #endif
1015
1016 void bswapl(Register reg);
1017
1018 void bswapq(Register reg);
1019
1020 void call(Label& L, relocInfo::relocType rtype);
1021 void call(Register reg); // push pc; pc <- reg
1022 void call(Address adr); // push pc; pc <- adr
1023
1024 void cdql();
1025
1026 void cdqq();
1027
1028 void cld();
1029
1030 void clflush(Address adr);
1031 void clflushopt(Address adr);
1032 void clwb(Address adr);
1033
1034 void cmovl(Condition cc, Register dst, Register src);
1035 void cmovl(Condition cc, Register dst, Address src);
1036
1037 void cmovq(Condition cc, Register dst, Register src);
1038 void cmovq(Condition cc, Register dst, Address src);
1039
1040
1041 void cmpb(Address dst, int imm8);
1042
1043 void cmpl(Address dst, int32_t imm32);
1044
1045 void cmpl(Register dst, int32_t imm32);
1046 void cmpl(Register dst, Register src);
1047 void cmpl(Register dst, Address src);
1048
1049 void cmpq(Address dst, int32_t imm32);
1050 void cmpq(Address dst, Register src);
1051
1052 void cmpq(Register dst, int32_t imm32);
1053 void cmpq(Register dst, Register src);
1054 void cmpq(Register dst, Address src);
1055
  // These are dummies used to catch attempts to convert NULL to a Register
1057 void cmpl(Register dst, void* junk); // dummy
1058 void cmpq(Register dst, void* junk); // dummy
1059
1060 void cmpw(Address dst, int imm16);
1061
1062 void cmpxchg8 (Address adr);
1063
1064 void cmpxchgb(Register reg, Address adr);
1065 void cmpxchgl(Register reg, Address adr);
1066
1067 void cmpxchgq(Register reg, Address adr);
1068
1069 // Ordered Compare Scalar Double-Precision Floating-Point Values and set EFLAGS
1070 void comisd(XMMRegister dst, Address src);
1071 void comisd(XMMRegister dst, XMMRegister src);
1072
1073 // Ordered Compare Scalar Single-Precision Floating-Point Values and set EFLAGS
1074 void comiss(XMMRegister dst, Address src);
1075 void comiss(XMMRegister dst, XMMRegister src);
1076
1077 // Identify processor type and features
1078 void cpuid();
1079
1080 // CRC32C
1081 void crc32(Register crc, Register v, int8_t sizeInBytes);
1082 void crc32(Register crc, Address adr, int8_t sizeInBytes);
1083
1084 // Convert Scalar Double-Precision Floating-Point Value to Scalar Single-Precision Floating-Point Value
1085 void cvtsd2ss(XMMRegister dst, XMMRegister src);
1086 void cvtsd2ss(XMMRegister dst, Address src);
1087
1088 // Convert Doubleword Integer to Scalar Double-Precision Floating-Point Value
1089 void cvtsi2sdl(XMMRegister dst, Register src);
1090 void cvtsi2sdl(XMMRegister dst, Address src);
1091 void cvtsi2sdq(XMMRegister dst, Register src);
1092 void cvtsi2sdq(XMMRegister dst, Address src);
1093
1094 // Convert Doubleword Integer to Scalar Single-Precision Floating-Point Value
1095 void cvtsi2ssl(XMMRegister dst, Register src);
1096 void cvtsi2ssl(XMMRegister dst, Address src);
1097 void cvtsi2ssq(XMMRegister dst, Register src);
1098 void cvtsi2ssq(XMMRegister dst, Address src);
1099
1100 // Convert Packed Signed Doubleword Integers to Packed Double-Precision Floating-Point Value
1101 void cvtdq2pd(XMMRegister dst, XMMRegister src);
1102
1103 // Convert Packed Signed Doubleword Integers to Packed Single-Precision Floating-Point Value
1104 void cvtdq2ps(XMMRegister dst, XMMRegister src);
1105
1106 // Convert Scalar Single-Precision Floating-Point Value to Scalar Double-Precision Floating-Point Value
1107 void cvtss2sd(XMMRegister dst, XMMRegister src);
1108 void cvtss2sd(XMMRegister dst, Address src);
1109
1110 // Convert with Truncation Scalar Double-Precision Floating-Point Value to Doubleword Integer
1111 void cvttsd2sil(Register dst, Address src);
1112 void cvttsd2sil(Register dst, XMMRegister src);
1113 void cvttsd2siq(Register dst, XMMRegister src);
1114
1115 // Convert with Truncation Scalar Single-Precision Floating-Point Value to Doubleword Integer
1116 void cvttss2sil(Register dst, XMMRegister src);
1117 void cvttss2siq(Register dst, XMMRegister src);
1118
1119 void cvttpd2dq(XMMRegister dst, XMMRegister src);
1120
  // Absolute value of packed integer values
1122 void pabsb(XMMRegister dst, XMMRegister src);
1123 void pabsw(XMMRegister dst, XMMRegister src);
1124 void pabsd(XMMRegister dst, XMMRegister src);
1125 void vpabsb(XMMRegister dst, XMMRegister src, int vector_len);
1126 void vpabsw(XMMRegister dst, XMMRegister src, int vector_len);
1127 void vpabsd(XMMRegister dst, XMMRegister src, int vector_len);
1128 void evpabsq(XMMRegister dst, XMMRegister src, int vector_len);
1129
1130 // Divide Scalar Double-Precision Floating-Point Values
1131 void divsd(XMMRegister dst, Address src);
1132 void divsd(XMMRegister dst, XMMRegister src);
1133
1134 // Divide Scalar Single-Precision Floating-Point Values
1135 void divss(XMMRegister dst, Address src);
1136 void divss(XMMRegister dst, XMMRegister src);
1137
1138 void emms();
1139
1140 void fabs();
1141
1142 void fadd(int i);
1143
1144 void fadd_d(Address src);
1145 void fadd_s(Address src);
1146
1147 // "Alternate" versions of x87 instructions place result down in FPU
1148 // stack instead of on TOS
1149
1150 void fadda(int i); // "alternate" fadd
1151 void faddp(int i = 1);
1152
1153 void fchs();
1154
1155 void fcom(int i);
1156
1157 void fcomp(int i = 1);
1158 void fcomp_d(Address src);
1159 void fcomp_s(Address src);
1160
1161 void fcompp();
1162
1163 void fcos();
1164
1165 void fdecstp();
1166
1167 void fdiv(int i);
1168 void fdiv_d(Address src);
1169 void fdivr_s(Address src);
1170 void fdiva(int i); // "alternate" fdiv
1171 void fdivp(int i = 1);
1172
1173 void fdivr(int i);
1174 void fdivr_d(Address src);
1175 void fdiv_s(Address src);
1176
1177 void fdivra(int i); // "alternate" reversed fdiv
1178
1179 void fdivrp(int i = 1);
1180
1181 void ffree(int i = 0);
1182
1183 void fild_d(Address adr);
1184 void fild_s(Address adr);
1185
1186 void fincstp();
1187
1188 void finit();
1189
1190 void fist_s (Address adr);
1191 void fistp_d(Address adr);
1192 void fistp_s(Address adr);
1193
1194 void fld1();
1195
1196 void fld_d(Address adr);
1197 void fld_s(Address adr);
1198 void fld_s(int index);
1199 void fld_x(Address adr); // extended-precision (80-bit) format
1200
1201 void fldcw(Address src);
1202
1203 void fldenv(Address src);
1204
1205 void fldlg2();
1206
1207 void fldln2();
1208
1209 void fldz();
1210
1211 void flog();
1212 void flog10();
1213
1214 void fmul(int i);
1215
1216 void fmul_d(Address src);
1217 void fmul_s(Address src);
1218
1219 void fmula(int i); // "alternate" fmul
1220
1221 void fmulp(int i = 1);
1222
1223 void fnsave(Address dst);
1224
1225 void fnstcw(Address src);
1226
1227 void fnstsw_ax();
1228
1229 void fprem();
1230 void fprem1();
1231
1232 void frstor(Address src);
1233
1234 void fsin();
1235
1236 void fsqrt();
1237
1238 void fst_d(Address adr);
1239 void fst_s(Address adr);
1240
1241 void fstp_d(Address adr);
1242 void fstp_d(int index);
1243 void fstp_s(Address adr);
1244 void fstp_x(Address adr); // extended-precision (80-bit) format
1245
1246 void fsub(int i);
1247 void fsub_d(Address src);
1248 void fsub_s(Address src);
1249
1250 void fsuba(int i); // "alternate" fsub
1251
1252 void fsubp(int i = 1);
1253
1254 void fsubr(int i);
1255 void fsubr_d(Address src);
1256 void fsubr_s(Address src);
1257
1258 void fsubra(int i); // "alternate" reversed fsub
1259
1260 void fsubrp(int i = 1);
1261
1262 void ftan();
1263
1264 void ftst();
1265
1266 void fucomi(int i = 1);
1267 void fucomip(int i = 1);
1268
1269 void fwait();
1270
1271 void fxch(int i = 1);
1272
1273 void fxrstor(Address src);
1274 void xrstor(Address src);
1275
1276 void fxsave(Address dst);
1277 void xsave(Address dst);
1278
1279 void fyl2x();
1280 void frndint();
1281 void f2xm1();
1282 void fldl2e();
1283
1284 void hlt();
1285
1286 void idivl(Register src);
1287 void divl(Register src); // Unsigned division
1288
1289 #ifdef _LP64
1290 void idivq(Register src);
1291 #endif
1292
1293 void imull(Register src);
1294 void imull(Register dst, Register src);
1295 void imull(Register dst, Register src, int value);
1296 void imull(Register dst, Address src);
1297
1298 #ifdef _LP64
1299 void imulq(Register dst, Register src);
1300 void imulq(Register dst, Register src, int value);
1301 void imulq(Register dst, Address src);
1302 #endif
1303
  // jcc is the generic conditional branch generator; it is used both for branches
  // to run-time routines and for branches to labels. jcc
  // takes a branch opcode (cc) and a label (L) and generates
  // either a backward branch or a forward branch and links it
  // to the label fixup chain. Usage:
1309 //
1310 // Label L; // unbound label
1311 // jcc(cc, L); // forward branch to unbound label
1312 // bind(L); // bind label to the current pc
1313 // jcc(cc, L); // backward branch to bound label
1314 // bind(L); // illegal: a label may be bound only once
1315 //
1316 // Note: The same Label can be used for forward and backward branches
1317 // but it may be bound only once.
1318
1319 void jcc(Condition cc, Label& L, bool maybe_short = true);
1320
  // Conditional jump with an 8-bit offset to L.
1322 // WARNING: be very careful using this for forward jumps. If the label is
1323 // not bound within an 8-bit offset of this instruction, a run-time error
1324 // will occur.
1325
1326 // Use macro to record file and line number.
1327 #define jccb(cc, L) jccb_0(cc, L, __FILE__, __LINE__)
1328
1329 void jccb_0(Condition cc, Label& L, const char* file, int line);
1330
1331 void jmp(Address entry); // pc <- entry
1332
1333 // Label operations & relative jumps (PPUM Appendix D)
1334 void jmp(Label& L, bool maybe_short = true); // unconditional jump to L
1335
1336 void jmp(Register entry); // pc <- entry
1337
1338 // Unconditional 8-bit offset jump to L.
1339 // WARNING: be very careful using this for forward jumps. If the label is
1340 // not bound within an 8-bit offset of this instruction, a run-time error
1341 // will occur.
1342
1343 // Use macro to record file and line number.
1344 #define jmpb(L) jmpb_0(L, __FILE__, __LINE__)
1345
1346 void jmpb_0(Label& L, const char* file, int line);
1347
1348 void ldmxcsr( Address src );
1349
1350 void leal(Register dst, Address src);
1351
1352 void leaq(Register dst, Address src);
1353
1354 void lfence();
1355
1356 void lock();
1357
1358 void lzcntl(Register dst, Register src);
1359
1360 #ifdef _LP64
1361 void lzcntq(Register dst, Register src);
1362 #endif
1363
1364 enum Membar_mask_bits {
1365 StoreStore = 1 << 3,
1366 LoadStore = 1 << 2,
1367 StoreLoad = 1 << 1,
1368 LoadLoad = 1 << 0
1369 };
1370
1371 // Serializes memory and blows flags
1372 void membar(Membar_mask_bits order_constraint) {
1373 // We only have to handle StoreLoad
1374 if (order_constraint & StoreLoad) {
      // All usable chips support "locked" instructions which suffice
      // as barriers, and are much faster than the alternative of
      // using the cpuid instruction. Here we use a locked add [esp-C],0.
1378 // This is conveniently otherwise a no-op except for blowing
1379 // flags, and introducing a false dependency on target memory
1380 // location. We can't do anything with flags, but we can avoid
1381 // memory dependencies in the current method by locked-adding
1382 // somewhere else on the stack. Doing [esp+C] will collide with
1383 // something on stack in current method, hence we go for [esp-C].
1384 // It is convenient since it is almost always in data cache, for
      // any small C. We need to step back from SP to avoid data
      // dependencies with other things below SP (callee-saves, for
1387 // example). Without a clear way to figure out the minimal safe
1388 // distance from SP, it makes sense to step back the complete
1389 // cache line, as this will also avoid possible second-order effects
1390 // with locked ops against the cache line. Our choice of offset
      // is bounded by x86 operand encoding, which should stay within
      // [-128; +127] to get the 8-bit (one-byte) displacement encoding.
1393 //
1394 // Any change to this code may need to revisit other places in
1395 // the code where this idiom is used, in particular the
1396 // orderAccess code.
1397
1398 int offset = -VM_Version::L1_line_size();
1399 if (offset < -128) {
1400 offset = -128;
1401 }
1402
1403 lock();
1404 addl(Address(rsp, offset), 0);// Assert the lock# signal here
1405 }
1406 }
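
  // For example, membar(Assembler::StoreLoad) typically emits
  //   lock; addl(Address(rsp, -VM_Version::L1_line_size()), 0)
  // with the offset clamped so it stays within the 8-bit displacement range, while
  // the other membar masks emit nothing, since x86 already orders those cases.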
1407
1408 void mfence();
1409 void sfence();
1410
1411 // Moves
1412
1413 void mov64(Register dst, int64_t imm64);
1414
1415 void movb(Address dst, Register src);
1416 void movb(Address dst, int imm8);
1417 void movb(Register dst, Address src);
1418
1419 void movddup(XMMRegister dst, XMMRegister src);
1420
1421 void kmovbl(KRegister dst, Register src);
1422 void kmovbl(Register dst, KRegister src);
1423 void kmovwl(KRegister dst, Register src);
1424 void kmovwl(KRegister dst, Address src);
1425 void kmovwl(Register dst, KRegister src);
1426 void kmovdl(KRegister dst, Register src);
1427 void kmovdl(Register dst, KRegister src);
1428 void kmovql(KRegister dst, KRegister src);
1429 void kmovql(Address dst, KRegister src);
1430 void kmovql(KRegister dst, Address src);
1431 void kmovql(KRegister dst, Register src);
1432 void kmovql(Register dst, KRegister src);
1433
1434 void knotwl(KRegister dst, KRegister src);
1435
1436 void kortestbl(KRegister dst, KRegister src);
1437 void kortestwl(KRegister dst, KRegister src);
1438 void kortestdl(KRegister dst, KRegister src);
1439 void kortestql(KRegister dst, KRegister src);
1440
1441 void ktestq(KRegister src1, KRegister src2);
1442 void ktestd(KRegister src1, KRegister src2);
1443
1444 void ktestql(KRegister dst, KRegister src);
1445
1446 void movdl(XMMRegister dst, Register src);
1447 void movdl(Register dst, XMMRegister src);
1448 void movdl(XMMRegister dst, Address src);
1449 void movdl(Address dst, XMMRegister src);
1450
1451 // Move Double Quadword
1452 void movdq(XMMRegister dst, Register src);
1453 void movdq(Register dst, XMMRegister src);
1454
1455 // Move Aligned Double Quadword
1456 void movdqa(XMMRegister dst, XMMRegister src);
1457 void movdqa(XMMRegister dst, Address src);
1458
1459 // Move Unaligned Double Quadword
1460 void movdqu(Address dst, XMMRegister src);
1461 void movdqu(XMMRegister dst, Address src);
1462 void movdqu(XMMRegister dst, XMMRegister src);
1463
1464 // Move Unaligned 256bit Vector
1465 void vmovdqu(Address dst, XMMRegister src);
1466 void vmovdqu(XMMRegister dst, Address src);
1467 void vmovdqu(XMMRegister dst, XMMRegister src);
1468
1469 // Move Unaligned 512bit Vector
1470 void evmovdqub(Address dst, XMMRegister src, int vector_len);
1471 void evmovdqub(XMMRegister dst, Address src, int vector_len);
1472 void evmovdqub(XMMRegister dst, XMMRegister src, int vector_len);
1473 void evmovdqub(XMMRegister dst, KRegister mask, Address src, int vector_len);
1474 void evmovdquw(Address dst, XMMRegister src, int vector_len);
1475 void evmovdquw(Address dst, KRegister mask, XMMRegister src, int vector_len);
1476 void evmovdquw(XMMRegister dst, Address src, int vector_len);
1477 void evmovdquw(XMMRegister dst, KRegister mask, Address src, int vector_len);
1478 void evmovdqul(Address dst, XMMRegister src, int vector_len);
1479 void evmovdqul(XMMRegister dst, Address src, int vector_len);
1480 void evmovdqul(XMMRegister dst, XMMRegister src, int vector_len);
1481 void evmovdquq(Address dst, XMMRegister src, int vector_len);
1482 void evmovdquq(XMMRegister dst, Address src, int vector_len);
1483 void evmovdquq(XMMRegister dst, XMMRegister src, int vector_len);
1484
1485 // Move lower 64bit to high 64bit in 128bit register
1486 void movlhps(XMMRegister dst, XMMRegister src);
1487
1488 void movl(Register dst, int32_t imm32);
1489 void movl(Address dst, int32_t imm32);
1490 void movl(Register dst, Register src);
1491 void movl(Register dst, Address src);
1492 void movl(Address dst, Register src);
1493
1494 // These dummies prevent using movl from converting a zero (like NULL) into Register
1495 // by giving the compiler two choices it can't resolve
1496
1497 void movl(Address dst, void* junk);
1498 void movl(Register dst, void* junk);
1499
1500 #ifdef _LP64
1501 void movq(Register dst, Register src);
1502 void movq(Register dst, Address src);
1503 void movq(Address dst, Register src);
1504 #endif
1505
1506 void movq(Address dst, MMXRegister src );
1507 void movq(MMXRegister dst, Address src );
1508
1509 #ifdef _LP64
1510 // These dummies prevent using movq from converting a zero (like NULL) into Register
1511 // by giving the compiler two choices it can't resolve
1512
1513 void movq(Address dst, void* dummy);
1514 void movq(Register dst, void* dummy);
1515 #endif
1516
1517 // Move Quadword
1518 void movq(Address dst, XMMRegister src);
1519 void movq(XMMRegister dst, Address src);
1520
1521 void movsbl(Register dst, Address src);
1522 void movsbl(Register dst, Register src);
1523
1524 #ifdef _LP64
1525 void movsbq(Register dst, Address src);
1526 void movsbq(Register dst, Register src);
1527
1528 // Move signed 32bit immediate to 64bit extending sign
1529 void movslq(Address dst, int32_t imm64);
1530 void movslq(Register dst, int32_t imm64);
1531
1532 void movslq(Register dst, Address src);
1533 void movslq(Register dst, Register src);
1534 void movslq(Register dst, void* src); // Dummy declaration to cause NULL to be ambiguous
1535 #endif
1536
1537 void movswl(Register dst, Address src);
1538 void movswl(Register dst, Register src);
1539
1540 #ifdef _LP64
1541 void movswq(Register dst, Address src);
1542 void movswq(Register dst, Register src);
1543 #endif
1544
1545 void movw(Address dst, int imm16);
1546 void movw(Register dst, Address src);
1547 void movw(Address dst, Register src);
1548
1549 void movzbl(Register dst, Address src);
1550 void movzbl(Register dst, Register src);
1551
1552 #ifdef _LP64
1553 void movzbq(Register dst, Address src);
1554 void movzbq(Register dst, Register src);
1555 #endif
1556
1557 void movzwl(Register dst, Address src);
1558 void movzwl(Register dst, Register src);
1559
1560 #ifdef _LP64
1561 void movzwq(Register dst, Address src);
1562 void movzwq(Register dst, Register src);
1563 #endif
1564
1565 // Unsigned multiply with RAX destination register
1566 void mull(Address src);
1567 void mull(Register src);
1568
1569 #ifdef _LP64
1570 void mulq(Address src);
1571 void mulq(Register src);
1572 void mulxq(Register dst1, Register dst2, Register src);
1573 #endif
1574
1575 // Multiply Scalar Double-Precision Floating-Point Values
1576 void mulsd(XMMRegister dst, Address src);
1577 void mulsd(XMMRegister dst, XMMRegister src);
1578
1579 // Multiply Scalar Single-Precision Floating-Point Values
1580 void mulss(XMMRegister dst, Address src);
1581 void mulss(XMMRegister dst, XMMRegister src);
1582
1583 void negl(Register dst);
1584
1585 #ifdef _LP64
1586 void negq(Register dst);
1587 #endif
1588
1589 void nop(int i = 1);
1590
1591 void notl(Register dst);
1592
1593 #ifdef _LP64
1594 void notq(Register dst);
1595
1596 void btsq(Address dst, int imm8);
1597 void btrq(Address dst, int imm8);
1598 #endif
1599
1600 void orl(Address dst, int32_t imm32);
1601 void orl(Register dst, int32_t imm32);
1602 void orl(Register dst, Address src);
1603 void orl(Register dst, Register src);
1604 void orl(Address dst, Register src);
1605
1606 void orb(Address dst, int imm8);
1607
1608 void orq(Address dst, int32_t imm32);
1609 void orq(Register dst, int32_t imm32);
1610 void orq(Register dst, Address src);
1611 void orq(Register dst, Register src);
1612
1613 // Pack with unsigned saturation
1614 void packuswb(XMMRegister dst, XMMRegister src);
1615 void packuswb(XMMRegister dst, Address src);
1616 void vpackuswb(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
1617
  // Permutation of 64bit words
1619 void vpermq(XMMRegister dst, XMMRegister src, int imm8, int vector_len);
1620 void vpermq(XMMRegister dst, XMMRegister src, int imm8);
1621 void vpermq(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
1622 void vperm2i128(XMMRegister dst, XMMRegister nds, XMMRegister src, int imm8);
1623 void vperm2f128(XMMRegister dst, XMMRegister nds, XMMRegister src, int imm8);
1624 void evpermi2q(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
1625
1626 void pause();
1627
1628 // Undefined Instruction
1629 void ud2();
1630
1631 // SSE4.2 string instructions
1632 void pcmpestri(XMMRegister xmm1, XMMRegister xmm2, int imm8);
1633 void pcmpestri(XMMRegister xmm1, Address src, int imm8);
1634
1635 void pcmpeqb(XMMRegister dst, XMMRegister src);
1636 void vpcmpeqb(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
1637 void evpcmpeqb(KRegister kdst, XMMRegister nds, XMMRegister src, int vector_len);
1638 void evpcmpeqb(KRegister kdst, XMMRegister nds, Address src, int vector_len);
1639 void evpcmpeqb(KRegister kdst, KRegister mask, XMMRegister nds, Address src, int vector_len);
1640
1641 void evpcmpgtb(KRegister kdst, XMMRegister nds, Address src, int vector_len);
1642 void evpcmpgtb(KRegister kdst, KRegister mask, XMMRegister nds, Address src, int vector_len);
1643
1644 void evpcmpuw(KRegister kdst, XMMRegister nds, XMMRegister src, ComparisonPredicate vcc, int vector_len);
1645 void evpcmpuw(KRegister kdst, KRegister mask, XMMRegister nds, XMMRegister src, ComparisonPredicate of, int vector_len);
1646 void evpcmpuw(KRegister kdst, XMMRegister nds, Address src, ComparisonPredicate vcc, int vector_len);
1647
1648 void pcmpeqw(XMMRegister dst, XMMRegister src);
1649 void vpcmpeqw(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
1650 void evpcmpeqw(KRegister kdst, XMMRegister nds, XMMRegister src, int vector_len);
1651 void evpcmpeqw(KRegister kdst, XMMRegister nds, Address src, int vector_len);
1652
1653 void pcmpeqd(XMMRegister dst, XMMRegister src);
1654 void vpcmpeqd(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
1655 void evpcmpeqd(KRegister kdst, XMMRegister nds, XMMRegister src, int vector_len);
1656 void evpcmpeqd(KRegister kdst, XMMRegister nds, Address src, int vector_len);
1657
1658 void pcmpeqq(XMMRegister dst, XMMRegister src);
1659 void vpcmpeqq(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
1660 void evpcmpeqq(KRegister kdst, XMMRegister nds, XMMRegister src, int vector_len);
1661 void evpcmpeqq(KRegister kdst, XMMRegister nds, Address src, int vector_len);
1662
1663 void pmovmskb(Register dst, XMMRegister src);
1664 void vpmovmskb(Register dst, XMMRegister src);
1665
1666 // SSE 4.1 extract
1667 void pextrd(Register dst, XMMRegister src, int imm8);
1668 void pextrq(Register dst, XMMRegister src, int imm8);
1669 void pextrd(Address dst, XMMRegister src, int imm8);
1670 void pextrq(Address dst, XMMRegister src, int imm8);
1671 void pextrb(Address dst, XMMRegister src, int imm8);
1672 // SSE 2 extract
1673 void pextrw(Register dst, XMMRegister src, int imm8);
1674 void pextrw(Address dst, XMMRegister src, int imm8);
1675
1676 // SSE 4.1 insert
1677 void pinsrd(XMMRegister dst, Register src, int imm8);
1678 void pinsrq(XMMRegister dst, Register src, int imm8);
1679 void pinsrd(XMMRegister dst, Address src, int imm8);
1680 void pinsrq(XMMRegister dst, Address src, int imm8);
1681 void pinsrb(XMMRegister dst, Address src, int imm8);
1682 // SSE 2 insert
1683 void pinsrw(XMMRegister dst, Register src, int imm8);
1684 void pinsrw(XMMRegister dst, Address src, int imm8);
1685
1686 // SSE4.1 packed move
1687 void pmovzxbw(XMMRegister dst, XMMRegister src);
1688 void pmovzxbw(XMMRegister dst, Address src);
1689
1690 void vpmovzxbw(XMMRegister dst, Address src, int vector_len);
1691 void vpmovzxbw(XMMRegister dst, XMMRegister src, int vector_len);
1692 void evpmovzxbw(XMMRegister dst, KRegister mask, Address src, int vector_len);
1693
1694 void evpmovwb(Address dst, XMMRegister src, int vector_len);
1695 void evpmovwb(Address dst, KRegister mask, XMMRegister src, int vector_len);
1696
1697 void vpmovzxwd(XMMRegister dst, XMMRegister src, int vector_len);
1698
1699 void evpmovdb(Address dst, XMMRegister src, int vector_len);
1700
1701 // Sign extend moves
1702 void pmovsxbw(XMMRegister dst, XMMRegister src);
1703 void vpmovsxbw(XMMRegister dst, XMMRegister src, int vector_len);
1704
1705 // Multiply add
1706 void pmaddwd(XMMRegister dst, XMMRegister src);
1707 void vpmaddwd(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
1708 // Multiply add accumulate
1709 void evpdpwssd(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
1710
1711 #ifndef _LP64 // no 32bit push/pop on amd64
1712 void popl(Address dst);
1713 #endif
1714
1715 #ifdef _LP64
1716 void popq(Address dst);
1717 #endif
1718
1719 void popcntl(Register dst, Address src);
1720 void popcntl(Register dst, Register src);
1721
1722 void vpopcntd(XMMRegister dst, XMMRegister src, int vector_len);
1723
1724 #ifdef _LP64
1725 void popcntq(Register dst, Address src);
1726 void popcntq(Register dst, Register src);
1727 #endif
1728
1729 // Prefetches (SSE, SSE2, 3DNOW only)
1730
1731 void prefetchnta(Address src);
1732 void prefetchr(Address src);
1733 void prefetcht0(Address src);
1734 void prefetcht1(Address src);
1735 void prefetcht2(Address src);
1736 void prefetchw(Address src);
1737
1738 // Shuffle Bytes
1739 void pshufb(XMMRegister dst, XMMRegister src);
1740 void pshufb(XMMRegister dst, Address src);
1741 void vpshufb(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
1742
1743 // Shuffle Packed Doublewords
1744 void pshufd(XMMRegister dst, XMMRegister src, int mode);
1745 void pshufd(XMMRegister dst, Address src, int mode);
1746 void vpshufd(XMMRegister dst, XMMRegister src, int mode, int vector_len);
1747
1748 // Shuffle Packed Low Words
1749 void pshuflw(XMMRegister dst, XMMRegister src, int mode);
1750 void pshuflw(XMMRegister dst, Address src, int mode);
1751
1752 // Shuffle packed values at 128 bit granularity
1753 void evshufi64x2(XMMRegister dst, XMMRegister nds, XMMRegister src, int imm8, int vector_len);
1754
1755 // Shift Logical DoubleQuadword Right by bytes, immediate
1756 void psrldq(XMMRegister dst, int shift);
1757 // Shift Logical DoubleQuadword Left by bytes, immediate
1758 void pslldq(XMMRegister dst, int shift);
1759
1760 // Logical Compare 128bit
1761 void ptest(XMMRegister dst, XMMRegister src);
1762 void ptest(XMMRegister dst, Address src);
1763 // Logical Compare 256bit
1764 void vptest(XMMRegister dst, XMMRegister src);
1765 void vptest(XMMRegister dst, Address src);
1766
1767 // Interleave Low Bytes
1768 void punpcklbw(XMMRegister dst, XMMRegister src);
1769 void punpcklbw(XMMRegister dst, Address src);
1770
1771 // Interleave Low Doublewords
1772 void punpckldq(XMMRegister dst, XMMRegister src);
1773 void punpckldq(XMMRegister dst, Address src);
1774
1775 // Interleave Low Quadwords
1776 void punpcklqdq(XMMRegister dst, XMMRegister src);
1777
1778 #ifndef _LP64 // no 32bit push/pop on amd64
1779 void pushl(Address src);
1780 #endif
1781
1782 void pushq(Address src);
1783
1784 void rcll(Register dst, int imm8);
1785
1786 void rclq(Register dst, int imm8);
1787
1788 void rcrq(Register dst, int imm8);
1789
1790 void rcpps(XMMRegister dst, XMMRegister src);
1791
1792 void rcpss(XMMRegister dst, XMMRegister src);
1793
1794 void rdtsc();
1795
1796 void ret(int imm16);
1797
1798 #ifdef _LP64
1799 void rorq(Register dst, int imm8);
1800 void rorxq(Register dst, Register src, int imm8);
1801 void rorxd(Register dst, Register src, int imm8);
1802 #endif
1803
1804 void sahf();
1805
1806 void sarl(Register dst, int imm8);
1807 void sarl(Register dst);
1808
1809 void sarq(Register dst, int imm8);
1810 void sarq(Register dst);
1811
1812 void sbbl(Address dst, int32_t imm32);
1813 void sbbl(Register dst, int32_t imm32);
1814 void sbbl(Register dst, Address src);
1815 void sbbl(Register dst, Register src);
1816
1817 void sbbq(Address dst, int32_t imm32);
1818 void sbbq(Register dst, int32_t imm32);
1819 void sbbq(Register dst, Address src);
1820 void sbbq(Register dst, Register src);
1821
1822 void setb(Condition cc, Register dst);
1823
1824 void palignr(XMMRegister dst, XMMRegister src, int imm8);
1825 void vpalignr(XMMRegister dst, XMMRegister src1, XMMRegister src2, int imm8, int vector_len);
1826 void evalignq(XMMRegister dst, XMMRegister nds, XMMRegister src, uint8_t imm8);
1827
1828 void pblendw(XMMRegister dst, XMMRegister src, int imm8);
1829
1830 void sha1rnds4(XMMRegister dst, XMMRegister src, int imm8);
1831 void sha1nexte(XMMRegister dst, XMMRegister src);
1832 void sha1msg1(XMMRegister dst, XMMRegister src);
1833 void sha1msg2(XMMRegister dst, XMMRegister src);
1834 // xmm0 is an implicit additional source for the following instruction.
1835 void sha256rnds2(XMMRegister dst, XMMRegister src);
1836 void sha256msg1(XMMRegister dst, XMMRegister src);
1837 void sha256msg2(XMMRegister dst, XMMRegister src);
1838
1839 void shldl(Register dst, Register src);
1840 void shldl(Register dst, Register src, int8_t imm8);
1841
1842 void shll(Register dst, int imm8);
1843 void shll(Register dst);
1844
1845 void shlq(Register dst, int imm8);
1846 void shlq(Register dst);
1847
1848 void shrdl(Register dst, Register src);
1849
1850 void shrl(Register dst, int imm8);
1851 void shrl(Register dst);
1852
1853 void shrq(Register dst, int imm8);
1854 void shrq(Register dst);
1855
1856 void smovl(); // QQQ generic?
1857
1858 // Compute Square Root of Scalar Double-Precision Floating-Point Value
1859 void sqrtsd(XMMRegister dst, Address src);
1860 void sqrtsd(XMMRegister dst, XMMRegister src);
1861
1862 void roundsd(XMMRegister dst, Address src, int32_t rmode);
1863 void roundsd(XMMRegister dst, XMMRegister src, int32_t rmode);
1864
1865 // Compute Square Root of Scalar Single-Precision Floating-Point Value
1866 void sqrtss(XMMRegister dst, Address src);
1867 void sqrtss(XMMRegister dst, XMMRegister src);
1868
1869 void std();
1870
1871 void stmxcsr( Address dst );
1872
1873 void subl(Address dst, int32_t imm32);
1874 void subl(Address dst, Register src);
1875 void subl(Register dst, int32_t imm32);
1876 void subl(Register dst, Address src);
1877 void subl(Register dst, Register src);
1878
1879 void subq(Address dst, int32_t imm32);
1880 void subq(Address dst, Register src);
1881 void subq(Register dst, int32_t imm32);
1882 void subq(Register dst, Address src);
1883 void subq(Register dst, Register src);
1884
1885 // Force generation of a 4-byte immediate value even if it fits into 8 bits
1886 void subl_imm32(Register dst, int32_t imm32);
1887 void subq_imm32(Register dst, int32_t imm32);
1888
1889 // Subtract Scalar Double-Precision Floating-Point Values
1890 void subsd(XMMRegister dst, Address src);
1891 void subsd(XMMRegister dst, XMMRegister src);
1892
1893 // Subtract Scalar Single-Precision Floating-Point Values
1894 void subss(XMMRegister dst, Address src);
1895 void subss(XMMRegister dst, XMMRegister src);
1896
1897 void testb(Register dst, int imm8);
1898 void testb(Address dst, int imm8);
1899
1900 void testl(Register dst, int32_t imm32);
1901 void testl(Register dst, Register src);
1902 void testl(Register dst, Address src);
1903
1904 void testq(Register dst, int32_t imm32);
1905 void testq(Register dst, Register src);
1906 void testq(Register dst, Address src);
1907
1908 // BMI - count trailing zeros
1909 void tzcntl(Register dst, Register src);
1910 void tzcntq(Register dst, Register src);
1911
1912 // Unordered Compare Scalar Double-Precision Floating-Point Values and set EFLAGS
1913 void ucomisd(XMMRegister dst, Address src);
1914 void ucomisd(XMMRegister dst, XMMRegister src);
1915
1916 // Unordered Compare Scalar Single-Precision Floating-Point Values and set EFLAGS
1917 void ucomiss(XMMRegister dst, Address src);
1918 void ucomiss(XMMRegister dst, XMMRegister src);
1919
1920 void xabort(int8_t imm8);
1921
1922 void xaddb(Address dst, Register src);
1923 void xaddw(Address dst, Register src);
1924 void xaddl(Address dst, Register src);
1925 void xaddq(Address dst, Register src);
1926
1927 void xbegin(Label& abort, relocInfo::relocType rtype = relocInfo::none);
1928
1929 void xchgb(Register reg, Address adr);
1930 void xchgw(Register reg, Address adr);
1931 void xchgl(Register reg, Address adr);
1932 void xchgl(Register dst, Register src);
1933
1934 void xchgq(Register reg, Address adr);
1935 void xchgq(Register dst, Register src);
1936
1937 void xend();
1938
1939 // Get Value of Extended Control Register
1940 void xgetbv();
1941
1942 void xorl(Register dst, int32_t imm32);
1943 void xorl(Register dst, Address src);
1944 void xorl(Register dst, Register src);
1945
1946 void xorb(Register dst, Address src);
1947
1948 void xorq(Register dst, Address src);
1949 void xorq(Register dst, Register src);
1950
1951 void set_byte_if_not_zero(Register dst); // sets reg to 1 if not zero, otherwise 0
1952
1953 // AVX 3-operand scalar instructions (encoded with the VEX prefix)
1954
1955 void vaddsd(XMMRegister dst, XMMRegister nds, Address src);
1956 void vaddsd(XMMRegister dst, XMMRegister nds, XMMRegister src);
1957 void vaddss(XMMRegister dst, XMMRegister nds, Address src);
1958 void vaddss(XMMRegister dst, XMMRegister nds, XMMRegister src);
1959 void vdivsd(XMMRegister dst, XMMRegister nds, Address src);
1960 void vdivsd(XMMRegister dst, XMMRegister nds, XMMRegister src);
1961 void vdivss(XMMRegister dst, XMMRegister nds, Address src);
1962 void vdivss(XMMRegister dst, XMMRegister nds, XMMRegister src);
1963 void vfmadd231sd(XMMRegister dst, XMMRegister nds, XMMRegister src);
1964 void vfmadd231ss(XMMRegister dst, XMMRegister nds, XMMRegister src);
1965 void vmulsd(XMMRegister dst, XMMRegister nds, Address src);
1966 void vmulsd(XMMRegister dst, XMMRegister nds, XMMRegister src);
1967 void vmulss(XMMRegister dst, XMMRegister nds, Address src);
1968 void vmulss(XMMRegister dst, XMMRegister nds, XMMRegister src);
1969 void vsubsd(XMMRegister dst, XMMRegister nds, Address src);
1970 void vsubsd(XMMRegister dst, XMMRegister nds, XMMRegister src);
1971 void vsubss(XMMRegister dst, XMMRegister nds, Address src);
1972 void vsubss(XMMRegister dst, XMMRegister nds, XMMRegister src);
1973
1974 void vmaxss(XMMRegister dst, XMMRegister nds, XMMRegister src);
1975 void vmaxsd(XMMRegister dst, XMMRegister nds, XMMRegister src);
1976 void vminss(XMMRegister dst, XMMRegister nds, XMMRegister src);
1977 void vminsd(XMMRegister dst, XMMRegister nds, XMMRegister src);
1978
1979 void shlxl(Register dst, Register src1, Register src2);
1980 void shlxq(Register dst, Register src1, Register src2);
1981
1982 //====================VECTOR ARITHMETIC=====================================
1983
1984 // Add Packed Floating-Point Values
1985 void addpd(XMMRegister dst, XMMRegister src);
1986 void addpd(XMMRegister dst, Address src);
1987 void addps(XMMRegister dst, XMMRegister src);
1988 void vaddpd(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
1989 void vaddps(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
1990 void vaddpd(XMMRegister dst, XMMRegister nds, Address src, int vector_len);
1991 void vaddps(XMMRegister dst, XMMRegister nds, Address src, int vector_len);
1992
1993 // Subtract Packed Floating-Point Values
1994 void subpd(XMMRegister dst, XMMRegister src);
1995 void subps(XMMRegister dst, XMMRegister src);
1996 void vsubpd(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
1997 void vsubps(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
1998 void vsubpd(XMMRegister dst, XMMRegister nds, Address src, int vector_len);
1999 void vsubps(XMMRegister dst, XMMRegister nds, Address src, int vector_len);
2000
2001 // Multiply Packed Floating-Point Values
2002 void mulpd(XMMRegister dst, XMMRegister src);
2003 void mulpd(XMMRegister dst, Address src);
2004 void mulps(XMMRegister dst, XMMRegister src);
2005 void vmulpd(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
2006 void vmulps(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
2007 void vmulpd(XMMRegister dst, XMMRegister nds, Address src, int vector_len);
2008 void vmulps(XMMRegister dst, XMMRegister nds, Address src, int vector_len);
2009
2010 void vfmadd231pd(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
2011 void vfmadd231ps(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
2012 void vfmadd231pd(XMMRegister dst, XMMRegister nds, Address src, int vector_len);
2013 void vfmadd231ps(XMMRegister dst, XMMRegister nds, Address src, int vector_len);
2014
2015 // Divide Packed Floating-Point Values
2016 void divpd(XMMRegister dst, XMMRegister src);
2017 void divps(XMMRegister dst, XMMRegister src);
2018 void vdivpd(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
2019 void vdivps(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
2020 void vdivpd(XMMRegister dst, XMMRegister nds, Address src, int vector_len);
2021 void vdivps(XMMRegister dst, XMMRegister nds, Address src, int vector_len);
2022
2023 // Sqrt Packed Floating-Point Values
2024 void vsqrtpd(XMMRegister dst, XMMRegister src, int vector_len);
2025 void vsqrtpd(XMMRegister dst, Address src, int vector_len);
2026 void vsqrtps(XMMRegister dst, XMMRegister src, int vector_len);
2027 void vsqrtps(XMMRegister dst, Address src, int vector_len);
2028
2029 // Round Packed Double-Precision Values
2030 void vroundpd(XMMRegister dst, XMMRegister src, int32_t rmode, int vector_len);
2031 void vroundpd(XMMRegister dst, Address src, int32_t rmode, int vector_len);
2032 void vrndscalepd(XMMRegister dst, XMMRegister src, int32_t rmode, int vector_len);
2033 void vrndscalepd(XMMRegister dst, Address src, int32_t rmode, int vector_len);
2034
2035 // Bitwise Logical AND of Packed Floating-Point Values
2036 void andpd(XMMRegister dst, XMMRegister src);
2037 void andps(XMMRegister dst, XMMRegister src);
2038 void vandpd(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
2039 void vandps(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
2040 void vandpd(XMMRegister dst, XMMRegister nds, Address src, int vector_len);
2041 void vandps(XMMRegister dst, XMMRegister nds, Address src, int vector_len);
2042
2043 void unpckhpd(XMMRegister dst, XMMRegister src);
2044 void unpcklpd(XMMRegister dst, XMMRegister src);
2045
2046 // Bitwise Logical XOR of Packed Floating-Point Values
2047 void xorpd(XMMRegister dst, XMMRegister src);
2048 void xorps(XMMRegister dst, XMMRegister src);
2049 void vxorpd(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
2050 void vxorps(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
2051 void vxorpd(XMMRegister dst, XMMRegister nds, Address src, int vector_len);
2052 void vxorps(XMMRegister dst, XMMRegister nds, Address src, int vector_len);
2053
2054 // Add horizontal packed integers
2055 void vphaddw(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
2056 void vphaddd(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
2057 void phaddw(XMMRegister dst, XMMRegister src);
2058 void phaddd(XMMRegister dst, XMMRegister src);
2059
2060 // Add packed integers
2061 void paddb(XMMRegister dst, XMMRegister src);
2062 void paddw(XMMRegister dst, XMMRegister src);
2063 void paddd(XMMRegister dst, XMMRegister src);
2064 void paddd(XMMRegister dst, Address src);
2065 void paddq(XMMRegister dst, XMMRegister src);
2066 void vpaddb(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
2067 void vpaddw(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
2068 void vpaddd(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
2069 void vpaddq(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
2070 void vpaddb(XMMRegister dst, XMMRegister nds, Address src, int vector_len);
2071 void vpaddw(XMMRegister dst, XMMRegister nds, Address src, int vector_len);
2072 void vpaddd(XMMRegister dst, XMMRegister nds, Address src, int vector_len);
2073 void vpaddq(XMMRegister dst, XMMRegister nds, Address src, int vector_len);
2074
2075 // Sub packed integers
2076 void psubb(XMMRegister dst, XMMRegister src);
2077 void psubw(XMMRegister dst, XMMRegister src);
2078 void psubd(XMMRegister dst, XMMRegister src);
2079 void psubq(XMMRegister dst, XMMRegister src);
2080 void vpsubb(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
2081 void vpsubw(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
2082 void vpsubd(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
2083 void vpsubq(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
2084 void vpsubb(XMMRegister dst, XMMRegister nds, Address src, int vector_len);
2085 void vpsubw(XMMRegister dst, XMMRegister nds, Address src, int vector_len);
2086 void vpsubd(XMMRegister dst, XMMRegister nds, Address src, int vector_len);
2087 void vpsubq(XMMRegister dst, XMMRegister nds, Address src, int vector_len);
2088
2089 // Multiply packed integers (shorts and ints; packed longs only via the AVX-512 vpmullq forms)
2090 void pmullw(XMMRegister dst, XMMRegister src);
2091 void pmulld(XMMRegister dst, XMMRegister src);
2092 void vpmullw(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
2093 void vpmulld(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
2094 void vpmullq(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
2095 void vpmullw(XMMRegister dst, XMMRegister nds, Address src, int vector_len);
2096 void vpmulld(XMMRegister dst, XMMRegister nds, Address src, int vector_len);
2097 void vpmullq(XMMRegister dst, XMMRegister nds, Address src, int vector_len);
2098
2099 // Shift left packed integers
2100 void psllw(XMMRegister dst, int shift);
2101 void pslld(XMMRegister dst, int shift);
2102 void psllq(XMMRegister dst, int shift);
2103 void psllw(XMMRegister dst, XMMRegister shift);
2104 void pslld(XMMRegister dst, XMMRegister shift);
2105 void psllq(XMMRegister dst, XMMRegister shift);
2106 void vpsllw(XMMRegister dst, XMMRegister src, int shift, int vector_len);
2107 void vpslld(XMMRegister dst, XMMRegister src, int shift, int vector_len);
2108 void vpsllq(XMMRegister dst, XMMRegister src, int shift, int vector_len);
2109 void vpsllw(XMMRegister dst, XMMRegister src, XMMRegister shift, int vector_len);
2110 void vpslld(XMMRegister dst, XMMRegister src, XMMRegister shift, int vector_len);
2111 void vpsllq(XMMRegister dst, XMMRegister src, XMMRegister shift, int vector_len);
2112 void vpslldq(XMMRegister dst, XMMRegister src, int shift, int vector_len);
2113
2114 // Logical shift right packed integers
2115 void psrlw(XMMRegister dst, int shift);
2116 void psrld(XMMRegister dst, int shift);
2117 void psrlq(XMMRegister dst, int shift);
2118 void psrlw(XMMRegister dst, XMMRegister shift);
2119 void psrld(XMMRegister dst, XMMRegister shift);
2120 void psrlq(XMMRegister dst, XMMRegister shift);
2121 void vpsrlw(XMMRegister dst, XMMRegister src, int shift, int vector_len);
2122 void vpsrld(XMMRegister dst, XMMRegister src, int shift, int vector_len);
2123 void vpsrlq(XMMRegister dst, XMMRegister src, int shift, int vector_len);
2124 void vpsrlw(XMMRegister dst, XMMRegister src, XMMRegister shift, int vector_len);
2125 void vpsrld(XMMRegister dst, XMMRegister src, XMMRegister shift, int vector_len);
2126 void vpsrlq(XMMRegister dst, XMMRegister src, XMMRegister shift, int vector_len);
2127 void vpsrldq(XMMRegister dst, XMMRegister src, int shift, int vector_len);
2128 void evpsrlvw(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
2129 void evpsllvw(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
2130
2131 // Arithmetic shift right packed integers (shorts and ints; packed longs only via the AVX-512 evpsraq forms)
2132 void psraw(XMMRegister dst, int shift);
2133 void psrad(XMMRegister dst, int shift);
2134 void psraw(XMMRegister dst, XMMRegister shift);
2135 void psrad(XMMRegister dst, XMMRegister shift);
2136 void vpsraw(XMMRegister dst, XMMRegister src, int shift, int vector_len);
2137 void vpsrad(XMMRegister dst, XMMRegister src, int shift, int vector_len);
2138 void vpsraw(XMMRegister dst, XMMRegister src, XMMRegister shift, int vector_len);
2139 void vpsrad(XMMRegister dst, XMMRegister src, XMMRegister shift, int vector_len);
2140 void evpsraq(XMMRegister dst, XMMRegister src, int shift, int vector_len);
2141 void evpsraq(XMMRegister dst, XMMRegister src, XMMRegister shift, int vector_len);
2142
2143 // And packed integers
2144 void pand(XMMRegister dst, XMMRegister src);
2145 void vpand(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
2146 void vpand(XMMRegister dst, XMMRegister nds, Address src, int vector_len);
2147 void vpandq(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
2148
2149 // Andn packed integers
2150 void pandn(XMMRegister dst, XMMRegister src);
2151 void vpandn(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
2152
2153 // Or packed integers
2154 void por(XMMRegister dst, XMMRegister src);
2155 void vpor(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
2156 void vpor(XMMRegister dst, XMMRegister nds, Address src, int vector_len);
2157 void vporq(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
2158
2159 // Xor packed integers
2160 void pxor(XMMRegister dst, XMMRegister src);
2161 void vpxor(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
2162 void vpxor(XMMRegister dst, XMMRegister nds, Address src, int vector_len);
2163 void evpxorq(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
2164 void evpxorq(XMMRegister dst, XMMRegister nds, Address src, int vector_len);
2165
2166
2167 // vinserti forms
2168 void vinserti128(XMMRegister dst, XMMRegister nds, XMMRegister src, uint8_t imm8);
2169 void vinserti128(XMMRegister dst, XMMRegister nds, Address src, uint8_t imm8);
2170 void vinserti32x4(XMMRegister dst, XMMRegister nds, XMMRegister src, uint8_t imm8);
2171 void vinserti32x4(XMMRegister dst, XMMRegister nds, Address src, uint8_t imm8);
2172 void vinserti64x4(XMMRegister dst, XMMRegister nds, XMMRegister src, uint8_t imm8);
2173
2174 // vinsertf forms
2175 void vinsertf128(XMMRegister dst, XMMRegister nds, XMMRegister src, uint8_t imm8);
2176 void vinsertf128(XMMRegister dst, XMMRegister nds, Address src, uint8_t imm8);
2177 void vinsertf32x4(XMMRegister dst, XMMRegister nds, XMMRegister src, uint8_t imm8);
2178 void vinsertf32x4(XMMRegister dst, XMMRegister nds, Address src, uint8_t imm8);
2179 void vinsertf64x4(XMMRegister dst, XMMRegister nds, XMMRegister src, uint8_t imm8);
2180 void vinsertf64x4(XMMRegister dst, XMMRegister nds, Address src, uint8_t imm8);
2181
2182 // vextracti forms
2183 void vextracti128(XMMRegister dst, XMMRegister src, uint8_t imm8);
2184 void vextracti128(Address dst, XMMRegister src, uint8_t imm8);
2185 void vextracti32x4(XMMRegister dst, XMMRegister src, uint8_t imm8);
2186 void vextracti32x4(Address dst, XMMRegister src, uint8_t imm8);
2187 void vextracti64x2(XMMRegister dst, XMMRegister src, uint8_t imm8);
2188 void vextracti64x4(XMMRegister dst, XMMRegister src, uint8_t imm8);
2189 void vextracti64x4(Address dst, XMMRegister src, uint8_t imm8);
2190
2191 // vextractf forms
2192 void vextractf128(XMMRegister dst, XMMRegister src, uint8_t imm8);
2193 void vextractf128(Address dst, XMMRegister src, uint8_t imm8);
2194 void vextractf32x4(XMMRegister dst, XMMRegister src, uint8_t imm8);
2195 void vextractf32x4(Address dst, XMMRegister src, uint8_t imm8);
2196 void vextractf64x2(XMMRegister dst, XMMRegister src, uint8_t imm8);
2197 void vextractf64x4(XMMRegister dst, XMMRegister src, uint8_t imm8);
2198 void vextractf64x4(Address dst, XMMRegister src, uint8_t imm8);
2199
2200 // xmm/mem sourced byte/word/dword/qword replicate
2201 void vpbroadcastb(XMMRegister dst, XMMRegister src, int vector_len);
2202 void vpbroadcastb(XMMRegister dst, Address src, int vector_len);
2203 void vpbroadcastw(XMMRegister dst, XMMRegister src, int vector_len);
2204 void vpbroadcastw(XMMRegister dst, Address src, int vector_len);
2205 void vpbroadcastd(XMMRegister dst, XMMRegister src, int vector_len);
2206 void vpbroadcastd(XMMRegister dst, Address src, int vector_len);
2207 void vpbroadcastq(XMMRegister dst, XMMRegister src, int vector_len);
2208 void vpbroadcastq(XMMRegister dst, Address src, int vector_len);
2209
2210 void evbroadcasti64x2(XMMRegister dst, XMMRegister src, int vector_len);
2211 void evbroadcasti64x2(XMMRegister dst, Address src, int vector_len);
2212
2213 // scalar single/double precision replicate
2214 void vpbroadcastss(XMMRegister dst, XMMRegister src, int vector_len);
2215 void vpbroadcastss(XMMRegister dst, Address src, int vector_len);
2216 void vpbroadcastsd(XMMRegister dst, XMMRegister src, int vector_len);
2217 void vpbroadcastsd(XMMRegister dst, Address src, int vector_len);
2218
2219 // gpr sourced byte/word/dword/qword replicate
2220 void evpbroadcastb(XMMRegister dst, Register src, int vector_len);
2221 void evpbroadcastw(XMMRegister dst, Register src, int vector_len);
2222 void evpbroadcastd(XMMRegister dst, Register src, int vector_len);
2223 void evpbroadcastq(XMMRegister dst, Register src, int vector_len);
2224
2225 void evpgatherdd(XMMRegister dst, KRegister k1, Address src, int vector_len);
2226
2227 // Carry-Less Multiplication Quadword
2228 void pclmulqdq(XMMRegister dst, XMMRegister src, int mask);
2229 void vpclmulqdq(XMMRegister dst, XMMRegister nds, XMMRegister src, int mask);
2230 void evpclmulqdq(XMMRegister dst, XMMRegister nds, XMMRegister src, int mask, int vector_len);
2231 // AVX instruction which is used to clear the upper 128 bits of YMM registers and
2232 // to avoid the transition penalty between AVX and SSE states. There is no
2233 // penalty if legacy SSE instructions are encoded using the VEX prefix because
2234 // they always clear the upper 128 bits. It should be used before calling
2235 // runtime code and native libraries.
2236 void vzeroupper();
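// Illustrative sketch (not part of this interface): a hedged example of how a
// MacroAssembler-style caller might use vzeroupper before leaving AVX-enabled
// compiled code. The names _masm, __ and stub_entry are assumptions made for
// illustration only.
//
//   #define __ _masm->
//   __ vzeroupper();                      // scrub the upper YMM/ZMM bits
//   __ call(RuntimeAddress(stub_entry));  // legacy-SSE runtime code now runs without the transition penalty
//   #undef __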
2237
2238 // AVX support for vectorized conditional moves (float/double). The cmp/blend instructions below are only used as coupled pairs (see the illustrative sketch after the declarations).
2239 void cmppd(XMMRegister dst, XMMRegister nds, XMMRegister src, int cop, int vector_len);
2240 void blendvpd(XMMRegister dst, XMMRegister nds, XMMRegister src1, XMMRegister src2, int vector_len);
2241 void cmpps(XMMRegister dst, XMMRegister nds, XMMRegister src, int cop, int vector_len);
2242 void blendvps(XMMRegister dst, XMMRegister nds, XMMRegister src1, XMMRegister src2, int vector_len);
2243 void vpblendd(XMMRegister dst, XMMRegister nds, XMMRegister src, int imm8, int vector_len);
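// Illustrative sketch (assumption, not taken from this file): a vectorized
// conditional move of packed doubles could be emitted roughly as the coupled
// pair below, where xmm_a/xmm_b are the compared inputs, xmm_src1/xmm_src2 the
// selected values, and the compare result doubles as the blend mask:
//
//   cmppd(xmm_mask, xmm_a, xmm_b, cop, Assembler::AVX_256bit);              // per-lane mask = (a cop b)
//   blendvpd(xmm_dst, xmm_src1, xmm_src2, xmm_mask, Assembler::AVX_256bit); // dst = mask ? src2 : src1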
2244
2245 protected:
2246 // The following instructions require 16-byte address alignment in SSE mode.
2247 // They should only be called from the corresponding MacroAssembler instructions.
2248 void andpd(XMMRegister dst, Address src);
2249 void andps(XMMRegister dst, Address src);
2250 void xorpd(XMMRegister dst, Address src);
2251 void xorps(XMMRegister dst, Address src);
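// Illustrative sketch (assumption): a MacroAssembler-level wrapper would normally
// guarantee alignment and reachability before delegating here; roughly:
//
//   void MacroAssembler::andpd(XMMRegister dst, AddressLiteral src, Register scratch) {
//     if (reachable(src)) {
//       Assembler::andpd(dst, as_Address(src));    // constant must be 16-byte aligned
//     } else {
//       lea(scratch, src);                          // materialize the far address
//       Assembler::andpd(dst, Address(scratch, 0));
//     }
//   }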
2252
2253 };
2254
2255 // The Intel x86/AMD64 assembler attributes: all fields enclosed here guide encoding-level decisions.
2256 // The specific set functions are for specialized use; otherwise the defaults, or whatever was supplied at
2257 // object construction, are applied.
2258 class InstructionAttr {
2259 public:
2260 InstructionAttr(
2261 int vector_len, // The length of vector to be applied in encoding - for both AVX and EVEX
2262 bool rex_vex_w, // Width of data: false for 32 bits or less, true for 64 bits or when specially defined
2263 bool legacy_mode, // If true, this instruction is encoded with AVX (or earlier) encodings; otherwise EVEX may be chosen
2264 bool no_reg_mask, // When true, k0 is used if EVEX encoding is chosen; otherwise the embedded_opmask_register_specifier is used
2265 bool uses_vl) // This instruction may have legacy constraints based on vector length for EVEX
2266 :
2267 _avx_vector_len(vector_len),
2268 _rex_vex_w(rex_vex_w),
2269 _rex_vex_w_reverted(false),
2270 _legacy_mode(legacy_mode),
2271 _no_reg_mask(no_reg_mask),
2272 _uses_vl(uses_vl),
2273 _tuple_type(Assembler::EVEX_ETUP),
2274 _input_size_in_bits(Assembler::EVEX_NObit),
2275 _is_evex_instruction(false),
2276 _evex_encoding(0),
2277 _is_clear_context(true),
2278 _is_extended_context(false),
2279 _embedded_opmask_register_specifier(0), // hard code k0
2280 _current_assembler(NULL) {
2281 if (UseAVX < 3) _legacy_mode = true;
2282 }
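// Illustrative sketch (assumption): instruction emitters in the matching
// assembler_x86.cpp typically stack-allocate one attribute object per emitted
// instruction, along the lines of:
//
//   InstructionAttr attributes(Assembler::AVX_128bit, /* rex_vex_w */ false,
//                              /* legacy_mode */ false, /* no_reg_mask */ true,
//                              /* uses_vl */ true);
//   attributes.set_is_evex_instruction();   // only when an EVEX form is required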
2283
2284 ~InstructionAttr() {
2285 if (_current_assembler != NULL) {
2286 _current_assembler->clear_attributes();
2287 }
2288 _current_assembler = NULL;
2289 }
2290
2291 private:
2292 int _avx_vector_len;
2293 bool _rex_vex_w;
2294 bool _rex_vex_w_reverted;
2295 bool _legacy_mode;
2296 bool _no_reg_mask;
2297 bool _uses_vl;
2298 int _tuple_type;
2299 int _input_size_in_bits;
2300 bool _is_evex_instruction;
2301 int _evex_encoding;
2302 bool _is_clear_context;
2303 bool _is_extended_context;
2304 int _embedded_opmask_register_specifier;
2305
2306 Assembler *_current_assembler;
2307
2308 public:
2309 // query functions for field accessors
2310 int get_vector_len(void) const { return _avx_vector_len; }
2311 bool is_rex_vex_w(void) const { return _rex_vex_w; }
2312 bool is_rex_vex_w_reverted(void) { return _rex_vex_w_reverted; }
2313 bool is_legacy_mode(void) const { return _legacy_mode; }
2314 bool is_no_reg_mask(void) const { return _no_reg_mask; }
2315 bool uses_vl(void) const { return _uses_vl; }
2316 int get_tuple_type(void) const { return _tuple_type; }
2317 int get_input_size(void) const { return _input_size_in_bits; }
2318 bool is_evex_instruction(void) const { return _is_evex_instruction; }
2319 int get_evex_encoding(void) const { return _evex_encoding; }
2320 bool is_clear_context(void) const { return _is_clear_context; }
2321 bool is_extended_context(void) const { return _is_extended_context; }
2322 int get_embedded_opmask_register_specifier(void) const { return _embedded_opmask_register_specifier; }
2323
2324 // Set the vector len manually
2325 void set_vector_len(int vector_len) { _avx_vector_len = vector_len; }
2326
2327 // Mark rex_vex_w as reverted for AVX encoding
2328 void set_rex_vex_w_reverted(void) { _rex_vex_w_reverted = true; }
2329
2330 // Set rex_vex_w based on state
2331 void set_rex_vex_w(bool state) { _rex_vex_w = state; }
2332
2333 // Set the instruction to be encoded in AVX mode
2334 void set_is_legacy_mode(void) { _legacy_mode = true; }
2335
2336 // Set the current instruction to be encoded as an EVEX instruction
2337 void set_is_evex_instruction(void) { _is_evex_instruction = true; }
2338
2339 // Internal encoding data used in compressed immediate offset programming
2340 void set_evex_encoding(int value) { _evex_encoding = value; }
2341
2342 // Reset the EVEX.Z field, which is otherwise used to clear all non-directed XMM/YMM/ZMM components
2343 void reset_is_clear_context(void) { _is_clear_context = false; }
2344
2345 // Map back to the current assembler so that we can manage object-level association
2346 void set_current_assembler(Assembler *current_assembler) { _current_assembler = current_assembler; }
2347
2348 // Address modifiers used for compressed displacement calculation
2349 void set_address_attributes(int tuple_type, int input_size_in_bits) {
2350 if (VM_Version::supports_evex()) {
2351 _tuple_type = tuple_type;
2352 _input_size_in_bits = input_size_in_bits;
2353 }
2354 }
2355
2356 // Set embedded opmask register specifier.
2357 void set_embedded_opmask_register_specifier(KRegister mask) {
2358 _embedded_opmask_register_specifier = mask->encoding() & 0x7;
2359 }
2360
2361 };
2362
2363 #endif // CPU_X86_ASSEMBLER_X86_HPP
--- EOF ---