/*
 * Copyright (c) 1997, 2019, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef CPU_X86_MACROASSEMBLER_X86_HPP
#define CPU_X86_MACROASSEMBLER_X86_HPP

#include "asm/assembler.hpp"
#include "utilities/macros.hpp"
#include "runtime/rtmLocking.hpp"

// MacroAssembler extends Assembler by frequently used macros.
//
// Instructions for which a 'better' code sequence exists depending
// on arguments should also go in here.

class MacroAssembler: public Assembler {
  friend class LIR_Assembler;
  friend class Runtime1;      // as_Address()

 public:
  // Support for VM calls
  //
  // This is the base routine called by the different versions of call_VM_leaf. The interpreter
  // may customize this version by overriding it for its purposes (e.g., to save/restore
  // additional registers when doing a VM call).

  virtual void call_VM_leaf_base(
    address entry_point,               // the entry point
    int     number_of_arguments        // the number of arguments to pop after the call
  );

 protected:
  // This is the base routine called by the different versions of call_VM. The interpreter
  // may customize this version by overriding it for its purposes (e.g., to save/restore
  // additional registers when doing a VM call).
  //
  // If no java_thread register is specified (noreg) then rdi will be used instead. call_VM_base
  // returns the register which contains the thread upon return. If a thread register has been
  // specified, the return value will correspond to that register. If no last_java_sp is specified
  // (noreg) then rsp will be used instead.
  virtual void call_VM_base(           // returns the register containing the thread upon return
    Register oop_result,               // where an oop-result ends up if any; use noreg otherwise
    Register java_thread,              // the thread if computed before; use noreg otherwise
    Register last_java_sp,             // to set up last_Java_frame in stubs; use noreg otherwise
    address  entry_point,              // the entry point
    int      number_of_arguments,      // the number of arguments (w/o thread) to pop after the call
    bool     check_exceptions          // whether to check for pending exceptions after return
  );

  void call_VM_helper(Register oop_result, address entry_point, int number_of_arguments, bool check_exceptions = true);

  // helpers for FPU flag access
  // tmp is a temporary register, if none is available use noreg
  void save_rax   (Register tmp);
  void restore_rax(Register tmp);

 public:
  MacroAssembler(CodeBuffer* code) : Assembler(code) {}

  // These routines should emit JVMTI PopFrame and ForceEarlyReturn handling code.
  // The implementation is only non-empty for the InterpreterMacroAssembler,
  // as only the interpreter handles PopFrame and ForceEarlyReturn requests.
  virtual void check_and_handle_popframe(Register java_thread);
  virtual void check_and_handle_earlyret(Register java_thread);

  Address as_Address(AddressLiteral adr);
  Address as_Address(ArrayAddress adr);

  // Support for NULL-checks
  //
  // Generates code that causes a NULL OS exception if the content of reg is NULL.
  // If the accessed location is M[reg + offset] and the offset is known, provide the
  // offset. No explicit code generation is needed if the offset is within a certain
  // range (0 <= offset <= page_size).

  void null_check(Register reg, int offset = -1);
  static bool needs_explicit_null_check(intptr_t offset);
  static bool uses_implicit_null_check(void* address);
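  // Illustrative sketch (added; not part of the original header, names are placeholders):
  // before emitting a load from M[reg + offset], a caller can simply write
  //
  //   masm->null_check(reg, offset);
  //   masm->movptr(dst, Address(reg, offset));
  //
  // and, per the comment above, no test instruction is emitted when the offset is small
  // enough for the hardware page fault to serve as the implicit NULL check.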
  // Required platform-specific helpers for Label::patch_instructions.
  // They _shadow_ the declarations in AbstractAssembler, which are undefined.
  void pd_patch_instruction(address branch, address target, const char* file, int line) {
    unsigned char op = branch[0];
    assert(op == 0xE8 /* call */ ||
        op == 0xE9 /* jmp */ ||
        op == 0xEB /* short jmp */ ||
        (op & 0xF0) == 0x70 /* short jcc */ ||
        op == 0x0F && (branch[1] & 0xF0) == 0x80 /* jcc */ ||
        op == 0xC7 && branch[1] == 0xF8 /* xbegin */,
        "Invalid opcode at patch point");

    if (op == 0xEB || (op & 0xF0) == 0x70) {
      // short offset operators (jmp and jcc)
      char* disp = (char*) &branch[1];
      int imm8 = target - (address) &disp[1];
      guarantee(this->is8bit(imm8), "Short forward jump exceeds 8-bit offset at %s:%d",
                file == NULL ? "<NULL>" : file, line);
      *disp = imm8;
    } else {
      int* disp = (int*) &branch[(op == 0x0F || op == 0xC7) ? 2 : 1];
      int imm32 = target - (address) &disp[1];
      *disp = imm32;
    }
  }

  // The following 4 methods return the offset of the appropriate move instruction

  // Support for fast byte/short loading with zero extension (depending on particular CPU)
  int load_unsigned_byte(Register dst, Address src);
  int load_unsigned_short(Register dst, Address src);

  // Support for fast byte/short loading with sign extension (depending on particular CPU)
  int load_signed_byte(Register dst, Address src);
  int load_signed_short(Register dst, Address src);

  // Support for sign-extension (hi:lo = extend_sign(lo))
  void extend_sign(Register hi, Register lo);

  // Load and store values by size and signed-ness
  void load_sized_value(Register dst, Address src, size_t size_in_bytes, bool is_signed, Register dst2 = noreg);
  void store_sized_value(Address dst, Register src, size_t size_in_bytes, Register src2 = noreg);

  // Support for inc/dec with optimal instruction selection depending on value

  void increment(Register reg, int value = 1) { LP64_ONLY(incrementq(reg, value)) NOT_LP64(incrementl(reg, value)) ; }
  void decrement(Register reg, int value = 1) { LP64_ONLY(decrementq(reg, value)) NOT_LP64(decrementl(reg, value)) ; }

  void decrementl(Address dst, int value = 1);
  void decrementl(Register reg, int value = 1);

  void decrementq(Register reg, int value = 1);
  void decrementq(Address dst, int value = 1);

  void incrementl(Address dst, int value = 1);
  void incrementl(Register reg, int value = 1);

  void incrementq(Register reg, int value = 1);
  void incrementq(Address dst, int value = 1);

#ifdef COMPILER2
  // special instructions for EVEX
  void setvectmask(Register dst, Register src);
  void restorevectmask();
#endif

  // Support optimal SSE move instructions.
  void movflt(XMMRegister dst, XMMRegister src) {
    if (dst->encoding() == src->encoding()) return;
    if (UseXmmRegToRegMoveAll) { movaps(dst, src); return; }
    else                       { movss (dst, src); return; }
  }
  void movflt(XMMRegister dst, Address src) { movss(dst, src); }
  void movflt(XMMRegister dst, AddressLiteral src);
  void movflt(Address dst, XMMRegister src) { movss(dst, src); }

  void movdbl(XMMRegister dst, XMMRegister src) {
    if (dst->encoding() == src->encoding()) return;
    if (UseXmmRegToRegMoveAll) { movapd(dst, src); return; }
    else                       { movsd (dst, src); return; }
  }

  void movdbl(XMMRegister dst, AddressLiteral src);

  void movdbl(XMMRegister dst, Address src) {
    if (UseXmmLoadAndClearUpper) { movsd (dst, src); return; }
    else                         { movlpd(dst, src); return; }
  }
  void movdbl(Address dst, XMMRegister src) { movsd(dst, src); }

  void incrementl(AddressLiteral dst);
  void incrementl(ArrayAddress dst);

  void incrementq(AddressLiteral dst);

  // Alignment
  void align(int modulus);
  void align(int modulus, int target);

  // A 5 byte nop that is safe for patching (see patch_verified_entry)
  void fat_nop();

  // Stack frame creation/removal
  void enter();
  void leave();

  // Support for getting the JavaThread pointer (i.e.; a reference to thread-local information)
  // The pointer will be loaded into the thread register.
  void get_thread(Register thread);
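  // Note (added; not in the original header): on 64-bit x86 the current JavaThread is
  // conventionally kept in r15_thread, so callers usually only need get_thread on 32-bit.
  // A hedged usage sketch, with 'masm' standing in for a MacroAssembler*:
  //
  //   Register thread = NOT_LP64(rdi) LP64_ONLY(r15_thread);
  //   NOT_LP64(masm->get_thread(thread);)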
  // Support for VM calls
  //
  // It is imperative that all calls into the VM are handled via the call_VM macros.
  // They make sure that the stack linkage is set up correctly. call_VM's correspond
  // to ENTRY/ENTRY_X entry points while call_VM_leaf's correspond to LEAF entry points.


  void call_VM(Register oop_result,
               address entry_point,
               bool check_exceptions = true);
  void call_VM(Register oop_result,
               address entry_point,
               Register arg_1,
               bool check_exceptions = true);
  void call_VM(Register oop_result,
               address entry_point,
               Register arg_1, Register arg_2,
               bool check_exceptions = true);
  void call_VM(Register oop_result,
               address entry_point,
               Register arg_1, Register arg_2, Register arg_3,
               bool check_exceptions = true);

  // Overloadings with last_Java_sp
  void call_VM(Register oop_result,
               Register last_java_sp,
               address entry_point,
               int number_of_arguments = 0,
               bool check_exceptions = true);
  void call_VM(Register oop_result,
               Register last_java_sp,
               address entry_point,
               Register arg_1,
               bool check_exceptions = true);
  void call_VM(Register oop_result,
               Register last_java_sp,
               address entry_point,
               Register arg_1, Register arg_2,
               bool check_exceptions = true);
  void call_VM(Register oop_result,
               Register last_java_sp,
               address entry_point,
               Register arg_1, Register arg_2, Register arg_3,
               bool check_exceptions = true);

  void get_vm_result  (Register oop_result, Register thread);
  void get_vm_result_2(Register metadata_result, Register thread);

  // These always tightly bind to MacroAssembler::call_VM_base
  // bypassing the virtual implementation
  void super_call_VM(Register oop_result, Register last_java_sp, address entry_point, int number_of_arguments = 0, bool check_exceptions = true);
  void super_call_VM(Register oop_result, Register last_java_sp, address entry_point, Register arg_1, bool check_exceptions = true);
  void super_call_VM(Register oop_result, Register last_java_sp, address entry_point, Register arg_1, Register arg_2, bool check_exceptions = true);
  void super_call_VM(Register oop_result, Register last_java_sp, address entry_point, Register arg_1, Register arg_2, Register arg_3, bool check_exceptions = true);
  void super_call_VM(Register oop_result, Register last_java_sp, address entry_point, Register arg_1, Register arg_2, Register arg_3, Register arg_4, bool check_exceptions = true);

  void call_VM_leaf0(address entry_point);
  void call_VM_leaf(address entry_point,
                    int number_of_arguments = 0);
  void call_VM_leaf(address entry_point,
                    Register arg_1);
  void call_VM_leaf(address entry_point,
                    Register arg_1, Register arg_2);
  void call_VM_leaf(address entry_point,
                    Register arg_1, Register arg_2, Register arg_3);

  // These always tightly bind to MacroAssembler::call_VM_leaf_base
  // bypassing the virtual implementation
  void super_call_VM_leaf(address entry_point);
  void super_call_VM_leaf(address entry_point, Register arg_1);
  void super_call_VM_leaf(address entry_point, Register arg_1, Register arg_2);
  void super_call_VM_leaf(address entry_point, Register arg_1, Register arg_2, Register arg_3);
  void super_call_VM_leaf(address entry_point, Register arg_1, Register arg_2, Register arg_3, Register arg_4);
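  // Illustrative sketch (added; not part of the original header): a typical runtime upcall
  // from generated code passes the target entry point plus argument registers and lets the
  // macro handle thread and last_Java_frame bookkeeping, e.g.
  //
  //   masm->call_VM(rax,                                                    // oop result
  //                 CAST_FROM_FN_PTR(address, some_runtime_entry),          // hypothetical callee
  //                 rdx /* arg_1 */);
  //
  // 'masm' and 'some_runtime_entry' are placeholders; CAST_FROM_FN_PTR follows common HotSpot usage.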
  // last Java Frame (fills frame anchor)
  void set_last_Java_frame(Register thread,
                           Register last_java_sp,
                           Register last_java_fp,
                           address last_java_pc);

  // thread in the default location (r15_thread on 64bit)
  void set_last_Java_frame(Register last_java_sp,
                           Register last_java_fp,
                           address last_java_pc);

  void reset_last_Java_frame(Register thread, bool clear_fp);

  // thread in the default location (r15_thread on 64bit)
  void reset_last_Java_frame(bool clear_fp);

  // jobjects
  void clear_jweak_tag(Register possibly_jweak);
  void resolve_jobject(Register value, Register thread, Register tmp);

  // C 'boolean' to Java boolean: x == 0 ? 0 : 1
  void c2bool(Register x);

  // C++ bool manipulation

  void movbool(Register dst, Address src);
  void movbool(Address dst, bool boolconst);
  void movbool(Address dst, Register src);
  void testbool(Register dst);

  void resolve_oop_handle(Register result, Register tmp = rscratch2);
  void resolve_weak_handle(Register result, Register tmp);
  void load_mirror(Register mirror, Register method, Register tmp = rscratch2);
  void load_method_holder_cld(Register rresult, Register rmethod);

  void load_method_holder(Register holder, Register method);

  // oop manipulations
  void load_klass(Register dst, Register src);
  void store_klass(Register dst, Register src);

  void access_load_at(BasicType type, DecoratorSet decorators, Register dst, Address src,
                      Register tmp1, Register thread_tmp);
  void access_store_at(BasicType type, DecoratorSet decorators, Address dst, Register src,
                       Register tmp1, Register tmp2);

  // Resolves obj access. Result is placed in the same register.
  // All other registers are preserved.
  void resolve(DecoratorSet decorators, Register obj);

  void load_heap_oop(Register dst, Address src, Register tmp1 = noreg,
                     Register thread_tmp = noreg, DecoratorSet decorators = 0);
  void load_heap_oop_not_null(Register dst, Address src, Register tmp1 = noreg,
                              Register thread_tmp = noreg, DecoratorSet decorators = 0);
  void store_heap_oop(Address dst, Register src, Register tmp1 = noreg,
                      Register tmp2 = noreg, DecoratorSet decorators = 0);
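  // Illustrative sketch (added; not in the original header): these wrappers route through the
  // GC barrier set, so a compressed-oops-aware field access is typically written as
  //
  //   masm->load_heap_oop(rax, Address(rbx, field_offset));    // 'field_offset' is hypothetical
  //   masm->store_heap_oop(Address(rbx, field_offset), rax);
  //
  // rather than as a raw movptr, which would skip the required load/store barriers.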
  // Used for storing NULL. All other oop constants should be
  // stored using routines that take a jobject.
  void store_heap_oop_null(Address dst);

  void load_prototype_header(Register dst, Register src);

#ifdef _LP64
  void store_klass_gap(Register dst, Register src);

  // This dummy is to prevent a call to store_heap_oop from
  // converting a zero (like NULL) into a Register by giving
  // the compiler two choices it can't resolve

  void store_heap_oop(Address dst, void* dummy);

  void encode_heap_oop(Register r);
  void decode_heap_oop(Register r);
  void encode_heap_oop_not_null(Register r);
  void decode_heap_oop_not_null(Register r);
  void encode_heap_oop_not_null(Register dst, Register src);
  void decode_heap_oop_not_null(Register dst, Register src);

  void set_narrow_oop(Register dst, jobject obj);
  void set_narrow_oop(Address dst, jobject obj);
  void cmp_narrow_oop(Register dst, jobject obj);
  void cmp_narrow_oop(Address dst, jobject obj);

  void encode_klass_not_null(Register r);
  void decode_klass_not_null(Register r);
  void encode_klass_not_null(Register dst, Register src);
  void decode_klass_not_null(Register dst, Register src);
  void set_narrow_klass(Register dst, Klass* k);
  void set_narrow_klass(Address dst, Klass* k);
  void cmp_narrow_klass(Register dst, Klass* k);
  void cmp_narrow_klass(Address dst, Klass* k);

  // Returns the byte size of the instructions generated by decode_klass_not_null()
  // when compressed klass pointers are being used.
  static int instr_size_for_decode_klass_not_null();

  // if heap base register is used - reinit it with the correct value
  void reinit_heapbase();

  DEBUG_ONLY(void verify_heapbase(const char* msg);)

#endif // _LP64

  // Int division/remainder for Java
  // (as idivl, but checks for special case as described in JVM spec.)
  // returns idivl instruction offset for implicit exception handling
  int corrected_idivl(Register reg);

  // Long division/remainder for Java
  // (as idivq, but checks for special case as described in JVM spec.)
  // returns idivq instruction offset for implicit exception handling
  int corrected_idivq(Register reg);

  void int3();

  // Long operation macros for a 32bit cpu
  // Long negation for Java
  void lneg(Register hi, Register lo);

  // Long multiplication for Java
  // (destroys contents of eax, ebx, ecx and edx)
  void lmul(int x_rsp_offset, int y_rsp_offset); // rdx:rax = x * y

  // Long shifts for Java
  // (semantics as described in JVM spec.)
  void lshl(Register hi, Register lo);                               // hi:lo << (rcx & 0x3f)
  void lshr(Register hi, Register lo, bool sign_extension = false);  // hi:lo >> (rcx & 0x3f)

  // Long compare for Java
  // (semantics as described in JVM spec.)
  void lcmp2int(Register x_hi, Register x_lo, Register y_hi, Register y_lo); // x_hi = lcmp(x, y)


  // misc

  // Sign extension
  void sign_extend_short(Register reg);
  void sign_extend_byte(Register reg);

  // Division by power of 2, rounding towards 0
  void division_with_shift(Register reg, int shift_value);
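  // Note (added; not in the original header): a plain arithmetic shift rounds toward negative
  // infinity, so Java-style division by 2^k has to bias negative dividends first. A minimal
  // sketch of the usual idiom, assuming shift_value == k:
  //
  //   // if (reg < 0) reg += (1 << k) - 1;
  //   // reg >>= k;            (arithmetic shift)
  //
  // which is the kind of sequence an implementation of division_with_shift is expected to emit.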
#ifndef _LP64
  // Compares the top-most stack entries on the FPU stack and sets the eflags as follows:
  //
  // CF (corresponds to C0) if x < y
  // PF (corresponds to C2) if unordered
  // ZF (corresponds to C3) if x = y
  //
  // The arguments are in reversed order on the stack (i.e., top of stack is first argument).
  // tmp is a temporary register, if none is available use noreg (only matters for non-P6 code)
  void fcmp(Register tmp);
  // Variant of the above which allows y to be further down the stack
  // and which only pops x and y if specified. If pop_right is
  // specified then pop_left must also be specified.
  void fcmp(Register tmp, int index, bool pop_left, bool pop_right);

  // Floating-point comparison for Java
  // Compares the top-most stack entries on the FPU stack and stores the result in dst.
  // The arguments are in reversed order on the stack (i.e., top of stack is first argument).
  // (semantics as described in JVM spec.)
  void fcmp2int(Register dst, bool unordered_is_less);
  // Variant of the above which allows y to be further down the stack
  // and which only pops x and y if specified. If pop_right is
  // specified then pop_left must also be specified.
  void fcmp2int(Register dst, bool unordered_is_less, int index, bool pop_left, bool pop_right);

  // Floating-point remainder for Java (ST0 = ST0 fremr ST1, ST1 is empty afterwards)
  // tmp is a temporary register, if none is available use noreg
  void fremr(Register tmp);

  // only if +VerifyFPU
  void verify_FPU(int stack_depth, const char* s = "illegal FPU state");
#endif // !LP64

  // dst = c = a * b + c
  void fmad(XMMRegister dst, XMMRegister a, XMMRegister b, XMMRegister c);
  void fmaf(XMMRegister dst, XMMRegister a, XMMRegister b, XMMRegister c);

  void vfmad(XMMRegister dst, XMMRegister a, XMMRegister b, XMMRegister c, int vector_len);
  void vfmaf(XMMRegister dst, XMMRegister a, XMMRegister b, XMMRegister c, int vector_len);
  void vfmad(XMMRegister dst, XMMRegister a, Address b, XMMRegister c, int vector_len);
  void vfmaf(XMMRegister dst, XMMRegister a, Address b, XMMRegister c, int vector_len);


  // same as fcmp2int, but using SSE2
  void cmpss2int(XMMRegister opr1, XMMRegister opr2, Register dst, bool unordered_is_less);
  void cmpsd2int(XMMRegister opr1, XMMRegister opr2, Register dst, bool unordered_is_less);

  // branch to L if FPU flag C2 is set/not set
  // tmp is a temporary register, if none is available use noreg
  void jC2 (Register tmp, Label& L);
  void jnC2(Register tmp, Label& L);

  // Load float value from 'address'. If UseSSE >= 1, the value is loaded into
  // register xmm0. Otherwise, the value is loaded onto the FPU stack.
  void load_float(Address src);

  // Store float value to 'address'. If UseSSE >= 1, the value is stored
  // from register xmm0. Otherwise, the value is stored from the FPU stack.
  void store_float(Address dst);

  // Load double value from 'address'. If UseSSE >= 2, the value is loaded into
  // register xmm0. Otherwise, the value is loaded onto the FPU stack.
  void load_double(Address src);

  // Store double value to 'address'. If UseSSE >= 2, the value is stored
  // from register xmm0. Otherwise, the value is stored from the FPU stack.
  void store_double(Address dst);

#ifndef _LP64
  // Pop ST (ffree & fincstp combined)
  void fpop();

  void empty_FPU_stack();
#endif // !_LP64

  void push_IU_state();
  void pop_IU_state();

  void push_FPU_state();
  void pop_FPU_state();

  void push_CPU_state();
  void pop_CPU_state();

  // Round up to a power of two
  void round_to(Register reg, int modulus);

  // Callee saved registers handling
  void push_callee_saved_registers();
  void pop_callee_saved_registers();

  // allocation
  void eden_allocate(
    Register thread,                   // Current thread
    Register obj,                      // result: pointer to object after successful allocation
    Register var_size_in_bytes,        // object size in bytes if unknown at compile time; invalid otherwise
    int      con_size_in_bytes,        // object size in bytes if known at compile time
    Register t1,                       // temp register
    Label&   slow_case                 // continuation point if fast allocation fails
  );
  void tlab_allocate(
    Register thread,                   // Current thread
    Register obj,                      // result: pointer to object after successful allocation
    Register var_size_in_bytes,        // object size in bytes if unknown at compile time; invalid otherwise
    int      con_size_in_bytes,        // object size in bytes if known at compile time
    Register t1,                       // temp register
    Register t2,                       // temp register
    Label&   slow_case                 // continuation point if fast allocation fails
  );
  void zero_memory(Register address, Register length_in_bytes, int offset_in_bytes, Register temp);

  // interface method calling
  void lookup_interface_method(Register recv_klass,
                               Register intf_klass,
                               RegisterOrConstant itable_index,
                               Register method_result,
                               Register scan_temp,
                               Label& no_such_interface,
                               bool return_method = true);

  // virtual method calling
  void lookup_virtual_method(Register recv_klass,
                             RegisterOrConstant vtable_index,
                             Register method_result);

  // Test sub_klass against super_klass, with fast and slow paths.

  // The fast path produces a tri-state answer: yes / no / maybe-slow.
  // One of the three labels can be NULL, meaning take the fall-through.
  // If super_check_offset is -1, the value is loaded up from super_klass.
  // No registers are killed, except temp_reg.
  void check_klass_subtype_fast_path(Register sub_klass,
                                     Register super_klass,
                                     Register temp_reg,
                                     Label* L_success,
                                     Label* L_failure,
                                     Label* L_slow_path,
                                     RegisterOrConstant super_check_offset = RegisterOrConstant(-1));

  // The rest of the type check; must be wired to a corresponding fast path.
  // It does not repeat the fast path logic, so don't use it standalone.
  // The temp_reg and temp2_reg can be noreg, if no temps are available.
  // Updates the sub's secondary super cache as necessary.
  // If set_cond_codes, condition codes will be Z on success, NZ on failure.
  void check_klass_subtype_slow_path(Register sub_klass,
                                     Register super_klass,
                                     Register temp_reg,
                                     Register temp2_reg,
                                     Label* L_success,
                                     Label* L_failure,
                                     bool set_cond_codes = false);
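  // Illustrative sketch (added; not in the original header) of how the two halves are usually
  // wired together when a caller cannot use the combined helper below; register names are
  // placeholders, the key point is that the slow path must follow a matching fast path and
  // share its labels:
  //
  //   Label L_success, L_failure, L_slow;
  //   masm->check_klass_subtype_fast_path(sub, super, tmp, &L_success, &L_failure, &L_slow);
  //   masm->bind(L_slow);
  //   masm->check_klass_subtype_slow_path(sub, super, tmp, noreg, &L_success, &L_failure);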
  // Simplified, combined version, good for typical uses.
  // Falls through on failure.
  void check_klass_subtype(Register sub_klass,
                           Register super_klass,
                           Register temp_reg,
                           Label& L_success);

  void clinit_barrier(Register klass,
                      Register thread,
                      Label* L_fast_path = NULL,
                      Label* L_slow_path = NULL);

  // method handles (JSR 292)
  Address argument_address(RegisterOrConstant arg_slot, int extra_slot_offset = 0);

  //----
  void set_word_if_not_zero(Register reg); // sets reg to 1 if not zero, otherwise 0

  // Debugging

  // only if +VerifyOops
  // TODO: Make these macros with file and line like sparc version!

  void _verify_oop(Register reg, const char* s, const char* file, int line);
  void _verify_oop_addr(Address addr, const char* s, const char* file, int line);

  // TODO: verify method and klass metadata (compare against vptr?)
  void _verify_method_ptr(Register reg, const char* msg, const char* file, int line) {}
  void _verify_klass_ptr(Register reg, const char* msg, const char* file, int line) {}

#define verify_oop(reg) _verify_oop(reg, "broken oop ", __FILE__, __LINE__)
#define verify_oop_addr(addr) _verify_oop_addr(addr, "broken oop addr ", __FILE__, __LINE__)
#define verify_method_ptr(reg) _verify_method_ptr(reg, "broken method " #reg, __FILE__, __LINE__)
#define verify_klass_ptr(reg) _verify_klass_ptr(reg, "broken klass " #reg, __FILE__, __LINE__)

  // Verify or restore cpu control state after JNI call
  void restore_cpu_control_state_after_jni();

  // prints msg, dumps registers and stops execution
  void stop(const char* msg);

  // prints msg and continues
  void warn(const char* msg);

  // dumps registers and other state
  void print_state();

  static void debug32(int rdi, int rsi, int rbp, int rsp, int rbx, int rdx, int rcx, int rax, int eip, char* msg);
  static void debug64(char* msg, int64_t pc, int64_t regs[]);
  static void print_state32(int rdi, int rsi, int rbp, int rsp, int rbx, int rdx, int rcx, int rax, int eip);
  static void print_state64(int64_t pc, int64_t regs[]);

  void os_breakpoint();

  void untested()                                { stop("untested"); }

  void unimplemented(const char* what = "");

  void should_not_reach_here()                   { stop("should not reach here"); }

  void print_CPU_state();

  // Stack overflow checking
  void bang_stack_with_offset(int offset) {
    // stack grows down, caller passes positive offset
    assert(offset > 0, "must bang with negative offset");
    movl(Address(rsp, (-offset)), rax);
  }

  // Writes to stack successive pages until offset reached to check for
  // stack overflow + shadow pages. Also, clobbers tmp
  void bang_stack_size(Register size, Register tmp);

  // Check for reserved stack access in method being exited (for JIT)
  void reserved_stack_check();

  virtual RegisterOrConstant delayed_value_impl(intptr_t* delayed_value_addr,
                                                Register tmp,
                                                int offset);

  // If thread_reg is != noreg the code assumes the register passed contains
  // the thread (required on 64 bit).
  void safepoint_poll(Label& slow_path, Register thread_reg, Register temp_reg);

  void verify_tlab();
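  // Illustrative sketch (added; not in the original header): generated loops typically poll
  // for safepoints like this, with the registers following the 64-bit convention (on 32-bit
  // a thread register is not required and noreg may be passed):
  //
  //   Label slow;
  //   masm->safepoint_poll(slow, r15_thread, rscratch1);  // jumps to 'slow' when a safepoint is pending
  //   ...
  //   masm->bind(slow);                                   // call into the runtime, then resume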
  // Biased locking support
  // lock_reg and obj_reg must be loaded up with the appropriate values.
  // swap_reg must be rax, and is killed.
  // tmp_reg is optional. If it is supplied (i.e., != noreg) it will
  // be killed; if not supplied, push/pop will be used internally to
  // allocate a temporary (inefficient, avoid if possible).
  // Optional slow case is for implementations (interpreter and C1) which branch to
  // slow case directly. Leaves condition codes set for C2's Fast_Lock node.
  // Returns offset of first potentially-faulting instruction for null
  // check info (currently consumed only by C1). If
  // swap_reg_contains_mark is true then returns -1 as it is assumed
  // the calling code has already passed any potential faults.
  int biased_locking_enter(Register lock_reg, Register obj_reg,
                           Register swap_reg, Register tmp_reg,
                           bool swap_reg_contains_mark,
                           Label& done, Label* slow_case = NULL,
                           BiasedLockingCounters* counters = NULL);
  void biased_locking_exit (Register obj_reg, Register temp_reg, Label& done);
#ifdef COMPILER2
  // Code used by cmpFastLock and cmpFastUnlock mach instructions in .ad file.
  // See full description in macroAssembler_x86.cpp.
  void fast_lock(Register obj, Register box, Register tmp,
                 Register scr, Register cx1, Register cx2,
                 BiasedLockingCounters* counters,
                 RTMLockingCounters* rtm_counters,
                 RTMLockingCounters* stack_rtm_counters,
                 Metadata* method_data,
                 bool use_rtm, bool profile_rtm);
  void fast_unlock(Register obj, Register box, Register tmp, bool use_rtm);
#if INCLUDE_RTM_OPT
  void rtm_counters_update(Register abort_status, Register rtm_counters);
  void branch_on_random_using_rdtsc(Register tmp, Register scr, int count, Label& brLabel);
  void rtm_abort_ratio_calculation(Register tmp, Register rtm_counters_reg,
                                   RTMLockingCounters* rtm_counters,
                                   Metadata* method_data);
  void rtm_profiling(Register abort_status_Reg, Register rtm_counters_Reg,
                     RTMLockingCounters* rtm_counters, Metadata* method_data, bool profile_rtm);
  void rtm_retry_lock_on_abort(Register retry_count, Register abort_status, Label& retryLabel);
  void rtm_retry_lock_on_busy(Register retry_count, Register box, Register tmp, Register scr, Label& retryLabel);
  void rtm_stack_locking(Register obj, Register tmp, Register scr,
                         Register retry_on_abort_count,
                         RTMLockingCounters* stack_rtm_counters,
                         Metadata* method_data, bool profile_rtm,
                         Label& DONE_LABEL, Label& IsInflated);
  void rtm_inflated_locking(Register obj, Register box, Register tmp,
                            Register scr, Register retry_on_busy_count,
                            Register retry_on_abort_count,
                            RTMLockingCounters* rtm_counters,
                            Metadata* method_data, bool profile_rtm,
                            Label& DONE_LABEL);
#endif
#endif

  Condition negate_condition(Condition cond);
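  // Note (added; not in the original header): negate_condition maps a condition code to its
  // logical inverse, e.g. negate_condition(Assembler::equal) yields Assembler::notEqual, which
  // is handy when a branch has to be emitted with the opposite sense.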
  // Instructions that use AddressLiteral operands. These instructions can handle 32bit/64bit
  // operands. In general the names are modified to avoid hiding the instruction in Assembler
  // so that we don't need to implement all the varieties in the Assembler with trivial wrappers
  // here in MacroAssembler. The major exception to this rule is call.

  // Arithmetics


  void addptr(Address dst, int32_t src) { LP64_ONLY(addq(dst, src)) NOT_LP64(addl(dst, src)) ; }
  void addptr(Address dst, Register src);

  void addptr(Register dst, Address src) { LP64_ONLY(addq(dst, src)) NOT_LP64(addl(dst, src)); }
  void addptr(Register dst, int32_t src);
  void addptr(Register dst, Register src);
  void addptr(Register dst, RegisterOrConstant src) {
    if (src.is_constant()) addptr(dst, (int) src.as_constant());
    else                   addptr(dst, src.as_register());
  }

  void andptr(Register dst, int32_t src);
  void andptr(Register src1, Register src2) { LP64_ONLY(andq(src1, src2)) NOT_LP64(andl(src1, src2)) ; }

  void cmp8(AddressLiteral src1, int imm);

  // renamed to drag out the casting of address to int32_t/intptr_t
  void cmp32(Register src1, int32_t imm);

  void cmp32(AddressLiteral src1, int32_t imm);
  // compare reg - mem, or reg - &mem
  void cmp32(Register src1, AddressLiteral src2);

  void cmp32(Register src1, Address src2);

#ifndef _LP64
  void cmpklass(Address dst, Metadata* obj);
  void cmpklass(Register dst, Metadata* obj);
  void cmpoop(Address dst, jobject obj);
  void cmpoop_raw(Address dst, jobject obj);
#endif // _LP64

  void cmpoop(Register src1, Register src2);
  void cmpoop(Register src1, Address src2);
  void cmpoop(Register dst, jobject obj);
  void cmpoop_raw(Register dst, jobject obj);

  // NOTE src2 must be the lval. This is NOT a mem-mem compare
  void cmpptr(Address src1, AddressLiteral src2);

  void cmpptr(Register src1, AddressLiteral src2);

  void cmpptr(Register src1, Register src2) { LP64_ONLY(cmpq(src1, src2)) NOT_LP64(cmpl(src1, src2)) ; }
  void cmpptr(Register src1, Address src2) { LP64_ONLY(cmpq(src1, src2)) NOT_LP64(cmpl(src1, src2)) ; }
  // void cmpptr(Address src1, Register src2) { LP64_ONLY(cmpq(src1, src2)) NOT_LP64(cmpl(src1, src2)) ; }

  void cmpptr(Register src1, int32_t src2) { LP64_ONLY(cmpq(src1, src2)) NOT_LP64(cmpl(src1, src2)) ; }
  void cmpptr(Address src1, int32_t src2) { LP64_ONLY(cmpq(src1, src2)) NOT_LP64(cmpl(src1, src2)) ; }

  // cmp64 to avoid hiding cmpq
  void cmp64(Register src1, AddressLiteral src);

  void cmpxchgptr(Register reg, Address adr);

  void locked_cmpxchgptr(Register reg, AddressLiteral adr);


  void imulptr(Register dst, Register src) { LP64_ONLY(imulq(dst, src)) NOT_LP64(imull(dst, src)); }
  void imulptr(Register dst, Register src, int imm32) { LP64_ONLY(imulq(dst, src, imm32)) NOT_LP64(imull(dst, src, imm32)); }


  void negptr(Register dst) { LP64_ONLY(negq(dst)) NOT_LP64(negl(dst)); }

  void notptr(Register dst) { LP64_ONLY(notq(dst)) NOT_LP64(notl(dst)); }

  void shlptr(Register dst, int32_t shift);
  void shlptr(Register dst) { LP64_ONLY(shlq(dst)) NOT_LP64(shll(dst)); }

  void shrptr(Register dst, int32_t shift);
  void shrptr(Register dst) { LP64_ONLY(shrq(dst)) NOT_LP64(shrl(dst)); }

  void sarptr(Register dst) { LP64_ONLY(sarq(dst)) NOT_LP64(sarl(dst)); }
  void sarptr(Register dst, int32_t src) { LP64_ONLY(sarq(dst, src)) NOT_LP64(sarl(dst, src)); }

  void subptr(Address dst, int32_t src) { LP64_ONLY(subq(dst, src)) NOT_LP64(subl(dst, src)); }

  void subptr(Register dst, Address src) { LP64_ONLY(subq(dst, src)) NOT_LP64(subl(dst, src)); }
  void subptr(Register dst, int32_t src);
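  // Note (added; not in the original header): the *ptr helpers in this section expand to the
  // q-form on LP64 and the l-form on 32-bit, so pointer-sized arithmetic can be written once, e.g.
  //
  //   masm->addptr(rsp, wordSize);   // addq on x86_64, addl on x86_32; wordSize is the VM's pointer size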
  // Force generation of a 4 byte immediate value even if it fits into 8bit
  void subptr_imm32(Register dst, int32_t src);
  void subptr(Register dst, Register src);
  void subptr(Register dst, RegisterOrConstant src) {
    if (src.is_constant()) subptr(dst, (int) src.as_constant());
    else                   subptr(dst, src.as_register());
  }

  void sbbptr(Address dst, int32_t src) { LP64_ONLY(sbbq(dst, src)) NOT_LP64(sbbl(dst, src)); }
  void sbbptr(Register dst, int32_t src) { LP64_ONLY(sbbq(dst, src)) NOT_LP64(sbbl(dst, src)); }

  void xchgptr(Register src1, Register src2) { LP64_ONLY(xchgq(src1, src2)) NOT_LP64(xchgl(src1, src2)) ; }
  void xchgptr(Register src1, Address src2) { LP64_ONLY(xchgq(src1, src2)) NOT_LP64(xchgl(src1, src2)) ; }

  void xaddptr(Address src1, Register src2) { LP64_ONLY(xaddq(src1, src2)) NOT_LP64(xaddl(src1, src2)) ; }



  // Helper functions for statistics gathering.
  // Conditionally (atomically, on MPs) increments passed counter address, preserving condition codes.
  void cond_inc32(Condition cond, AddressLiteral counter_addr);
  // Unconditional atomic increment.
  void atomic_incl(Address counter_addr);
  void atomic_incl(AddressLiteral counter_addr, Register scr = rscratch1);
#ifdef _LP64
  void atomic_incq(Address counter_addr);
  void atomic_incq(AddressLiteral counter_addr, Register scr = rscratch1);
#endif
  void atomic_incptr(AddressLiteral counter_addr, Register scr = rscratch1) { LP64_ONLY(atomic_incq(counter_addr, scr)) NOT_LP64(atomic_incl(counter_addr, scr)) ; }
  void atomic_incptr(Address counter_addr) { LP64_ONLY(atomic_incq(counter_addr)) NOT_LP64(atomic_incl(counter_addr)) ; }

  void lea(Register dst, AddressLiteral adr);
  void lea(Address dst, AddressLiteral adr);
  void lea(Register dst, Address adr) { Assembler::lea(dst, adr); }

  void leal32(Register dst, Address src) { leal(dst, src); }

  // Import other testl() methods from the parent class or else
  // they will be hidden by the following overriding declaration.
  using Assembler::testl;
  void testl(Register dst, AddressLiteral src);

  void orptr(Register dst, Address src) { LP64_ONLY(orq(dst, src)) NOT_LP64(orl(dst, src)); }
  void orptr(Register dst, Register src) { LP64_ONLY(orq(dst, src)) NOT_LP64(orl(dst, src)); }
  void orptr(Register dst, int32_t src) { LP64_ONLY(orq(dst, src)) NOT_LP64(orl(dst, src)); }
  void orptr(Address dst, int32_t imm32) { LP64_ONLY(orq(dst, imm32)) NOT_LP64(orl(dst, imm32)); }

  void testptr(Register src, int32_t imm32) { LP64_ONLY(testq(src, imm32)) NOT_LP64(testl(src, imm32)); }
  void testptr(Register src1, Address src2) { LP64_ONLY(testq(src1, src2)) NOT_LP64(testl(src1, src2)); }
  void testptr(Register src1, Register src2);

  void xorptr(Register dst, Register src) { LP64_ONLY(xorq(dst, src)) NOT_LP64(xorl(dst, src)); }
  void xorptr(Register dst, Address src) { LP64_ONLY(xorq(dst, src)) NOT_LP64(xorl(dst, src)); }

  // Calls

  void call(Label& L, relocInfo::relocType rtype);
  void call(Register entry);

  // NOTE: this call transfers to the effective address of entry NOT
  // the address contained by entry. This is because this is more natural
  // for jumps/calls.
  void call(AddressLiteral entry);

  // Emit the CompiledIC call idiom
  void ic_call(address entry, jint method_index = 0);
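  // Illustrative sketch (added; not in the original header): the AddressLiteral call overload
  // above is what stub and runtime calls usually go through, e.g.
  //
  //   masm->call(RuntimeAddress(CAST_FROM_FN_PTR(address, some_runtime_entry)));  // callee name is hypothetical
  //
  // RuntimeAddress wraps the target with the appropriate relocation type.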
  // Jumps

  // NOTE: these jumps transfer to the effective address of dst NOT
  // the address contained by dst. This is because this is more natural
  // for jumps/calls.
  void jump(AddressLiteral dst);
  void jump_cc(Condition cc, AddressLiteral dst);

  // 32bit can do a case table jump in one instruction but we no longer allow the base
  // to be installed in the Address class. This jump will transfer to the address
  // contained in the location described by entry (not the address of entry)
  void jump(ArrayAddress entry);

  // Floating

  void andpd(XMMRegister dst, Address src) { Assembler::andpd(dst, src); }
  void andpd(XMMRegister dst, AddressLiteral src, Register scratch_reg = rscratch1);
  void andpd(XMMRegister dst, XMMRegister src) { Assembler::andpd(dst, src); }

  void andps(XMMRegister dst, XMMRegister src) { Assembler::andps(dst, src); }
  void andps(XMMRegister dst, Address src) { Assembler::andps(dst, src); }
  void andps(XMMRegister dst, AddressLiteral src, Register scratch_reg = rscratch1);

  void comiss(XMMRegister dst, XMMRegister src) { Assembler::comiss(dst, src); }
  void comiss(XMMRegister dst, Address src) { Assembler::comiss(dst, src); }
  void comiss(XMMRegister dst, AddressLiteral src);

  void comisd(XMMRegister dst, XMMRegister src) { Assembler::comisd(dst, src); }
  void comisd(XMMRegister dst, Address src) { Assembler::comisd(dst, src); }
  void comisd(XMMRegister dst, AddressLiteral src);

#ifndef _LP64
  void fadd_s(Address src)        { Assembler::fadd_s(src); }
  void fadd_s(AddressLiteral src) { Assembler::fadd_s(as_Address(src)); }

  void fldcw(Address src) { Assembler::fldcw(src); }
  void fldcw(AddressLiteral src);

  void fld_s(int index)   { Assembler::fld_s(index); }
  void fld_s(Address src) { Assembler::fld_s(src); }
  void fld_s(AddressLiteral src);

  void fld_d(Address src) { Assembler::fld_d(src); }
  void fld_d(AddressLiteral src);

  void fld_x(Address src) { Assembler::fld_x(src); }
  void fld_x(AddressLiteral src);

  void fmul_s(Address src)        { Assembler::fmul_s(src); }
  void fmul_s(AddressLiteral src) { Assembler::fmul_s(as_Address(src)); }
#endif // _LP64

  void ldmxcsr(Address src) { Assembler::ldmxcsr(src); }
  void ldmxcsr(AddressLiteral src);

#ifdef _LP64
 private:
  void sha256_AVX2_one_round_compute(
    Register  reg_old_h,
    Register  reg_a,
    Register  reg_b,
    Register  reg_c,
    Register  reg_d,
    Register  reg_e,
    Register  reg_f,
    Register  reg_g,
    Register  reg_h,
    int iter);
  void sha256_AVX2_four_rounds_compute_first(int start);
  void sha256_AVX2_four_rounds_compute_last(int start);
  void sha256_AVX2_one_round_and_sched(
    XMMRegister xmm_0,     /* == ymm4 on 0, 1, 2, 3 iterations, then rotate 4 registers left on 4, 8, 12 iterations */
    XMMRegister xmm_1,     /* ymm5 */  /* full cycle is 16 iterations */
    XMMRegister xmm_2,     /* ymm6 */
    XMMRegister xmm_3,     /* ymm7 */
    Register    reg_a,     /* == eax on 0 iteration, then rotate 8 register right on each next iteration */
    Register    reg_b,     /* ebx */   /* full cycle is 8 iterations */
    Register    reg_c,     /* edi */
    Register    reg_d,     /* esi */
    Register    reg_e,     /* r8d */
    Register    reg_f,     /* r9d */
    Register    reg_g,     /* r10d */
    Register    reg_h,     /* r11d */
    int iter);

  void addm(int disp, Register r1, Register r2);
  void gfmul(XMMRegister tmp0, XMMRegister t);
  void schoolbookAAD(int i, Register subkeyH, XMMRegister data, XMMRegister tmp0,
                     XMMRegister tmp1, XMMRegister tmp2, XMMRegister tmp3);
  void generateHtbl_one_block(Register htbl);
  void generateHtbl_eight_blocks(Register htbl);
 public:
  void sha256_AVX2(XMMRegister msg, XMMRegister state0, XMMRegister state1, XMMRegister msgtmp0,
                   XMMRegister msgtmp1, XMMRegister msgtmp2, XMMRegister msgtmp3, XMMRegister msgtmp4,
                   Register buf, Register state, Register ofs, Register limit, Register rsp,
                   bool multi_block, XMMRegister shuf_mask);
  void avx_ghash(Register state, Register htbl, Register data, Register blocks);
#endif

#ifdef _LP64
 private:
  void sha512_AVX2_one_round_compute(Register old_h, Register a, Register b, Register c, Register d,
                                     Register e, Register f, Register g, Register h, int iteration);

  void sha512_AVX2_one_round_and_schedule(XMMRegister xmm4, XMMRegister xmm5, XMMRegister xmm6, XMMRegister xmm7,
                                          Register a, Register b, Register c, Register d, Register e, Register f,
                                          Register g, Register h, int iteration);

  void addmq(int disp, Register r1, Register r2);
 public:
  void sha512_AVX2(XMMRegister msg, XMMRegister state0, XMMRegister state1, XMMRegister msgtmp0,
                   XMMRegister msgtmp1, XMMRegister msgtmp2, XMMRegister msgtmp3, XMMRegister msgtmp4,
                   Register buf, Register state, Register ofs, Register limit, Register rsp, bool multi_block,
                   XMMRegister shuf_mask);
 private:
  void roundEnc(XMMRegister key, int rnum);
  void lastroundEnc(XMMRegister key, int rnum);
  void roundDec(XMMRegister key, int rnum);
  void lastroundDec(XMMRegister key, int rnum);
  void ev_load_key(XMMRegister xmmdst, Register key, int offset, XMMRegister xmm_shuf_mask);

 public:
  void aesecb_encrypt(Register source_addr, Register dest_addr, Register key, Register len);
  void aesecb_decrypt(Register source_addr, Register dest_addr, Register key, Register len);
  void aesctr_encrypt(Register src_addr, Register dest_addr, Register key, Register counter,
                      Register len_reg, Register used, Register used_addr, Register saved_encCounter_start);

#endif

  void fast_sha1(XMMRegister abcd, XMMRegister e0, XMMRegister e1, XMMRegister msg0,
                 XMMRegister msg1, XMMRegister msg2, XMMRegister msg3, XMMRegister shuf_mask,
                 Register buf, Register state, Register ofs, Register limit, Register rsp,
                 bool multi_block);

#ifdef _LP64
  void fast_sha256(XMMRegister msg, XMMRegister state0, XMMRegister state1, XMMRegister msgtmp0,
                   XMMRegister msgtmp1, XMMRegister msgtmp2, XMMRegister msgtmp3, XMMRegister msgtmp4,
                   Register buf, Register state, Register ofs, Register limit, Register rsp,
                   bool multi_block, XMMRegister shuf_mask);
#else
  void fast_sha256(XMMRegister msg, XMMRegister state0, XMMRegister state1, XMMRegister msgtmp0,
                   XMMRegister msgtmp1, XMMRegister msgtmp2, XMMRegister msgtmp3, XMMRegister msgtmp4,
                   Register buf, Register state, Register ofs, Register limit, Register rsp,
                   bool multi_block);
#endif

  void fast_exp(XMMRegister xmm0, XMMRegister xmm1, XMMRegister xmm2, XMMRegister xmm3,
                XMMRegister xmm4, XMMRegister xmm5, XMMRegister xmm6, XMMRegister xmm7,
                Register rax, Register rcx, Register rdx, Register tmp);

#ifdef _LP64
  void fast_log(XMMRegister xmm0, XMMRegister xmm1, XMMRegister xmm2, XMMRegister xmm3,
                XMMRegister xmm4, XMMRegister xmm5, XMMRegister xmm6, XMMRegister xmm7,
                Register rax, Register rcx, Register rdx, Register tmp1, Register tmp2);
  void fast_log10(XMMRegister xmm0, XMMRegister xmm1, XMMRegister xmm2, XMMRegister xmm3,
                  XMMRegister xmm4, XMMRegister xmm5, XMMRegister xmm6, XMMRegister xmm7,
                  Register rax, Register rcx, Register rdx, Register r11);

  void fast_pow(XMMRegister xmm0, XMMRegister xmm1, XMMRegister xmm2, XMMRegister xmm3, XMMRegister xmm4,
                XMMRegister xmm5, XMMRegister xmm6, XMMRegister xmm7, Register rax, Register rcx,
                Register rdx, Register tmp1, Register tmp2, Register tmp3, Register tmp4);

  void fast_sin(XMMRegister xmm0, XMMRegister xmm1, XMMRegister xmm2, XMMRegister xmm3,
                XMMRegister xmm4, XMMRegister xmm5, XMMRegister xmm6, XMMRegister xmm7,
                Register rax, Register rbx, Register rcx, Register rdx, Register tmp1, Register tmp2,
                Register tmp3, Register tmp4);

  void fast_cos(XMMRegister xmm0, XMMRegister xmm1, XMMRegister xmm2, XMMRegister xmm3,
                XMMRegister xmm4, XMMRegister xmm5, XMMRegister xmm6, XMMRegister xmm7,
                Register rax, Register rcx, Register rdx, Register tmp1,
                Register tmp2, Register tmp3, Register tmp4);
  void fast_tan(XMMRegister xmm0, XMMRegister xmm1, XMMRegister xmm2, XMMRegister xmm3,
                XMMRegister xmm4, XMMRegister xmm5, XMMRegister xmm6, XMMRegister xmm7,
                Register rax, Register rcx, Register rdx, Register tmp1,
                Register tmp2, Register tmp3, Register tmp4);
#else
  void fast_log(XMMRegister xmm0, XMMRegister xmm1, XMMRegister xmm2, XMMRegister xmm3,
                XMMRegister xmm4, XMMRegister xmm5, XMMRegister xmm6, XMMRegister xmm7,
                Register rax, Register rcx, Register rdx, Register tmp1);

  void fast_log10(XMMRegister xmm0, XMMRegister xmm1, XMMRegister xmm2, XMMRegister xmm3,
                  XMMRegister xmm4, XMMRegister xmm5, XMMRegister xmm6, XMMRegister xmm7,
                  Register rax, Register rcx, Register rdx, Register tmp);

  void fast_pow(XMMRegister xmm0, XMMRegister xmm1, XMMRegister xmm2, XMMRegister xmm3, XMMRegister xmm4,
                XMMRegister xmm5, XMMRegister xmm6, XMMRegister xmm7, Register rax, Register rcx,
                Register rdx, Register tmp);

  void fast_sin(XMMRegister xmm0, XMMRegister xmm1, XMMRegister xmm2, XMMRegister xmm3,
                XMMRegister xmm4, XMMRegister xmm5, XMMRegister xmm6, XMMRegister xmm7,
                Register rax, Register rbx, Register rdx);

  void fast_cos(XMMRegister xmm0, XMMRegister xmm1, XMMRegister xmm2, XMMRegister xmm3,
                XMMRegister xmm4, XMMRegister xmm5, XMMRegister xmm6, XMMRegister xmm7,
                Register rax, Register rcx, Register rdx, Register tmp);

  void libm_sincos_huge(XMMRegister xmm0, XMMRegister xmm1, Register eax, Register ecx,
                        Register edx, Register ebx, Register esi, Register edi,
                        Register ebp, Register esp);

  void libm_reduce_pi04l(Register eax, Register ecx, Register edx, Register ebx,
                         Register esi, Register edi, Register ebp, Register esp);

  void libm_tancot_huge(XMMRegister xmm0, XMMRegister xmm1, Register eax, Register ecx,
                        Register edx, Register ebx, Register esi, Register edi,
                        Register ebp, Register esp);

  void fast_tan(XMMRegister xmm0, XMMRegister xmm1, XMMRegister xmm2, XMMRegister xmm3,
                XMMRegister xmm4, XMMRegister xmm5, XMMRegister xmm6, XMMRegister xmm7,
                Register rax, Register rcx, Register rdx, Register tmp);
#endif

 private:

  // these are private because users should be doing movflt/movdbl

  void movss(XMMRegister dst, XMMRegister src) { Assembler::movss(dst, src); }
  void movss(Address dst, XMMRegister src)     { Assembler::movss(dst, src); }
  void movss(XMMRegister dst, Address src)     { Assembler::movss(dst, src); }
  void movss(XMMRegister dst, AddressLiteral src);

  void movlpd(XMMRegister dst, Address src)    { Assembler::movlpd(dst, src); }
  void movlpd(XMMRegister dst, AddressLiteral src);

 public:

  void addsd(XMMRegister dst, XMMRegister src)    { Assembler::addsd(dst, src); }
  void addsd(XMMRegister dst, Address src)        { Assembler::addsd(dst, src); }
  void addsd(XMMRegister dst, AddressLiteral src);

  void addss(XMMRegister dst, XMMRegister src)    { Assembler::addss(dst, src); }
  void addss(XMMRegister dst, Address src)        { Assembler::addss(dst, src); }
  void addss(XMMRegister dst, AddressLiteral src);

  void addpd(XMMRegister dst, XMMRegister src)    { Assembler::addpd(dst, src); }
  void addpd(XMMRegister dst, Address src)        { Assembler::addpd(dst, src); }
  void addpd(XMMRegister dst, AddressLiteral src);

  void divsd(XMMRegister dst, XMMRegister src)    { Assembler::divsd(dst, src); }
  void divsd(XMMRegister dst, Address src)        { Assembler::divsd(dst, src); }
  void divsd(XMMRegister dst, AddressLiteral src);

  void divss(XMMRegister dst, XMMRegister src)    { Assembler::divss(dst, src); }
  void divss(XMMRegister dst, Address src)        { Assembler::divss(dst, src); }
  void divss(XMMRegister dst, AddressLiteral src);

  // Move Unaligned Double Quadword
  void movdqu(Address     dst, XMMRegister src);
  void movdqu(XMMRegister dst, Address src);
  void movdqu(XMMRegister dst, XMMRegister src);
  void movdqu(XMMRegister dst, AddressLiteral src, Register scratchReg = rscratch1);
  // AVX Unaligned forms
  void vmovdqu(Address     dst, XMMRegister src);
  void vmovdqu(XMMRegister dst, Address src);
  void vmovdqu(XMMRegister dst, XMMRegister src);
  void vmovdqu(XMMRegister dst, AddressLiteral src, Register scratch_reg = rscratch1);
  void evmovdquq(XMMRegister dst, Address src, int vector_len) { Assembler::evmovdquq(dst, src, vector_len); }
  void evmovdquq(XMMRegister dst, XMMRegister src, int vector_len) { Assembler::evmovdquq(dst, src, vector_len); }
  void evmovdquq(Address dst, XMMRegister src, int vector_len) { Assembler::evmovdquq(dst, src, vector_len); }
  void evmovdquq(XMMRegister dst, AddressLiteral src, int vector_len, Register rscratch);

  // Move Aligned Double Quadword
  void movdqa(XMMRegister dst, Address src)       { Assembler::movdqa(dst, src); }
  void movdqa(XMMRegister dst, XMMRegister src)   { Assembler::movdqa(dst, src); }
  void movdqa(XMMRegister dst, AddressLiteral src);

  void movsd(XMMRegister dst, XMMRegister src) { Assembler::movsd(dst, src); }
  void movsd(Address dst, XMMRegister src)     { Assembler::movsd(dst, src); }
  void movsd(XMMRegister dst, Address src)     { Assembler::movsd(dst, src); }
  void movsd(XMMRegister dst, AddressLiteral src);

  void mulpd(XMMRegister dst, XMMRegister src)    { Assembler::mulpd(dst, src); }
  void mulpd(XMMRegister dst, Address src)        { Assembler::mulpd(dst, src); }
  void mulpd(XMMRegister dst, AddressLiteral src);

  void mulsd(XMMRegister dst, XMMRegister src)    { Assembler::mulsd(dst, src); }
  void mulsd(XMMRegister dst, Address src)        { Assembler::mulsd(dst, src); }
  void mulsd(XMMRegister dst, AddressLiteral src);

  void mulss(XMMRegister dst, XMMRegister src)    { Assembler::mulss(dst, src); }
  void mulss(XMMRegister dst, Address src)        { Assembler::mulss(dst, src); }
  void mulss(XMMRegister dst, AddressLiteral src);
  // Carry-Less Multiplication Quadword
  void pclmulldq(XMMRegister dst, XMMRegister src) {
    // 0x00 - multiply lower 64 bits [0:63]
    Assembler::pclmulqdq(dst, src, 0x00);
  }
  void pclmulhdq(XMMRegister dst, XMMRegister src) {
    // 0x11 - multiply upper 64 bits [64:127]
    Assembler::pclmulqdq(dst, src, 0x11);
  }

  void pcmpeqb(XMMRegister dst, XMMRegister src);
  void pcmpeqw(XMMRegister dst, XMMRegister src);

  void pcmpestri(XMMRegister dst, Address src, int imm8);
  void pcmpestri(XMMRegister dst, XMMRegister src, int imm8);

  void pmovzxbw(XMMRegister dst, XMMRegister src);
  void pmovzxbw(XMMRegister dst, Address src);

  void pmovmskb(Register dst, XMMRegister src);

  void ptest(XMMRegister dst, XMMRegister src);

  void sqrtsd(XMMRegister dst, XMMRegister src)    { Assembler::sqrtsd(dst, src); }
  void sqrtsd(XMMRegister dst, Address src)        { Assembler::sqrtsd(dst, src); }
  void sqrtsd(XMMRegister dst, AddressLiteral src);

  void roundsd(XMMRegister dst, XMMRegister src, int32_t rmode)   { Assembler::roundsd(dst, src, rmode); }
  void roundsd(XMMRegister dst, Address src, int32_t rmode)       { Assembler::roundsd(dst, src, rmode); }
  void roundsd(XMMRegister dst, AddressLiteral src, int32_t rmode, Register scratch_reg);

  void sqrtss(XMMRegister dst, XMMRegister src)    { Assembler::sqrtss(dst, src); }
  void sqrtss(XMMRegister dst, Address src)        { Assembler::sqrtss(dst, src); }
  void sqrtss(XMMRegister dst, AddressLiteral src);

  void subsd(XMMRegister dst, XMMRegister src)    { Assembler::subsd(dst, src); }
  void subsd(XMMRegister dst, Address src)        { Assembler::subsd(dst, src); }
  void subsd(XMMRegister dst, AddressLiteral src);

  void subss(XMMRegister dst, XMMRegister src)    { Assembler::subss(dst, src); }
  void subss(XMMRegister dst, Address src)        { Assembler::subss(dst, src); }
  void subss(XMMRegister dst, AddressLiteral src);

  void ucomiss(XMMRegister dst, XMMRegister src) { Assembler::ucomiss(dst, src); }
  void ucomiss(XMMRegister dst, Address src)     { Assembler::ucomiss(dst, src); }
  void ucomiss(XMMRegister dst, AddressLiteral src);

  void ucomisd(XMMRegister dst, XMMRegister src) { Assembler::ucomisd(dst, src); }
  void ucomisd(XMMRegister dst, Address src)     { Assembler::ucomisd(dst, src); }
  void ucomisd(XMMRegister dst, AddressLiteral src);

  // Bitwise Logical XOR of Packed Double-Precision Floating-Point Values
  void xorpd(XMMRegister dst, XMMRegister src);
  void xorpd(XMMRegister dst, Address src)     { Assembler::xorpd(dst, src); }
  void xorpd(XMMRegister dst, AddressLiteral src, Register scratch_reg = rscratch1);

  // Bitwise Logical XOR of Packed Single-Precision Floating-Point Values
  void xorps(XMMRegister dst, XMMRegister src);
  void xorps(XMMRegister dst, Address src)     { Assembler::xorps(dst, src); }
  void xorps(XMMRegister dst, AddressLiteral src, Register scratch_reg = rscratch1);

  // Shuffle Bytes
  void pshufb(XMMRegister dst, XMMRegister src) { Assembler::pshufb(dst, src); }
  void pshufb(XMMRegister dst, Address src)     { Assembler::pshufb(dst, src); }
  void pshufb(XMMRegister dst, AddressLiteral src);
  // AVX 3-operands instructions
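  // Note (added; not in the original header): the VEX-encoded forms below are non-destructive
  // three-operand operations, dst = nds <op> src, so the wrappers keep the Assembler signature,
  // e.g. vaddsd(xmm0, xmm1, xmm2) computes xmm1 + xmm2 into xmm0 without clobbering xmm1.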
  void vaddsd(XMMRegister dst, XMMRegister nds, XMMRegister src) { Assembler::vaddsd(dst, nds, src); }
  void vaddsd(XMMRegister dst, XMMRegister nds, Address src)     { Assembler::vaddsd(dst, nds, src); }
  void vaddsd(XMMRegister dst, XMMRegister nds, AddressLiteral src);

  void vaddss(XMMRegister dst, XMMRegister nds, XMMRegister src) { Assembler::vaddss(dst, nds, src); }
  void vaddss(XMMRegister dst, XMMRegister nds, Address src)     { Assembler::vaddss(dst, nds, src); }
  void vaddss(XMMRegister dst, XMMRegister nds, AddressLiteral src);

  void vabsss(XMMRegister dst, XMMRegister nds, XMMRegister src, AddressLiteral negate_field, int vector_len);
  void vabssd(XMMRegister dst, XMMRegister nds, XMMRegister src, AddressLiteral negate_field, int vector_len);

  void vpaddb(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
  void vpaddb(XMMRegister dst, XMMRegister nds, Address src, int vector_len);

  void vpaddw(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
  void vpaddw(XMMRegister dst, XMMRegister nds, Address src, int vector_len);

  void vpaddd(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { Assembler::vpaddd(dst, nds, src, vector_len); }
  void vpaddd(XMMRegister dst, XMMRegister nds, Address src, int vector_len) { Assembler::vpaddd(dst, nds, src, vector_len); }
  void vpaddd(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch);

  void vpand(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { Assembler::vpand(dst, nds, src, vector_len); }
  void vpand(XMMRegister dst, XMMRegister nds, Address src, int vector_len) { Assembler::vpand(dst, nds, src, vector_len); }
  void vpand(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register scratch_reg = rscratch1);

  void vpbroadcastw(XMMRegister dst, XMMRegister src, int vector_len);
  void vpbroadcastw(XMMRegister dst, Address src, int vector_len) { Assembler::vpbroadcastw(dst, src, vector_len); }

  void vpcmpeqb(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);

  void vpcmpeqw(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);

  void vpmovzxbw(XMMRegister dst, Address src, int vector_len);
  void vpmovzxbw(XMMRegister dst, XMMRegister src, int vector_len) { Assembler::vpmovzxbw(dst, src, vector_len); }

  void vpmovmskb(Register dst, XMMRegister src);

  void vpmullw(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
  void vpmullw(XMMRegister dst, XMMRegister nds, Address src, int vector_len);

  void vpsubb(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
  void vpsubb(XMMRegister dst, XMMRegister nds, Address src, int vector_len);

  void vpsubw(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
  void vpsubw(XMMRegister dst, XMMRegister nds, Address src, int vector_len);

  void vpsraw(XMMRegister dst, XMMRegister nds, XMMRegister shift, int vector_len);
  void vpsraw(XMMRegister dst, XMMRegister nds, int shift, int vector_len);

  void evpsraq(XMMRegister dst, XMMRegister nds, XMMRegister shift, int vector_len);
  void evpsraq(XMMRegister dst, XMMRegister nds, int shift, int vector_len);

  void vpsrlw(XMMRegister dst, XMMRegister nds, XMMRegister shift, int vector_len);
  void vpsrlw(XMMRegister dst, XMMRegister nds, int shift, int vector_len);

  void vpsllw(XMMRegister dst, XMMRegister nds, XMMRegister shift, int vector_len);
  void vpsllw(XMMRegister dst, XMMRegister nds, int shift, int vector_len);
vptest(XMMRegister dst, XMMRegister src); 1285 1286 void punpcklbw(XMMRegister dst, XMMRegister src); 1287 void punpcklbw(XMMRegister dst, Address src) { Assembler::punpcklbw(dst, src); } 1288 1289 void pshufd(XMMRegister dst, Address src, int mode); 1290 void pshufd(XMMRegister dst, XMMRegister src, int mode) { Assembler::pshufd(dst, src, mode); } 1291 1292 void pshuflw(XMMRegister dst, XMMRegister src, int mode); 1293 void pshuflw(XMMRegister dst, Address src, int mode) { Assembler::pshuflw(dst, src, mode); } 1294 1295 void vandpd(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { Assembler::vandpd(dst, nds, src, vector_len); } 1296 void vandpd(XMMRegister dst, XMMRegister nds, Address src, int vector_len) { Assembler::vandpd(dst, nds, src, vector_len); } 1297 void vandpd(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register scratch_reg = rscratch1); 1298 1299 void vandps(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { Assembler::vandps(dst, nds, src, vector_len); } 1300 void vandps(XMMRegister dst, XMMRegister nds, Address src, int vector_len) { Assembler::vandps(dst, nds, src, vector_len); } 1301 void vandps(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register scratch_reg = rscratch1); 1302 1303 void vdivsd(XMMRegister dst, XMMRegister nds, XMMRegister src) { Assembler::vdivsd(dst, nds, src); } 1304 void vdivsd(XMMRegister dst, XMMRegister nds, Address src) { Assembler::vdivsd(dst, nds, src); } 1305 void vdivsd(XMMRegister dst, XMMRegister nds, AddressLiteral src); 1306 1307 void vdivss(XMMRegister dst, XMMRegister nds, XMMRegister src) { Assembler::vdivss(dst, nds, src); } 1308 void vdivss(XMMRegister dst, XMMRegister nds, Address src) { Assembler::vdivss(dst, nds, src); } 1309 void vdivss(XMMRegister dst, XMMRegister nds, AddressLiteral src); 1310 1311 void vmulsd(XMMRegister dst, XMMRegister nds, XMMRegister src) { Assembler::vmulsd(dst, nds, src); } 1312 void vmulsd(XMMRegister dst, XMMRegister nds, Address src) { Assembler::vmulsd(dst, nds, src); } 1313 void vmulsd(XMMRegister dst, XMMRegister nds, AddressLiteral src); 1314 1315 void vmulss(XMMRegister dst, XMMRegister nds, XMMRegister src) { Assembler::vmulss(dst, nds, src); } 1316 void vmulss(XMMRegister dst, XMMRegister nds, Address src) { Assembler::vmulss(dst, nds, src); } 1317 void vmulss(XMMRegister dst, XMMRegister nds, AddressLiteral src); 1318 1319 void vsubsd(XMMRegister dst, XMMRegister nds, XMMRegister src) { Assembler::vsubsd(dst, nds, src); } 1320 void vsubsd(XMMRegister dst, XMMRegister nds, Address src) { Assembler::vsubsd(dst, nds, src); } 1321 void vsubsd(XMMRegister dst, XMMRegister nds, AddressLiteral src); 1322 1323 void vsubss(XMMRegister dst, XMMRegister nds, XMMRegister src) { Assembler::vsubss(dst, nds, src); } 1324 void vsubss(XMMRegister dst, XMMRegister nds, Address src) { Assembler::vsubss(dst, nds, src); } 1325 void vsubss(XMMRegister dst, XMMRegister nds, AddressLiteral src); 1326 1327 void vnegatess(XMMRegister dst, XMMRegister nds, AddressLiteral src); 1328 void vnegatesd(XMMRegister dst, XMMRegister nds, AddressLiteral src); 1329 1330 // AVX Vector instructions 1331 1332 void vxorpd(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { Assembler::vxorpd(dst, nds, src, vector_len); } 1333 void vxorpd(XMMRegister dst, XMMRegister nds, Address src, int vector_len) { Assembler::vxorpd(dst, nds, src, vector_len); } 1334 void vxorpd(XMMRegister dst, XMMRegister nds, AddressLiteral src, int 
vector_len, Register scratch_reg = rscratch1); 1335 1336 void vxorps(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { Assembler::vxorps(dst, nds, src, vector_len); } 1337 void vxorps(XMMRegister dst, XMMRegister nds, Address src, int vector_len) { Assembler::vxorps(dst, nds, src, vector_len); } 1338 void vxorps(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register scratch_reg = rscratch1); 1339 1340 void vpxor(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { 1341 if (UseAVX > 1 || (vector_len < 1)) // vpxor 256 bit is available only in AVX2 1342 Assembler::vpxor(dst, nds, src, vector_len); 1343 else 1344 Assembler::vxorpd(dst, nds, src, vector_len); 1345 } 1346 void vpxor(XMMRegister dst, XMMRegister nds, Address src, int vector_len) { 1347 if (UseAVX > 1 || (vector_len < 1)) // vpxor 256 bit is available only in AVX2 1348 Assembler::vpxor(dst, nds, src, vector_len); 1349 else 1350 Assembler::vxorpd(dst, nds, src, vector_len); 1351 } 1352 void vpxor(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register scratch_reg = rscratch1); 1353 1354 // Simple version for AVX2 256bit vectors 1355 void vpxor(XMMRegister dst, XMMRegister src) { Assembler::vpxor(dst, dst, src, true); } 1356 void vpxor(XMMRegister dst, Address src) { Assembler::vpxor(dst, dst, src, true); } 1357 1358 void vinserti128(XMMRegister dst, XMMRegister nds, XMMRegister src, uint8_t imm8) { 1359 if (UseAVX > 2 && VM_Version::supports_avx512novl()) { 1360 Assembler::vinserti32x4(dst, dst, src, imm8); 1361 } else if (UseAVX > 1) { 1362 // vinserti128 is available only in AVX2 1363 Assembler::vinserti128(dst, nds, src, imm8); 1364 } else { 1365 Assembler::vinsertf128(dst, nds, src, imm8); 1366 } 1367 } 1368 1369 void vinserti128(XMMRegister dst, XMMRegister nds, Address src, uint8_t imm8) { 1370 if (UseAVX > 2 && VM_Version::supports_avx512novl()) { 1371 Assembler::vinserti32x4(dst, dst, src, imm8); 1372 } else if (UseAVX > 1) { 1373 // vinserti128 is available only in AVX2 1374 Assembler::vinserti128(dst, nds, src, imm8); 1375 } else { 1376 Assembler::vinsertf128(dst, nds, src, imm8); 1377 } 1378 } 1379 1380 void vextracti128(XMMRegister dst, XMMRegister src, uint8_t imm8) { 1381 if (UseAVX > 2 && VM_Version::supports_avx512novl()) { 1382 Assembler::vextracti32x4(dst, src, imm8); 1383 } else if (UseAVX > 1) { 1384 // vextracti128 is available only in AVX2 1385 Assembler::vextracti128(dst, src, imm8); 1386 } else { 1387 Assembler::vextractf128(dst, src, imm8); 1388 } 1389 } 1390 1391 void vextracti128(Address dst, XMMRegister src, uint8_t imm8) { 1392 if (UseAVX > 2 && VM_Version::supports_avx512novl()) { 1393 Assembler::vextracti32x4(dst, src, imm8); 1394 } else if (UseAVX > 1) { 1395 // vextracti128 is available only in AVX2 1396 Assembler::vextracti128(dst, src, imm8); 1397 } else { 1398 Assembler::vextractf128(dst, src, imm8); 1399 } 1400 } 1401 1402 // 128bit copy to/from high 128 bits of 256bit (YMM) vector registers 1403 void vinserti128_high(XMMRegister dst, XMMRegister src) { 1404 vinserti128(dst, dst, src, 1); 1405 } 1406 void vinserti128_high(XMMRegister dst, Address src) { 1407 vinserti128(dst, dst, src, 1); 1408 } 1409 void vextracti128_high(XMMRegister dst, XMMRegister src) { 1410 vextracti128(dst, src, 1); 1411 } 1412 void vextracti128_high(Address dst, XMMRegister src) { 1413 vextracti128(dst, src, 1); 1414 } 1415 1416 void vinsertf128_high(XMMRegister dst, XMMRegister src) { 1417 if (UseAVX > 2 && 
VM_Version::supports_avx512novl()) { 1418 Assembler::vinsertf32x4(dst, dst, src, 1); 1419 } else { 1420 Assembler::vinsertf128(dst, dst, src, 1); 1421 } 1422 } 1423 1424 void vinsertf128_high(XMMRegister dst, Address src) { 1425 if (UseAVX > 2 && VM_Version::supports_avx512novl()) { 1426 Assembler::vinsertf32x4(dst, dst, src, 1); 1427 } else { 1428 Assembler::vinsertf128(dst, dst, src, 1); 1429 } 1430 } 1431 1432 void vextractf128_high(XMMRegister dst, XMMRegister src) { 1433 if (UseAVX > 2 && VM_Version::supports_avx512novl()) { 1434 Assembler::vextractf32x4(dst, src, 1); 1435 } else { 1436 Assembler::vextractf128(dst, src, 1); 1437 } 1438 } 1439 1440 void vextractf128_high(Address dst, XMMRegister src) { 1441 if (UseAVX > 2 && VM_Version::supports_avx512novl()) { 1442 Assembler::vextractf32x4(dst, src, 1); 1443 } else { 1444 Assembler::vextractf128(dst, src, 1); 1445 } 1446 } 1447 1448 // 256bit copy to/from high 256 bits of 512bit (ZMM) vector registers 1449 void vinserti64x4_high(XMMRegister dst, XMMRegister src) { 1450 Assembler::vinserti64x4(dst, dst, src, 1); 1451 } 1452 void vinsertf64x4_high(XMMRegister dst, XMMRegister src) { 1453 Assembler::vinsertf64x4(dst, dst, src, 1); 1454 } 1455 void vextracti64x4_high(XMMRegister dst, XMMRegister src) { 1456 Assembler::vextracti64x4(dst, src, 1); 1457 } 1458 void vextractf64x4_high(XMMRegister dst, XMMRegister src) { 1459 Assembler::vextractf64x4(dst, src, 1); 1460 } 1461 void vextractf64x4_high(Address dst, XMMRegister src) { 1462 Assembler::vextractf64x4(dst, src, 1); 1463 } 1464 void vinsertf64x4_high(XMMRegister dst, Address src) { 1465 Assembler::vinsertf64x4(dst, dst, src, 1); 1466 } 1467 1468 // 128bit copy to/from low 128 bits of 256bit (YMM) vector registers 1469 void vinserti128_low(XMMRegister dst, XMMRegister src) { 1470 vinserti128(dst, dst, src, 0); 1471 } 1472 void vinserti128_low(XMMRegister dst, Address src) { 1473 vinserti128(dst, dst, src, 0); 1474 } 1475 void vextracti128_low(XMMRegister dst, XMMRegister src) { 1476 vextracti128(dst, src, 0); 1477 } 1478 void vextracti128_low(Address dst, XMMRegister src) { 1479 vextracti128(dst, src, 0); 1480 } 1481 1482 void vinsertf128_low(XMMRegister dst, XMMRegister src) { 1483 if (UseAVX > 2 && VM_Version::supports_avx512novl()) { 1484 Assembler::vinsertf32x4(dst, dst, src, 0); 1485 } else { 1486 Assembler::vinsertf128(dst, dst, src, 0); 1487 } 1488 } 1489 1490 void vinsertf128_low(XMMRegister dst, Address src) { 1491 if (UseAVX > 2 && VM_Version::supports_avx512novl()) { 1492 Assembler::vinsertf32x4(dst, dst, src, 0); 1493 } else { 1494 Assembler::vinsertf128(dst, dst, src, 0); 1495 } 1496 } 1497 1498 void vextractf128_low(XMMRegister dst, XMMRegister src) { 1499 if (UseAVX > 2 && VM_Version::supports_avx512novl()) { 1500 Assembler::vextractf32x4(dst, src, 0); 1501 } else { 1502 Assembler::vextractf128(dst, src, 0); 1503 } 1504 } 1505 1506 void vextractf128_low(Address dst, XMMRegister src) { 1507 if (UseAVX > 2 && VM_Version::supports_avx512novl()) { 1508 Assembler::vextractf32x4(dst, src, 0); 1509 } else { 1510 Assembler::vextractf128(dst, src, 0); 1511 } 1512 } 1513 1514 // 256bit copy to/from low 256 bits of 512bit (ZMM) vector registers 1515 void vinserti64x4_low(XMMRegister dst, XMMRegister src) { 1516 Assembler::vinserti64x4(dst, dst, src, 0); 1517 } 1518 void vinsertf64x4_low(XMMRegister dst, XMMRegister src) { 1519 Assembler::vinsertf64x4(dst, dst, src, 0); 1520 } 1521 void vextracti64x4_low(XMMRegister dst, XMMRegister src) { 1522 Assembler::vextracti64x4(dst, src, 0); 
1523 } 1524 void vextractf64x4_low(XMMRegister dst, XMMRegister src) { 1525 Assembler::vextractf64x4(dst, src, 0); 1526 } 1527 void vextractf64x4_low(Address dst, XMMRegister src) { 1528 Assembler::vextractf64x4(dst, src, 0); 1529 } 1530 void vinsertf64x4_low(XMMRegister dst, Address src) { 1531 Assembler::vinsertf64x4(dst, dst, src, 0); 1532 } 1533 1534 // Carry-Less Multiplication Quadword 1535 void vpclmulldq(XMMRegister dst, XMMRegister nds, XMMRegister src) { 1536 // 0x00 - multiply lower 64 bits [0:63] 1537 Assembler::vpclmulqdq(dst, nds, src, 0x00); 1538 } 1539 void vpclmulhdq(XMMRegister dst, XMMRegister nds, XMMRegister src) { 1540 // 0x11 - multiply upper 64 bits [64:127] 1541 Assembler::vpclmulqdq(dst, nds, src, 0x11); 1542 } 1543 void vpclmullqhqdq(XMMRegister dst, XMMRegister nds, XMMRegister src) { 1544 // 0x10 - multiply nds[0:63] and src[64:127] 1545 Assembler::vpclmulqdq(dst, nds, src, 0x10); 1546 } 1547 void vpclmulhqlqdq(XMMRegister dst, XMMRegister nds, XMMRegister src) { 1548 //0x01 - multiply nds[64:127] and src[0:63] 1549 Assembler::vpclmulqdq(dst, nds, src, 0x01); 1550 } 1551 1552 void evpclmulldq(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { 1553 // 0x00 - multiply lower 64 bits [0:63] 1554 Assembler::evpclmulqdq(dst, nds, src, 0x00, vector_len); 1555 } 1556 void evpclmulhdq(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { 1557 // 0x11 - multiply upper 64 bits [64:127] 1558 Assembler::evpclmulqdq(dst, nds, src, 0x11, vector_len); 1559 } 1560 1561 // Data 1562 1563 void cmov32( Condition cc, Register dst, Address src); 1564 void cmov32( Condition cc, Register dst, Register src); 1565 1566 void cmov( Condition cc, Register dst, Register src) { cmovptr(cc, dst, src); } 1567 1568 void cmovptr(Condition cc, Register dst, Address src) { LP64_ONLY(cmovq(cc, dst, src)) NOT_LP64(cmov32(cc, dst, src)); } 1569 void cmovptr(Condition cc, Register dst, Register src) { LP64_ONLY(cmovq(cc, dst, src)) NOT_LP64(cmov32(cc, dst, src)); } 1570 1571 void movoop(Register dst, jobject obj); 1572 void movoop(Address dst, jobject obj); 1573 1574 void mov_metadata(Register dst, Metadata* obj); 1575 void mov_metadata(Address dst, Metadata* obj); 1576 1577 void movptr(ArrayAddress dst, Register src); 1578 // can this do an lea? 1579 void movptr(Register dst, ArrayAddress src); 1580 1581 void movptr(Register dst, Address src); 1582 1583 #ifdef _LP64 1584 void movptr(Register dst, AddressLiteral src, Register scratch=rscratch1); 1585 #else 1586 void movptr(Register dst, AddressLiteral src, Register scratch=noreg); // Scratch reg is ignored in 32-bit 1587 #endif 1588 1589 void movptr(Register dst, intptr_t src); 1590 void movptr(Register dst, Register src); 1591 void movptr(Address dst, intptr_t src); 1592 1593 void movptr(Address dst, Register src); 1594 1595 void movptr(Register dst, RegisterOrConstant src) { 1596 if (src.is_constant()) movptr(dst, src.as_constant()); 1597 else movptr(dst, src.as_register()); 1598 } 1599 1600 #ifdef _LP64 1601 // Generally the next two are only used for moving NULL 1602 // Although there are situations in initializing the mark word where 1603 // they could be used. They are dangerous. 1604 1605 // They only exist on LP64 so that int32_t and intptr_t are not the same 1606 // and we have ambiguous declarations. 
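  // Added editorial note (illustration only, not part of the original interface):
  // on a 32-bit build, intptr_t and int32_t are typically the same 32-bit type,
  // so the two declarations below would collide with (or be ambiguous against)
  // the movptr(Register, intptr_t) / movptr(Address, intptr_t) overloads above.
  // A hypothetical caller sketch, using the usual '__' masm shorthand:
  //
  //   __ movptr(Address(rsp, 0), (int32_t) NULL_WORD);  // 32-bit immediate store
  //
  // On LP64 this needs the dedicated int32_t overload; on 32-bit the intptr_t
  // overload already covers it.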
1607
1608   void movptr(Address dst, int32_t imm32);
1609   void movptr(Register dst, int32_t imm32);
1610 #endif // _LP64
1611
1612   // to avoid hiding movl
1613   void mov32(AddressLiteral dst, Register src);
1614   void mov32(Register dst, AddressLiteral src);
1615
1616   // to avoid hiding movb
1617   void movbyte(ArrayAddress dst, int src);
1618
1619   // Import other mov() methods from the parent class or else
1620   // they will be hidden by the following overriding declaration.
1621   using Assembler::movdl;
1622   using Assembler::movq;
1623   void movdl(XMMRegister dst, AddressLiteral src);
1624   void movq(XMMRegister dst, AddressLiteral src);
1625
1626   // Can push value or effective address
1627   void pushptr(AddressLiteral src);
1628
1629   void pushptr(Address src) { LP64_ONLY(pushq(src)) NOT_LP64(pushl(src)); }
1630   void popptr(Address src) { LP64_ONLY(popq(src)) NOT_LP64(popl(src)); }
1631
1632   void pushoop(jobject obj);
1633   void pushklass(Metadata* obj);
1634
1635   // sign extend as needed from an 'l' (32-bit) value to a ptr sized element
1636   void movl2ptr(Register dst, Address src) { LP64_ONLY(movslq(dst, src)) NOT_LP64(movl(dst, src)); }
1637   void movl2ptr(Register dst, Register src) { LP64_ONLY(movslq(dst, src)) NOT_LP64(if (dst != src) movl(dst, src)); }
1638
1639 #ifdef COMPILER2
1640   // Generic instruction support for use in .ad files C2 code generation
1641   void vabsnegd(int opcode, XMMRegister dst, XMMRegister src, Register scr);
1642   void vabsnegd(int opcode, XMMRegister dst, XMMRegister src, int vector_len, Register scr);
1643   void vabsnegf(int opcode, XMMRegister dst, XMMRegister src, Register scr);
1644   void vabsnegf(int opcode, XMMRegister dst, XMMRegister src, int vector_len, Register scr);
1645   void vextendbw(bool sign, XMMRegister dst, XMMRegister src, int vector_len);
1646   void vextendbw(bool sign, XMMRegister dst, XMMRegister src);
1647   void vshiftd(int opcode, XMMRegister dst, XMMRegister src);
1648   void vshiftd(int opcode, XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
1649   void vshiftw(int opcode, XMMRegister dst, XMMRegister src);
1650   void vshiftw(int opcode, XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
1651   void vshiftq(int opcode, XMMRegister dst, XMMRegister src);
1652   void vshiftq(int opcode, XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
1653 #endif
1654
1655   // C2 compiled method's prolog code.
1656   void verified_entry(int framesize, int stack_bang_size, bool fp_mode_24b, bool is_stub);
1657
1658   // clear memory of size 'cnt' qwords, starting at 'base';
1659   // if 'is_large' is set, do not try to produce a short loop
1660   void clear_mem(Register base, Register cnt, Register rtmp, XMMRegister xtmp, bool is_large);
1661
1662   // clear memory of size 'cnt' qwords, starting at 'base' using XMM/YMM registers
1663   void xmm_clear_mem(Register base, Register cnt, XMMRegister xtmp);
1664
1665 #ifdef COMPILER2
1666   void string_indexof_char(Register str1, Register cnt1, Register ch, Register result,
1667                            XMMRegister vec1, XMMRegister vec2, XMMRegister vec3, Register tmp);
1668
1669   // IndexOf strings.
1670   // Small strings are loaded through the stack if they cross a page boundary.
1671   void string_indexof(Register str1, Register str2,
1672                       Register cnt1, Register cnt2,
1673                       int int_cnt2, Register result,
1674                       XMMRegister vec, Register tmp,
1675                       int ae);
1676
1677   // IndexOf for constant substrings with size >= 8 elements
1678   // which don't need to be loaded through the stack.
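  // Illustrative note (an assumption inferred from the comments above, not a
  // statement of the code generator's exact policy): int_cnt2 carries the
  // substring length when it is a compile-time constant (or -1 when unknown),
  // so a caller could dispatch roughly as
  //
  //   if (int_cnt2 >= 8) {
  //     __ string_indexofC8(str1, str2, cnt1, cnt2, int_cnt2, result, vec, tmp, ae);
  //   } else {
  //     __ string_indexof (str1, str2, cnt1, cnt2, int_cnt2, result, vec, tmp, ae);
  //   }
  //
  // with the unknown-length case always taking the string_indexof path.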
1679 void string_indexofC8(Register str1, Register str2, 1680 Register cnt1, Register cnt2, 1681 int int_cnt2, Register result, 1682 XMMRegister vec, Register tmp, 1683 int ae); 1684 1685 // Smallest code: we don't need to load through stack, 1686 // check string tail. 1687 1688 // helper function for string_compare 1689 void load_next_elements(Register elem1, Register elem2, Register str1, Register str2, 1690 Address::ScaleFactor scale, Address::ScaleFactor scale1, 1691 Address::ScaleFactor scale2, Register index, int ae); 1692 // Compare strings. 1693 void string_compare(Register str1, Register str2, 1694 Register cnt1, Register cnt2, Register result, 1695 XMMRegister vec1, int ae); 1696 1697 // Search for Non-ASCII character (Negative byte value) in a byte array, 1698 // return true if it has any and false otherwise. 1699 void has_negatives(Register ary1, Register len, 1700 Register result, Register tmp1, 1701 XMMRegister vec1, XMMRegister vec2); 1702 1703 // Compare char[] or byte[] arrays. 1704 void arrays_equals(bool is_array_equ, Register ary1, Register ary2, 1705 Register limit, Register result, Register chr, 1706 XMMRegister vec1, XMMRegister vec2, bool is_char); 1707 1708 #endif 1709 1710 // Fill primitive arrays 1711 void generate_fill(BasicType t, bool aligned, 1712 Register to, Register value, Register count, 1713 Register rtmp, XMMRegister xtmp); 1714 1715 void encode_iso_array(Register src, Register dst, Register len, 1716 XMMRegister tmp1, XMMRegister tmp2, XMMRegister tmp3, 1717 XMMRegister tmp4, Register tmp5, Register result); 1718 1719 #ifdef _LP64 1720 void add2_with_carry(Register dest_hi, Register dest_lo, Register src1, Register src2); 1721 void multiply_64_x_64_loop(Register x, Register xstart, Register x_xstart, 1722 Register y, Register y_idx, Register z, 1723 Register carry, Register product, 1724 Register idx, Register kdx); 1725 void multiply_add_128_x_128(Register x_xstart, Register y, Register z, 1726 Register yz_idx, Register idx, 1727 Register carry, Register product, int offset); 1728 void multiply_128_x_128_bmi2_loop(Register y, Register z, 1729 Register carry, Register carry2, 1730 Register idx, Register jdx, 1731 Register yz_idx1, Register yz_idx2, 1732 Register tmp, Register tmp3, Register tmp4); 1733 void multiply_128_x_128_loop(Register x_xstart, Register y, Register z, 1734 Register yz_idx, Register idx, Register jdx, 1735 Register carry, Register product, 1736 Register carry2); 1737 void multiply_to_len(Register x, Register xlen, Register y, Register ylen, Register z, Register zlen, 1738 Register tmp1, Register tmp2, Register tmp3, Register tmp4, Register tmp5); 1739 void square_rshift(Register x, Register len, Register z, Register tmp1, Register tmp3, 1740 Register tmp4, Register tmp5, Register rdxReg, Register raxReg); 1741 void multiply_add_64_bmi2(Register sum, Register op1, Register op2, Register carry, 1742 Register tmp2); 1743 void multiply_add_64(Register sum, Register op1, Register op2, Register carry, 1744 Register rdxReg, Register raxReg); 1745 void add_one_64(Register z, Register zlen, Register carry, Register tmp1); 1746 void lshift_by_1(Register x, Register len, Register z, Register zlen, Register tmp1, Register tmp2, 1747 Register tmp3, Register tmp4); 1748 void square_to_len(Register x, Register len, Register z, Register zlen, Register tmp1, Register tmp2, 1749 Register tmp3, Register tmp4, Register tmp5, Register rdxReg, Register raxReg); 1750 1751 void mul_add_128_x_32_loop(Register out, Register in, Register offset, Register 
len, Register tmp1, 1752 Register tmp2, Register tmp3, Register tmp4, Register tmp5, Register rdxReg, 1753 Register raxReg); 1754 void mul_add(Register out, Register in, Register offset, Register len, Register k, Register tmp1, 1755 Register tmp2, Register tmp3, Register tmp4, Register tmp5, Register rdxReg, 1756 Register raxReg); 1757 void vectorized_mismatch(Register obja, Register objb, Register length, Register log2_array_indxscale, 1758 Register result, Register tmp1, Register tmp2, 1759 XMMRegister vec1, XMMRegister vec2, XMMRegister vec3); 1760 #endif 1761 1762 // CRC32 code for java.util.zip.CRC32::updateBytes() intrinsic. 1763 void update_byte_crc32(Register crc, Register val, Register table); 1764 void kernel_crc32(Register crc, Register buf, Register len, Register table, Register tmp); 1765 // CRC32C code for java.util.zip.CRC32C::updateBytes() intrinsic 1766 // Note on a naming convention: 1767 // Prefix w = register only used on a Westmere+ architecture 1768 // Prefix n = register only used on a Nehalem architecture 1769 #ifdef _LP64 1770 void crc32c_ipl_alg4(Register in_out, uint32_t n, 1771 Register tmp1, Register tmp2, Register tmp3); 1772 #else 1773 void crc32c_ipl_alg4(Register in_out, uint32_t n, 1774 Register tmp1, Register tmp2, Register tmp3, 1775 XMMRegister xtmp1, XMMRegister xtmp2); 1776 #endif 1777 void crc32c_pclmulqdq(XMMRegister w_xtmp1, 1778 Register in_out, 1779 uint32_t const_or_pre_comp_const_index, bool is_pclmulqdq_supported, 1780 XMMRegister w_xtmp2, 1781 Register tmp1, 1782 Register n_tmp2, Register n_tmp3); 1783 void crc32c_rec_alt2(uint32_t const_or_pre_comp_const_index_u1, uint32_t const_or_pre_comp_const_index_u2, bool is_pclmulqdq_supported, Register in_out, Register in1, Register in2, 1784 XMMRegister w_xtmp1, XMMRegister w_xtmp2, XMMRegister w_xtmp3, 1785 Register tmp1, Register tmp2, 1786 Register n_tmp3); 1787 void crc32c_proc_chunk(uint32_t size, uint32_t const_or_pre_comp_const_index_u1, uint32_t const_or_pre_comp_const_index_u2, bool is_pclmulqdq_supported, 1788 Register in_out1, Register in_out2, Register in_out3, 1789 Register tmp1, Register tmp2, Register tmp3, 1790 XMMRegister w_xtmp1, XMMRegister w_xtmp2, XMMRegister w_xtmp3, 1791 Register tmp4, Register tmp5, 1792 Register n_tmp6); 1793 void crc32c_ipl_alg2_alt2(Register in_out, Register in1, Register in2, 1794 Register tmp1, Register tmp2, Register tmp3, 1795 Register tmp4, Register tmp5, Register tmp6, 1796 XMMRegister w_xtmp1, XMMRegister w_xtmp2, XMMRegister w_xtmp3, 1797 bool is_pclmulqdq_supported); 1798 // Fold 128-bit data chunk 1799 void fold_128bit_crc32(XMMRegister xcrc, XMMRegister xK, XMMRegister xtmp, Register buf, int offset); 1800 void fold_128bit_crc32(XMMRegister xcrc, XMMRegister xK, XMMRegister xtmp, XMMRegister xbuf); 1801 // Fold 8-bit data 1802 void fold_8bit_crc32(Register crc, Register table, Register tmp); 1803 void fold_8bit_crc32(XMMRegister crc, Register table, XMMRegister xtmp, Register tmp); 1804 void fold_128bit_crc32_avx512(XMMRegister xcrc, XMMRegister xK, XMMRegister xtmp, Register buf, int offset); 1805 1806 // Compress char[] array to byte[]. 1807 void char_array_compress(Register src, Register dst, Register len, 1808 XMMRegister tmp1, XMMRegister tmp2, XMMRegister tmp3, 1809 XMMRegister tmp4, Register tmp5, Register result); 1810 1811 // Inflate byte[] array to char[]. 
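  // Added background sketch (illustration; per-element semantics assumed from the
  // java.lang.String compact-strings intrinsics, not spelled out in this header):
  // compression only succeeds when every char fits in a single byte, and
  // inflation zero-extends each byte back to a char, i.e. roughly
  //
  //   dst[i] = (jbyte) src[i];            // compress: valid only if src[i] <= 0xff
  //   dst[i] = (jchar) (src[i] & 0xff);   // inflate
  //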
1812   void byte_array_inflate(Register src, Register dst, Register len,
1813                           XMMRegister tmp1, Register tmp2);
1814
1815 #ifdef _LP64
1816   void convert_f2i(Register dst, XMMRegister src);
1817   void convert_d2i(Register dst, XMMRegister src);
1818   void convert_f2l(Register dst, XMMRegister src);
1819   void convert_d2l(Register dst, XMMRegister src);
1820
1821   void cache_wb(Address line);
1822   void cache_wbsync(bool is_pre);
1823 #endif // _LP64
1824 };
1825
1826 /**
1827  * class SkipIfEqual:
1828  *
1829  * Instantiating this class emits assembly code that jumps around any code
1830  * generated between the creation of the instance and its automatic
1831  * destruction at the end of a scope block, depending on the value of the
1832  * flag passed to the constructor, which is checked at run-time.
1833  */
1834 class SkipIfEqual {
1835  private:
1836   MacroAssembler* _masm;
1837   Label _label;
1838
1839  public:
1840   SkipIfEqual(MacroAssembler*, const bool* flag_addr, bool value);
1841   ~SkipIfEqual();
1842 };
1843
1844 #endif // CPU_X86_MACROASSEMBLER_X86_HPP
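// Usage sketch for SkipIfEqual (editorial illustration only; 'masm' and
// 'SomeDiagnosticFlag' are placeholders, not names from this header):
//
//   {
//     SkipIfEqual skip(masm, &SomeDiagnosticFlag, false);
//     // ... code emitted here is jumped over at run time whenever
//     //     SomeDiagnosticFlag == false (the 'value' passed to the constructor) ...
//   } // ~SkipIfEqual() binds the label targeted by the conditional jump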