/*
 * Copyright (c) 1997, 2019, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef CPU_X86_MACROASSEMBLER_X86_HPP
#define CPU_X86_MACROASSEMBLER_X86_HPP

#include "asm/assembler.hpp"
#include "utilities/macros.hpp"
#include "runtime/rtmLocking.hpp"

// MacroAssembler extends Assembler by frequently used macros.
//
// Instructions for which a 'better' code sequence exists depending
// on arguments should also go in here.

class MacroAssembler: public Assembler {
  friend class LIR_Assembler;
  friend class Runtime1;      // as_Address()

 public:
  // Support for VM calls
  //
  // This is the base routine called by the different versions of call_VM_leaf. The interpreter
  // may customize this version by overriding it for its purposes (e.g., to save/restore
  // additional registers when doing a VM call).

  virtual void call_VM_leaf_base(
    address entry_point,          // the entry point
    int number_of_arguments       // the number of arguments to pop after the call
  );
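  // Illustrative sketch (editorial, not part of this interface): a typical
  // leaf call as emitted by stub or interpreter code. The entry point and
  // argument registers below are hypothetical placeholders.
  //
  //   __ call_VM_leaf(CAST_FROM_FN_PTR(address, some_leaf_entry),
  //                   rax, rcx);   // two register arguments, no oop result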
 protected:
  // This is the base routine called by the different versions of call_VM. The interpreter
  // may customize this version by overriding it for its purposes (e.g., to save/restore
  // additional registers when doing a VM call).
  //
  // If no java_thread register is specified (noreg) then rdi will be used instead. call_VM_base
  // returns the register which contains the thread upon return. If a thread register has been
  // specified, the return value will correspond to that register. If no last_java_sp is specified
  // (noreg) then rsp will be used instead.
  virtual void call_VM_base(      // returns the register containing the thread upon return
    Register oop_result,          // where an oop-result ends up if any; use noreg otherwise
    Register java_thread,         // the thread if computed before; use noreg otherwise
    Register last_java_sp,        // to set up last_Java_frame in stubs; use noreg otherwise
    address entry_point,          // the entry point
    int number_of_arguments,      // the number of arguments (w/o thread) to pop after the call
    bool check_exceptions         // whether to check for pending exceptions after return
  );

  void call_VM_helper(Register oop_result, address entry_point, int number_of_arguments, bool check_exceptions = true);

  // helpers for FPU flag access
  // tmp is a temporary register, if none is available use noreg
  void save_rax   (Register tmp);
  void restore_rax(Register tmp);

 public:
  MacroAssembler(CodeBuffer* code) : Assembler(code) {}

  // These routines should emit JVMTI PopFrame and ForceEarlyReturn handling code.
  // The implementation is only non-empty for the InterpreterMacroAssembler,
  // as only the interpreter handles PopFrame and ForceEarlyReturn requests.
  virtual void check_and_handle_popframe(Register java_thread);
  virtual void check_and_handle_earlyret(Register java_thread);

  Address as_Address(AddressLiteral adr);
  Address as_Address(ArrayAddress adr);

  // Support for NULL-checks
  //
  // Generates code that causes a NULL OS exception if the content of reg is NULL.
  // If the accessed location is M[reg + offset] and the offset is known, provide the
  // offset. No explicit code generation is needed if the offset is within a certain
  // range (0 <= offset <= page_size).

  void null_check(Register reg, int offset = -1);
  static bool needs_explicit_null_check(intptr_t offset);
  static bool uses_implicit_null_check(void* address);

  // Required platform-specific helpers for Label::patch_instructions.
  // They _shadow_ the declarations in AbstractAssembler, which are undefined.
  void pd_patch_instruction(address branch, address target, const char* file, int line) {
    unsigned char op = branch[0];
    assert(op == 0xE8 /* call */ ||
           op == 0xE9 /* jmp */ ||
           op == 0xEB /* short jmp */ ||
           (op & 0xF0) == 0x70 /* short jcc */ ||
           (op == 0x0F && (branch[1] & 0xF0) == 0x80) /* jcc */ ||
           (op == 0xC7 && branch[1] == 0xF8) /* xbegin */,
           "Invalid opcode at patch point");

    if (op == 0xEB || (op & 0xF0) == 0x70) {
      // short offset operators (jmp and jcc)
      char* disp = (char*) &branch[1];
      int imm8 = target - (address) &disp[1];
      guarantee(this->is8bit(imm8), "Short forward jump exceeds 8-bit offset at %s:%d",
                file == NULL ? "<NULL>" : file, line);
      *disp = imm8;
    } else {
      int* disp = (int*) &branch[(op == 0x0F || op == 0xC7) ? 2 : 1];
      int imm32 = target - (address) &disp[1];
      *disp = imm32;
    }
  }
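  // Worked example (editorial): patching a long jcc, which is a two-byte
  // opcode (0x0F 0x8x) followed by a rel32 displacement at branch[2]. With
  // branch == 0x1000 and target == 0x2000 the stored displacement is
  // target - (branch + 6) == 0xFFA, i.e. relative to the instruction's end.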
  // The following 4 methods return the offset of the appropriate move instruction

  // Support for fast byte/short loading with zero extension (depending on particular CPU)
  int load_unsigned_byte(Register dst, Address src);
  int load_unsigned_short(Register dst, Address src);

  // Support for fast byte/short loading with sign extension (depending on particular CPU)
  int load_signed_byte(Register dst, Address src);
  int load_signed_short(Register dst, Address src);

  // Support for sign-extension (hi:lo = extend_sign(lo))
  void extend_sign(Register hi, Register lo);

  // Load and store values by size and signed-ness
  void load_sized_value(Register dst, Address src, size_t size_in_bytes, bool is_signed, Register dst2 = noreg);
  void store_sized_value(Address dst, Register src, size_t size_in_bytes, Register src2 = noreg);

  // Support for inc/dec with optimal instruction selection depending on value

  void increment(Register reg, int value = 1) { LP64_ONLY(incrementq(reg, value)) NOT_LP64(incrementl(reg, value)); }
  void decrement(Register reg, int value = 1) { LP64_ONLY(decrementq(reg, value)) NOT_LP64(decrementl(reg, value)); }

  void decrementl(Address dst, int value = 1);
  void decrementl(Register reg, int value = 1);

  void decrementq(Register reg, int value = 1);
  void decrementq(Address dst, int value = 1);

  void incrementl(Address dst, int value = 1);
  void incrementl(Register reg, int value = 1);

  void incrementq(Register reg, int value = 1);
  void incrementq(Address dst, int value = 1);

#ifdef COMPILER2
  // special instructions for EVEX
  void setvectmask(Register dst, Register src);
  void restorevectmask();
#endif

  // Support optimal SSE move instructions.
  void movflt(XMMRegister dst, XMMRegister src) {
    if (dst->encoding() == src->encoding()) return;
    if (UseXmmRegToRegMoveAll) { movaps(dst, src); return; }
    else                       { movss (dst, src); return; }
  }
  void movflt(XMMRegister dst, Address src) { movss(dst, src); }
  void movflt(XMMRegister dst, AddressLiteral src);
  void movflt(Address dst, XMMRegister src) { movss(dst, src); }

  void movdbl(XMMRegister dst, XMMRegister src) {
    if (dst->encoding() == src->encoding()) return;
    if (UseXmmRegToRegMoveAll) { movapd(dst, src); return; }
    else                       { movsd (dst, src); return; }
  }

  void movdbl(XMMRegister dst, AddressLiteral src);

  void movdbl(XMMRegister dst, Address src) {
    if (UseXmmLoadAndClearUpper) { movsd (dst, src); return; }
    else                         { movlpd(dst, src); return; }
  }
  void movdbl(Address dst, XMMRegister src) { movsd(dst, src); }
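  // Editorial note: movflt/movdbl choose between packed and scalar move forms
  // based on the UseXmmRegToRegMoveAll and UseXmmLoadAndClearUpper CPU tuning
  // flags, so callers simply write, e.g. (hypothetical operands):
  //
  //   __ movdbl(xmm0, Address(rsp, 8));
  //
  // and get whichever encoding is cheapest on the current CPU.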
  void incrementl(AddressLiteral dst);
  void incrementl(ArrayAddress dst);

  void incrementq(AddressLiteral dst);

  // Alignment
  void align(int modulus);
  void align(int modulus, int target);

  // A 5 byte nop that is safe for patching (see patch_verified_entry)
  void fat_nop();

  // Stack frame creation/removal
  void enter();
  void leave();

  // Support for getting the JavaThread pointer (i.e., a reference to thread-local information)
  // The pointer will be loaded into the thread register.
  void get_thread(Register thread);


  // Support for VM calls
  //
  // It is imperative that all calls into the VM are handled via the call_VM macros.
  // They make sure that the stack linkage is set up correctly. call_VM's correspond
  // to ENTRY/ENTRY_X entry points while call_VM_leaf's correspond to LEAF entry points.


  void call_VM(Register oop_result,
               address entry_point,
               bool check_exceptions = true);
  void call_VM(Register oop_result,
               address entry_point,
               Register arg_1,
               bool check_exceptions = true);
  void call_VM(Register oop_result,
               address entry_point,
               Register arg_1, Register arg_2,
               bool check_exceptions = true);
  void call_VM(Register oop_result,
               address entry_point,
               Register arg_1, Register arg_2, Register arg_3,
               bool check_exceptions = true);

  // Overloadings with last_Java_sp
  void call_VM(Register oop_result,
               Register last_java_sp,
               address entry_point,
               int number_of_arguments = 0,
               bool check_exceptions = true);
  void call_VM(Register oop_result,
               Register last_java_sp,
               address entry_point,
               Register arg_1,
               bool check_exceptions = true);
  void call_VM(Register oop_result,
               Register last_java_sp,
               address entry_point,
               Register arg_1, Register arg_2,
               bool check_exceptions = true);
  void call_VM(Register oop_result,
               Register last_java_sp,
               address entry_point,
               Register arg_1, Register arg_2, Register arg_3,
               bool check_exceptions = true);

  void get_vm_result  (Register oop_result, Register thread);
  void get_vm_result_2(Register metadata_result, Register thread);

  // These always tightly bind to MacroAssembler::call_VM_base
  // bypassing the virtual implementation
  void super_call_VM(Register oop_result, Register last_java_sp, address entry_point, int number_of_arguments = 0, bool check_exceptions = true);
  void super_call_VM(Register oop_result, Register last_java_sp, address entry_point, Register arg_1, bool check_exceptions = true);
  void super_call_VM(Register oop_result, Register last_java_sp, address entry_point, Register arg_1, Register arg_2, bool check_exceptions = true);
  void super_call_VM(Register oop_result, Register last_java_sp, address entry_point, Register arg_1, Register arg_2, Register arg_3, bool check_exceptions = true);
  void super_call_VM(Register oop_result, Register last_java_sp, address entry_point, Register arg_1, Register arg_2, Register arg_3, Register arg_4, bool check_exceptions = true);

  void call_VM_leaf0(address entry_point);
  void call_VM_leaf(address entry_point,
                    int number_of_arguments = 0);
  void call_VM_leaf(address entry_point,
                    Register arg_1);
  void call_VM_leaf(address entry_point,
                    Register arg_1, Register arg_2);
  void call_VM_leaf(address entry_point,
                    Register arg_1, Register arg_2, Register arg_3);

  // These always tightly bind to MacroAssembler::call_VM_leaf_base
  // bypassing the virtual implementation
  void super_call_VM_leaf(address entry_point);
  void super_call_VM_leaf(address entry_point, Register arg_1);
  void super_call_VM_leaf(address entry_point, Register arg_1, Register arg_2);
  void super_call_VM_leaf(address entry_point, Register arg_1, Register arg_2, Register arg_3);
  void super_call_VM_leaf(address entry_point, Register arg_1, Register arg_2, Register arg_3, Register arg_4);
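  // Illustrative sketch (editorial): a non-leaf VM call that returns an oop.
  // The entry point shown is a hypothetical placeholder for a real
  // InterpreterRuntime/SharedRuntime entry.
  //
  //   __ call_VM(rax,                                      // oop result
  //              CAST_FROM_FN_PTR(address, some_vm_entry),
  //              rbx);                                     // one argument
  //
  // check_exceptions defaults to true, so a pending-exception check is
  // emitted after the call returns.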
  // last Java Frame (fills frame anchor)
  void set_last_Java_frame(Register thread,
                           Register last_java_sp,
                           Register last_java_fp,
                           address last_java_pc);

  // thread in the default location (r15_thread on 64bit)
  void set_last_Java_frame(Register last_java_sp,
                           Register last_java_fp,
                           address last_java_pc);

  void reset_last_Java_frame(Register thread, bool clear_fp);

  // thread in the default location (r15_thread on 64bit)
  void reset_last_Java_frame(bool clear_fp);

  // jobjects
  void clear_jweak_tag(Register possibly_jweak);
  void resolve_jobject(Register value, Register thread, Register tmp);

  // C 'boolean' to Java boolean: x == 0 ? 0 : 1
  void c2bool(Register x);

  // C++ bool manipulation

  void movbool(Register dst, Address src);
  void movbool(Address dst, bool boolconst);
  void movbool(Address dst, Register src);
  void testbool(Register dst);

  void resolve_oop_handle(Register result, Register tmp = rscratch2);
  void resolve_weak_handle(Register result, Register tmp);
  void load_mirror(Register mirror, Register method, Register tmp = rscratch2);
  void load_method_holder_cld(Register rresult, Register rmethod);

  void load_method_holder(Register holder, Register method);

  // oop manipulations
  void load_klass(Register dst, Register src);
  void store_klass(Register dst, Register src);

  void access_load_at(BasicType type, DecoratorSet decorators, Register dst, Address src,
                      Register tmp1, Register thread_tmp);
  void access_store_at(BasicType type, DecoratorSet decorators, Address dst, Register src,
                       Register tmp1, Register tmp2);

  // Resolves obj access. Result is placed in the same register.
  // All other registers are preserved.
  void resolve(DecoratorSet decorators, Register obj);

  void load_heap_oop(Register dst, Address src, Register tmp1 = noreg,
                     Register thread_tmp = noreg, DecoratorSet decorators = 0);
  void load_heap_oop_not_null(Register dst, Address src, Register tmp1 = noreg,
                              Register thread_tmp = noreg, DecoratorSet decorators = 0);
  void store_heap_oop(Address dst, Register src, Register tmp1 = noreg,
                      Register tmp2 = noreg, DecoratorSet decorators = 0);
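  // Illustrative sketch (editorial): loading an object field through the GC
  // barrier machinery. The offset and registers are hypothetical; decorators
  // are defined in oops/accessDecorators.hpp.
  //
  //   __ load_heap_oop(rax, Address(rbx, some_field_offset),
  //                    noreg, noreg, IN_HEAP);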
  // Used for storing NULL. All other oop constants should be
  // stored using routines that take a jobject.
  void store_heap_oop_null(Address dst);

  void load_prototype_header(Register dst, Register src);

#ifdef _LP64
  void store_klass_gap(Register dst, Register src);

  // This dummy is to prevent a call to store_heap_oop from
  // converting a zero (like NULL) into a Register by giving
  // the compiler two choices it can't resolve

  void store_heap_oop(Address dst, void* dummy);

  void encode_heap_oop(Register r);
  void decode_heap_oop(Register r);
  void encode_heap_oop_not_null(Register r);
  void decode_heap_oop_not_null(Register r);
  void encode_heap_oop_not_null(Register dst, Register src);
  void decode_heap_oop_not_null(Register dst, Register src);

  void set_narrow_oop(Register dst, jobject obj);
  void set_narrow_oop(Address dst, jobject obj);
  void cmp_narrow_oop(Register dst, jobject obj);
  void cmp_narrow_oop(Address dst, jobject obj);

  void encode_klass_not_null(Register r);
  void decode_klass_not_null(Register r);
  void encode_klass_not_null(Register dst, Register src);
  void decode_klass_not_null(Register dst, Register src);
  void set_narrow_klass(Register dst, Klass* k);
  void set_narrow_klass(Address dst, Klass* k);
  void cmp_narrow_klass(Register dst, Klass* k);
  void cmp_narrow_klass(Address dst, Klass* k);

  // Returns the byte size of the instructions generated by decode_klass_not_null()
  // when compressed klass pointers are being used.
  static int instr_size_for_decode_klass_not_null();

  // if heap base register is used - reinit it with the correct value
  void reinit_heapbase();

  DEBUG_ONLY(void verify_heapbase(const char* msg);)

#endif // _LP64

  // Int division/remainder for Java
  // (as idivl, but checks for special case as described in JVM spec.)
  // returns idivl instruction offset for implicit exception handling
  int corrected_idivl(Register reg);

  // Long division/remainder for Java
  // (as idivq, but checks for special case as described in JVM spec.)
  // returns idivq instruction offset for implicit exception handling
  int corrected_idivq(Register reg);

  void int3();

  // Long operation macros for a 32bit cpu
  // Long negation for Java
  void lneg(Register hi, Register lo);

  // Long multiplication for Java
  // (destroys contents of eax, ebx, ecx and edx)
  void lmul(int x_rsp_offset, int y_rsp_offset); // rdx:rax = x * y

  // Long shifts for Java
  // (semantics as described in JVM spec.)
  void lshl(Register hi, Register lo);                               // hi:lo << (rcx & 0x3f)
  void lshr(Register hi, Register lo, bool sign_extension = false);  // hi:lo >> (rcx & 0x3f)

  // Long compare for Java
  // (semantics as described in JVM spec.)
  void lcmp2int(Register x_hi, Register x_lo, Register y_hi, Register y_lo); // x_hi = lcmp(x, y)


  // misc

  // Sign extension
  void sign_extend_short(Register reg);
  void sign_extend_byte(Register reg);

  // Division by power of 2, rounding towards 0
  void division_with_shift(Register reg, int shift_value);
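  // Editorial note: the "special case" checked by corrected_idivl/corrected_idivq
  // above is min_jint / -1 (and the long analogue), where a raw idiv would
  // raise a hardware overflow fault. The JVM spec instead requires
  //
  //   min_jint / -1 == min_jint   and   min_jint % -1 == 0
  //
  // so the corrected forms test for that operand pattern and branch around
  // the divide instruction.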
#ifndef _LP64
  // Compares the top-most stack entries on the FPU stack and sets the eflags as follows:
  //
  // CF (corresponds to C0) if x < y
  // PF (corresponds to C2) if unordered
  // ZF (corresponds to C3) if x = y
  //
  // The arguments are in reversed order on the stack (i.e., top of stack is first argument).
  // tmp is a temporary register, if none is available use noreg (only matters for non-P6 code)
  void fcmp(Register tmp);
  // Variant of the above which allows y to be further down the stack
  // and which only pops x and y if specified. If pop_right is
  // specified then pop_left must also be specified.
  void fcmp(Register tmp, int index, bool pop_left, bool pop_right);

  // Floating-point comparison for Java
  // Compares the top-most stack entries on the FPU stack and stores the result in dst.
  // The arguments are in reversed order on the stack (i.e., top of stack is first argument).
  // (semantics as described in JVM spec.)
  void fcmp2int(Register dst, bool unordered_is_less);
  // Variant of the above which allows y to be further down the stack
  // and which only pops x and y if specified. If pop_right is
  // specified then pop_left must also be specified.
  void fcmp2int(Register dst, bool unordered_is_less, int index, bool pop_left, bool pop_right);

  // Floating-point remainder for Java (ST0 = ST0 fremr ST1, ST1 is empty afterwards)
  // tmp is a temporary register, if none is available use noreg
  void fremr(Register tmp);

  // only if +VerifyFPU
  void verify_FPU(int stack_depth, const char* s = "illegal FPU state");
#endif // !_LP64

  // dst = c = a * b + c
  void fmad(XMMRegister dst, XMMRegister a, XMMRegister b, XMMRegister c);
  void fmaf(XMMRegister dst, XMMRegister a, XMMRegister b, XMMRegister c);

  void vfmad(XMMRegister dst, XMMRegister a, XMMRegister b, XMMRegister c, int vector_len);
  void vfmaf(XMMRegister dst, XMMRegister a, XMMRegister b, XMMRegister c, int vector_len);
  void vfmad(XMMRegister dst, XMMRegister a, Address b, XMMRegister c, int vector_len);
  void vfmaf(XMMRegister dst, XMMRegister a, Address b, XMMRegister c, int vector_len);


  // same as fcmp2int, but using SSE2
  void cmpss2int(XMMRegister opr1, XMMRegister opr2, Register dst, bool unordered_is_less);
  void cmpsd2int(XMMRegister opr1, XMMRegister opr2, Register dst, bool unordered_is_less);

  // branch to L if FPU flag C2 is set/not set
  // tmp is a temporary register, if none is available use noreg
  void jC2 (Register tmp, Label& L);
  void jnC2(Register tmp, Label& L);

  // Load float value from 'address'. If UseSSE >= 1, the value is loaded into
  // register xmm0. Otherwise, the value is loaded onto the FPU stack.
  void load_float(Address src);

  // Store float value to 'address'. If UseSSE >= 1, the value is stored
  // from register xmm0. Otherwise, the value is stored from the FPU stack.
  void store_float(Address dst);

  // Load double value from 'address'. If UseSSE >= 2, the value is loaded into
  // register xmm0. Otherwise, the value is loaded onto the FPU stack.
  void load_double(Address src);

  // Store double value to 'address'. If UseSSE >= 2, the value is stored
  // from register xmm0. Otherwise, the value is stored from the FPU stack.
  void store_double(Address dst);
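  // Editorial sketch of the dispatch these four routines document, assuming
  // this shape of the implementation (the x87 branch applies to 32-bit only):
  //
  //   if (UseSSE >= 2) { __ movdbl(xmm0, src); }   // SSE2 path
  //   else             { __ fld_d(src); }          // x87 path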
#ifndef _LP64
  // Pop ST (ffree & fincstp combined)
  void fpop();

  void empty_FPU_stack();
#endif // !_LP64

  void push_IU_state();
  void pop_IU_state();

  void push_FPU_state();
  void pop_FPU_state();

  void push_CPU_state();
  void pop_CPU_state();

  // Round up to a multiple of modulus (a power of two)
  void round_to(Register reg, int modulus);

  // Callee saved registers handling
  void push_callee_saved_registers();
  void pop_callee_saved_registers();

  // allocation
  void eden_allocate(
    Register thread,              // Current thread
    Register obj,                 // result: pointer to object after successful allocation
    Register var_size_in_bytes,   // object size in bytes if unknown at compile time; invalid otherwise
    int con_size_in_bytes,        // object size in bytes if known at compile time
    Register t1,                  // temp register
    Label& slow_case              // continuation point if fast allocation fails
  );
  void tlab_allocate(
    Register thread,              // Current thread
    Register obj,                 // result: pointer to object after successful allocation
    Register var_size_in_bytes,   // object size in bytes if unknown at compile time; invalid otherwise
    int con_size_in_bytes,        // object size in bytes if known at compile time
    Register t1,                  // temp register
    Register t2,                  // temp register
    Label& slow_case              // continuation point if fast allocation fails
  );
  void zero_memory(Register address, Register length_in_bytes, int offset_in_bytes, Register temp);

  // interface method calling
  void lookup_interface_method(Register recv_klass,
                               Register intf_klass,
                               RegisterOrConstant itable_index,
                               Register method_result,
                               Register scan_temp,
                               Label& no_such_interface,
                               bool return_method = true);

  // virtual method calling
  void lookup_virtual_method(Register recv_klass,
                             RegisterOrConstant vtable_index,
                             Register method_result);

  // Test sub_klass against super_klass, with fast and slow paths.

  // The fast path produces a tri-state answer: yes / no / maybe-slow.
  // One of the three labels can be NULL, meaning take the fall-through.
  // If super_check_offset is -1, the value is loaded up from super_klass.
  // No registers are killed, except temp_reg.
  void check_klass_subtype_fast_path(Register sub_klass,
                                     Register super_klass,
                                     Register temp_reg,
                                     Label* L_success,
                                     Label* L_failure,
                                     Label* L_slow_path,
                                     RegisterOrConstant super_check_offset = RegisterOrConstant(-1));

  // The rest of the type check; must be wired to a corresponding fast path.
  // It does not repeat the fast path logic, so don't use it standalone.
  // The temp_reg and temp2_reg can be noreg, if no temps are available.
  // Updates the sub's secondary super cache as necessary.
  // If set_cond_codes, condition codes will be Z on success, NZ on failure.
  void check_klass_subtype_slow_path(Register sub_klass,
                                     Register super_klass,
                                     Register temp_reg,
                                     Register temp2_reg,
                                     Label* L_success,
                                     Label* L_failure,
                                     bool set_cond_codes = false);
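  // Illustrative sketch (editorial): wiring the fast and slow paths together.
  // Labels and registers are hypothetical; check_klass_subtype below packages
  // exactly this pattern for the common case.
  //
  //   Label L_ok, L_fail, L_slow;
  //   __ check_klass_subtype_fast_path(rdx, rax, rcx, &L_ok, &L_fail, &L_slow);
  //   __ bind(L_slow);
  //   __ check_klass_subtype_slow_path(rdx, rax, rcx, noreg, &L_ok, &L_fail);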
  // Simplified, combined version, good for typical uses.
  // Falls through on failure.
  void check_klass_subtype(Register sub_klass,
                           Register super_klass,
                           Register temp_reg,
                           Label& L_success);

  void clinit_barrier(Register klass,
                      Register thread,
                      Label* L_fast_path = NULL,
                      Label* L_slow_path = NULL);

  // method handles (JSR 292)
  Address argument_address(RegisterOrConstant arg_slot, int extra_slot_offset = 0);

  //----
  void set_word_if_not_zero(Register reg); // sets reg to 1 if not zero, otherwise 0

  // Debugging

  // only if +VerifyOops
  // TODO: Make these macros with file and line like sparc version!
  void verify_oop(Register reg, const char* s = "broken oop");
  void verify_oop_addr(Address addr, const char* s = "broken oop addr");

  // TODO: verify method and klass metadata (compare against vptr?)
  void _verify_method_ptr(Register reg, const char* msg, const char* file, int line) {}
  void _verify_klass_ptr(Register reg, const char* msg, const char* file, int line) {}

#define verify_method_ptr(reg) _verify_method_ptr(reg, "broken method " #reg, __FILE__, __LINE__)
#define verify_klass_ptr(reg) _verify_klass_ptr(reg, "broken klass " #reg, __FILE__, __LINE__)

  // Verify or restore cpu control state after JNI call
  void restore_cpu_control_state_after_jni();

  // prints msg, dumps registers and stops execution
  void stop(const char* msg);

  // prints msg and continues
  void warn(const char* msg);

  // dumps registers and other state
  void print_state();

  static void debug32(int rdi, int rsi, int rbp, int rsp, int rbx, int rdx, int rcx, int rax, int eip, char* msg);
  static void debug64(char* msg, int64_t pc, int64_t regs[]);
  static void print_state32(int rdi, int rsi, int rbp, int rsp, int rbx, int rdx, int rcx, int rax, int eip);
  static void print_state64(int64_t pc, int64_t regs[]);

  void os_breakpoint();

  void untested() { stop("untested"); }

  void unimplemented(const char* what = "");

  void should_not_reach_here() { stop("should not reach here"); }

  void print_CPU_state();

  // Stack overflow checking
  void bang_stack_with_offset(int offset) {
    // stack grows down, caller passes positive offset
    assert(offset > 0, "must bang with positive offset");
    movl(Address(rsp, (-offset)), rax);
  }

  // Writes to stack successive pages until offset reached to check for
  // stack overflow + shadow pages. Also, clobbers tmp
  void bang_stack_size(Register size, Register tmp);

  // Check for reserved stack access in method being exited (for JIT)
  void reserved_stack_check();

  virtual RegisterOrConstant delayed_value_impl(intptr_t* delayed_value_addr,
                                                Register tmp,
                                                int offset);

  // If thread_reg is != noreg the code assumes the register passed contains
  // the thread (required on 64 bit).
  void safepoint_poll(Label& slow_path, Register thread_reg, Register temp_reg);

  void verify_tlab();
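  // Editorial note on bang_stack_with_offset above: frame setup code probes
  // each page of a new frame so the OS guard page faults eagerly. A minimal
  // sketch, with a hypothetical frame size:
  //
  //   for (int p = os::vm_page_size(); p <= frame_size_in_bytes; p += os::vm_page_size()) {
  //     __ bang_stack_with_offset(p);   // touches rsp - p
  //   }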
  // Biased locking support
  // lock_reg and obj_reg must be loaded up with the appropriate values.
  // swap_reg must be rax, and is killed.
  // tmp_reg is optional. If it is supplied (i.e., != noreg) it will
  // be killed; if not supplied, push/pop will be used internally to
  // allocate a temporary (inefficient, avoid if possible).
  // Optional slow case is for implementations (interpreter and C1) which branch to
  // slow case directly. Leaves condition codes set for C2's Fast_Lock node.
  // Returns offset of first potentially-faulting instruction for null
  // check info (currently consumed only by C1). If
  // swap_reg_contains_mark is true then returns -1 as it is assumed
  // the calling code has already passed any potential faults.
  int biased_locking_enter(Register lock_reg, Register obj_reg,
                           Register swap_reg, Register tmp_reg,
                           bool swap_reg_contains_mark,
                           Label& done, Label* slow_case = NULL,
                           BiasedLockingCounters* counters = NULL);
  void biased_locking_exit(Register obj_reg, Register temp_reg, Label& done);
#ifdef COMPILER2
  // Code used by cmpFastLock and cmpFastUnlock mach instructions in .ad file.
  // See full description in macroAssembler_x86.cpp.
  void fast_lock(Register obj, Register box, Register tmp,
                 Register scr, Register cx1, Register cx2,
                 BiasedLockingCounters* counters,
                 RTMLockingCounters* rtm_counters,
                 RTMLockingCounters* stack_rtm_counters,
                 Metadata* method_data,
                 bool use_rtm, bool profile_rtm);
  void fast_unlock(Register obj, Register box, Register tmp, bool use_rtm);
#if INCLUDE_RTM_OPT
  void rtm_counters_update(Register abort_status, Register rtm_counters);
  void branch_on_random_using_rdtsc(Register tmp, Register scr, int count, Label& brLabel);
  void rtm_abort_ratio_calculation(Register tmp, Register rtm_counters_reg,
                                   RTMLockingCounters* rtm_counters,
                                   Metadata* method_data);
  void rtm_profiling(Register abort_status_Reg, Register rtm_counters_Reg,
                     RTMLockingCounters* rtm_counters, Metadata* method_data, bool profile_rtm);
  void rtm_retry_lock_on_abort(Register retry_count, Register abort_status, Label& retryLabel);
  void rtm_retry_lock_on_busy(Register retry_count, Register box, Register tmp, Register scr, Label& retryLabel);
  void rtm_stack_locking(Register obj, Register tmp, Register scr,
                         Register retry_on_abort_count,
                         RTMLockingCounters* stack_rtm_counters,
                         Metadata* method_data, bool profile_rtm,
                         Label& DONE_LABEL, Label& IsInflated);
  void rtm_inflated_locking(Register obj, Register box, Register tmp,
                            Register scr, Register retry_on_busy_count,
                            Register retry_on_abort_count,
                            RTMLockingCounters* rtm_counters,
                            Metadata* method_data, bool profile_rtm,
                            Label& DONE_LABEL);
#endif
#endif

  Condition negate_condition(Condition cond);
  // Instructions that use AddressLiteral operands. These instructions can handle 32bit/64bit
  // operands. In general the names are modified to avoid hiding the instruction in Assembler
  // so that we don't need to implement all the varieties in the Assembler with trivial wrappers
  // here in MacroAssembler. The major exception to this rule is call.

  // Arithmetics


  void addptr(Address dst, int32_t src) { LP64_ONLY(addq(dst, src)) NOT_LP64(addl(dst, src)); }
  void addptr(Address dst, Register src);

  void addptr(Register dst, Address src) { LP64_ONLY(addq(dst, src)) NOT_LP64(addl(dst, src)); }
  void addptr(Register dst, int32_t src);
  void addptr(Register dst, Register src);
  void addptr(Register dst, RegisterOrConstant src) {
    if (src.is_constant()) addptr(dst, (int) src.as_constant());
    else                   addptr(dst, src.as_register());
  }

  void andptr(Register dst, int32_t src);
  void andptr(Register src1, Register src2) { LP64_ONLY(andq(src1, src2)) NOT_LP64(andl(src1, src2)); }

  void cmp8(AddressLiteral src1, int imm);

  // renamed to drag out the casting of address to int32_t/intptr_t
  void cmp32(Register src1, int32_t imm);

  void cmp32(AddressLiteral src1, int32_t imm);
  // compare reg - mem, or reg - &mem
  void cmp32(Register src1, AddressLiteral src2);

  void cmp32(Register src1, Address src2);

#ifndef _LP64
  void cmpklass(Address dst, Metadata* obj);
  void cmpklass(Register dst, Metadata* obj);
  void cmpoop(Address dst, jobject obj);
  void cmpoop_raw(Address dst, jobject obj);
#endif // !_LP64

  void cmpoop(Register src1, Register src2);
  void cmpoop(Register src1, Address src2);
  void cmpoop(Register dst, jobject obj);
  void cmpoop_raw(Register dst, jobject obj);

  // NOTE src2 must be the lval. This is NOT a mem-mem compare
  void cmpptr(Address src1, AddressLiteral src2);

  void cmpptr(Register src1, AddressLiteral src2);

  void cmpptr(Register src1, Register src2) { LP64_ONLY(cmpq(src1, src2)) NOT_LP64(cmpl(src1, src2)); }
  void cmpptr(Register src1, Address src2)  { LP64_ONLY(cmpq(src1, src2)) NOT_LP64(cmpl(src1, src2)); }
  // void cmpptr(Address src1, Register src2) { LP64_ONLY(cmpq(src1, src2)) NOT_LP64(cmpl(src1, src2)); }

  void cmpptr(Register src1, int32_t src2) { LP64_ONLY(cmpq(src1, src2)) NOT_LP64(cmpl(src1, src2)); }
  void cmpptr(Address src1, int32_t src2)  { LP64_ONLY(cmpq(src1, src2)) NOT_LP64(cmpl(src1, src2)); }

  // cmp64 to avoid hiding cmpq
  void cmp64(Register src1, AddressLiteral src);

  void cmpxchgptr(Register reg, Address adr);

  void locked_cmpxchgptr(Register reg, AddressLiteral adr);


  void imulptr(Register dst, Register src) { LP64_ONLY(imulq(dst, src)) NOT_LP64(imull(dst, src)); }
  void imulptr(Register dst, Register src, int imm32) { LP64_ONLY(imulq(dst, src, imm32)) NOT_LP64(imull(dst, src, imm32)); }


  void negptr(Register dst) { LP64_ONLY(negq(dst)) NOT_LP64(negl(dst)); }

  void notptr(Register dst) { LP64_ONLY(notq(dst)) NOT_LP64(notl(dst)); }

  void shlptr(Register dst, int32_t shift);
  void shlptr(Register dst) { LP64_ONLY(shlq(dst)) NOT_LP64(shll(dst)); }

  void shrptr(Register dst, int32_t shift);
  void shrptr(Register dst) { LP64_ONLY(shrq(dst)) NOT_LP64(shrl(dst)); }

  void sarptr(Register dst) { LP64_ONLY(sarq(dst)) NOT_LP64(sarl(dst)); }
  void sarptr(Register dst, int32_t src) { LP64_ONLY(sarq(dst, src)) NOT_LP64(sarl(dst, src)); }
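  // Editorial note: each *ptr helper expands to the pointer-width instruction
  // via the LP64_ONLY/NOT_LP64 macros, letting shared code manipulate
  // pointer-sized values without #ifdefs. For example (hypothetical operands):
  //
  //   __ addptr(rsp, 2 * wordSize);   // addq on 64-bit, addl on 32-bit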
  void subptr(Address dst, int32_t src) { LP64_ONLY(subq(dst, src)) NOT_LP64(subl(dst, src)); }

  void subptr(Register dst, Address src) { LP64_ONLY(subq(dst, src)) NOT_LP64(subl(dst, src)); }
  void subptr(Register dst, int32_t src);
  // Force generation of a 4 byte immediate value even if it fits into 8bit
  void subptr_imm32(Register dst, int32_t src);
  void subptr(Register dst, Register src);
  void subptr(Register dst, RegisterOrConstant src) {
    if (src.is_constant()) subptr(dst, (int) src.as_constant());
    else                   subptr(dst, src.as_register());
  }

  void sbbptr(Address dst, int32_t src) { LP64_ONLY(sbbq(dst, src)) NOT_LP64(sbbl(dst, src)); }
  void sbbptr(Register dst, int32_t src) { LP64_ONLY(sbbq(dst, src)) NOT_LP64(sbbl(dst, src)); }

  void xchgptr(Register src1, Register src2) { LP64_ONLY(xchgq(src1, src2)) NOT_LP64(xchgl(src1, src2)); }
  void xchgptr(Register src1, Address src2) { LP64_ONLY(xchgq(src1, src2)) NOT_LP64(xchgl(src1, src2)); }

  void xaddptr(Address src1, Register src2) { LP64_ONLY(xaddq(src1, src2)) NOT_LP64(xaddl(src1, src2)); }



  // Helper functions for statistics gathering.
  // Conditionally (atomically, on MPs) increments passed counter address, preserving condition codes.
  void cond_inc32(Condition cond, AddressLiteral counter_addr);
  // Unconditional atomic increment.
  void atomic_incl(Address counter_addr);
  void atomic_incl(AddressLiteral counter_addr, Register scr = rscratch1);
#ifdef _LP64
  void atomic_incq(Address counter_addr);
  void atomic_incq(AddressLiteral counter_addr, Register scr = rscratch1);
#endif
  void atomic_incptr(AddressLiteral counter_addr, Register scr = rscratch1) { LP64_ONLY(atomic_incq(counter_addr, scr)) NOT_LP64(atomic_incl(counter_addr, scr)); }
  void atomic_incptr(Address counter_addr) { LP64_ONLY(atomic_incq(counter_addr)) NOT_LP64(atomic_incl(counter_addr)); }

  void lea(Register dst, AddressLiteral adr);
  void lea(Address dst, AddressLiteral adr);
  void lea(Register dst, Address adr) { Assembler::lea(dst, adr); }

  void leal32(Register dst, Address src) { leal(dst, src); }

  // Import other testl() methods from the parent class or else
  // they will be hidden by the following overriding declaration.
  using Assembler::testl;
  void testl(Register dst, AddressLiteral src);

  void orptr(Register dst, Address src) { LP64_ONLY(orq(dst, src)) NOT_LP64(orl(dst, src)); }
  void orptr(Register dst, Register src) { LP64_ONLY(orq(dst, src)) NOT_LP64(orl(dst, src)); }
  void orptr(Register dst, int32_t src) { LP64_ONLY(orq(dst, src)) NOT_LP64(orl(dst, src)); }
  void orptr(Address dst, int32_t imm32) { LP64_ONLY(orq(dst, imm32)) NOT_LP64(orl(dst, imm32)); }

  void testptr(Register src, int32_t imm32) { LP64_ONLY(testq(src, imm32)) NOT_LP64(testl(src, imm32)); }
  void testptr(Register src1, Address src2) { LP64_ONLY(testq(src1, src2)) NOT_LP64(testl(src1, src2)); }
  void testptr(Register src1, Register src2);

  void xorptr(Register dst, Register src) { LP64_ONLY(xorq(dst, src)) NOT_LP64(xorl(dst, src)); }
  void xorptr(Register dst, Address src) { LP64_ONLY(xorq(dst, src)) NOT_LP64(xorl(dst, src)); }
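  // Illustrative sketch (editorial): bumping a VM statistics counter from
  // generated code using the helpers above. The counter symbol is hypothetical.
  //
  //   __ atomic_incl(ExternalAddress((address) &some_counter));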
  // Calls

  void call(Label& L, relocInfo::relocType rtype);
  void call(Register entry);

  // NOTE: this call transfers to the effective address of entry NOT
  // the address contained by entry. This is because this is more natural
  // for jumps/calls.
  void call(AddressLiteral entry);

  // Emit the CompiledIC call idiom
  void ic_call(address entry, jint method_index = 0);

  // Jumps

  // NOTE: these jumps transfer to the effective address of dst NOT
  // the address contained by dst. This is because this is more natural
  // for jumps/calls.
  void jump(AddressLiteral dst);
  void jump_cc(Condition cc, AddressLiteral dst);

  // 32bit can do a case table jump in one instruction but we no longer allow the base
  // to be installed in the Address class. This jump will transfer to the address
  // contained in the location described by entry (not the address of entry).
  void jump(ArrayAddress entry);

  // Floating

  void andpd(XMMRegister dst, Address src) { Assembler::andpd(dst, src); }
  void andpd(XMMRegister dst, AddressLiteral src, Register scratch_reg = rscratch1);
  void andpd(XMMRegister dst, XMMRegister src) { Assembler::andpd(dst, src); }

  void andps(XMMRegister dst, XMMRegister src) { Assembler::andps(dst, src); }
  void andps(XMMRegister dst, Address src) { Assembler::andps(dst, src); }
  void andps(XMMRegister dst, AddressLiteral src, Register scratch_reg = rscratch1);

  void comiss(XMMRegister dst, XMMRegister src) { Assembler::comiss(dst, src); }
  void comiss(XMMRegister dst, Address src) { Assembler::comiss(dst, src); }
  void comiss(XMMRegister dst, AddressLiteral src);

  void comisd(XMMRegister dst, XMMRegister src) { Assembler::comisd(dst, src); }
  void comisd(XMMRegister dst, Address src) { Assembler::comisd(dst, src); }
  void comisd(XMMRegister dst, AddressLiteral src);

#ifndef _LP64
  void fadd_s(Address src)        { Assembler::fadd_s(src); }
  void fadd_s(AddressLiteral src) { Assembler::fadd_s(as_Address(src)); }

  void fldcw(Address src) { Assembler::fldcw(src); }
  void fldcw(AddressLiteral src);

  void fld_s(int index)   { Assembler::fld_s(index); }
  void fld_s(Address src) { Assembler::fld_s(src); }
  void fld_s(AddressLiteral src);

  void fld_d(Address src) { Assembler::fld_d(src); }
  void fld_d(AddressLiteral src);

  void fld_x(Address src) { Assembler::fld_x(src); }
  void fld_x(AddressLiteral src);

  void fmul_s(Address src)        { Assembler::fmul_s(src); }
  void fmul_s(AddressLiteral src) { Assembler::fmul_s(as_Address(src)); }
#endif // !_LP64

  void ldmxcsr(Address src) { Assembler::ldmxcsr(src); }
  void ldmxcsr(AddressLiteral src);

#ifdef _LP64
 private:
  void sha256_AVX2_one_round_compute(
    Register reg_old_h,
    Register reg_a,
    Register reg_b,
    Register reg_c,
    Register reg_d,
    Register reg_e,
    Register reg_f,
    Register reg_g,
    Register reg_h,
    int iter);
  void sha256_AVX2_four_rounds_compute_first(int start);
  void sha256_AVX2_four_rounds_compute_last(int start);
  void sha256_AVX2_one_round_and_sched(
    XMMRegister xmm_0,   /* == ymm4 on 0, 1, 2, 3 iterations, then rotate 4 registers left on 4, 8, 12 iterations */
    XMMRegister xmm_1,   /* ymm5 */  /* full cycle is 16 iterations */
    XMMRegister xmm_2,   /* ymm6 */
    XMMRegister xmm_3,   /* ymm7 */
    Register    reg_a,   /* == eax on 0 iteration, then rotate 8 register right on each next iteration */
    Register    reg_b,   /* ebx */   /* full cycle is 8 iterations */
    Register    reg_c,   /* edi */
    Register    reg_d,   /* esi */
    Register    reg_e,   /* r8d */
    Register    reg_f,   /* r9d */
    Register    reg_g,   /* r10d */
    Register    reg_h,   /* r11d */
    int iter);

  void addm(int disp, Register r1, Register r2);
  void gfmul(XMMRegister tmp0, XMMRegister t);
  void schoolbookAAD(int i, Register subkeyH, XMMRegister data, XMMRegister tmp0,
                     XMMRegister tmp1, XMMRegister tmp2, XMMRegister tmp3);
  void generateHtbl_one_block(Register htbl);
  void generateHtbl_eight_blocks(Register htbl);
 public:
  void sha256_AVX2(XMMRegister msg, XMMRegister state0, XMMRegister state1, XMMRegister msgtmp0,
                   XMMRegister msgtmp1, XMMRegister msgtmp2, XMMRegister msgtmp3, XMMRegister msgtmp4,
                   Register buf, Register state, Register ofs, Register limit, Register rsp,
                   bool multi_block, XMMRegister shuf_mask);
  void avx_ghash(Register state, Register htbl, Register data, Register blocks);
#endif

#ifdef _LP64
 private:
  void sha512_AVX2_one_round_compute(Register old_h, Register a, Register b, Register c, Register d,
                                     Register e, Register f, Register g, Register h, int iteration);

  void sha512_AVX2_one_round_and_schedule(XMMRegister xmm4, XMMRegister xmm5, XMMRegister xmm6, XMMRegister xmm7,
                                          Register a, Register b, Register c, Register d, Register e, Register f,
                                          Register g, Register h, int iteration);

  void addmq(int disp, Register r1, Register r2);
 public:
  void sha512_AVX2(XMMRegister msg, XMMRegister state0, XMMRegister state1, XMMRegister msgtmp0,
                   XMMRegister msgtmp1, XMMRegister msgtmp2, XMMRegister msgtmp3, XMMRegister msgtmp4,
                   Register buf, Register state, Register ofs, Register limit, Register rsp, bool multi_block,
                   XMMRegister shuf_mask);
 private:
  void roundEnc(XMMRegister key, int rnum);
  void lastroundEnc(XMMRegister key, int rnum);
  void roundDec(XMMRegister key, int rnum);
  void lastroundDec(XMMRegister key, int rnum);
  void ev_load_key(XMMRegister xmmdst, Register key, int offset, XMMRegister xmm_shuf_mask);

 public:
  void aesecb_encrypt(Register source_addr, Register dest_addr, Register key, Register len);
  void aesecb_decrypt(Register source_addr, Register dest_addr, Register key, Register len);
  void aesctr_encrypt(Register src_addr, Register dest_addr, Register key, Register counter,
                      Register len_reg, Register used, Register used_addr, Register saved_encCounter_start);

#endif

  void fast_sha1(XMMRegister abcd, XMMRegister e0, XMMRegister e1, XMMRegister msg0,
                 XMMRegister msg1, XMMRegister msg2, XMMRegister msg3, XMMRegister shuf_mask,
                 Register buf, Register state, Register ofs, Register limit, Register rsp,
                 bool multi_block);

#ifdef _LP64
  void fast_sha256(XMMRegister msg, XMMRegister state0, XMMRegister state1, XMMRegister msgtmp0,
                   XMMRegister msgtmp1, XMMRegister msgtmp2, XMMRegister msgtmp3, XMMRegister msgtmp4,
                   Register buf, Register state, Register ofs, Register limit, Register rsp,
                   bool multi_block, XMMRegister shuf_mask);
#else
  void fast_sha256(XMMRegister msg, XMMRegister state0, XMMRegister state1, XMMRegister msgtmp0,
                   XMMRegister msgtmp1, XMMRegister msgtmp2, XMMRegister msgtmp3, XMMRegister msgtmp4,
                   Register buf, Register state, Register ofs, Register limit, Register rsp,
                   bool multi_block);
#endif

  void fast_exp(XMMRegister xmm0, XMMRegister xmm1, XMMRegister xmm2, XMMRegister xmm3,
                XMMRegister xmm4, XMMRegister xmm5, XMMRegister xmm6, XMMRegister xmm7,
                Register rax, Register rcx, Register rdx, Register tmp);

#ifdef _LP64
  void fast_log(XMMRegister xmm0, XMMRegister xmm1, XMMRegister xmm2, XMMRegister xmm3,
                XMMRegister xmm4, XMMRegister xmm5, XMMRegister xmm6, XMMRegister xmm7,
                Register rax, Register rcx, Register rdx, Register tmp1, Register tmp2);
  void fast_log10(XMMRegister xmm0, XMMRegister xmm1, XMMRegister xmm2, XMMRegister xmm3,
                  XMMRegister xmm4, XMMRegister xmm5, XMMRegister xmm6, XMMRegister xmm7,
                  Register rax, Register rcx, Register rdx, Register r11);

  void fast_pow(XMMRegister xmm0, XMMRegister xmm1, XMMRegister xmm2, XMMRegister xmm3, XMMRegister xmm4,
                XMMRegister xmm5, XMMRegister xmm6, XMMRegister xmm7, Register rax, Register rcx,
                Register rdx, Register tmp1, Register tmp2, Register tmp3, Register tmp4);

  void fast_sin(XMMRegister xmm0, XMMRegister xmm1, XMMRegister xmm2, XMMRegister xmm3,
                XMMRegister xmm4, XMMRegister xmm5, XMMRegister xmm6, XMMRegister xmm7,
                Register rax, Register rbx, Register rcx, Register rdx, Register tmp1, Register tmp2,
                Register tmp3, Register tmp4);

  void fast_cos(XMMRegister xmm0, XMMRegister xmm1, XMMRegister xmm2, XMMRegister xmm3,
                XMMRegister xmm4, XMMRegister xmm5, XMMRegister xmm6, XMMRegister xmm7,
                Register rax, Register rcx, Register rdx, Register tmp1,
                Register tmp2, Register tmp3, Register tmp4);
  void fast_tan(XMMRegister xmm0, XMMRegister xmm1, XMMRegister xmm2, XMMRegister xmm3,
                XMMRegister xmm4, XMMRegister xmm5, XMMRegister xmm6, XMMRegister xmm7,
                Register rax, Register rcx, Register rdx, Register tmp1,
                Register tmp2, Register tmp3, Register tmp4);
#else
  void fast_log(XMMRegister xmm0, XMMRegister xmm1, XMMRegister xmm2, XMMRegister xmm3,
                XMMRegister xmm4, XMMRegister xmm5, XMMRegister xmm6, XMMRegister xmm7,
                Register rax, Register rcx, Register rdx, Register tmp1);

  void fast_log10(XMMRegister xmm0, XMMRegister xmm1, XMMRegister xmm2, XMMRegister xmm3,
                  XMMRegister xmm4, XMMRegister xmm5, XMMRegister xmm6, XMMRegister xmm7,
                  Register rax, Register rcx, Register rdx, Register tmp);

  void fast_pow(XMMRegister xmm0, XMMRegister xmm1, XMMRegister xmm2, XMMRegister xmm3, XMMRegister xmm4,
                XMMRegister xmm5, XMMRegister xmm6, XMMRegister xmm7, Register rax, Register rcx,
                Register rdx, Register tmp);

  void fast_sin(XMMRegister xmm0, XMMRegister xmm1, XMMRegister xmm2, XMMRegister xmm3,
                XMMRegister xmm4, XMMRegister xmm5, XMMRegister xmm6, XMMRegister xmm7,
                Register rax, Register rbx, Register rdx);

  void fast_cos(XMMRegister xmm0, XMMRegister xmm1, XMMRegister xmm2, XMMRegister xmm3,
                XMMRegister xmm4, XMMRegister xmm5, XMMRegister xmm6, XMMRegister xmm7,
                Register rax, Register rcx, Register rdx, Register tmp);

  void libm_sincos_huge(XMMRegister xmm0, XMMRegister xmm1, Register eax, Register ecx,
                        Register edx, Register ebx, Register esi, Register edi,
                        Register ebp, Register esp);

  void libm_reduce_pi04l(Register eax, Register ecx, Register edx, Register ebx,
                         Register esi, Register edi, Register ebp, Register esp);

  void libm_tancot_huge(XMMRegister xmm0, XMMRegister xmm1, Register eax, Register ecx,
                        Register edx, Register ebx, Register esi, Register edi,
                        Register ebp, Register esp);

  void fast_tan(XMMRegister xmm0, XMMRegister xmm1, XMMRegister xmm2, XMMRegister xmm3,
                XMMRegister xmm4, XMMRegister xmm5, XMMRegister xmm6, XMMRegister xmm7,
                Register rax, Register rcx, Register rdx, Register tmp);
#endif

 private:

  // these are private because users should be doing movflt/movdbl

  void movss(XMMRegister dst, XMMRegister src) { Assembler::movss(dst, src); }
  void movss(Address dst, XMMRegister src)     { Assembler::movss(dst, src); }
  void movss(XMMRegister dst, Address src)     { Assembler::movss(dst, src); }
  void movss(XMMRegister dst, AddressLiteral src);

  void movlpd(XMMRegister dst, Address src)    { Assembler::movlpd(dst, src); }
  void movlpd(XMMRegister dst, AddressLiteral src);

 public:

  void addsd(XMMRegister dst, XMMRegister src) { Assembler::addsd(dst, src); }
  void addsd(XMMRegister dst, Address src)     { Assembler::addsd(dst, src); }
  void addsd(XMMRegister dst, AddressLiteral src);

  void addss(XMMRegister dst, XMMRegister src) { Assembler::addss(dst, src); }
  void addss(XMMRegister dst, Address src)     { Assembler::addss(dst, src); }
  void addss(XMMRegister dst, AddressLiteral src);

  void addpd(XMMRegister dst, XMMRegister src) { Assembler::addpd(dst, src); }
  void addpd(XMMRegister dst, Address src)     { Assembler::addpd(dst, src); }
  void addpd(XMMRegister dst, AddressLiteral src);

  void divsd(XMMRegister dst, XMMRegister src) { Assembler::divsd(dst, src); }
  void divsd(XMMRegister dst, Address src)     { Assembler::divsd(dst, src); }
  void divsd(XMMRegister dst, AddressLiteral src);

  void divss(XMMRegister dst, XMMRegister src) { Assembler::divss(dst, src); }
  void divss(XMMRegister dst, Address src)     { Assembler::divss(dst, src); }
  void divss(XMMRegister dst, AddressLiteral src);

  // Move Unaligned Double Quadword
  void movdqu(Address dst, XMMRegister src);
  void movdqu(XMMRegister dst, Address src);
  void movdqu(XMMRegister dst, XMMRegister src);
  void movdqu(XMMRegister dst, AddressLiteral src, Register scratchReg = rscratch1);
  // AVX Unaligned forms
  void vmovdqu(Address dst, XMMRegister src);
  void vmovdqu(XMMRegister dst, Address src);
  void vmovdqu(XMMRegister dst, XMMRegister src);
  void vmovdqu(XMMRegister dst, AddressLiteral src, Register scratch_reg = rscratch1);
  void evmovdquq(XMMRegister dst, Address src, int vector_len) { Assembler::evmovdquq(dst, src, vector_len); }
  void evmovdquq(XMMRegister dst, XMMRegister src, int vector_len) { Assembler::evmovdquq(dst, src, vector_len); }
  void evmovdquq(Address dst, XMMRegister src, int vector_len) { Assembler::evmovdquq(dst, src, vector_len); }
  void evmovdquq(XMMRegister dst, AddressLiteral src, int vector_len, Register rscratch);

  // Move Aligned Double Quadword
  void movdqa(XMMRegister dst, Address src)     { Assembler::movdqa(dst, src); }
  void movdqa(XMMRegister dst, XMMRegister src) { Assembler::movdqa(dst, src); }
  void movdqa(XMMRegister dst, AddressLiteral src);

  void movsd(XMMRegister dst, XMMRegister src) { Assembler::movsd(dst, src); }
  void movsd(Address dst, XMMRegister src)     { Assembler::movsd(dst, src); }
  void movsd(XMMRegister dst, Address src)     { Assembler::movsd(dst, src); }
  void movsd(XMMRegister dst, AddressLiteral src);

  void mulpd(XMMRegister dst, XMMRegister src) { Assembler::mulpd(dst, src); }
  void mulpd(XMMRegister dst, Address src)     { Assembler::mulpd(dst, src); }
  void mulpd(XMMRegister dst, AddressLiteral src);

  void mulsd(XMMRegister dst, XMMRegister src) { Assembler::mulsd(dst, src); }
  void mulsd(XMMRegister dst, Address src)     { Assembler::mulsd(dst, src); }
  void mulsd(XMMRegister dst, AddressLiteral src);

  void mulss(XMMRegister dst, XMMRegister src) { Assembler::mulss(dst, src); }
  void mulss(XMMRegister dst, Address src)     { Assembler::mulss(dst, src); }
  void mulss(XMMRegister dst, AddressLiteral src);
  // Carry-Less Multiplication Quadword
  void pclmulldq(XMMRegister dst, XMMRegister src) {
    // 0x00 - multiply lower 64 bits [0:63]
    Assembler::pclmulqdq(dst, src, 0x00);
  }
  void pclmulhdq(XMMRegister dst, XMMRegister src) {
    // 0x11 - multiply upper 64 bits [64:127]
    Assembler::pclmulqdq(dst, src, 0x11);
  }

  void pcmpeqb(XMMRegister dst, XMMRegister src);
  void pcmpeqw(XMMRegister dst, XMMRegister src);

  void pcmpestri(XMMRegister dst, Address src, int imm8);
  void pcmpestri(XMMRegister dst, XMMRegister src, int imm8);

  void pmovzxbw(XMMRegister dst, XMMRegister src);
  void pmovzxbw(XMMRegister dst, Address src);

  void pmovmskb(Register dst, XMMRegister src);

  void ptest(XMMRegister dst, XMMRegister src);

  void sqrtsd(XMMRegister dst, XMMRegister src) { Assembler::sqrtsd(dst, src); }
  void sqrtsd(XMMRegister dst, Address src)     { Assembler::sqrtsd(dst, src); }
  void sqrtsd(XMMRegister dst, AddressLiteral src);

  void roundsd(XMMRegister dst, XMMRegister src, int32_t rmode) { Assembler::roundsd(dst, src, rmode); }
  void roundsd(XMMRegister dst, Address src, int32_t rmode)     { Assembler::roundsd(dst, src, rmode); }
  void roundsd(XMMRegister dst, AddressLiteral src, int32_t rmode, Register scratch_reg);

  void sqrtss(XMMRegister dst, XMMRegister src) { Assembler::sqrtss(dst, src); }
  void sqrtss(XMMRegister dst, Address src)     { Assembler::sqrtss(dst, src); }
  void sqrtss(XMMRegister dst, AddressLiteral src);

  void subsd(XMMRegister dst, XMMRegister src) { Assembler::subsd(dst, src); }
  void subsd(XMMRegister dst, Address src)     { Assembler::subsd(dst, src); }
  void subsd(XMMRegister dst, AddressLiteral src);

  void subss(XMMRegister dst, XMMRegister src) { Assembler::subss(dst, src); }
  void subss(XMMRegister dst, Address src)     { Assembler::subss(dst, src); }
  void subss(XMMRegister dst, AddressLiteral src);

  void ucomiss(XMMRegister dst, XMMRegister src) { Assembler::ucomiss(dst, src); }
  void ucomiss(XMMRegister dst, Address src)     { Assembler::ucomiss(dst, src); }
  void ucomiss(XMMRegister dst, AddressLiteral src);

  void ucomisd(XMMRegister dst, XMMRegister src) { Assembler::ucomisd(dst, src); }
  void ucomisd(XMMRegister dst, Address src)     { Assembler::ucomisd(dst, src); }
  void ucomisd(XMMRegister dst, AddressLiteral src);

  // Bitwise Logical XOR of Packed Double-Precision Floating-Point Values
  void xorpd(XMMRegister dst, XMMRegister src);
  void xorpd(XMMRegister dst, Address src) { Assembler::xorpd(dst, src); }
  void xorpd(XMMRegister dst, AddressLiteral src, Register scratch_reg = rscratch1);

  // Bitwise Logical XOR of Packed Single-Precision Floating-Point Values
  void xorps(XMMRegister dst, XMMRegister src);
  void xorps(XMMRegister dst, Address src) { Assembler::xorps(dst, src); }
  void xorps(XMMRegister dst, AddressLiteral src, Register scratch_reg = rscratch1);

  // Shuffle Bytes
  void pshufb(XMMRegister dst, XMMRegister src) { Assembler::pshufb(dst, src); }
  void pshufb(XMMRegister dst, Address src)     { Assembler::pshufb(dst, src); }
  void pshufb(XMMRegister dst, AddressLiteral src);
  // AVX 3-operands instructions

  void vaddsd(XMMRegister dst, XMMRegister nds, XMMRegister src) { Assembler::vaddsd(dst, nds, src); }
  void vaddsd(XMMRegister dst, XMMRegister nds, Address src)     { Assembler::vaddsd(dst, nds, src); }
  void vaddsd(XMMRegister dst, XMMRegister nds, AddressLiteral src);

  void vaddss(XMMRegister dst, XMMRegister nds, XMMRegister src) { Assembler::vaddss(dst, nds, src); }
  void vaddss(XMMRegister dst, XMMRegister nds, Address src)     { Assembler::vaddss(dst, nds, src); }
  void vaddss(XMMRegister dst, XMMRegister nds, AddressLiteral src);

  void vabsss(XMMRegister dst, XMMRegister nds, XMMRegister src, AddressLiteral negate_field, int vector_len);
  void vabssd(XMMRegister dst, XMMRegister nds, XMMRegister src, AddressLiteral negate_field, int vector_len);

  void vpaddb(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
  void vpaddb(XMMRegister dst, XMMRegister nds, Address src, int vector_len);

  void vpaddw(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
  void vpaddw(XMMRegister dst, XMMRegister nds, Address src, int vector_len);

  void vpaddd(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { Assembler::vpaddd(dst, nds, src, vector_len); }
  void vpaddd(XMMRegister dst, XMMRegister nds, Address src, int vector_len)     { Assembler::vpaddd(dst, nds, src, vector_len); }
  void vpaddd(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch);

  void vpand(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { Assembler::vpand(dst, nds, src, vector_len); }
  void vpand(XMMRegister dst, XMMRegister nds, Address src, int vector_len)     { Assembler::vpand(dst, nds, src, vector_len); }
  void vpand(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register scratch_reg = rscratch1);

  void vpbroadcastw(XMMRegister dst, XMMRegister src, int vector_len);
  void vpbroadcastw(XMMRegister dst, Address src, int vector_len) { Assembler::vpbroadcastw(dst, src, vector_len); }

  void vpcmpeqb(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);

  void vpcmpeqw(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);

  void vpmovzxbw(XMMRegister dst, Address src, int vector_len);
  void vpmovzxbw(XMMRegister dst, XMMRegister src, int vector_len) { Assembler::vpmovzxbw(dst, src, vector_len); }

  void vpmovmskb(Register dst, XMMRegister src);

  void vpmullw(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
  void vpmullw(XMMRegister dst, XMMRegister nds, Address src, int vector_len);

  void vpsubb(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
  void vpsubb(XMMRegister dst, XMMRegister nds, Address src, int vector_len);

  void vpsubw(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
  void vpsubw(XMMRegister dst, XMMRegister nds, Address src, int vector_len);

  void vpsraw(XMMRegister dst, XMMRegister nds, XMMRegister shift, int vector_len);
  void vpsraw(XMMRegister dst, XMMRegister nds, int shift, int vector_len);

  void evpsraq(XMMRegister dst, XMMRegister nds, XMMRegister shift, int vector_len);
  void evpsraq(XMMRegister dst, XMMRegister nds, int shift, int vector_len);

  void vpsrlw(XMMRegister dst, XMMRegister nds, XMMRegister shift, int vector_len);
  void vpsrlw(XMMRegister dst, XMMRegister nds, int shift, int vector_len);

  void vpsllw(XMMRegister dst, XMMRegister nds, XMMRegister shift, int vector_len);
  void vpsllw(XMMRegister dst, XMMRegister nds, int shift, int vector_len);
  void vptest(XMMRegister dst, XMMRegister src);

  void punpcklbw(XMMRegister dst, XMMRegister src);
  void punpcklbw(XMMRegister dst, Address src) { Assembler::punpcklbw(dst, src); }

  void pshufd(XMMRegister dst, Address src, int mode);
  void pshufd(XMMRegister dst, XMMRegister src, int mode) { Assembler::pshufd(dst, src, mode); }

  void pshuflw(XMMRegister dst, XMMRegister src, int mode);
  void pshuflw(XMMRegister dst, Address src, int mode) { Assembler::pshuflw(dst, src, mode); }

  void vandpd(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { Assembler::vandpd(dst, nds, src, vector_len); }
  void vandpd(XMMRegister dst, XMMRegister nds, Address src, int vector_len)     { Assembler::vandpd(dst, nds, src, vector_len); }
  void vandpd(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register scratch_reg = rscratch1);

  void vandps(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { Assembler::vandps(dst, nds, src, vector_len); }
  void vandps(XMMRegister dst, XMMRegister nds, Address src, int vector_len)     { Assembler::vandps(dst, nds, src, vector_len); }
  void vandps(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register scratch_reg = rscratch1);

  void vdivsd(XMMRegister dst, XMMRegister nds, XMMRegister src) { Assembler::vdivsd(dst, nds, src); }
  void vdivsd(XMMRegister dst, XMMRegister nds, Address src)     { Assembler::vdivsd(dst, nds, src); }
  void vdivsd(XMMRegister dst, XMMRegister nds, AddressLiteral src);

  void vdivss(XMMRegister dst, XMMRegister nds, XMMRegister src) { Assembler::vdivss(dst, nds, src); }
  void vdivss(XMMRegister dst, XMMRegister nds, Address src)     { Assembler::vdivss(dst, nds, src); }
  void vdivss(XMMRegister dst, XMMRegister nds, AddressLiteral src);

  void vmulsd(XMMRegister dst, XMMRegister nds, XMMRegister src) { Assembler::vmulsd(dst, nds, src); }
  void vmulsd(XMMRegister dst, XMMRegister nds, Address src)     { Assembler::vmulsd(dst, nds, src); }
  void vmulsd(XMMRegister dst, XMMRegister nds, AddressLiteral src);

  void vmulss(XMMRegister dst, XMMRegister nds, XMMRegister src) { Assembler::vmulss(dst, nds, src); }
  void vmulss(XMMRegister dst, XMMRegister nds, Address src)     { Assembler::vmulss(dst, nds, src); }
  void vmulss(XMMRegister dst, XMMRegister nds, AddressLiteral src);

  void vsubsd(XMMRegister dst, XMMRegister nds, XMMRegister src) { Assembler::vsubsd(dst, nds, src); }
  void vsubsd(XMMRegister dst, XMMRegister nds, Address src)     { Assembler::vsubsd(dst, nds, src); }
  void vsubsd(XMMRegister dst, XMMRegister nds, AddressLiteral src);

  void vsubss(XMMRegister dst, XMMRegister nds, XMMRegister src) { Assembler::vsubss(dst, nds, src); }
  void vsubss(XMMRegister dst, XMMRegister nds, Address src)     { Assembler::vsubss(dst, nds, src); }
  void vsubss(XMMRegister dst, XMMRegister nds, AddressLiteral src);

  void vnegatess(XMMRegister dst, XMMRegister nds, AddressLiteral src);
  void vnegatesd(XMMRegister dst, XMMRegister nds, AddressLiteral src);
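  // vnegatess/vnegatesd flip the sign of a scalar float/double by XORing it
  // with a sign-bit mask loaded from 'src' (the same scheme vabsss/vabssd
  // above use with an all-but-sign mask). Sketch, assuming the usual
  // '#define __ _masm->' convention and a hypothetical, properly aligned
  // constant (illustrative only):
  //
  //   __ vnegatess(xmm0, xmm0, ExternalAddress((address) float_signflip));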
  // AVX Vector instructions

  void vxorpd(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { Assembler::vxorpd(dst, nds, src, vector_len); }
  void vxorpd(XMMRegister dst, XMMRegister nds, Address src, int vector_len)     { Assembler::vxorpd(dst, nds, src, vector_len); }
  void vxorpd(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register scratch_reg = rscratch1);

  void vxorps(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { Assembler::vxorps(dst, nds, src, vector_len); }
  void vxorps(XMMRegister dst, XMMRegister nds, Address src, int vector_len)     { Assembler::vxorps(dst, nds, src, vector_len); }
  void vxorps(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register scratch_reg = rscratch1);

  void vpxor(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
    if (UseAVX > 1 || (vector_len < 1)) // 256-bit vpxor is available only with AVX2
      Assembler::vpxor(dst, nds, src, vector_len);
    else
      Assembler::vxorpd(dst, nds, src, vector_len);
  }
  void vpxor(XMMRegister dst, XMMRegister nds, Address src, int vector_len) {
    if (UseAVX > 1 || (vector_len < 1)) // 256-bit vpxor is available only with AVX2
      Assembler::vpxor(dst, nds, src, vector_len);
    else
      Assembler::vxorpd(dst, nds, src, vector_len);
  }
  void vpxor(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register scratch_reg = rscratch1);

  // Simple version for AVX2 256-bit vectors
  void vpxor(XMMRegister dst, XMMRegister src) { Assembler::vpxor(dst, dst, src, true); }
  void vpxor(XMMRegister dst, Address src)     { Assembler::vpxor(dst, dst, src, true); }

  void vinserti128(XMMRegister dst, XMMRegister nds, XMMRegister src, uint8_t imm8) {
    if (UseAVX > 2 && VM_Version::supports_avx512novl()) {
      Assembler::vinserti32x4(dst, dst, src, imm8);
    } else if (UseAVX > 1) {
      // vinserti128 is available only in AVX2
      Assembler::vinserti128(dst, nds, src, imm8);
    } else {
      Assembler::vinsertf128(dst, nds, src, imm8);
    }
  }

  void vinserti128(XMMRegister dst, XMMRegister nds, Address src, uint8_t imm8) {
    if (UseAVX > 2 && VM_Version::supports_avx512novl()) {
      Assembler::vinserti32x4(dst, dst, src, imm8);
    } else if (UseAVX > 1) {
      // vinserti128 is available only in AVX2
      Assembler::vinserti128(dst, nds, src, imm8);
    } else {
      Assembler::vinsertf128(dst, nds, src, imm8);
    }
  }

  void vextracti128(XMMRegister dst, XMMRegister src, uint8_t imm8) {
    if (UseAVX > 2 && VM_Version::supports_avx512novl()) {
      Assembler::vextracti32x4(dst, src, imm8);
    } else if (UseAVX > 1) {
      // vextracti128 is available only in AVX2
      Assembler::vextracti128(dst, src, imm8);
    } else {
      Assembler::vextractf128(dst, src, imm8);
    }
  }

  void vextracti128(Address dst, XMMRegister src, uint8_t imm8) {
    if (UseAVX > 2 && VM_Version::supports_avx512novl()) {
      Assembler::vextracti32x4(dst, src, imm8);
    } else if (UseAVX > 1) {
      // vextracti128 is available only in AVX2
      Assembler::vextracti128(dst, src, imm8);
    } else {
      Assembler::vextractf128(dst, src, imm8);
    }
  }
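  // The vinserti128/vextracti128 wrappers above pick the best available
  // encoding at code-generation time: the EVEX 32x4 form on AVX-512 parts
  // without VL support, the AVX2 integer form otherwise, and the AVX1
  // floating-point form as a last resort (legal, since insert/extract only
  // moves bits, not typed lanes). Sketch, assuming the usual
  // '#define __ _masm->' convention (illustrative only):
  //
  //   __ vextracti128(xmm1, xmm0, 1);  // xmm1 = bits [255:128] of ymm0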
  // 128bit copy to/from high 128 bits of 256bit (YMM) vector registers
  void vinserti128_high(XMMRegister dst, XMMRegister src) {
    vinserti128(dst, dst, src, 1);
  }
  void vinserti128_high(XMMRegister dst, Address src) {
    vinserti128(dst, dst, src, 1);
  }
  void vextracti128_high(XMMRegister dst, XMMRegister src) {
    vextracti128(dst, src, 1);
  }
  void vextracti128_high(Address dst, XMMRegister src) {
    vextracti128(dst, src, 1);
  }

  void vinsertf128_high(XMMRegister dst, XMMRegister src) {
    if (UseAVX > 2 && VM_Version::supports_avx512novl()) {
      Assembler::vinsertf32x4(dst, dst, src, 1);
    } else {
      Assembler::vinsertf128(dst, dst, src, 1);
    }
  }

  void vinsertf128_high(XMMRegister dst, Address src) {
    if (UseAVX > 2 && VM_Version::supports_avx512novl()) {
      Assembler::vinsertf32x4(dst, dst, src, 1);
    } else {
      Assembler::vinsertf128(dst, dst, src, 1);
    }
  }

  void vextractf128_high(XMMRegister dst, XMMRegister src) {
    if (UseAVX > 2 && VM_Version::supports_avx512novl()) {
      Assembler::vextractf32x4(dst, src, 1);
    } else {
      Assembler::vextractf128(dst, src, 1);
    }
  }

  void vextractf128_high(Address dst, XMMRegister src) {
    if (UseAVX > 2 && VM_Version::supports_avx512novl()) {
      Assembler::vextractf32x4(dst, src, 1);
    } else {
      Assembler::vextractf128(dst, src, 1);
    }
  }

  // 256bit copy to/from high 256 bits of 512bit (ZMM) vector registers
  void vinserti64x4_high(XMMRegister dst, XMMRegister src) {
    Assembler::vinserti64x4(dst, dst, src, 1);
  }
  void vinsertf64x4_high(XMMRegister dst, XMMRegister src) {
    Assembler::vinsertf64x4(dst, dst, src, 1);
  }
  void vextracti64x4_high(XMMRegister dst, XMMRegister src) {
    Assembler::vextracti64x4(dst, src, 1);
  }
  void vextractf64x4_high(XMMRegister dst, XMMRegister src) {
    Assembler::vextractf64x4(dst, src, 1);
  }
  void vextractf64x4_high(Address dst, XMMRegister src) {
    Assembler::vextractf64x4(dst, src, 1);
  }
  void vinsertf64x4_high(XMMRegister dst, Address src) {
    Assembler::vinsertf64x4(dst, dst, src, 1);
  }

  // 128bit copy to/from low 128 bits of 256bit (YMM) vector registers
  void vinserti128_low(XMMRegister dst, XMMRegister src) {
    vinserti128(dst, dst, src, 0);
  }
  void vinserti128_low(XMMRegister dst, Address src) {
    vinserti128(dst, dst, src, 0);
  }
  void vextracti128_low(XMMRegister dst, XMMRegister src) {
    vextracti128(dst, src, 0);
  }
  void vextracti128_low(Address dst, XMMRegister src) {
    vextracti128(dst, src, 0);
  }

  void vinsertf128_low(XMMRegister dst, XMMRegister src) {
    if (UseAVX > 2 && VM_Version::supports_avx512novl()) {
      Assembler::vinsertf32x4(dst, dst, src, 0);
    } else {
      Assembler::vinsertf128(dst, dst, src, 0);
    }
  }

  void vinsertf128_low(XMMRegister dst, Address src) {
    if (UseAVX > 2 && VM_Version::supports_avx512novl()) {
      Assembler::vinsertf32x4(dst, dst, src, 0);
    } else {
      Assembler::vinsertf128(dst, dst, src, 0);
    }
  }

  void vextractf128_low(XMMRegister dst, XMMRegister src) {
    if (UseAVX > 2 && VM_Version::supports_avx512novl()) {
      Assembler::vextractf32x4(dst, src, 0);
    } else {
      Assembler::vextractf128(dst, src, 0);
    }
  }

  void vextractf128_low(Address dst, XMMRegister src) {
    if (UseAVX > 2 && VM_Version::supports_avx512novl()) {
      Assembler::vextractf32x4(dst, src, 0);
    } else {
      Assembler::vextractf128(dst, src, 0);
    }
  }

  // 256bit copy to/from low 256 bits of 512bit (ZMM) vector registers
  void vinserti64x4_low(XMMRegister dst, XMMRegister src) {
    Assembler::vinserti64x4(dst, dst, src, 0);
  }
  void vinsertf64x4_low(XMMRegister dst, XMMRegister src) {
    Assembler::vinsertf64x4(dst, dst, src, 0);
  }
  void vextracti64x4_low(XMMRegister dst, XMMRegister src) {
    Assembler::vextracti64x4(dst, src, 0);
  }
  void vextractf64x4_low(XMMRegister dst, XMMRegister src) {
    Assembler::vextractf64x4(dst, src, 0);
  }
  void vextractf64x4_low(Address dst, XMMRegister src) {
    Assembler::vextractf64x4(dst, src, 0);
  }
  void vinsertf64x4_low(XMMRegister dst, Address src) {
    Assembler::vinsertf64x4(dst, dst, src, 0);
  }
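  // A typical use of the _high/_low helpers is a horizontal reduction that
  // narrows a wide accumulator step by step. Sketch, assuming the usual
  // '#define __ _masm->' convention (illustrative only):
  //
  //   __ vextracti64x4_high(xmm1, xmm0);                   // xmm1 = zmm0[511:256]
  //   __ vpaddd(xmm0, xmm0, xmm1, Assembler::AVX_256bit);  // 512 -> 256 bits
  //   __ vextracti128_high(xmm1, xmm0);                    // xmm1 = ymm0[255:128]
  //   __ vpaddd(xmm0, xmm0, xmm1, Assembler::AVX_128bit);  // 256 -> 128 bits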
  // Carry-Less Multiplication Quadword
  void vpclmulldq(XMMRegister dst, XMMRegister nds, XMMRegister src) {
    // 0x00 - multiply lower 64 bits [0:63]
    Assembler::vpclmulqdq(dst, nds, src, 0x00);
  }
  void vpclmulhdq(XMMRegister dst, XMMRegister nds, XMMRegister src) {
    // 0x11 - multiply upper 64 bits [64:127]
    Assembler::vpclmulqdq(dst, nds, src, 0x11);
  }
  void vpclmullqhqdq(XMMRegister dst, XMMRegister nds, XMMRegister src) {
    // 0x10 - multiply nds[0:63] and src[64:127]
    Assembler::vpclmulqdq(dst, nds, src, 0x10);
  }
  void vpclmulhqlqdq(XMMRegister dst, XMMRegister nds, XMMRegister src) {
    // 0x01 - multiply nds[64:127] and src[0:63]
    Assembler::vpclmulqdq(dst, nds, src, 0x01);
  }

  void evpclmulldq(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
    // 0x00 - multiply lower 64 bits [0:63]
    Assembler::evpclmulqdq(dst, nds, src, 0x00, vector_len);
  }
  void evpclmulhdq(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
    // 0x11 - multiply upper 64 bits [64:127]
    Assembler::evpclmulqdq(dst, nds, src, 0x11, vector_len);
  }

  // Data

  void cmov32( Condition cc, Register dst, Address src);
  void cmov32( Condition cc, Register dst, Register src);

  void cmov(   Condition cc, Register dst, Register src) { cmovptr(cc, dst, src); }

  void cmovptr(Condition cc, Register dst, Address src)  { LP64_ONLY(cmovq(cc, dst, src)) NOT_LP64(cmov32(cc, dst, src)); }
  void cmovptr(Condition cc, Register dst, Register src) { LP64_ONLY(cmovq(cc, dst, src)) NOT_LP64(cmov32(cc, dst, src)); }

  void movoop(Register dst, jobject obj);
  void movoop(Address dst, jobject obj);

  void mov_metadata(Register dst, Metadata* obj);
  void mov_metadata(Address dst, Metadata* obj);

  void movptr(ArrayAddress dst, Register src);
  // can this do an lea?
  void movptr(Register dst, ArrayAddress src);

  void movptr(Register dst, Address src);

#ifdef _LP64
  void movptr(Register dst, AddressLiteral src, Register scratch = rscratch1);
#else
  void movptr(Register dst, AddressLiteral src, Register scratch = noreg); // Scratch reg is ignored in 32-bit
#endif

  void movptr(Register dst, intptr_t src);
  void movptr(Register dst, Register src);
  void movptr(Address dst, intptr_t src);

  void movptr(Address dst, Register src);

  void movptr(Register dst, RegisterOrConstant src) {
    if (src.is_constant()) movptr(dst, src.as_constant());
    else                   movptr(dst, src.as_register());
  }
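  // movptr() abstracts over the 32-/64-bit word size, and the
  // RegisterOrConstant overload dispatches at code-generation time, not at
  // run time. Sketch, assuming the usual '#define __ _masm->' convention
  // (illustrative only):
  //
  //   __ movptr(rax, RegisterOrConstant((intptr_t) 16)); // emits 'mov rax, 16'
  //   __ movptr(rax, RegisterOrConstant(rbx));           // emits 'mov rax, rbx'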
#ifdef _LP64
  // Generally the next two are only used for moving NULL, although there
  // are situations in initializing the mark word where they could be used.
  // They are dangerous.

  // They only exist on LP64 so that int32_t and intptr_t are not the same
  // and we have ambiguous declarations.

  void movptr(Address dst, int32_t imm32);
  void movptr(Register dst, int32_t imm32);
#endif // _LP64

  // to avoid hiding movl
  void mov32(AddressLiteral dst, Register src);
  void mov32(Register dst, AddressLiteral src);

  // to avoid hiding movb
  void movbyte(ArrayAddress dst, int src);

  // Import other mov() methods from the parent class or else
  // they will be hidden by the following overloading declarations.
  using Assembler::movdl;
  using Assembler::movq;
  void movdl(XMMRegister dst, AddressLiteral src);
  void movq(XMMRegister dst, AddressLiteral src);

  // Can push value or effective address
  void pushptr(AddressLiteral src);

  void pushptr(Address src) { LP64_ONLY(pushq(src)) NOT_LP64(pushl(src)); }
  void popptr(Address src)  { LP64_ONLY(popq(src))  NOT_LP64(popl(src)); }

  void pushoop(jobject obj);
  void pushklass(Metadata* obj);

  // Sign-extend an 'l' (32-bit) value to a ptr-sized element as needed
  void movl2ptr(Register dst, Address src)  { LP64_ONLY(movslq(dst, src)) NOT_LP64(movl(dst, src)); }
  void movl2ptr(Register dst, Register src) { LP64_ONLY(movslq(dst, src)) NOT_LP64(if (dst != src) movl(dst, src)); }

#ifdef COMPILER2
  // Generic instruction support for C2 code generation from .ad files
  void vabsnegd(int opcode, XMMRegister dst, XMMRegister src, Register scr);
  void vabsnegd(int opcode, XMMRegister dst, XMMRegister src, int vector_len, Register scr);
  void vabsnegf(int opcode, XMMRegister dst, XMMRegister src, Register scr);
  void vabsnegf(int opcode, XMMRegister dst, XMMRegister src, int vector_len, Register scr);
  void vextendbw(bool sign, XMMRegister dst, XMMRegister src, int vector_len);
  void vextendbw(bool sign, XMMRegister dst, XMMRegister src);
  void vshiftd(int opcode, XMMRegister dst, XMMRegister src);
  void vshiftd(int opcode, XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
  void vshiftw(int opcode, XMMRegister dst, XMMRegister src);
  void vshiftw(int opcode, XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
  void vshiftq(int opcode, XMMRegister dst, XMMRegister src);
  void vshiftq(int opcode, XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
#endif

  // C2 compiled method's prolog code.
  void verified_entry(int framesize, int stack_bang_size, bool fp_mode_24b, bool is_stub);

  // Clear memory of size 'cnt' qwords, starting at 'base';
  // if 'is_large' is set, do not try to produce a short loop.
  void clear_mem(Register base, Register cnt, Register rtmp, XMMRegister xtmp, bool is_large);

  // Clear memory of size 'cnt' qwords, starting at 'base', using XMM/YMM registers.
  void xmm_clear_mem(Register base, Register cnt, XMMRegister xtmp);

#ifdef COMPILER2
  void string_indexof_char(Register str1, Register cnt1, Register ch, Register result,
                           XMMRegister vec1, XMMRegister vec2, XMMRegister vec3, Register tmp);

  // IndexOf strings.
  // Small strings are loaded through the stack if they cross a page boundary.
  void string_indexof(Register str1, Register str2,
                      Register cnt1, Register cnt2,
                      int int_cnt2, Register result,
                      XMMRegister vec, Register tmp,
                      int ae);
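  // The 'ae' argument encodes the character encodings of the two strings
  // (C2's StrIntrinsicNode::LL/UL/UU for the Latin1/UTF-16 combinations) and
  // selects the element size of the generated loop; int_cnt2 carries the
  // substring length when it is a compile-time constant, or -1 otherwise.
  // A call sketch with a non-constant substring, assuming the usual
  // '#define __ _masm->' convention (register assignment illustrative only):
  //
  //   __ string_indexof(rsi, rdi, rdx, rax, -1 /* cnt2 not constant */,
  //                     rbx, xmm0, rcx, StrIntrinsicNode::UU);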
  // IndexOf for constant substrings with size >= 8 elements
  // which don't need to be loaded through the stack.
  void string_indexofC8(Register str1, Register str2,
                        Register cnt1, Register cnt2,
                        int int_cnt2, Register result,
                        XMMRegister vec, Register tmp,
                        int ae);

  // Smallest code: we don't need to load through the stack;
  // check the string tail.

  // helper function for string_compare
  void load_next_elements(Register elem1, Register elem2, Register str1, Register str2,
                          Address::ScaleFactor scale, Address::ScaleFactor scale1,
                          Address::ScaleFactor scale2, Register index, int ae);
  // Compare strings.
  void string_compare(Register str1, Register str2,
                      Register cnt1, Register cnt2, Register result,
                      XMMRegister vec1, int ae);

  // Search for a non-ASCII character (negative byte value) in a byte array;
  // return true if one is found and false otherwise.
  void has_negatives(Register ary1, Register len,
                     Register result, Register tmp1,
                     XMMRegister vec1, XMMRegister vec2);

  // Compare char[] or byte[] arrays.
  void arrays_equals(bool is_array_equ, Register ary1, Register ary2,
                     Register limit, Register result, Register chr,
                     XMMRegister vec1, XMMRegister vec2, bool is_char);

#endif

  // Fill primitive arrays
  void generate_fill(BasicType t, bool aligned,
                     Register to, Register value, Register count,
                     Register rtmp, XMMRegister xtmp);

  void encode_iso_array(Register src, Register dst, Register len,
                        XMMRegister tmp1, XMMRegister tmp2, XMMRegister tmp3,
                        XMMRegister tmp4, Register tmp5, Register result);

#ifdef _LP64
  void add2_with_carry(Register dest_hi, Register dest_lo, Register src1, Register src2);
  void multiply_64_x_64_loop(Register x, Register xstart, Register x_xstart,
                             Register y, Register y_idx, Register z,
                             Register carry, Register product,
                             Register idx, Register kdx);
  void multiply_add_128_x_128(Register x_xstart, Register y, Register z,
                              Register yz_idx, Register idx,
                              Register carry, Register product, int offset);
  void multiply_128_x_128_bmi2_loop(Register y, Register z,
                                    Register carry, Register carry2,
                                    Register idx, Register jdx,
                                    Register yz_idx1, Register yz_idx2,
                                    Register tmp, Register tmp3, Register tmp4);
  void multiply_128_x_128_loop(Register x_xstart, Register y, Register z,
                               Register yz_idx, Register idx, Register jdx,
                               Register carry, Register product,
                               Register carry2);
  void multiply_to_len(Register x, Register xlen, Register y, Register ylen, Register z, Register zlen,
                       Register tmp1, Register tmp2, Register tmp3, Register tmp4, Register tmp5);
  void square_rshift(Register x, Register len, Register z, Register tmp1, Register tmp3,
                     Register tmp4, Register tmp5, Register rdxReg, Register raxReg);
  void multiply_add_64_bmi2(Register sum, Register op1, Register op2, Register carry,
                            Register tmp2);
  void multiply_add_64(Register sum, Register op1, Register op2, Register carry,
                       Register rdxReg, Register raxReg);
  void add_one_64(Register z, Register zlen, Register carry, Register tmp1);
  void lshift_by_1(Register x, Register len, Register z, Register zlen, Register tmp1, Register tmp2,
                   Register tmp3, Register tmp4);
  void square_to_len(Register x, Register len, Register z, Register zlen, Register tmp1, Register tmp2,
                     Register tmp3, Register tmp4, Register tmp5, Register rdxReg, Register raxReg);
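  // These helpers back the BigInteger.multiplyToLen()/squareToLen()
  // intrinsics: multiply_to_len() computes z = x * y over 32-bit limbs, with
  // z sized to hold xlen + ylen limbs, and square_to_len() specializes the
  // x == y case. The Java-side shape, for reference (illustrative only):
  //
  //   // int[] z = new int[xlen + ylen];
  //   // implMultiplyToLen(x, xlen, y, ylen, z);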
  void mul_add_128_x_32_loop(Register out, Register in, Register offset, Register len, Register tmp1,
                             Register tmp2, Register tmp3, Register tmp4, Register tmp5, Register rdxReg,
                             Register raxReg);
  void mul_add(Register out, Register in, Register offset, Register len, Register k, Register tmp1,
               Register tmp2, Register tmp3, Register tmp4, Register tmp5, Register rdxReg,
               Register raxReg);
  void vectorized_mismatch(Register obja, Register objb, Register length, Register log2_array_indxscale,
                           Register result, Register tmp1, Register tmp2,
                           XMMRegister vec1, XMMRegister vec2, XMMRegister vec3);
#endif

  // CRC32 code for java.util.zip.CRC32::updateBytes() intrinsic.
  void update_byte_crc32(Register crc, Register val, Register table);
  void kernel_crc32(Register crc, Register buf, Register len, Register table, Register tmp);
  // CRC32C code for java.util.zip.CRC32C::updateBytes() intrinsic
  // Note on the naming convention:
  // Prefix w = register only used on a Westmere+ architecture
  // Prefix n = register only used on a Nehalem architecture
#ifdef _LP64
  void crc32c_ipl_alg4(Register in_out, uint32_t n,
                       Register tmp1, Register tmp2, Register tmp3);
#else
  void crc32c_ipl_alg4(Register in_out, uint32_t n,
                       Register tmp1, Register tmp2, Register tmp3,
                       XMMRegister xtmp1, XMMRegister xtmp2);
#endif
  void crc32c_pclmulqdq(XMMRegister w_xtmp1,
                        Register in_out,
                        uint32_t const_or_pre_comp_const_index, bool is_pclmulqdq_supported,
                        XMMRegister w_xtmp2,
                        Register tmp1,
                        Register n_tmp2, Register n_tmp3);
  void crc32c_rec_alt2(uint32_t const_or_pre_comp_const_index_u1, uint32_t const_or_pre_comp_const_index_u2, bool is_pclmulqdq_supported, Register in_out, Register in1, Register in2,
                       XMMRegister w_xtmp1, XMMRegister w_xtmp2, XMMRegister w_xtmp3,
                       Register tmp1, Register tmp2,
                       Register n_tmp3);
  void crc32c_proc_chunk(uint32_t size, uint32_t const_or_pre_comp_const_index_u1, uint32_t const_or_pre_comp_const_index_u2, bool is_pclmulqdq_supported,
                         Register in_out1, Register in_out2, Register in_out3,
                         Register tmp1, Register tmp2, Register tmp3,
                         XMMRegister w_xtmp1, XMMRegister w_xtmp2, XMMRegister w_xtmp3,
                         Register tmp4, Register tmp5,
                         Register n_tmp6);
  void crc32c_ipl_alg2_alt2(Register in_out, Register in1, Register in2,
                            Register tmp1, Register tmp2, Register tmp3,
                            Register tmp4, Register tmp5, Register tmp6,
                            XMMRegister w_xtmp1, XMMRegister w_xtmp2, XMMRegister w_xtmp3,
                            bool is_pclmulqdq_supported);
  // Fold 128-bit data chunk
  void fold_128bit_crc32(XMMRegister xcrc, XMMRegister xK, XMMRegister xtmp, Register buf, int offset);
  void fold_128bit_crc32(XMMRegister xcrc, XMMRegister xK, XMMRegister xtmp, XMMRegister xbuf);
  // Fold 8-bit data
  void fold_8bit_crc32(Register crc, Register table, Register tmp);
  void fold_8bit_crc32(XMMRegister crc, Register table, XMMRegister xtmp, Register tmp);
  void fold_128bit_crc32_avx512(XMMRegister xcrc, XMMRegister xK, XMMRegister xtmp, Register buf, int offset);
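  // The fold helpers implement the standard PCLMULQDQ-based CRC scheme: the
  // running 128-bit remainder in 'xcrc' is carry-less-multiplied by the
  // precomputed constants in 'xK' and XORed with the next 16 bytes of input,
  // so each fold consumes one XMM-width chunk of the buffer. Sketch, assuming
  // the usual '#define __ _masm->' convention (register choice illustrative
  // only):
  //
  //   __ fold_128bit_crc32(xmm1, xmm0, xmm5, rsi, 16); // fold buf[16..31] into xmm1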
  // Compress char[] array to byte[].
  void char_array_compress(Register src, Register dst, Register len,
                           XMMRegister tmp1, XMMRegister tmp2, XMMRegister tmp3,
                           XMMRegister tmp4, Register tmp5, Register result);

  // Inflate byte[] array to char[].
  void byte_array_inflate(Register src, Register dst, Register len,
                          XMMRegister tmp1, Register tmp2);

#ifdef _LP64
  void convert_f2i(Register dst, XMMRegister src);
  void convert_d2i(Register dst, XMMRegister src);
  void convert_f2l(Register dst, XMMRegister src);
  void convert_d2l(Register dst, XMMRegister src);

  void cache_wb(Address line);
  void cache_wbsync(bool is_pre);
#endif // _LP64
};

/**
 * class SkipIfEqual:
 *
 * Instantiating this class emits code that conditionally jumps around
 * everything emitted between the instance's creation and its automatic
 * destruction at the end of the enclosing scope: the emitted code is
 * skipped at run time when the flag passed to the constructor equals
 * the given value.
 */
class SkipIfEqual {
 private:
  MacroAssembler* _masm;
  Label _label;

 public:
  SkipIfEqual(MacroAssembler*, const bool* flag_addr, bool value);
  ~SkipIfEqual();
};

#endif // CPU_X86_MACROASSEMBLER_X86_HPP