/*
 * Copyright (c) 1997, 2019, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef CPU_X86_MACROASSEMBLER_X86_HPP
#define CPU_X86_MACROASSEMBLER_X86_HPP

#include "asm/assembler.hpp"
#include "utilities/macros.hpp"
#include "runtime/rtmLocking.hpp"
#include "runtime/signature.hpp"

class ciValueKlass;

// MacroAssembler extends Assembler by frequently used macros.
//
// Instructions for which a 'better' code sequence exists depending
// on arguments should also go in here.

class MacroAssembler: public Assembler {
  friend class LIR_Assembler;
  friend class Runtime1;      // as_Address()

 public:
  // Support for VM calls
  //
  // This is the base routine called by the different versions of call_VM_leaf. The interpreter
  // may customize this version by overriding it for its purposes (e.g., to save/restore
  // additional registers when doing a VM call).

  virtual void call_VM_leaf_base(
    address entry_point,         // the entry point
    int     number_of_arguments  // the number of arguments to pop after the call
  );

 protected:
  // This is the base routine called by the different versions of call_VM. The interpreter
  // may customize this version by overriding it for its purposes (e.g., to save/restore
  // additional registers when doing a VM call).
  //
  // If no java_thread register is specified (noreg), then rdi will be used instead. call_VM_base
  // returns the register which contains the thread upon return. If a thread register has been
  // specified, the return value will correspond to that register. If no last_java_sp is specified
  // (noreg), then rsp will be used instead.
  virtual void call_VM_base(      // returns the register containing the thread upon return
    Register oop_result,          // where an oop-result ends up if any; use noreg otherwise
    Register java_thread,         // the thread if computed before; use noreg otherwise
    Register last_java_sp,        // to set up last_Java_frame in stubs; use noreg otherwise
    address  entry_point,         // the entry point
    int      number_of_arguments, // the number of arguments (w/o thread) to pop after the call
    bool     check_exceptions     // whether to check for pending exceptions after return
  );

  void call_VM_helper(Register oop_result, address entry_point, int number_of_arguments, bool check_exceptions = true);

  // helpers for FPU flag access
  // tmp is a temporary register, if none is available use noreg
  void save_rax   (Register tmp);
  void restore_rax(Register tmp);

 public:
  MacroAssembler(CodeBuffer* code) : Assembler(code) {}

  // These routines should emit JVMTI PopFrame and ForceEarlyReturn handling code.
  // The implementation is only non-empty for the InterpreterMacroAssembler,
  // as only the interpreter handles PopFrame and ForceEarlyReturn requests.
  virtual void check_and_handle_popframe(Register java_thread);
  virtual void check_and_handle_earlyret(Register java_thread);

  Address as_Address(AddressLiteral adr);
  Address as_Address(ArrayAddress adr);

  // Support for NULL-checks
  //
  // Generates code that causes a NULL OS exception if the content of reg is NULL.
  // If the accessed location is M[reg + offset] and the offset is known, provide the
  // offset. No explicit code generation is needed if the offset is within a certain
  // range (0 <= offset <= page_size).

  void null_check(Register reg, int offset = -1);
  static bool needs_explicit_null_check(intptr_t offset);
  static bool uses_implicit_null_check(void* address);

  // valueKlass queries, kills temp_reg
  void test_klass_is_value(Register klass, Register temp_reg, Label& is_value);
  void test_klass_is_empty_value(Register klass, Register temp_reg, Label& is_empty_value);

  // Get the default value oop for the given ValueKlass
  void get_default_value_oop(Register value_klass, Register temp_reg, Register obj);
  // The empty value oop, for the given ValueKlass ("empty" as in no instance fields)
  // get_default_value_oop with extra assertion for empty value klass
  void get_empty_value_oop(Register value_klass, Register temp_reg, Register obj);

  void test_field_is_flattenable(Register flags, Register temp_reg, Label& is_flattenable);
  void test_field_is_not_flattenable(Register flags, Register temp_reg, Label& notFlattenable);
  void test_field_is_flattened(Register flags, Register temp_reg, Label& is_flattened);

  // Check oops array storage properties, i.e. flattened and/or null-free
  void test_flattened_array_oop(Register oop, Register temp_reg, Label& is_flattened_array);
  void test_non_flattened_array_oop(Register oop, Register temp_reg, Label& is_non_flattened_array);
  void test_null_free_array_oop(Register oop, Register temp_reg, Label& is_null_free_array);
  void test_non_null_free_array_oop(Register oop, Register temp_reg, Label& is_non_null_free_array);
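
  // Illustrative note on the NULL-check support above (a sketch, not part
  // of this class; registers and offset are hypothetical, __ is the usual
  // masm-> shorthand):
  //
  //   __ null_check(rbx, 16);           // no code emitted: 0 <= 16 <= page_size
  //   __ movl(rax, Address(rbx, 16));   // a NULL rbx faults here instead
  //
  // The hardware fault on the first protected page stands in for an explicit
  // test; needs_explicit_null_check(offset) reports when the offset is too
  // large for that trick and a real compare must be emitted.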
  // Required platform-specific helpers for Label::patch_instructions.
  // They _shadow_ the declarations in AbstractAssembler, which are undefined.
  void pd_patch_instruction(address branch, address target, const char* file, int line) {
    unsigned char op = branch[0];
    assert(op == 0xE8 /* call */ ||
           op == 0xE9 /* jmp */ ||
           op == 0xEB /* short jmp */ ||
           (op & 0xF0) == 0x70 /* short jcc */ ||
           (op == 0x0F && (branch[1] & 0xF0) == 0x80) /* jcc */ ||
           (op == 0xC7 && branch[1] == 0xF8) /* xbegin */,
           "Invalid opcode at patch point");

    if (op == 0xEB || (op & 0xF0) == 0x70) {
      // short offset operators (jmp and jcc)
      char* disp = (char*) &branch[1];
      int imm8 = target - (address) &disp[1];
      guarantee(this->is8bit(imm8), "Short forward jump exceeds 8-bit offset at %s:%d",
                file == NULL ? "<NULL>" : file, line);
      *disp = imm8;
    } else {
      int* disp = (int*) &branch[(op == 0x0F || op == 0xC7) ? 2 : 1];
      int imm32 = target - (address) &disp[1];
      *disp = imm32;
    }
  }

  // The following 4 methods return the offset of the appropriate move instruction

  // Support for fast byte/short loading with zero extension (depending on particular CPU)
  int load_unsigned_byte(Register dst, Address src);
  int load_unsigned_short(Register dst, Address src);

  // Support for fast byte/short loading with sign extension (depending on particular CPU)
  int load_signed_byte(Register dst, Address src);
  int load_signed_short(Register dst, Address src);

  // Support for sign-extension (hi:lo = extend_sign(lo))
  void extend_sign(Register hi, Register lo);

  // Load and store values by size and signed-ness
  void load_sized_value(Register dst, Address src, size_t size_in_bytes, bool is_signed, Register dst2 = noreg);
  void store_sized_value(Address dst, Register src, size_t size_in_bytes, Register src2 = noreg);

  // Support for inc/dec with optimal instruction selection depending on value

  void increment(Register reg, int value = 1) { LP64_ONLY(incrementq(reg, value)) NOT_LP64(incrementl(reg, value)) ; }
  void decrement(Register reg, int value = 1) { LP64_ONLY(decrementq(reg, value)) NOT_LP64(decrementl(reg, value)) ; }

  void decrementl(Address dst, int value = 1);
  void decrementl(Register reg, int value = 1);

  void decrementq(Register reg, int value = 1);
  void decrementq(Address dst, int value = 1);

  void incrementl(Address dst, int value = 1);
  void incrementl(Register reg, int value = 1);

  void incrementq(Register reg, int value = 1);
  void incrementq(Address dst, int value = 1);

#ifdef COMPILER2
  // special instructions for EVEX
  void setvectmask(Register dst, Register src);
  void restorevectmask();
#endif
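
  // Illustrative sketch (registers and offset are hypothetical): loading a
  // Java short field with sign extension, and bumping a counter, is written
  // once and expands to the best instruction for the platform:
  //
  //   __ load_sized_value(rax, Address(rbx, 24), sizeof(jshort), /*is_signed*/ true);
  //   __ increment(rcx);   // incrementq on LP64, incrementl on 32-bit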
  // Support optimal SSE move instructions.
  void movflt(XMMRegister dst, XMMRegister src) {
    if (dst->encoding() == src->encoding()) return;
    if (UseXmmRegToRegMoveAll) { movaps(dst, src); return; }
    else                       { movss (dst, src); return; }
  }
  void movflt(XMMRegister dst, Address src) { movss(dst, src); }
  void movflt(XMMRegister dst, AddressLiteral src);
  void movflt(Address dst, XMMRegister src) { movss(dst, src); }

  void movdbl(XMMRegister dst, XMMRegister src) {
    if (dst->encoding() == src->encoding()) return;
    if (UseXmmRegToRegMoveAll) { movapd(dst, src); return; }
    else                       { movsd (dst, src); return; }
  }

  void movdbl(XMMRegister dst, AddressLiteral src);

  void movdbl(XMMRegister dst, Address src) {
    if (UseXmmLoadAndClearUpper) { movsd (dst, src); return; }
    else                         { movlpd(dst, src); return; }
  }
  void movdbl(Address dst, XMMRegister src) { movsd(dst, src); }

  void incrementl(AddressLiteral dst);
  void incrementl(ArrayAddress dst);

  void incrementq(AddressLiteral dst);

  // Alignment
  void align(int modulus);
  void align(int modulus, int target);

  // A 5 byte nop that is safe for patching (see patch_verified_entry)
  void fat_nop();

  // Stack frame creation/removal
  void enter();
  void leave();

  // Support for getting the JavaThread pointer (i.e., a reference to thread-local information)
  // The pointer will be loaded into the thread register.
  void get_thread(Register thread);
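
  // Illustrative: the movflt/movdbl selectors above hide flag-dependent
  // instruction choices from callers. For example (a sketch):
  //
  //   __ movdbl(xmm0, Address(rsp, 0));  // movsd if UseXmmLoadAndClearUpper,
  //                                      // movlpd otherwise
  //   __ movdbl(xmm1, xmm0);             // movapd if UseXmmRegToRegMoveAll,
  //                                      // movsd otherwise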
  // Support for VM calls
  //
  // It is imperative that all calls into the VM are handled via the call_VM macros.
  // They make sure that the stack linkage is set up correctly. call_VM's correspond
  // to ENTRY/ENTRY_X entry points while call_VM_leaf's correspond to LEAF entry points.

  void call_VM(Register oop_result,
               address entry_point,
               bool check_exceptions = true);
  void call_VM(Register oop_result,
               address entry_point,
               Register arg_1,
               bool check_exceptions = true);
  void call_VM(Register oop_result,
               address entry_point,
               Register arg_1, Register arg_2,
               bool check_exceptions = true);
  void call_VM(Register oop_result,
               address entry_point,
               Register arg_1, Register arg_2, Register arg_3,
               bool check_exceptions = true);

  // Overloadings with last_Java_sp
  void call_VM(Register oop_result,
               Register last_java_sp,
               address entry_point,
               int number_of_arguments = 0,
               bool check_exceptions = true);
  void call_VM(Register oop_result,
               Register last_java_sp,
               address entry_point,
               Register arg_1,
               bool check_exceptions = true);
  void call_VM(Register oop_result,
               Register last_java_sp,
               address entry_point,
               Register arg_1, Register arg_2,
               bool check_exceptions = true);
  void call_VM(Register oop_result,
               Register last_java_sp,
               address entry_point,
               Register arg_1, Register arg_2, Register arg_3,
               bool check_exceptions = true);

  void get_vm_result  (Register oop_result, Register thread);
  void get_vm_result_2(Register metadata_result, Register thread);

  // These always tightly bind to MacroAssembler::call_VM_base
  // bypassing the virtual implementation
  void super_call_VM(Register oop_result, Register last_java_sp, address entry_point, int number_of_arguments = 0, bool check_exceptions = true);
  void super_call_VM(Register oop_result, Register last_java_sp, address entry_point, Register arg_1, bool check_exceptions = true);
  void super_call_VM(Register oop_result, Register last_java_sp, address entry_point, Register arg_1, Register arg_2, bool check_exceptions = true);
  void super_call_VM(Register oop_result, Register last_java_sp, address entry_point, Register arg_1, Register arg_2, Register arg_3, bool check_exceptions = true);
  void super_call_VM(Register oop_result, Register last_java_sp, address entry_point, Register arg_1, Register arg_2, Register arg_3, Register arg_4, bool check_exceptions = true);

  void call_VM_leaf0(address entry_point);
  void call_VM_leaf(address entry_point,
                    int number_of_arguments = 0);
  void call_VM_leaf(address entry_point,
                    Register arg_1);
  void call_VM_leaf(address entry_point,
                    Register arg_1, Register arg_2);
  void call_VM_leaf(address entry_point,
                    Register arg_1, Register arg_2, Register arg_3);

  // These always tightly bind to MacroAssembler::call_VM_leaf_base
  // bypassing the virtual implementation
  void super_call_VM_leaf(address entry_point);
  void super_call_VM_leaf(address entry_point, Register arg_1);
  void super_call_VM_leaf(address entry_point, Register arg_1, Register arg_2);
  void super_call_VM_leaf(address entry_point, Register arg_1, Register arg_2, Register arg_3);
  void super_call_VM_leaf(address entry_point, Register arg_1, Register arg_2, Register arg_3, Register arg_4);

  // last Java Frame (fills frame anchor)
  void set_last_Java_frame(Register thread,
                           Register last_java_sp,
                           Register last_java_fp,
                           address last_java_pc);

  // thread in the default location (r15_thread on 64bit)
  void set_last_Java_frame(Register last_java_sp,
                           Register last_java_fp,
                           address last_java_pc);

  void reset_last_Java_frame(Register thread, bool clear_fp);
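
  // Illustrative sketch of the call_VM convention (runtime_entry is a
  // hypothetical VM entry point, not one declared here):
  //
  //   __ call_VM(rax, CAST_FROM_FN_PTR(address, runtime_entry), rbx);
  //
  // fills in the frame anchor via call_VM_base, passes the current thread
  // plus rbx to the entry point, checks for a pending exception on return,
  // and leaves the oop result (if any) in rax.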
  // thread in the default location (r15_thread on 64bit)
  void reset_last_Java_frame(bool clear_fp);

  // jobjects
  void clear_jweak_tag(Register possibly_jweak);
  void resolve_jobject(Register value, Register thread, Register tmp);

  // C 'boolean' to Java boolean: x == 0 ? 0 : 1
  void c2bool(Register x);

  // C++ bool manipulation

  void movbool(Register dst, Address src);
  void movbool(Address dst, bool boolconst);
  void movbool(Address dst, Register src);
  void testbool(Register dst);

  void resolve_oop_handle(Register result, Register tmp = rscratch2);
  void resolve_weak_handle(Register result, Register tmp);
  void load_mirror(Register mirror, Register method, Register tmp = rscratch2);
  void load_method_holder_cld(Register rresult, Register rmethod);

  void load_method_holder(Register holder, Register method);

  // oop manipulations
  void load_metadata(Register dst, Register src);
  void load_storage_props(Register dst, Register src);
  void load_klass(Register dst, Register src);
  void store_klass(Register dst, Register src);

  void access_load_at(BasicType type, DecoratorSet decorators, Register dst, Address src,
                      Register tmp1, Register thread_tmp);
  void access_store_at(BasicType type, DecoratorSet decorators, Address dst, Register src,
                       Register tmp1, Register tmp2, Register tmp3 = noreg);

  void access_value_copy(DecoratorSet decorators, Register src, Register dst, Register value_klass);

  // value type data payload offsets...
  void first_field_offset(Register value_klass, Register offset);
  void data_for_oop(Register oop, Register data, Register value_klass);
  // get data payload ptr of a flat value array at index, kills rcx and index
  void data_for_value_array_index(Register array, Register array_klass,
                                  Register index, Register data);


  // Resolves obj access. Result is placed in the same register.
  // All other registers are preserved.
  void resolve(DecoratorSet decorators, Register obj);

  void load_heap_oop(Register dst, Address src, Register tmp1 = noreg,
                     Register thread_tmp = noreg, DecoratorSet decorators = 0);
  void load_heap_oop_not_null(Register dst, Address src, Register tmp1 = noreg,
                              Register thread_tmp = noreg, DecoratorSet decorators = 0);
  void store_heap_oop(Address dst, Register src, Register tmp1 = noreg,
                      Register tmp2 = noreg, Register tmp3 = noreg, DecoratorSet decorators = 0);

  // Used for storing NULL. All other oop constants should be
  // stored using routines that take a jobject.
  void store_heap_oop_null(Address dst);

  void load_prototype_header(Register dst, Register src);

#ifdef _LP64
  void store_klass_gap(Register dst, Register src);

  // This dummy is to prevent a call to store_heap_oop from
  // converting a zero (like NULL) into a Register by giving
  // the compiler two choices it can't resolve

  void store_heap_oop(Address dst, void* dummy);

  void encode_heap_oop(Register r);
  void decode_heap_oop(Register r);
  void encode_heap_oop_not_null(Register r);
  void decode_heap_oop_not_null(Register r);
  void encode_heap_oop_not_null(Register dst, Register src);
  void decode_heap_oop_not_null(Register dst, Register src);

  void set_narrow_oop(Register dst, jobject obj);
  void set_narrow_oop(Address dst, jobject obj);
  void cmp_narrow_oop(Register dst, jobject obj);
  void cmp_narrow_oop(Address dst, jobject obj);

  void encode_klass_not_null(Register r);
  void decode_klass_not_null(Register r);
  void encode_klass_not_null(Register dst, Register src);
  void decode_klass_not_null(Register dst, Register src);
  void set_narrow_klass(Register dst, Klass* k);
  void set_narrow_klass(Address dst, Klass* k);
  void cmp_narrow_klass(Register dst, Klass* k);
  void cmp_narrow_klass(Address dst, Klass* k);

  // Returns the byte size of the instructions generated by decode_klass_not_null()
  // when compressed klass pointers are being used.
  static int instr_size_for_decode_klass_not_null();

  // if heap base register is used - reinit it with the correct value
  void reinit_heapbase();

  DEBUG_ONLY(void verify_heapbase(const char* msg);)

#endif // _LP64

  // Int division/remainder for Java
  // (as idivl, but checks for special case as described in JVM spec.)
  // returns idivl instruction offset for implicit exception handling
  int corrected_idivl(Register reg);

  // Long division/remainder for Java
  // (as idivq, but checks for special case as described in JVM spec.)
  // returns idivq instruction offset for implicit exception handling
  int corrected_idivq(Register reg);

  void int3();

  // Long operation macros for a 32bit cpu
  // Long negation for Java
  void lneg(Register hi, Register lo);

  // Long multiplication for Java
  // (destroys contents of eax, ebx, ecx and edx)
  void lmul(int x_rsp_offset, int y_rsp_offset); // rdx:rax = x * y

  // Long shifts for Java
  // (semantics as described in JVM spec.)
  void lshl(Register hi, Register lo);                              // hi:lo << (rcx & 0x3f)
  void lshr(Register hi, Register lo, bool sign_extension = false); // hi:lo >> (rcx & 0x3f)

  // Long compare for Java
  // (semantics as described in JVM spec.)
  void lcmp2int(Register x_hi, Register x_lo, Register y_hi, Register y_lo); // x_hi = lcmp(x, y)


  // misc

  // Sign extension
  void sign_extend_short(Register reg);
  void sign_extend_byte(Register reg);

  // Division by power of 2, rounding towards 0
  void division_with_shift(Register reg, int shift_value);
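
  // Worked example: division_with_shift(reg, 3) divides by 8 rounding
  // towards 0, which is not a plain arithmetic shift. For reg = -9, sar
  // by 3 alone would give -2 (the floor of -1.125); Java semantics require
  // -1, so a bias is added to negative dividends before the shift.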
  // Compares the top-most stack entries on the FPU stack and sets the eflags as follows:
  //
  // CF (corresponds to C0) if x < y
  // PF (corresponds to C2) if unordered
  // ZF (corresponds to C3) if x = y
  //
  // The arguments are in reversed order on the stack (i.e., top of stack is first argument).
  // tmp is a temporary register, if none is available use noreg (only matters for non-P6 code)
  void fcmp(Register tmp);
  // Variant of the above which allows y to be further down the stack
  // and which only pops x and y if specified. If pop_right is
  // specified then pop_left must also be specified.
  void fcmp(Register tmp, int index, bool pop_left, bool pop_right);

  // Floating-point comparison for Java
  // Compares the top-most stack entries on the FPU stack and stores the result in dst.
  // The arguments are in reversed order on the stack (i.e., top of stack is first argument).
  // (semantics as described in JVM spec.)
  void fcmp2int(Register dst, bool unordered_is_less);
  // Variant of the above which allows y to be further down the stack
  // and which only pops x and y if specified. If pop_right is
  // specified then pop_left must also be specified.
  void fcmp2int(Register dst, bool unordered_is_less, int index, bool pop_left, bool pop_right);

  // Floating-point remainder for Java (ST0 = ST0 fremr ST1, ST1 is empty afterwards)
  // tmp is a temporary register, if none is available use noreg
  void fremr(Register tmp);

  // dst = c = a * b + c
  void fmad(XMMRegister dst, XMMRegister a, XMMRegister b, XMMRegister c);
  void fmaf(XMMRegister dst, XMMRegister a, XMMRegister b, XMMRegister c);

  void vfmad(XMMRegister dst, XMMRegister a, XMMRegister b, XMMRegister c, int vector_len);
  void vfmaf(XMMRegister dst, XMMRegister a, XMMRegister b, XMMRegister c, int vector_len);
  void vfmad(XMMRegister dst, XMMRegister a, Address b, XMMRegister c, int vector_len);
  void vfmaf(XMMRegister dst, XMMRegister a, Address b, XMMRegister c, int vector_len);


  // same as fcmp2int, but using SSE2
  void cmpss2int(XMMRegister opr1, XMMRegister opr2, Register dst, bool unordered_is_less);
  void cmpsd2int(XMMRegister opr1, XMMRegister opr2, Register dst, bool unordered_is_less);

  // branch to L if FPU flag C2 is set/not set
  // tmp is a temporary register, if none is available use noreg
  void jC2 (Register tmp, Label& L);
  void jnC2(Register tmp, Label& L);

  // Pop ST (ffree & fincstp combined)
  void fpop();

  // Load float value from 'address'. If UseSSE >= 1, the value is loaded into
  // register xmm0. Otherwise, the value is loaded onto the FPU stack.
  void load_float(Address src);

  // Store float value to 'address'. If UseSSE >= 1, the value is stored
  // from register xmm0. Otherwise, the value is stored from the FPU stack.
  void store_float(Address dst);

  // Load double value from 'address'. If UseSSE >= 2, the value is loaded into
  // register xmm0. Otherwise, the value is loaded onto the FPU stack.
  void load_double(Address src);

  // Store double value to 'address'. If UseSSE >= 2, the value is stored
  // from register xmm0. Otherwise, the value is stored from the FPU stack.
  void store_double(Address dst);

  // pushes double TOS element of FPU stack on CPU stack; pops from FPU stack
  void push_fTOS();

  // pops double TOS element from CPU stack and pushes on FPU stack
  void pop_fTOS();

  void empty_FPU_stack();

  void push_IU_state();
  void pop_IU_state();

  void push_FPU_state();
  void pop_FPU_state();

  void push_CPU_state();
  void pop_CPU_state();

  // Round up to a multiple of modulus (a power of two)
  void round_to(Register reg, int modulus);

  // Callee saved registers handling
  void push_callee_saved_registers();
  void pop_callee_saved_registers();

  // allocation

  // Object / value buffer allocation...
  // Allocate instance of klass, assumes klass initialized by caller
  // new_obj prefers to be rax
  // Kills t1 and t2, preserves klass, return allocation in new_obj (rsi on LP64)
  void allocate_instance(Register klass, Register new_obj,
                         Register t1, Register t2,
                         bool clear_fields, Label& alloc_failed);

  void eden_allocate(
    Register thread,            // Current thread
    Register obj,               // result: pointer to object after successful allocation
    Register var_size_in_bytes, // object size in bytes if unknown at compile time; invalid otherwise
    int      con_size_in_bytes, // object size in bytes if known at compile time
    Register t1,                // temp register
    Label&   slow_case          // continuation point if fast allocation fails
  );
  void tlab_allocate(
    Register thread,            // Current thread
    Register obj,               // result: pointer to object after successful allocation
    Register var_size_in_bytes, // object size in bytes if unknown at compile time; invalid otherwise
    int      con_size_in_bytes, // object size in bytes if known at compile time
    Register t1,                // temp register
    Register t2,                // temp register
    Label&   slow_case          // continuation point if fast allocation fails
  );
  void zero_memory(Register address, Register length_in_bytes, int offset_in_bytes, Register temp);

  // For field "index" within "klass", return value_klass ...
  void get_value_field_klass(Register klass, Register index, Register value_klass);

  // interface method calling
  void lookup_interface_method(Register recv_klass,
                               Register intf_klass,
                               RegisterOrConstant itable_index,
                               Register method_result,
                               Register scan_temp,
                               Label& no_such_interface,
                               bool return_method = true);

  // virtual method calling
  void lookup_virtual_method(Register recv_klass,
                             RegisterOrConstant vtable_index,
                             Register method_result);

  // Test sub_klass against super_klass, with fast and slow paths.

  // The fast path produces a tri-state answer: yes / no / maybe-slow.
  // One of the three labels can be NULL, meaning take the fall-through.
  // If super_check_offset is -1, the value is loaded up from super_klass.
  // No registers are killed, except temp_reg.
  void check_klass_subtype_fast_path(Register sub_klass,
                                     Register super_klass,
                                     Register temp_reg,
                                     Label* L_success,
                                     Label* L_failure,
                                     Label* L_slow_path,
                                     RegisterOrConstant super_check_offset = RegisterOrConstant(-1));
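
  // Illustrative wiring of the fast path with the slow path declared just
  // below (a sketch mirroring the combined helper; registers hypothetical):
  //
  //   Label L_ok, L_bad;
  //   __ check_klass_subtype_fast_path(rsi, rax, rcx, &L_ok, &L_bad, NULL);
  //   __ check_klass_subtype_slow_path(rsi, rax, rcx, noreg, &L_ok, NULL);
  //   __ bind(L_bad);   // slow-path failure falls through to here
  //   ...
  //   __ bind(L_ok);    // sub_klass is a subtype of super_klass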
  // The rest of the type check; must be wired to a corresponding fast path.
  // It does not repeat the fast path logic, so don't use it standalone.
  // The temp_reg and temp2_reg can be noreg, if no temps are available.
  // Updates the sub's secondary super cache as necessary.
  // If set_cond_codes, condition codes will be Z on success, NZ on failure.
  void check_klass_subtype_slow_path(Register sub_klass,
                                     Register super_klass,
                                     Register temp_reg,
                                     Register temp2_reg,
                                     Label* L_success,
                                     Label* L_failure,
                                     bool set_cond_codes = false);

  // Simplified, combined version, good for typical uses.
  // Falls through on failure.
  void check_klass_subtype(Register sub_klass,
                           Register super_klass,
                           Register temp_reg,
                           Label& L_success);

  void clinit_barrier(Register klass,
                      Register thread,
                      Label* L_fast_path = NULL,
                      Label* L_slow_path = NULL);

  // method handles (JSR 292)
  Address argument_address(RegisterOrConstant arg_slot, int extra_slot_offset = 0);

  //----
  void set_word_if_not_zero(Register reg); // sets reg to 1 if not zero, otherwise 0

  // Debugging

  // only if +VerifyOops
  // TODO: Make these macros with file and line like sparc version!
  void verify_oop(Register reg, const char* s = "broken oop");
  void verify_oop_addr(Address addr, const char* s = "broken oop addr");

  // TODO: verify method and klass metadata (compare against vptr?)
  void _verify_method_ptr(Register reg, const char* msg, const char* file, int line) {}
  void _verify_klass_ptr(Register reg, const char* msg, const char* file, int line) {}

#define verify_method_ptr(reg) _verify_method_ptr(reg, "broken method " #reg, __FILE__, __LINE__)
#define verify_klass_ptr(reg) _verify_klass_ptr(reg, "broken klass " #reg, __FILE__, __LINE__)

  // only if +VerifyFPU
  void verify_FPU(int stack_depth, const char* s = "illegal FPU state");

  // Verify or restore cpu control state after JNI call
  void restore_cpu_control_state_after_jni();

  // prints msg, dumps registers and stops execution
  void stop(const char* msg);

  // prints msg and continues
  void warn(const char* msg);

  // dumps registers and other state
  void print_state();

  static void debug32(int rdi, int rsi, int rbp, int rsp, int rbx, int rdx, int rcx, int rax, int eip, char* msg);
  static void debug64(char* msg, int64_t pc, int64_t regs[]);
  static void print_state32(int rdi, int rsi, int rbp, int rsp, int rbx, int rdx, int rcx, int rax, int eip);
  static void print_state64(int64_t pc, int64_t regs[]);

  void os_breakpoint();

  void untested() { stop("untested"); }

  void unimplemented(const char* what = "");

  void should_not_reach_here() { stop("should not reach here"); }

  void print_CPU_state();

  // Stack overflow checking
  void bang_stack_with_offset(int offset) {
    // stack grows down, caller passes positive offset
    assert(offset > 0, "must bang with positive offset");
    movl(Address(rsp, (-offset)), rax);
  }

  // Writes to stack successive pages until offset reached to check for
  // stack overflow + shadow pages. Also, clobbers tmp
  void bang_stack_size(Register size, Register tmp);

  // Check for reserved stack access in method being exited (for JIT)
  void reserved_stack_check();

  virtual RegisterOrConstant delayed_value_impl(intptr_t* delayed_value_addr,
                                                Register tmp,
                                                int offset);
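
  // Illustrative note on the stack-banging helper above: a single-page
  // probe below the current stack pointer can be emitted as (a sketch)
  //
  //   __ bang_stack_with_offset(os::vm_page_size());
  //
  // which stores rax at rsp - page_size, forcing the OS to map (or fault
  // on) that page before the frame actually grows into it.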
  // If thread_reg is != noreg the code assumes the register passed contains
  // the thread (required on 64 bit).
  void safepoint_poll(Label& slow_path, Register thread_reg, Register temp_reg);

  void verify_tlab();

  // Biased locking support
  // lock_reg and obj_reg must be loaded up with the appropriate values.
  // swap_reg must be rax, and is killed.
  // tmp_reg is optional. If it is supplied (i.e., != noreg) it will
  // be killed; if not supplied, push/pop will be used internally to
  // allocate a temporary (inefficient, avoid if possible).
  // Optional slow case is for implementations (interpreter and C1) which branch to
  // slow case directly. Leaves condition codes set for C2's Fast_Lock node.
  // Returns offset of first potentially-faulting instruction for null
  // check info (currently consumed only by C1). If
  // swap_reg_contains_mark is true then returns -1 as it is assumed
  // the calling code has already passed any potential faults.
  int biased_locking_enter(Register lock_reg, Register obj_reg,
                           Register swap_reg, Register tmp_reg,
                           bool swap_reg_contains_mark,
                           Label& done, Label* slow_case = NULL,
                           BiasedLockingCounters* counters = NULL);
  void biased_locking_exit(Register obj_reg, Register temp_reg, Label& done);
#ifdef COMPILER2
  // Code used by cmpFastLock and cmpFastUnlock mach instructions in .ad file.
  // See full description in macroAssembler_x86.cpp.
  void fast_lock(Register obj, Register box, Register tmp,
                 Register scr, Register cx1, Register cx2,
                 BiasedLockingCounters* counters,
                 RTMLockingCounters* rtm_counters,
                 RTMLockingCounters* stack_rtm_counters,
                 Metadata* method_data,
                 bool use_rtm, bool profile_rtm);
  void fast_unlock(Register obj, Register box, Register tmp, bool use_rtm);
#if INCLUDE_RTM_OPT
  void rtm_counters_update(Register abort_status, Register rtm_counters);
  void branch_on_random_using_rdtsc(Register tmp, Register scr, int count, Label& brLabel);
  void rtm_abort_ratio_calculation(Register tmp, Register rtm_counters_reg,
                                   RTMLockingCounters* rtm_counters,
                                   Metadata* method_data);
  void rtm_profiling(Register abort_status_Reg, Register rtm_counters_Reg,
                     RTMLockingCounters* rtm_counters, Metadata* method_data, bool profile_rtm);
  void rtm_retry_lock_on_abort(Register retry_count, Register abort_status, Label& retryLabel);
  void rtm_retry_lock_on_busy(Register retry_count, Register box, Register tmp, Register scr, Label& retryLabel);
  void rtm_stack_locking(Register obj, Register tmp, Register scr,
                         Register retry_on_abort_count,
                         RTMLockingCounters* stack_rtm_counters,
                         Metadata* method_data, bool profile_rtm,
                         Label& DONE_LABEL, Label& IsInflated);
  void rtm_inflated_locking(Register obj, Register box, Register tmp,
                            Register scr, Register retry_on_busy_count,
                            Register retry_on_abort_count,
                            RTMLockingCounters* rtm_counters,
                            Metadata* method_data, bool profile_rtm,
                            Label& DONE_LABEL);
#endif
#endif

  Condition negate_condition(Condition cond);

  // Instructions that use AddressLiteral operands. These instructions can handle 32bit/64bit
  // operands. In general the names are modified to avoid hiding the instruction in Assembler
  // so that we don't need to implement all the varieties in the Assembler with trivial wrappers
  // here in MacroAssembler. The major exception to this rule is call.

  // Arithmetic

  void addptr(Address dst, int32_t src) { LP64_ONLY(addq(dst, src)) NOT_LP64(addl(dst, src)) ; }
  void addptr(Address dst, Register src);

  void addptr(Register dst, Address src) { LP64_ONLY(addq(dst, src)) NOT_LP64(addl(dst, src)); }
  void addptr(Register dst, int32_t src);
  void addptr(Register dst, Register src);
  void addptr(Register dst, RegisterOrConstant src) {
    if (src.is_constant()) addptr(dst, (int) src.as_constant());
    else                   addptr(dst, src.as_register());
  }

  void andptr(Register dst, int32_t src);
  void andptr(Register src1, Register src2) { LP64_ONLY(andq(src1, src2)) NOT_LP64(andl(src1, src2)) ; }

  void cmp8(AddressLiteral src1, int imm);

  // renamed to drag out the casting of address to int32_t/intptr_t
  void cmp32(Register src1, int32_t imm);

  void cmp32(AddressLiteral src1, int32_t imm);
  // compare reg - mem, or reg - &mem
  void cmp32(Register src1, AddressLiteral src2);

  void cmp32(Register src1, Address src2);

#ifndef _LP64
  void cmpklass(Address dst, Metadata* obj);
  void cmpklass(Register dst, Metadata* obj);
  void cmpoop(Address dst, jobject obj);
  void cmpoop_raw(Address dst, jobject obj);
#endif // _LP64

  void cmpoop(Register src1, Register src2);
  void cmpoop(Register src1, Address src2);
  void cmpoop(Register dst, jobject obj);
  void cmpoop_raw(Register dst, jobject obj);

  // NOTE src2 must be the lval. This is NOT a mem-mem compare
  void cmpptr(Address src1, AddressLiteral src2);

  void cmpptr(Register src1, AddressLiteral src2);

  void cmpptr(Register src1, Register src2) { LP64_ONLY(cmpq(src1, src2)) NOT_LP64(cmpl(src1, src2)) ; }
  void cmpptr(Register src1, Address src2) { LP64_ONLY(cmpq(src1, src2)) NOT_LP64(cmpl(src1, src2)) ; }
  // void cmpptr(Address src1, Register src2) { LP64_ONLY(cmpq(src1, src2)) NOT_LP64(cmpl(src1, src2)) ; }

  void cmpptr(Register src1, int32_t src2) { LP64_ONLY(cmpq(src1, src2)) NOT_LP64(cmpl(src1, src2)) ; }
  void cmpptr(Address src1, int32_t src2) { LP64_ONLY(cmpq(src1, src2)) NOT_LP64(cmpl(src1, src2)) ; }

  // cmp64 to avoid hiding cmpq
  void cmp64(Register src1, AddressLiteral src);

  void cmpxchgptr(Register reg, Address adr);

  void locked_cmpxchgptr(Register reg, AddressLiteral adr);


  void imulptr(Register dst, Register src) { LP64_ONLY(imulq(dst, src)) NOT_LP64(imull(dst, src)); }
  void imulptr(Register dst, Register src, int imm32) { LP64_ONLY(imulq(dst, src, imm32)) NOT_LP64(imull(dst, src, imm32)); }


  void negptr(Register dst) { LP64_ONLY(negq(dst)) NOT_LP64(negl(dst)); }

  void notptr(Register dst) { LP64_ONLY(notq(dst)) NOT_LP64(notl(dst)); }

  void shlptr(Register dst, int32_t shift);
  void shlptr(Register dst) { LP64_ONLY(shlq(dst)) NOT_LP64(shll(dst)); }

  void shrptr(Register dst, int32_t shift);
  void shrptr(Register dst) { LP64_ONLY(shrq(dst)) NOT_LP64(shrl(dst)); }

  void sarptr(Register dst) { LP64_ONLY(sarq(dst)) NOT_LP64(sarl(dst)); }
  void sarptr(Register dst, int32_t src) { LP64_ONLY(sarq(dst, src)) NOT_LP64(sarl(dst, src)); }
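
  // Illustrative: the *ptr forms above expand to the pointer-width
  // instruction, so pointer arithmetic is written once for both targets.
  // For example (a sketch):
  //
  //   __ addptr(rsp, 2 * wordSize);   // addq on LP64, addl on 32-bit
  //   __ cmpptr(rax, rbx);            // cmpq / cmpl likewise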
  void subptr(Address dst, int32_t src) { LP64_ONLY(subq(dst, src)) NOT_LP64(subl(dst, src)); }

  void subptr(Register dst, Address src) { LP64_ONLY(subq(dst, src)) NOT_LP64(subl(dst, src)); }
  void subptr(Register dst, int32_t src);
  // Force generation of a 4 byte immediate value even if it fits into 8bit
  void subptr_imm32(Register dst, int32_t src);
  void subptr(Register dst, Register src);
  void subptr(Register dst, RegisterOrConstant src) {
    if (src.is_constant()) subptr(dst, (int) src.as_constant());
    else                   subptr(dst, src.as_register());
  }

  void sbbptr(Address dst, int32_t src) { LP64_ONLY(sbbq(dst, src)) NOT_LP64(sbbl(dst, src)); }
  void sbbptr(Register dst, int32_t src) { LP64_ONLY(sbbq(dst, src)) NOT_LP64(sbbl(dst, src)); }

  void xchgptr(Register src1, Register src2) { LP64_ONLY(xchgq(src1, src2)) NOT_LP64(xchgl(src1, src2)) ; }
  void xchgptr(Register src1, Address src2) { LP64_ONLY(xchgq(src1, src2)) NOT_LP64(xchgl(src1, src2)) ; }

  void xaddptr(Address src1, Register src2) { LP64_ONLY(xaddq(src1, src2)) NOT_LP64(xaddl(src1, src2)) ; }



  // Helper functions for statistics gathering.
  // Conditionally (atomically, on MPs) increments passed counter address, preserving condition codes.
  void cond_inc32(Condition cond, AddressLiteral counter_addr);
  // Unconditional atomic increment.
  void atomic_incl(Address counter_addr);
  void atomic_incl(AddressLiteral counter_addr, Register scr = rscratch1);
#ifdef _LP64
  void atomic_incq(Address counter_addr);
  void atomic_incq(AddressLiteral counter_addr, Register scr = rscratch1);
#endif
  void atomic_incptr(AddressLiteral counter_addr, Register scr = rscratch1) { LP64_ONLY(atomic_incq(counter_addr, scr)) NOT_LP64(atomic_incl(counter_addr, scr)) ; }
  void atomic_incptr(Address counter_addr) { LP64_ONLY(atomic_incq(counter_addr)) NOT_LP64(atomic_incl(counter_addr)) ; }

  void lea(Register dst, AddressLiteral adr);
  void lea(Address dst, AddressLiteral adr);
  void lea(Register dst, Address adr) { Assembler::lea(dst, adr); }

  void leal32(Register dst, Address src) { leal(dst, src); }

  // Import other testl() methods from the parent class or else
  // they will be hidden by the following overriding declaration.
  using Assembler::testl;
  void testl(Register dst, AddressLiteral src);

  void orptr(Register dst, Address src) { LP64_ONLY(orq(dst, src)) NOT_LP64(orl(dst, src)); }
  void orptr(Register dst, Register src) { LP64_ONLY(orq(dst, src)) NOT_LP64(orl(dst, src)); }
  void orptr(Register dst, int32_t src) { LP64_ONLY(orq(dst, src)) NOT_LP64(orl(dst, src)); }
  void orptr(Address dst, int32_t imm32) { LP64_ONLY(orq(dst, imm32)) NOT_LP64(orl(dst, imm32)); }

  void testptr(Register src, int32_t imm32) { LP64_ONLY(testq(src, imm32)) NOT_LP64(testl(src, imm32)); }
  void testptr(Register src1, Address src2) { LP64_ONLY(testq(src1, src2)) NOT_LP64(testl(src1, src2)); }
  void testptr(Register src1, Register src2);

  void xorptr(Register dst, Register src) { LP64_ONLY(xorq(dst, src)) NOT_LP64(xorl(dst, src)); }
  void xorptr(Register dst, Address src) { LP64_ONLY(xorq(dst, src)) NOT_LP64(xorl(dst, src)); }

  // Calls

  void call(Label& L, relocInfo::relocType rtype);
  void call(Register entry);

  // NOTE: this call transfers to the effective address of entry, NOT
  // the address contained by entry, because that is more natural
  // for jumps/calls.
  void call(AddressLiteral entry);

  // Emit the CompiledIC call idiom
  void ic_call(address entry, jint method_index = 0);
  // Jumps

  // NOTE: these jumps transfer to the effective address of dst, NOT
  // the address contained by dst, because that is more natural
  // for jumps/calls.
  void jump(AddressLiteral dst);
  void jump_cc(Condition cc, AddressLiteral dst);

  // 32bit can do a case table jump in one instruction but we no longer allow the base
  // to be installed in the Address class. This jump transfers to the address
  // contained in the location described by entry (not the address of entry).
  void jump(ArrayAddress entry);

  // Floating

  void andpd(XMMRegister dst, Address src) { Assembler::andpd(dst, src); }
  void andpd(XMMRegister dst, AddressLiteral src, Register scratch_reg = rscratch1);
  void andpd(XMMRegister dst, XMMRegister src) { Assembler::andpd(dst, src); }

  void andps(XMMRegister dst, XMMRegister src) { Assembler::andps(dst, src); }
  void andps(XMMRegister dst, Address src) { Assembler::andps(dst, src); }
  void andps(XMMRegister dst, AddressLiteral src, Register scratch_reg = rscratch1);

  void comiss(XMMRegister dst, XMMRegister src) { Assembler::comiss(dst, src); }
  void comiss(XMMRegister dst, Address src) { Assembler::comiss(dst, src); }
  void comiss(XMMRegister dst, AddressLiteral src);

  void comisd(XMMRegister dst, XMMRegister src) { Assembler::comisd(dst, src); }
  void comisd(XMMRegister dst, Address src) { Assembler::comisd(dst, src); }
  void comisd(XMMRegister dst, AddressLiteral src);

  void fadd_s(Address src) { Assembler::fadd_s(src); }
  void fadd_s(AddressLiteral src) { Assembler::fadd_s(as_Address(src)); }

  void fldcw(Address src) { Assembler::fldcw(src); }
  void fldcw(AddressLiteral src);

  void fld_s(int index) { Assembler::fld_s(index); }
  void fld_s(Address src) { Assembler::fld_s(src); }
  void fld_s(AddressLiteral src);

  void fld_d(Address src) { Assembler::fld_d(src); }
  void fld_d(AddressLiteral src);

  void fld_x(Address src) { Assembler::fld_x(src); }
  void fld_x(AddressLiteral src);

  void fmul_s(Address src) { Assembler::fmul_s(src); }
  void fmul_s(AddressLiteral src) { Assembler::fmul_s(as_Address(src)); }

  void ldmxcsr(Address src) { Assembler::ldmxcsr(src); }
  void ldmxcsr(AddressLiteral src);
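
  // Illustrative: the AddressLiteral overloads in this section let code
  // reference constants by absolute address; on LP64 the scratch register
  // materializes addresses that do not fit in a 32-bit displacement. A
  // sketch, with mask_addr standing in for some real constant's address:
  //
  //   __ andpd(xmm0, ExternalAddress(mask_addr), rscratch1);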
#ifdef _LP64
 private:
  void sha256_AVX2_one_round_compute(
    Register reg_old_h,
    Register reg_a,
    Register reg_b,
    Register reg_c,
    Register reg_d,
    Register reg_e,
    Register reg_f,
    Register reg_g,
    Register reg_h,
    int iter);
  void sha256_AVX2_four_rounds_compute_first(int start);
  void sha256_AVX2_four_rounds_compute_last(int start);
  void sha256_AVX2_one_round_and_sched(
    XMMRegister xmm_0,     /* == ymm4 on 0, 1, 2, 3 iterations, then rotate 4 registers left on 4, 8, 12 iterations */
    XMMRegister xmm_1,     /* ymm5 */  /* full cycle is 16 iterations */
    XMMRegister xmm_2,     /* ymm6 */
    XMMRegister xmm_3,     /* ymm7 */
    Register    reg_a,     /* == eax on 0 iteration, then rotate 8 register right on each next iteration */
    Register    reg_b,     /* ebx */  /* full cycle is 8 iterations */
    Register    reg_c,     /* edi */
    Register    reg_d,     /* esi */
    Register    reg_e,     /* r8d */
    Register    reg_f,     /* r9d */
    Register    reg_g,     /* r10d */
    Register    reg_h,     /* r11d */
    int iter);

  void addm(int disp, Register r1, Register r2);
  void gfmul(XMMRegister tmp0, XMMRegister t);
  void schoolbookAAD(int i, Register subkeyH, XMMRegister data, XMMRegister tmp0,
                     XMMRegister tmp1, XMMRegister tmp2, XMMRegister tmp3);
  void generateHtbl_one_block(Register htbl);
  void generateHtbl_eight_blocks(Register htbl);
 public:
  void sha256_AVX2(XMMRegister msg, XMMRegister state0, XMMRegister state1, XMMRegister msgtmp0,
                   XMMRegister msgtmp1, XMMRegister msgtmp2, XMMRegister msgtmp3, XMMRegister msgtmp4,
                   Register buf, Register state, Register ofs, Register limit, Register rsp,
                   bool multi_block, XMMRegister shuf_mask);
  void avx_ghash(Register state, Register htbl, Register data, Register blocks);
#endif

#ifdef _LP64
 private:
  void sha512_AVX2_one_round_compute(Register old_h, Register a, Register b, Register c, Register d,
                                     Register e, Register f, Register g, Register h, int iteration);

  void sha512_AVX2_one_round_and_schedule(XMMRegister xmm4, XMMRegister xmm5, XMMRegister xmm6, XMMRegister xmm7,
                                          Register a, Register b, Register c, Register d, Register e, Register f,
                                          Register g, Register h, int iteration);

  void addmq(int disp, Register r1, Register r2);
 public:
  void sha512_AVX2(XMMRegister msg, XMMRegister state0, XMMRegister state1, XMMRegister msgtmp0,
                   XMMRegister msgtmp1, XMMRegister msgtmp2, XMMRegister msgtmp3, XMMRegister msgtmp4,
                   Register buf, Register state, Register ofs, Register limit, Register rsp, bool multi_block,
                   XMMRegister shuf_mask);
 private:
  void roundEnc(XMMRegister key, int rnum);
  void lastroundEnc(XMMRegister key, int rnum);
  void roundDec(XMMRegister key, int rnum);
  void lastroundDec(XMMRegister key, int rnum);
  void ev_load_key(XMMRegister xmmdst, Register key, int offset, XMMRegister xmm_shuf_mask);

 public:
  void aesecb_encrypt(Register source_addr, Register dest_addr, Register key, Register len);
  void aesecb_decrypt(Register source_addr, Register dest_addr, Register key, Register len);
  void aesctr_encrypt(Register src_addr, Register dest_addr, Register key, Register counter,
                      Register len_reg, Register used, Register used_addr, Register saved_encCounter_start);

#endif

  void fast_sha1(XMMRegister abcd, XMMRegister e0, XMMRegister e1, XMMRegister msg0,
                 XMMRegister msg1, XMMRegister msg2, XMMRegister msg3, XMMRegister shuf_mask,
                 Register buf, Register state, Register ofs, Register limit, Register rsp,
                 bool multi_block);

#ifdef _LP64
  void fast_sha256(XMMRegister msg, XMMRegister state0, XMMRegister state1, XMMRegister msgtmp0,
                   XMMRegister msgtmp1, XMMRegister msgtmp2, XMMRegister msgtmp3, XMMRegister msgtmp4,
                   Register buf, Register state, Register ofs, Register limit, Register rsp,
                   bool multi_block, XMMRegister shuf_mask);
#else
  void fast_sha256(XMMRegister msg, XMMRegister state0, XMMRegister state1, XMMRegister msgtmp0,
                   XMMRegister msgtmp1, XMMRegister msgtmp2, XMMRegister msgtmp3, XMMRegister msgtmp4,
                   Register buf, Register state, Register ofs, Register limit, Register rsp,
                   bool multi_block);
#endif

  void fast_exp(XMMRegister xmm0, XMMRegister xmm1, XMMRegister xmm2, XMMRegister xmm3,
                XMMRegister xmm4, XMMRegister xmm5, XMMRegister xmm6, XMMRegister xmm7,
                Register rax, Register rcx, Register rdx, Register tmp);

#ifdef _LP64
  void fast_log(XMMRegister xmm0, XMMRegister xmm1, XMMRegister xmm2, XMMRegister xmm3,
                XMMRegister xmm4, XMMRegister xmm5, XMMRegister xmm6, XMMRegister xmm7,
                Register rax, Register rcx, Register rdx, Register tmp1, Register tmp2);
  void fast_log10(XMMRegister xmm0, XMMRegister xmm1, XMMRegister xmm2, XMMRegister xmm3,
                  XMMRegister xmm4, XMMRegister xmm5, XMMRegister xmm6, XMMRegister xmm7,
                  Register rax, Register rcx, Register rdx, Register r11);

  void fast_pow(XMMRegister xmm0, XMMRegister xmm1, XMMRegister xmm2, XMMRegister xmm3, XMMRegister xmm4,
                XMMRegister xmm5, XMMRegister xmm6, XMMRegister xmm7, Register rax, Register rcx,
                Register rdx, Register tmp1, Register tmp2, Register tmp3, Register tmp4);

  void fast_sin(XMMRegister xmm0, XMMRegister xmm1, XMMRegister xmm2, XMMRegister xmm3,
                XMMRegister xmm4, XMMRegister xmm5, XMMRegister xmm6, XMMRegister xmm7,
                Register rax, Register rbx, Register rcx, Register rdx, Register tmp1, Register tmp2,
                Register tmp3, Register tmp4);

  void fast_cos(XMMRegister xmm0, XMMRegister xmm1, XMMRegister xmm2, XMMRegister xmm3,
                XMMRegister xmm4, XMMRegister xmm5, XMMRegister xmm6, XMMRegister xmm7,
                Register rax, Register rcx, Register rdx, Register tmp1,
                Register tmp2, Register tmp3, Register tmp4);
  void fast_tan(XMMRegister xmm0, XMMRegister xmm1, XMMRegister xmm2, XMMRegister xmm3,
                XMMRegister xmm4, XMMRegister xmm5, XMMRegister xmm6, XMMRegister xmm7,
                Register rax, Register rcx, Register rdx, Register tmp1,
                Register tmp2, Register tmp3, Register tmp4);
#else
  void fast_log(XMMRegister xmm0, XMMRegister xmm1, XMMRegister xmm2, XMMRegister xmm3,
                XMMRegister xmm4, XMMRegister xmm5, XMMRegister xmm6, XMMRegister xmm7,
                Register rax, Register rcx, Register rdx, Register tmp1);

  void fast_log10(XMMRegister xmm0, XMMRegister xmm1, XMMRegister xmm2, XMMRegister xmm3,
                  XMMRegister xmm4, XMMRegister xmm5, XMMRegister xmm6, XMMRegister xmm7,
                  Register rax, Register rcx, Register rdx, Register tmp);

  void fast_pow(XMMRegister xmm0, XMMRegister xmm1, XMMRegister xmm2, XMMRegister xmm3, XMMRegister xmm4,
                XMMRegister xmm5, XMMRegister xmm6, XMMRegister xmm7, Register rax, Register rcx,
                Register rdx, Register tmp);

  void fast_sin(XMMRegister xmm0, XMMRegister xmm1, XMMRegister xmm2, XMMRegister xmm3,
                XMMRegister xmm4, XMMRegister xmm5, XMMRegister xmm6, XMMRegister xmm7,
                Register rax, Register rbx, Register rdx);

  void fast_cos(XMMRegister xmm0, XMMRegister xmm1, XMMRegister xmm2, XMMRegister xmm3,
                XMMRegister xmm4, XMMRegister xmm5, XMMRegister xmm6, XMMRegister xmm7,
                Register rax, Register rcx, Register rdx, Register tmp);

  void libm_sincos_huge(XMMRegister xmm0, XMMRegister xmm1, Register eax, Register ecx,
                        Register edx, Register ebx, Register esi, Register edi,
                        Register ebp, Register esp);

  void libm_reduce_pi04l(Register eax, Register ecx, Register edx, Register ebx,
                         Register esi, Register edi, Register ebp, Register esp);

  void libm_tancot_huge(XMMRegister xmm0, XMMRegister xmm1, Register eax, Register ecx,
                        Register edx, Register ebx, Register esi, Register edi,
                        Register ebp, Register esp);

  void fast_tan(XMMRegister xmm0, XMMRegister xmm1, XMMRegister xmm2, XMMRegister xmm3,
                XMMRegister xmm4, XMMRegister xmm5, XMMRegister xmm6, XMMRegister xmm7,
                Register rax, Register rcx, Register rdx, Register tmp);
#endif

  void increase_precision();
  void restore_precision();
 private:

  // these are private because users should be doing movflt/movdbl

  void movss(XMMRegister dst, XMMRegister src) { Assembler::movss(dst, src); }
  void movss(Address dst, XMMRegister src)     { Assembler::movss(dst, src); }
  void movss(XMMRegister dst, Address src)     { Assembler::movss(dst, src); }
  void movss(XMMRegister dst, AddressLiteral src);

  void movlpd(XMMRegister dst, Address src)    { Assembler::movlpd(dst, src); }
  void movlpd(XMMRegister dst, AddressLiteral src);

 public:

  void addsd(XMMRegister dst, XMMRegister src) { Assembler::addsd(dst, src); }
  void addsd(XMMRegister dst, Address src)     { Assembler::addsd(dst, src); }
  void addsd(XMMRegister dst, AddressLiteral src);

  void addss(XMMRegister dst, XMMRegister src) { Assembler::addss(dst, src); }
  void addss(XMMRegister dst, Address src)     { Assembler::addss(dst, src); }
  void addss(XMMRegister dst, AddressLiteral src);

  void addpd(XMMRegister dst, XMMRegister src) { Assembler::addpd(dst, src); }
  void addpd(XMMRegister dst, Address src)     { Assembler::addpd(dst, src); }
  void addpd(XMMRegister dst, AddressLiteral src);

  void divsd(XMMRegister dst, XMMRegister src) { Assembler::divsd(dst, src); }
  void divsd(XMMRegister dst, Address src)     { Assembler::divsd(dst, src); }
  void divsd(XMMRegister dst, AddressLiteral src);

  void divss(XMMRegister dst, XMMRegister src) { Assembler::divss(dst, src); }
  void divss(XMMRegister dst, Address src)     { Assembler::divss(dst, src); }
  void divss(XMMRegister dst, AddressLiteral src);

  // Move Unaligned Double Quadword
  void movdqu(Address dst, XMMRegister src);
  void movdqu(XMMRegister dst, Address src);
  void movdqu(XMMRegister dst, XMMRegister src);
  void movdqu(XMMRegister dst, AddressLiteral src, Register scratchReg = rscratch1);
  // AVX Unaligned forms
  void vmovdqu(Address dst, XMMRegister src);
  void vmovdqu(XMMRegister dst, Address src);
  void vmovdqu(XMMRegister dst, XMMRegister src);
  void vmovdqu(XMMRegister dst, AddressLiteral src, Register scratch_reg = rscratch1);
  void evmovdquq(XMMRegister dst, Address src, int vector_len) { Assembler::evmovdquq(dst, src, vector_len); }
  void evmovdquq(XMMRegister dst, XMMRegister src, int vector_len) { Assembler::evmovdquq(dst, src, vector_len); }
  void evmovdquq(Address dst, XMMRegister src, int vector_len) { Assembler::evmovdquq(dst, src, vector_len); }
  void evmovdquq(XMMRegister dst, AddressLiteral src, int vector_len, Register rscratch);

  // Move Aligned Double Quadword
  void movdqa(XMMRegister dst, Address src)     { Assembler::movdqa(dst, src); }
  void movdqa(XMMRegister dst, XMMRegister src) { Assembler::movdqa(dst, src); }
  void movdqa(XMMRegister dst, AddressLiteral src);

  void movsd(XMMRegister dst, XMMRegister src) { Assembler::movsd(dst, src); }
  void movsd(Address dst, XMMRegister src)     { Assembler::movsd(dst, src); }
  void movsd(XMMRegister dst, Address src)     { Assembler::movsd(dst, src); }
  void movsd(XMMRegister dst, AddressLiteral src);

  void mulpd(XMMRegister dst, XMMRegister src) { Assembler::mulpd(dst, src); }
  void mulpd(XMMRegister dst, Address src)     { Assembler::mulpd(dst, src); }
  void mulpd(XMMRegister dst, AddressLiteral src);

  void mulsd(XMMRegister dst, XMMRegister src) { Assembler::mulsd(dst, src); }
  void mulsd(XMMRegister dst, Address src)     { Assembler::mulsd(dst, src); }
  void mulsd(XMMRegister dst, AddressLiteral src);
  void mulss(XMMRegister dst, XMMRegister src) { Assembler::mulss(dst, src); }
  void mulss(XMMRegister dst, Address src)     { Assembler::mulss(dst, src); }
  void mulss(XMMRegister dst, AddressLiteral src);

  // Carry-Less Multiplication Quadword
  void pclmulldq(XMMRegister dst, XMMRegister src) {
    // 0x00 - multiply lower 64 bits [0:63]
    Assembler::pclmulqdq(dst, src, 0x00);
  }
  void pclmulhdq(XMMRegister dst, XMMRegister src) {
    // 0x11 - multiply upper 64 bits [64:127]
    Assembler::pclmulqdq(dst, src, 0x11);
  }

  void pcmpeqb(XMMRegister dst, XMMRegister src);
  void pcmpeqw(XMMRegister dst, XMMRegister src);

  void pcmpestri(XMMRegister dst, Address src, int imm8);
  void pcmpestri(XMMRegister dst, XMMRegister src, int imm8);

  void pmovzxbw(XMMRegister dst, XMMRegister src);
  void pmovzxbw(XMMRegister dst, Address src);

  void pmovmskb(Register dst, XMMRegister src);

  void ptest(XMMRegister dst, XMMRegister src);

  void sqrtsd(XMMRegister dst, XMMRegister src) { Assembler::sqrtsd(dst, src); }
  void sqrtsd(XMMRegister dst, Address src)     { Assembler::sqrtsd(dst, src); }
  void sqrtsd(XMMRegister dst, AddressLiteral src);

  void roundsd(XMMRegister dst, XMMRegister src, int32_t rmode) { Assembler::roundsd(dst, src, rmode); }
  void roundsd(XMMRegister dst, Address src, int32_t rmode)     { Assembler::roundsd(dst, src, rmode); }
  void roundsd(XMMRegister dst, AddressLiteral src, int32_t rmode, Register scratch_reg);

  void sqrtss(XMMRegister dst, XMMRegister src) { Assembler::sqrtss(dst, src); }
  void sqrtss(XMMRegister dst, Address src)     { Assembler::sqrtss(dst, src); }
  void sqrtss(XMMRegister dst, AddressLiteral src);

  void subsd(XMMRegister dst, XMMRegister src) { Assembler::subsd(dst, src); }
  void subsd(XMMRegister dst, Address src)     { Assembler::subsd(dst, src); }
  void subsd(XMMRegister dst, AddressLiteral src);

  void subss(XMMRegister dst, XMMRegister src) { Assembler::subss(dst, src); }
  void subss(XMMRegister dst, Address src)     { Assembler::subss(dst, src); }
  void subss(XMMRegister dst, AddressLiteral src);

  void ucomiss(XMMRegister dst, XMMRegister src) { Assembler::ucomiss(dst, src); }
  void ucomiss(XMMRegister dst, Address src)     { Assembler::ucomiss(dst, src); }
  void ucomiss(XMMRegister dst, AddressLiteral src);

  void ucomisd(XMMRegister dst, XMMRegister src) { Assembler::ucomisd(dst, src); }
  void ucomisd(XMMRegister dst, Address src)     { Assembler::ucomisd(dst, src); }
  void ucomisd(XMMRegister dst, AddressLiteral src);

  // Bitwise Logical XOR of Packed Double-Precision Floating-Point Values
  void xorpd(XMMRegister dst, XMMRegister src);
  void xorpd(XMMRegister dst, Address src)     { Assembler::xorpd(dst, src); }
  void xorpd(XMMRegister dst, AddressLiteral src, Register scratch_reg = rscratch1);

  // Bitwise Logical XOR of Packed Single-Precision Floating-Point Values
  void xorps(XMMRegister dst, XMMRegister src);
  void xorps(XMMRegister dst, Address src)     { Assembler::xorps(dst, src); }
  void xorps(XMMRegister dst, AddressLiteral src, Register scratch_reg = rscratch1);

  // Shuffle Bytes
  void pshufb(XMMRegister dst, XMMRegister src) { Assembler::pshufb(dst, src); }
  void pshufb(XMMRegister dst, Address src)     { Assembler::pshufb(dst, src); }
  void pshufb(XMMRegister dst, AddressLiteral src);
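
  // Illustrative: the pclmul selectors above fix the imm8 of pclmulqdq, so
  // a 64x64 -> 128-bit carry-less product of the matching halves reads
  // (a sketch):
  //
  //   __ pclmulldq(xmm0, xmm1);   // xmm0[63:0]   * xmm1[63:0]
  //   __ pclmulhdq(xmm2, xmm1);   // xmm2[127:64] * xmm1[127:64]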
  void pcmpeqb(XMMRegister dst, XMMRegister src);
  void pcmpeqw(XMMRegister dst, XMMRegister src);

  void pcmpestri(XMMRegister dst, Address src, int imm8);
  void pcmpestri(XMMRegister dst, XMMRegister src, int imm8);

  void pmovzxbw(XMMRegister dst, XMMRegister src);
  void pmovzxbw(XMMRegister dst, Address src);

  void pmovmskb(Register dst, XMMRegister src);

  void ptest(XMMRegister dst, XMMRegister src);

  void sqrtsd(XMMRegister dst, XMMRegister src)    { Assembler::sqrtsd(dst, src); }
  void sqrtsd(XMMRegister dst, Address src)        { Assembler::sqrtsd(dst, src); }
  void sqrtsd(XMMRegister dst, AddressLiteral src);

  void roundsd(XMMRegister dst, XMMRegister src, int32_t rmode) { Assembler::roundsd(dst, src, rmode); }
  void roundsd(XMMRegister dst, Address src, int32_t rmode)     { Assembler::roundsd(dst, src, rmode); }
  void roundsd(XMMRegister dst, AddressLiteral src, int32_t rmode, Register scratch_reg);

  void sqrtss(XMMRegister dst, XMMRegister src)    { Assembler::sqrtss(dst, src); }
  void sqrtss(XMMRegister dst, Address src)        { Assembler::sqrtss(dst, src); }
  void sqrtss(XMMRegister dst, AddressLiteral src);

  void subsd(XMMRegister dst, XMMRegister src)    { Assembler::subsd(dst, src); }
  void subsd(XMMRegister dst, Address src)        { Assembler::subsd(dst, src); }
  void subsd(XMMRegister dst, AddressLiteral src);

  void subss(XMMRegister dst, XMMRegister src)    { Assembler::subss(dst, src); }
  void subss(XMMRegister dst, Address src)        { Assembler::subss(dst, src); }
  void subss(XMMRegister dst, AddressLiteral src);

  void ucomiss(XMMRegister dst, XMMRegister src) { Assembler::ucomiss(dst, src); }
  void ucomiss(XMMRegister dst, Address src)     { Assembler::ucomiss(dst, src); }
  void ucomiss(XMMRegister dst, AddressLiteral src);

  void ucomisd(XMMRegister dst, XMMRegister src) { Assembler::ucomisd(dst, src); }
  void ucomisd(XMMRegister dst, Address src)     { Assembler::ucomisd(dst, src); }
  void ucomisd(XMMRegister dst, AddressLiteral src);

  // Bitwise Logical XOR of Packed Double-Precision Floating-Point Values
  void xorpd(XMMRegister dst, XMMRegister src);
  void xorpd(XMMRegister dst, Address src)     { Assembler::xorpd(dst, src); }
  void xorpd(XMMRegister dst, AddressLiteral src, Register scratch_reg = rscratch1);

  // Bitwise Logical XOR of Packed Single-Precision Floating-Point Values
  void xorps(XMMRegister dst, XMMRegister src);
  void xorps(XMMRegister dst, Address src)     { Assembler::xorps(dst, src); }
  void xorps(XMMRegister dst, AddressLiteral src, Register scratch_reg = rscratch1);

  // Shuffle Bytes
  void pshufb(XMMRegister dst, XMMRegister src) { Assembler::pshufb(dst, src); }
  void pshufb(XMMRegister dst, Address src)     { Assembler::pshufb(dst, src); }
  void pshufb(XMMRegister dst, AddressLiteral src);

  // AVX 3-operand instructions
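  // Note (not in the original header): in the 3-operand AVX forms below, 'nds'
  // is the non-destructive source encoded in the VEX.vvvv field; the result is
  // written to 'dst' without clobbering either source. For example,
  //   vaddsd(xmm0, xmm1, xmm2);   // xmm0 = xmm1 + xmm2 (scalar double)
  // leaves xmm1 and xmm2 unchanged, unlike the 2-operand SSE addsd.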
  void vaddsd(XMMRegister dst, XMMRegister nds, XMMRegister src) { Assembler::vaddsd(dst, nds, src); }
  void vaddsd(XMMRegister dst, XMMRegister nds, Address src)     { Assembler::vaddsd(dst, nds, src); }
  void vaddsd(XMMRegister dst, XMMRegister nds, AddressLiteral src);

  void vaddss(XMMRegister dst, XMMRegister nds, XMMRegister src) { Assembler::vaddss(dst, nds, src); }
  void vaddss(XMMRegister dst, XMMRegister nds, Address src)     { Assembler::vaddss(dst, nds, src); }
  void vaddss(XMMRegister dst, XMMRegister nds, AddressLiteral src);

  void vabsss(XMMRegister dst, XMMRegister nds, XMMRegister src, AddressLiteral negate_field, int vector_len);
  void vabssd(XMMRegister dst, XMMRegister nds, XMMRegister src, AddressLiteral negate_field, int vector_len);

  void vpaddb(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
  void vpaddb(XMMRegister dst, XMMRegister nds, Address src, int vector_len);

  void vpaddw(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
  void vpaddw(XMMRegister dst, XMMRegister nds, Address src, int vector_len);

  void vpaddd(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { Assembler::vpaddd(dst, nds, src, vector_len); }
  void vpaddd(XMMRegister dst, XMMRegister nds, Address src, int vector_len)     { Assembler::vpaddd(dst, nds, src, vector_len); }
  void vpaddd(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch);

  void vpand(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { Assembler::vpand(dst, nds, src, vector_len); }
  void vpand(XMMRegister dst, XMMRegister nds, Address src, int vector_len)     { Assembler::vpand(dst, nds, src, vector_len); }
  void vpand(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register scratch_reg = rscratch1);

  void vpbroadcastw(XMMRegister dst, XMMRegister src, int vector_len);
  void vpbroadcastw(XMMRegister dst, Address src, int vector_len) { Assembler::vpbroadcastw(dst, src, vector_len); }

  void vpcmpeqb(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);

  void vpcmpeqw(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);

  void vpmovzxbw(XMMRegister dst, Address src, int vector_len);
  void vpmovzxbw(XMMRegister dst, XMMRegister src, int vector_len) { Assembler::vpmovzxbw(dst, src, vector_len); }

  void vpmovmskb(Register dst, XMMRegister src);

  void vpmullw(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
  void vpmullw(XMMRegister dst, XMMRegister nds, Address src, int vector_len);

  void vpsubb(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
  void vpsubb(XMMRegister dst, XMMRegister nds, Address src, int vector_len);

  void vpsubw(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
  void vpsubw(XMMRegister dst, XMMRegister nds, Address src, int vector_len);

  void vpsraw(XMMRegister dst, XMMRegister nds, XMMRegister shift, int vector_len);
  void vpsraw(XMMRegister dst, XMMRegister nds, int shift, int vector_len);

  void evpsraq(XMMRegister dst, XMMRegister nds, XMMRegister shift, int vector_len);
  void evpsraq(XMMRegister dst, XMMRegister nds, int shift, int vector_len);

  void vpsrlw(XMMRegister dst, XMMRegister nds, XMMRegister shift, int vector_len);
  void vpsrlw(XMMRegister dst, XMMRegister nds, int shift, int vector_len);

  void vpsllw(XMMRegister dst, XMMRegister nds, XMMRegister shift, int vector_len);
  void vpsllw(XMMRegister dst, XMMRegister nds, int shift, int vector_len);

  void vptest(XMMRegister dst, XMMRegister src);

  void punpcklbw(XMMRegister dst, XMMRegister src);
  void punpcklbw(XMMRegister dst, Address src) { Assembler::punpcklbw(dst, src); }

  void pshufd(XMMRegister dst, Address src, int mode);
  void pshufd(XMMRegister dst, XMMRegister src, int mode) { Assembler::pshufd(dst, src, mode); }

  void pshuflw(XMMRegister dst, XMMRegister src, int mode);
  void pshuflw(XMMRegister dst, Address src, int mode) { Assembler::pshuflw(dst, src, mode); }

  void vandpd(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { Assembler::vandpd(dst, nds, src, vector_len); }
  void vandpd(XMMRegister dst, XMMRegister nds, Address src, int vector_len)     { Assembler::vandpd(dst, nds, src, vector_len); }
  void vandpd(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register scratch_reg = rscratch1);

  void vandps(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { Assembler::vandps(dst, nds, src, vector_len); }
  void vandps(XMMRegister dst, XMMRegister nds, Address src, int vector_len)     { Assembler::vandps(dst, nds, src, vector_len); }
  void vandps(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register scratch_reg = rscratch1);

  void vdivsd(XMMRegister dst, XMMRegister nds, XMMRegister src) { Assembler::vdivsd(dst, nds, src); }
  void vdivsd(XMMRegister dst, XMMRegister nds, Address src)     { Assembler::vdivsd(dst, nds, src); }
  void vdivsd(XMMRegister dst, XMMRegister nds, AddressLiteral src);

  void vdivss(XMMRegister dst, XMMRegister nds, XMMRegister src) { Assembler::vdivss(dst, nds, src); }
  void vdivss(XMMRegister dst, XMMRegister nds, Address src)     { Assembler::vdivss(dst, nds, src); }
  void vdivss(XMMRegister dst, XMMRegister nds, AddressLiteral src);

  void vmulsd(XMMRegister dst, XMMRegister nds, XMMRegister src) { Assembler::vmulsd(dst, nds, src); }
  void vmulsd(XMMRegister dst, XMMRegister nds, Address src)     { Assembler::vmulsd(dst, nds, src); }
  void vmulsd(XMMRegister dst, XMMRegister nds, AddressLiteral src);

  void vmulss(XMMRegister dst, XMMRegister nds, XMMRegister src) { Assembler::vmulss(dst, nds, src); }
  void vmulss(XMMRegister dst, XMMRegister nds, Address src)     { Assembler::vmulss(dst, nds, src); }
  void vmulss(XMMRegister dst, XMMRegister nds, AddressLiteral src);

  void vsubsd(XMMRegister dst, XMMRegister nds, XMMRegister src) { Assembler::vsubsd(dst, nds, src); }
  void vsubsd(XMMRegister dst, XMMRegister nds, Address src)     { Assembler::vsubsd(dst, nds, src); }
  void vsubsd(XMMRegister dst, XMMRegister nds, AddressLiteral src);

  void vsubss(XMMRegister dst, XMMRegister nds, XMMRegister src) { Assembler::vsubss(dst, nds, src); }
  void vsubss(XMMRegister dst, XMMRegister nds, Address src)     { Assembler::vsubss(dst, nds, src); }
  void vsubss(XMMRegister dst, XMMRegister nds, AddressLiteral src);

  void vnegatess(XMMRegister dst, XMMRegister nds, AddressLiteral src);
  void vnegatesd(XMMRegister dst, XMMRegister nds, AddressLiteral src);

  // AVX Vector instructions

  void vxorpd(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { Assembler::vxorpd(dst, nds, src, vector_len); }
  void vxorpd(XMMRegister dst, XMMRegister nds, Address src, int vector_len)     { Assembler::vxorpd(dst, nds, src, vector_len); }
  void vxorpd(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register scratch_reg = rscratch1);

  void vxorps(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { Assembler::vxorps(dst, nds, src, vector_len); }
  void vxorps(XMMRegister dst, XMMRegister nds, Address src, int vector_len)     { Assembler::vxorps(dst, nds, src, vector_len); }
  void vxorps(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register scratch_reg = rscratch1);

  void vpxor(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
    if (UseAVX > 1 || (vector_len < 1)) // vpxor 256 bit is available only in AVX2
      Assembler::vpxor(dst, nds, src, vector_len);
    else
      Assembler::vxorpd(dst, nds, src, vector_len);
  }
  void vpxor(XMMRegister dst, XMMRegister nds, Address src, int vector_len) {
    if (UseAVX > 1 || (vector_len < 1)) // vpxor 256 bit is available only in AVX2
      Assembler::vpxor(dst, nds, src, vector_len);
    else
      Assembler::vxorpd(dst, nds, src, vector_len);
  }
  void vpxor(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register scratch_reg = rscratch1);

  // Simple version for AVX2 256bit vectors
  void vpxor(XMMRegister dst, XMMRegister src) { Assembler::vpxor(dst, dst, src, true); } // true == 1 == AVX_256bit
  void vpxor(XMMRegister dst, Address src)     { Assembler::vpxor(dst, dst, src, true); } // true == 1 == AVX_256bit
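  // Note (not in the original header): the fallback above relies on XOR being a
  // pure bitwise operation, so vxorpd produces exactly the same result as vpxor.
  // On AVX1-only hardware the 256-bit integer vpxor encoding does not exist, so
  // the floating-point-domain vxorpd is emitted instead, at worst paying a
  // domain-crossing bypass delay on some microarchitectures.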
  void vinserti128(XMMRegister dst, XMMRegister nds, XMMRegister src, uint8_t imm8) {
    if (UseAVX > 2) {
      Assembler::vinserti32x4(dst, dst, src, imm8);
    } else if (UseAVX > 1) {
      // vinserti128 is available only in AVX2
      Assembler::vinserti128(dst, nds, src, imm8);
    } else {
      Assembler::vinsertf128(dst, nds, src, imm8);
    }
  }

  void vinserti128(XMMRegister dst, XMMRegister nds, Address src, uint8_t imm8) {
    if (UseAVX > 2) {
      Assembler::vinserti32x4(dst, dst, src, imm8);
    } else if (UseAVX > 1) {
      // vinserti128 is available only in AVX2
      Assembler::vinserti128(dst, nds, src, imm8);
    } else {
      Assembler::vinsertf128(dst, nds, src, imm8);
    }
  }

  void vextracti128(XMMRegister dst, XMMRegister src, uint8_t imm8) {
    if (UseAVX > 2) {
      Assembler::vextracti32x4(dst, src, imm8);
    } else if (UseAVX > 1) {
      // vextracti128 is available only in AVX2
      Assembler::vextracti128(dst, src, imm8);
    } else {
      Assembler::vextractf128(dst, src, imm8);
    }
  }

  void vextracti128(Address dst, XMMRegister src, uint8_t imm8) {
    if (UseAVX > 2) {
      Assembler::vextracti32x4(dst, src, imm8);
    } else if (UseAVX > 1) {
      // vextracti128 is available only in AVX2
      Assembler::vextracti128(dst, src, imm8);
    } else {
      Assembler::vextractf128(dst, src, imm8);
    }
  }
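  // Note (not in the original header): these helpers pick the best available
  // encoding for a 128-bit lane insert/extract: the EVEX vinserti32x4 and
  // vextracti32x4 forms on AVX-512 (UseAVX > 2), the AVX2 integer forms when
  // available, and the AVX1 floating-point forms otherwise (again legal because
  // a lane move carries no integer/FP semantics of its own). For example, on an
  // AVX1-only machine
  //   vinserti128(xmm0, xmm0, xmm1, 1);
  // would emit vinsertf128 with an identical effect.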
  // 128bit copy to/from high 128 bits of 256bit (YMM) vector registers
  void vinserti128_high(XMMRegister dst, XMMRegister src) {
    vinserti128(dst, dst, src, 1);
  }
  void vinserti128_high(XMMRegister dst, Address src) {
    vinserti128(dst, dst, src, 1);
  }
  void vextracti128_high(XMMRegister dst, XMMRegister src) {
    vextracti128(dst, src, 1);
  }
  void vextracti128_high(Address dst, XMMRegister src) {
    vextracti128(dst, src, 1);
  }

  void vinsertf128_high(XMMRegister dst, XMMRegister src) {
    if (UseAVX > 2) {
      Assembler::vinsertf32x4(dst, dst, src, 1);
    } else {
      Assembler::vinsertf128(dst, dst, src, 1);
    }
  }

  void vinsertf128_high(XMMRegister dst, Address src) {
    if (UseAVX > 2) {
      Assembler::vinsertf32x4(dst, dst, src, 1);
    } else {
      Assembler::vinsertf128(dst, dst, src, 1);
    }
  }

  void vextractf128_high(XMMRegister dst, XMMRegister src) {
    if (UseAVX > 2) {
      Assembler::vextractf32x4(dst, src, 1);
    } else {
      Assembler::vextractf128(dst, src, 1);
    }
  }

  void vextractf128_high(Address dst, XMMRegister src) {
    if (UseAVX > 2) {
      Assembler::vextractf32x4(dst, src, 1);
    } else {
      Assembler::vextractf128(dst, src, 1);
    }
  }

  // 256bit copy to/from high 256 bits of 512bit (ZMM) vector registers
  void vinserti64x4_high(XMMRegister dst, XMMRegister src) {
    Assembler::vinserti64x4(dst, dst, src, 1);
  }
  void vinsertf64x4_high(XMMRegister dst, XMMRegister src) {
    Assembler::vinsertf64x4(dst, dst, src, 1);
  }
  void vextracti64x4_high(XMMRegister dst, XMMRegister src) {
    Assembler::vextracti64x4(dst, src, 1);
  }
  void vextractf64x4_high(XMMRegister dst, XMMRegister src) {
    Assembler::vextractf64x4(dst, src, 1);
  }
  void vextractf64x4_high(Address dst, XMMRegister src) {
    Assembler::vextractf64x4(dst, src, 1);
  }
  void vinsertf64x4_high(XMMRegister dst, Address src) {
    Assembler::vinsertf64x4(dst, dst, src, 1);
  }

  // 128bit copy to/from low 128 bits of 256bit (YMM) vector registers
  void vinserti128_low(XMMRegister dst, XMMRegister src) {
    vinserti128(dst, dst, src, 0);
  }
  void vinserti128_low(XMMRegister dst, Address src) {
    vinserti128(dst, dst, src, 0);
  }
  void vextracti128_low(XMMRegister dst, XMMRegister src) {
    vextracti128(dst, src, 0);
  }
  void vextracti128_low(Address dst, XMMRegister src) {
    vextracti128(dst, src, 0);
  }

  void vinsertf128_low(XMMRegister dst, XMMRegister src) {
    if (UseAVX > 2) {
      Assembler::vinsertf32x4(dst, dst, src, 0);
    } else {
      Assembler::vinsertf128(dst, dst, src, 0);
    }
  }

  void vinsertf128_low(XMMRegister dst, Address src) {
    if (UseAVX > 2) {
      Assembler::vinsertf32x4(dst, dst, src, 0);
    } else {
      Assembler::vinsertf128(dst, dst, src, 0);
    }
  }

  void vextractf128_low(XMMRegister dst, XMMRegister src) {
    if (UseAVX > 2) {
      Assembler::vextractf32x4(dst, src, 0);
    } else {
      Assembler::vextractf128(dst, src, 0);
    }
  }

  void vextractf128_low(Address dst, XMMRegister src) {
    if (UseAVX > 2) {
      Assembler::vextractf32x4(dst, src, 0);
    } else {
      Assembler::vextractf128(dst, src, 0);
    }
  }

  // 256bit copy to/from low 256 bits of 512bit (ZMM) vector registers
  void vinserti64x4_low(XMMRegister dst, XMMRegister src) {
    Assembler::vinserti64x4(dst, dst, src, 0);
  }
  void vinsertf64x4_low(XMMRegister dst, XMMRegister src) {
    Assembler::vinsertf64x4(dst, dst, src, 0);
  }
  void vextracti64x4_low(XMMRegister dst, XMMRegister src) {
    Assembler::vextracti64x4(dst, src, 0);
  }
  void vextractf64x4_low(XMMRegister dst, XMMRegister src) {
    Assembler::vextractf64x4(dst, src, 0);
  }
  void vextractf64x4_low(Address dst, XMMRegister src) {
    Assembler::vextractf64x4(dst, src, 0);
  }
  void vinsertf64x4_low(XMMRegister dst, Address src) {
    Assembler::vinsertf64x4(dst, dst, src, 0);
  }
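  // Note (not in the original header): a typical use of the _high/_low helpers
  // is a horizontal reduction, folding the upper half of a wide accumulator onto
  // the lower half before finishing with 128-bit operations, e.g. (sketch, with
  // xmm0 holding a 256-bit accumulator of ints):
  //   vextracti128_high(xmm1, xmm0);                     // xmm1 = upper 128 bits
  //   vpaddd(xmm0, xmm0, xmm1, Assembler::AVX_128bit);   // fold halves together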
  // Carry-Less Multiplication Quadword
  void vpclmulldq(XMMRegister dst, XMMRegister nds, XMMRegister src) {
    // 0x00 - multiply lower 64 bits [0:63]
    Assembler::vpclmulqdq(dst, nds, src, 0x00);
  }
  void vpclmulhdq(XMMRegister dst, XMMRegister nds, XMMRegister src) {
    // 0x11 - multiply upper 64 bits [64:127]
    Assembler::vpclmulqdq(dst, nds, src, 0x11);
  }
  void vpclmullqhqdq(XMMRegister dst, XMMRegister nds, XMMRegister src) {
    // 0x10 - multiply nds[0:63] and src[64:127]
    Assembler::vpclmulqdq(dst, nds, src, 0x10);
  }
  void vpclmulhqlqdq(XMMRegister dst, XMMRegister nds, XMMRegister src) {
    // 0x01 - multiply nds[64:127] and src[0:63]
    Assembler::vpclmulqdq(dst, nds, src, 0x01);
  }

  void evpclmulldq(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
    // 0x00 - multiply lower 64 bits [0:63]
    Assembler::evpclmulqdq(dst, nds, src, 0x00, vector_len);
  }
  void evpclmulhdq(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
    // 0x11 - multiply upper 64 bits [64:127]
    Assembler::evpclmulqdq(dst, nds, src, 0x11, vector_len);
  }

  // Data

  void cmov32(Condition cc, Register dst, Address  src);
  void cmov32(Condition cc, Register dst, Register src);

  void cmov(Condition cc, Register dst, Register src) { cmovptr(cc, dst, src); }

  void cmovptr(Condition cc, Register dst, Address  src) { LP64_ONLY(cmovq(cc, dst, src)) NOT_LP64(cmov32(cc, dst, src)); }
  void cmovptr(Condition cc, Register dst, Register src) { LP64_ONLY(cmovq(cc, dst, src)) NOT_LP64(cmov32(cc, dst, src)); }

  void movoop(Register dst, jobject obj);
  void movoop(Address  dst, jobject obj);

  void mov_metadata(Register dst, Metadata* obj);
  void mov_metadata(Address  dst, Metadata* obj);

  void movptr(ArrayAddress dst, Register src);
  // can this do an lea?
  void movptr(Register dst, ArrayAddress src);

  void movptr(Register dst, Address src);

#ifdef _LP64
  void movptr(Register dst, AddressLiteral src, Register scratch = rscratch1);
#else
  void movptr(Register dst, AddressLiteral src, Register scratch = noreg); // Scratch reg is ignored in 32-bit
#endif

  void movptr(Register dst, intptr_t src);
  void movptr(Register dst, Register src);
  void movptr(Address  dst, intptr_t src);

  void movptr(Address  dst, Register src);

  void movptr(Register dst, RegisterOrConstant src) {
    if (src.is_constant()) movptr(dst, src.as_constant());
    else                   movptr(dst, src.as_register());
  }

#ifdef _LP64
  // Generally the next two are only used for moving NULL, although there are
  // situations in initializing the mark word where they could be used as well.
  // They are dangerous.

  // They only exist on LP64, where int32_t and intptr_t are distinct types,
  // so the declarations are not ambiguous.

  void movptr(Address  dst, int32_t imm32);
  void movptr(Register dst, int32_t imm32);
#endif // _LP64

  // to avoid hiding movl
  void mov32(AddressLiteral dst, Register src);
  void mov32(Register dst, AddressLiteral src);

  // to avoid hiding movb
  void movbyte(ArrayAddress dst, int src);

  // Import other mov() methods from the parent class, or else
  // they will be hidden by the following overriding declarations.
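  // Note (not in the original header): this is the standard C++ name-hiding
  // rule, not an x86 detail - declaring any overload of a name in a derived
  // class hides *all* base-class overloads of that name unless they are
  // re-imported. A generic illustration (hypothetical types):
  //
  //   struct Base    { void f(int); };
  //   struct Derived : Base {
  //     using Base::f;     // without this, f(int) would be hidden
  //     void f(double);    // adds an overload instead of replacing the set
  //   };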
  using Assembler::movdl;
  using Assembler::movq;
  void movdl(XMMRegister dst, AddressLiteral src);
  void movq(XMMRegister dst, AddressLiteral src);

  // Can push value or effective address
  void pushptr(AddressLiteral src);

  void pushptr(Address src) { LP64_ONLY(pushq(src)) NOT_LP64(pushl(src)); }
  void popptr(Address src)  { LP64_ONLY(popq(src))  NOT_LP64(popl(src)); }

  void pushoop(jobject obj);
  void pushklass(Metadata* obj);

  // sign extend, as needed, an 'l' (32-bit) value to a ptr sized element
  void movl2ptr(Register dst, Address src)  { LP64_ONLY(movslq(dst, src)) NOT_LP64(movl(dst, src)); }
  void movl2ptr(Register dst, Register src) { LP64_ONLY(movslq(dst, src)) NOT_LP64(if (dst != src) movl(dst, src)); }
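  // Note (not in the original header): on LP64 this matters for address
  // arithmetic - a jint index must be sign-extended to 64 bits before it can be
  // scaled into an Address, e.g. (sketch):
  //   movl2ptr(rbx, rcx);                               // movslq on x86_64
  //   movptr(rax, Address(rsi, rbx, Address::times_8)); // safe 64-bit index
  // On 32-bit it degenerates to a plain register move.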
#ifdef COMPILER2
  // Generic instruction support for use in .ad files (C2 code generation)
  void vabsnegd(int opcode, XMMRegister dst, Register scr);
  void vabsnegd(int opcode, XMMRegister dst, XMMRegister src, int vector_len, Register scr);
  void vabsnegf(int opcode, XMMRegister dst, Register scr);
  void vabsnegf(int opcode, XMMRegister dst, XMMRegister src, int vector_len, Register scr);
  void vextendbw(bool sign, XMMRegister dst, XMMRegister src, int vector_len);
  void vextendbw(bool sign, XMMRegister dst, XMMRegister src);
  void vshiftd(int opcode, XMMRegister dst, XMMRegister src);
  void vshiftd(int opcode, XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
  void vshiftw(int opcode, XMMRegister dst, XMMRegister src);
  void vshiftw(int opcode, XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
  void vshiftq(int opcode, XMMRegister dst, XMMRegister src);
  void vshiftq(int opcode, XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
#endif

  // C2 compiled method's prolog code.
  void verified_entry(Compile* C, int sp_inc = 0);

  enum RegState {
    reg_readonly,
    reg_writable,
    reg_written
  };

  int store_value_type_fields_to_buf(ciValueKlass* vk, bool from_interpreter = true);

  // Unpack all value type arguments passed as oops
  void unpack_value_args(Compile* C, bool receiver_only);
  bool move_helper(VMReg from, VMReg to, BasicType bt, RegState reg_state[], int ret_off, int extra_stack_offset);
  bool unpack_value_helper(const GrowableArray<SigEntry>* sig, int& sig_index, VMReg from, VMRegPair* regs_to, int& to_index,
                           RegState reg_state[], int ret_off, int extra_stack_offset);
  bool pack_value_helper(const GrowableArray<SigEntry>* sig, int& sig_index, int vtarg_index,
                         VMReg to, VMRegPair* regs_from, int regs_from_count, int& from_index, RegState reg_state[],
                         int ret_off, int extra_stack_offset);
  void restore_stack(Compile* C);

  int shuffle_value_args(bool is_packing, bool receiver_only, int extra_stack_offset,
                         BasicType* sig_bt, const GrowableArray<SigEntry>* sig_cc,
                         int args_passed, int args_on_stack, VMRegPair* regs,
                         int args_passed_to, int args_on_stack_to, VMRegPair* regs_to);
  bool shuffle_value_args_spill(bool is_packing, const GrowableArray<SigEntry>* sig_cc, int sig_cc_index,
                                VMRegPair* regs_from, int from_index, int regs_from_count,
                                RegState* reg_state, int sp_inc, int extra_stack_offset);
  VMReg spill_reg_for(VMReg reg);

  // Clear memory of size 'cnt' qwords, starting at 'base';
  // if 'is_large' is set, do not try to produce a short loop.
  void clear_mem(Register base, Register cnt, Register val, XMMRegister xtmp, bool is_large, bool word_copy_only);

  // Clear memory of size 'cnt' qwords, starting at 'base', using XMM/YMM registers.
  void xmm_clear_mem(Register base, Register cnt, Register val, XMMRegister xtmp);

#ifdef COMPILER2
  void string_indexof_char(Register str1, Register cnt1, Register ch, Register result,
                           XMMRegister vec1, XMMRegister vec2, XMMRegister vec3, Register tmp);

  // IndexOf strings.
  // Small strings are loaded through the stack if they cross a page boundary.
  void string_indexof(Register str1, Register str2,
                      Register cnt1, Register cnt2,
                      int int_cnt2,  Register result,
                      XMMRegister vec, Register tmp,
                      int ae);

  // IndexOf for constant substrings with size >= 8 elements
  // which don't need to be loaded through the stack.
  void string_indexofC8(Register str1, Register str2,
                        Register cnt1, Register cnt2,
                        int int_cnt2,  Register result,
                        XMMRegister vec, Register tmp,
                        int ae);

  // Smallest code: no loads through the stack are needed,
  // only the string tail is checked.

  // helper function for string_compare
  void load_next_elements(Register elem1, Register elem2, Register str1, Register str2,
                          Address::ScaleFactor scale, Address::ScaleFactor scale1,
                          Address::ScaleFactor scale2, Register index, int ae);
  // Compare strings.
  void string_compare(Register str1, Register str2,
                      Register cnt1, Register cnt2, Register result,
                      XMMRegister vec1, int ae);

  // Search for a non-ASCII character (negative byte value) in a byte array;
  // 'result' is set to true if any is found and false otherwise.
  void has_negatives(Register ary1, Register len,
                     Register result, Register tmp1,
                     XMMRegister vec1, XMMRegister vec2);
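  // Note (not in the original header): the scalar behavior the vectorized
  // has_negatives code implements is simply (sketch):
  //   bool has_negatives(const jbyte* ary, int len) {
  //     for (int i = 0; i < len; i++) {
  //       if (ary[i] < 0) return true;   // high bit set, i.e. non-ASCII
  //     }
  //     return false;
  //   }
  // The SIMD version tests the sign bits of whole vectors at a time (e.g. via
  // pmovmskb/vpmovmskb) instead of one byte per iteration.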
  // Compare char[] or byte[] arrays.
  void arrays_equals(bool is_array_equ, Register ary1, Register ary2,
                     Register limit, Register result, Register chr,
                     XMMRegister vec1, XMMRegister vec2, bool is_char);

#endif

  // Fill primitive arrays
  void generate_fill(BasicType t, bool aligned,
                     Register to, Register value, Register count,
                     Register rtmp, XMMRegister xtmp);

  void encode_iso_array(Register src, Register dst, Register len,
                        XMMRegister tmp1, XMMRegister tmp2, XMMRegister tmp3,
                        XMMRegister tmp4, Register tmp5, Register result);

#ifdef _LP64
  void add2_with_carry(Register dest_hi, Register dest_lo, Register src1, Register src2);
  void multiply_64_x_64_loop(Register x, Register xstart, Register x_xstart,
                             Register y, Register y_idx, Register z,
                             Register carry, Register product,
                             Register idx, Register kdx);
  void multiply_add_128_x_128(Register x_xstart, Register y, Register z,
                              Register yz_idx, Register idx,
                              Register carry, Register product, int offset);
  void multiply_128_x_128_bmi2_loop(Register y, Register z,
                                    Register carry, Register carry2,
                                    Register idx, Register jdx,
                                    Register yz_idx1, Register yz_idx2,
                                    Register tmp, Register tmp3, Register tmp4);
  void multiply_128_x_128_loop(Register x_xstart, Register y, Register z,
                               Register yz_idx, Register idx, Register jdx,
                               Register carry, Register product,
                               Register carry2);
  void multiply_to_len(Register x, Register xlen, Register y, Register ylen, Register z, Register zlen,
                       Register tmp1, Register tmp2, Register tmp3, Register tmp4, Register tmp5);
  void square_rshift(Register x, Register len, Register z, Register tmp1, Register tmp3,
                     Register tmp4, Register tmp5, Register rdxReg, Register raxReg);
  void multiply_add_64_bmi2(Register sum, Register op1, Register op2, Register carry,
                            Register tmp2);
  void multiply_add_64(Register sum, Register op1, Register op2, Register carry,
                       Register rdxReg, Register raxReg);
  void add_one_64(Register z, Register zlen, Register carry, Register tmp1);
  void lshift_by_1(Register x, Register len, Register z, Register zlen, Register tmp1, Register tmp2,
                   Register tmp3, Register tmp4);
  void square_to_len(Register x, Register len, Register z, Register zlen, Register tmp1, Register tmp2,
                     Register tmp3, Register tmp4, Register tmp5, Register rdxReg, Register raxReg);

  void mul_add_128_x_32_loop(Register out, Register in, Register offset, Register len, Register tmp1,
                             Register tmp2, Register tmp3, Register tmp4, Register tmp5, Register rdxReg,
                             Register raxReg);
  void mul_add(Register out, Register in, Register offset, Register len, Register k, Register tmp1,
               Register tmp2, Register tmp3, Register tmp4, Register tmp5, Register rdxReg,
               Register raxReg);
  void vectorized_mismatch(Register obja, Register objb, Register length, Register log2_array_indxscale,
                           Register result, Register tmp1, Register tmp2,
                           XMMRegister vec1, XMMRegister vec2, XMMRegister vec3);
#endif
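  // Note (not in the original header): the multiply/square helpers above back
  // the java.math.BigInteger intrinsics (multiplyToLen, squareToLen, mulAdd).
  // They implement schoolbook long multiplication over 32-bit limbs; the inner
  // step is essentially (sketch):
  //   product = (julong)x_limb * y_limb + z_limb + carry;
  //   z_limb  = (juint)product;        // low 32 bits stored back
  //   carry   = product >> 32;         // high 32 bits propagate
  // with the _bmi2 variants using the flag-preserving mulx instruction.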
  // CRC32 code for java.util.zip.CRC32::updateBytes() intrinsic.
  void update_byte_crc32(Register crc, Register val, Register table);
  void kernel_crc32(Register crc, Register buf, Register len, Register table, Register tmp);
  // CRC32C code for java.util.zip.CRC32C::updateBytes() intrinsic.
  // Note on the naming convention:
  // Prefix w = register only used on a Westmere+ architecture
  // Prefix n = register only used on a Nehalem architecture
#ifdef _LP64
  void crc32c_ipl_alg4(Register in_out, uint32_t n,
                       Register tmp1, Register tmp2, Register tmp3);
#else
  void crc32c_ipl_alg4(Register in_out, uint32_t n,
                       Register tmp1, Register tmp2, Register tmp3,
                       XMMRegister xtmp1, XMMRegister xtmp2);
#endif
  void crc32c_pclmulqdq(XMMRegister w_xtmp1,
                        Register in_out,
                        uint32_t const_or_pre_comp_const_index, bool is_pclmulqdq_supported,
                        XMMRegister w_xtmp2,
                        Register tmp1,
                        Register n_tmp2, Register n_tmp3);
  void crc32c_rec_alt2(uint32_t const_or_pre_comp_const_index_u1, uint32_t const_or_pre_comp_const_index_u2, bool is_pclmulqdq_supported, Register in_out, Register in1, Register in2,
                       XMMRegister w_xtmp1, XMMRegister w_xtmp2, XMMRegister w_xtmp3,
                       Register tmp1, Register tmp2,
                       Register n_tmp3);
  void crc32c_proc_chunk(uint32_t size, uint32_t const_or_pre_comp_const_index_u1, uint32_t const_or_pre_comp_const_index_u2, bool is_pclmulqdq_supported,
                         Register in_out1, Register in_out2, Register in_out3,
                         Register tmp1, Register tmp2, Register tmp3,
                         XMMRegister w_xtmp1, XMMRegister w_xtmp2, XMMRegister w_xtmp3,
                         Register tmp4, Register tmp5,
                         Register n_tmp6);
  void crc32c_ipl_alg2_alt2(Register in_out, Register in1, Register in2,
                            Register tmp1, Register tmp2, Register tmp3,
                            Register tmp4, Register tmp5, Register tmp6,
                            XMMRegister w_xtmp1, XMMRegister w_xtmp2, XMMRegister w_xtmp3,
                            bool is_pclmulqdq_supported);
  // Fold 128-bit data chunk
  void fold_128bit_crc32(XMMRegister xcrc, XMMRegister xK, XMMRegister xtmp, Register buf, int offset);
  void fold_128bit_crc32(XMMRegister xcrc, XMMRegister xK, XMMRegister xtmp, XMMRegister xbuf);
  // Fold 8-bit data
  void fold_8bit_crc32(Register crc, Register table, Register tmp);
  void fold_8bit_crc32(XMMRegister crc, Register table, XMMRegister xtmp, Register tmp);
  void fold_128bit_crc32_avx512(XMMRegister xcrc, XMMRegister xK, XMMRegister xtmp, Register buf, int offset);

  // Compress char[] array to byte[].
  void char_array_compress(Register src, Register dst, Register len,
                           XMMRegister tmp1, XMMRegister tmp2, XMMRegister tmp3,
                           XMMRegister tmp4, Register tmp5, Register result);

  // Inflate byte[] array to char[].
  void byte_array_inflate(Register src, Register dst, Register len,
                          XMMRegister tmp1, Register tmp2);

#ifdef _LP64
  void cache_wb(Address line);
  void cache_wbsync(bool is_pre);
#endif // _LP64

#include "asm/macroAssembler_common.hpp"

};

/**
 * class SkipIfEqual:
 *
 * Instantiating this class emits assembly code that jumps around any code
 * generated between the creation of the instance and its automatic
 * destruction at the end of the enclosing scope, depending on the value of
 * the flag passed to the constructor; the flag is checked at run-time.
 */
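// Note (not in the original header): a minimal usage sketch, assuming the
// constructor compares the byte at 'flag_addr' against 'value' and emits a
// conditional jump past the guarded code when they are equal, with the
// destructor binding the jump target. 'SomeBoolFlag' is a hypothetical flag:
//
//   {
//     SkipIfEqual skip(masm, &SomeBoolFlag, false);
//     // ... code emitted here runs only when SomeBoolFlag is true ...
//   }   // skip's destructor binds the label here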
class SkipIfEqual {
 private:
  MacroAssembler* _masm;
  Label _label;

 public:
  SkipIfEqual(MacroAssembler*, const bool* flag_addr, bool value);
  ~SkipIfEqual();
};

#endif // CPU_X86_MACROASSEMBLER_X86_HPP