/*
 * Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2014, 2015, Red Hat Inc. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef CPU_AARCH64_VM_MACROASSEMBLER_AARCH64_HPP
#define CPU_AARCH64_VM_MACROASSEMBLER_AARCH64_HPP

#include "asm/assembler.hpp"

// MacroAssembler extends Assembler by frequently used macros.
//
// Instructions for which a 'better' code sequence exists depending
// on arguments should also go in here.

class MacroAssembler: public Assembler {
  friend class LIR_Assembler;

 public:
  using Assembler::mov;
  using Assembler::movi;

 protected:

  // Support for VM calls
  //
  // This is the base routine called by the different versions of call_VM_leaf. The interpreter
  // may customize this version by overriding it for its purposes (e.g., to save/restore
  // additional registers when doing a VM call).
  virtual void call_VM_leaf_base(
    address entry_point,          // the entry point
    int     number_of_arguments,  // the number of arguments to pop after the call
    Label*  retaddr = NULL
  );

  virtual void call_VM_leaf_base(
    address entry_point,          // the entry point
    int     number_of_arguments,  // the number of arguments to pop after the call
    Label&  retaddr) {
    call_VM_leaf_base(entry_point, number_of_arguments, &retaddr);
  }

  // This is the base routine called by the different versions of call_VM. The interpreter
  // may customize this version by overriding it for its purposes (e.g., to save/restore
  // additional registers when doing a VM call).
  //
  // If no java_thread register is specified (noreg) then rthread will be used instead. call_VM_base
  // returns the register which contains the thread upon return. If a thread register has been
  // specified, the return value will correspond to that register. If no last_java_sp is specified
  // (noreg) then rsp will be used instead.
  virtual void call_VM_base(      // returns the register containing the thread upon return
    Register oop_result,          // where an oop-result ends up if any; use noreg otherwise
    Register java_thread,         // the thread if computed before; use noreg otherwise
    Register last_java_sp,        // to set up last_Java_frame in stubs; use noreg otherwise
    address  entry_point,         // the entry point
    int      number_of_arguments, // the number of arguments (w/o thread) to pop after the call
    bool     check_exceptions     // whether to check for pending exceptions after return
  );

  void call_VM_helper(Register oop_result, address entry_point, int number_of_arguments, bool check_exceptions = true);

  // True if an XOR can be used to expand narrow klass references.
  bool use_XOR_for_compressed_class_base;

 public:
  MacroAssembler(CodeBuffer* code) : Assembler(code) {
    use_XOR_for_compressed_class_base
      = (operand_valid_for_logical_immediate(false /*is32*/,
                                             (uint64_t)Universe::narrow_klass_base())
         && ((uint64_t)Universe::narrow_klass_base()
             > (1UL << log2_intptr(Universe::narrow_klass_range()))));
  }

  // These routines should emit JVMTI PopFrame and ForceEarlyReturn handling code.
  // The implementation is only non-empty for the InterpreterMacroAssembler,
  // as only the interpreter handles PopFrame and ForceEarlyReturn requests.
  virtual void check_and_handle_popframe(Register java_thread);
  virtual void check_and_handle_earlyret(Register java_thread);

  void safepoint_poll(Label& slow_path);
  void safepoint_poll_acquire(Label& slow_path);

  // Biased locking support
  // lock_reg and obj_reg must be loaded up with the appropriate values.
  // swap_reg is killed.
  // tmp_reg must be supplied and must not be rscratch1 or rscratch2
  // Optional slow case is for implementations (interpreter and C1) which branch to
  // slow case directly. Leaves condition codes set for C2's Fast_Lock node.
  // Returns offset of first potentially-faulting instruction for null
  // check info (currently consumed only by C1). If
  // swap_reg_contains_mark is true then returns -1 as it is assumed
  // the calling code has already passed any potential faults.
  int biased_locking_enter(Register lock_reg, Register obj_reg,
                           Register swap_reg, Register tmp_reg,
                           bool swap_reg_contains_mark,
                           Label& done, Label* slow_case = NULL,
                           BiasedLockingCounters* counters = NULL);
  void biased_locking_exit(Register obj_reg, Register temp_reg, Label& done);
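
  // Usage sketch (illustrative only; registers and labels are hypothetical):
  // a typical fast-lock sequence might be wired up as
  //
  //   Label done, slow_case;
  //   biased_locking_enter(lock_reg, obj_reg, swap_reg, tmp_reg,
  //                        /*swap_reg_contains_mark*/ false, done, &slow_case);
  //   // ... CAS-based stack locking ...
  //   bind(done);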

  // Helper functions for statistics gathering.
  // Unconditional atomic increment.
  void atomic_incw(Register counter_addr, Register tmp, Register tmp2);
  void atomic_incw(Address counter_addr, Register tmp1, Register tmp2, Register tmp3) {
    lea(tmp1, counter_addr);
    atomic_incw(tmp1, tmp2, tmp3);
  }
  // Load Effective Address
  void lea(Register r, const Address &a) {
    InstructionMark im(this);
    code_section()->relocate(inst_mark(), a.rspec());
    a.lea(this, r);
  }

  void addmw(Address a, Register incr, Register scratch) {
    ldrw(scratch, a);
    addw(scratch, scratch, incr);
    strw(scratch, a);
  }

  // Add constant to memory word
  void addmw(Address a, int imm, Register scratch) {
    ldrw(scratch, a);
    if (imm > 0)
      addw(scratch, scratch, (unsigned)imm);
    else
      subw(scratch, scratch, (unsigned)-imm);
    strw(scratch, a);
  }

  void bind(Label& L) {
    Assembler::bind(L);
    code()->clear_last_insn();
  }

  void membar(Membar_mask_bits order_constraint);

  using Assembler::ldr;
  using Assembler::str;

  void ldr(Register Rx, const Address &adr);
  void ldrw(Register Rw, const Address &adr);
  void str(Register Rx, const Address &adr);
  void strw(Register Rx, const Address &adr);

  // Frame creation and destruction shared between JITs.
  void build_frame(int framesize);
  void remove_frame(int framesize);

  virtual void _call_Unimplemented(address call_site) {
    mov(rscratch2, call_site);
    haltsim();
  }

#define call_Unimplemented() _call_Unimplemented((address)__PRETTY_FUNCTION__)

  virtual void notify(int type);

  // aliases defined in AARCH64 spec

  template<class T>
  inline void cmpw(Register Rd, T imm) { subsw(zr, Rd, imm); }
  // imm is limited to 12 bits.
  inline void cmp(Register Rd, unsigned imm) { subs(zr, Rd, imm); }

  inline void cmnw(Register Rd, unsigned imm) { addsw(zr, Rd, imm); }
  inline void cmn(Register Rd, unsigned imm) { adds(zr, Rd, imm); }

  void cset(Register Rd, Assembler::Condition cond) {
    csinc(Rd, zr, zr, ~cond);
  }
  void csetw(Register Rd, Assembler::Condition cond) {
    csincw(Rd, zr, zr, ~cond);
  }

  void cneg(Register Rd, Register Rn, Assembler::Condition cond) {
    csneg(Rd, Rn, Rn, ~cond);
  }
  void cnegw(Register Rd, Register Rn, Assembler::Condition cond) {
    csnegw(Rd, Rn, Rn, ~cond);
  }
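
  // For example (illustrative): cset(r0, Assembler::EQ) assembles as
  // "csinc r0, zr, zr, NE", i.e. r0 = 1 if the Z flag is set and 0
  // otherwise; cneg negates Rn when the condition holds and copies it
  // when it does not.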

  inline void movw(Register Rd, Register Rn) {
    if (Rd == sp || Rn == sp) {
      addw(Rd, Rn, 0U);
    } else {
      orrw(Rd, zr, Rn);
    }
  }
  inline void mov(Register Rd, Register Rn) {
    assert(Rd != r31_sp && Rn != r31_sp, "should be");
    if (Rd == Rn) {
    } else if (Rd == sp || Rn == sp) {
      add(Rd, Rn, 0U);
    } else {
      orr(Rd, zr, Rn);
    }
  }

  inline void moviw(Register Rd, unsigned imm) { orrw(Rd, zr, imm); }
  inline void movi(Register Rd, unsigned imm) { orr(Rd, zr, imm); }

  inline void tstw(Register Rd, Register Rn) { andsw(zr, Rd, Rn); }
  inline void tst(Register Rd, Register Rn) { ands(zr, Rd, Rn); }

  inline void tstw(Register Rd, uint64_t imm) { andsw(zr, Rd, imm); }
  inline void tst(Register Rd, uint64_t imm) { ands(zr, Rd, imm); }

  inline void bfiw(Register Rd, Register Rn, unsigned lsb, unsigned width) {
    bfmw(Rd, Rn, ((32 - lsb) & 31), (width - 1));
  }
  inline void bfi(Register Rd, Register Rn, unsigned lsb, unsigned width) {
    bfm(Rd, Rn, ((64 - lsb) & 63), (width - 1));
  }

  inline void bfxilw(Register Rd, Register Rn, unsigned lsb, unsigned width) {
    bfmw(Rd, Rn, lsb, (lsb + width - 1));
  }
  inline void bfxil(Register Rd, Register Rn, unsigned lsb, unsigned width) {
    bfm(Rd, Rn, lsb, (lsb + width - 1));
  }

  inline void sbfizw(Register Rd, Register Rn, unsigned lsb, unsigned width) {
    sbfmw(Rd, Rn, ((32 - lsb) & 31), (width - 1));
  }
  inline void sbfiz(Register Rd, Register Rn, unsigned lsb, unsigned width) {
    sbfm(Rd, Rn, ((64 - lsb) & 63), (width - 1));
  }

  inline void sbfxw(Register Rd, Register Rn, unsigned lsb, unsigned width) {
    sbfmw(Rd, Rn, lsb, (lsb + width - 1));
  }
  inline void sbfx(Register Rd, Register Rn, unsigned lsb, unsigned width) {
    sbfm(Rd, Rn, lsb, (lsb + width - 1));
  }

  inline void ubfizw(Register Rd, Register Rn, unsigned lsb, unsigned width) {
    ubfmw(Rd, Rn, ((32 - lsb) & 31), (width - 1));
  }
  inline void ubfiz(Register Rd, Register Rn, unsigned lsb, unsigned width) {
    ubfm(Rd, Rn, ((64 - lsb) & 63), (width - 1));
  }

  inline void ubfxw(Register Rd, Register Rn, unsigned lsb, unsigned width) {
    ubfmw(Rd, Rn, lsb, (lsb + width - 1));
  }
  inline void ubfx(Register Rd, Register Rn, unsigned lsb, unsigned width) {
    ubfm(Rd, Rn, lsb, (lsb + width - 1));
  }

  inline void asrw(Register Rd, Register Rn, unsigned imm) {
    sbfmw(Rd, Rn, imm, 31);
  }

  inline void asr(Register Rd, Register Rn, unsigned imm) {
    sbfm(Rd, Rn, imm, 63);
  }

  inline void lslw(Register Rd, Register Rn, unsigned imm) {
    ubfmw(Rd, Rn, ((32 - imm) & 31), (31 - imm));
  }

  inline void lsl(Register Rd, Register Rn, unsigned imm) {
    ubfm(Rd, Rn, ((64 - imm) & 63), (63 - imm));
  }

  inline void lsrw(Register Rd, Register Rn, unsigned imm) {
    ubfmw(Rd, Rn, imm, 31);
  }

  inline void lsr(Register Rd, Register Rn, unsigned imm) {
    ubfm(Rd, Rn, imm, 63);
  }

  inline void rorw(Register Rd, Register Rn, unsigned imm) {
    extrw(Rd, Rn, Rn, imm);
  }

  inline void ror(Register Rd, Register Rn, unsigned imm) {
    extr(Rd, Rn, Rn, imm);
  }

  inline void sxtbw(Register Rd, Register Rn) {
    sbfmw(Rd, Rn, 0, 7);
  }
  inline void sxthw(Register Rd, Register Rn) {
    sbfmw(Rd, Rn, 0, 15);
  }
  inline void sxtb(Register Rd, Register Rn) {
    sbfm(Rd, Rn, 0, 7);
  }
  inline void sxth(Register Rd, Register Rn) {
    sbfm(Rd, Rn, 0, 15);
  }
  inline void sxtw(Register Rd, Register Rn) {
    sbfm(Rd, Rn, 0, 31);
  }

  inline void uxtbw(Register Rd, Register Rn) {
    ubfmw(Rd, Rn, 0, 7);
  }
  inline void uxthw(Register Rd, Register Rn) {
    ubfmw(Rd, Rn, 0, 15);
  }
  inline void uxtb(Register Rd, Register Rn) {
    ubfm(Rd, Rn, 0, 7);
  }
  inline void uxth(Register Rd, Register Rn) {
    ubfm(Rd, Rn, 0, 15);
  }
  inline void uxtw(Register Rd, Register Rn) {
    ubfm(Rd, Rn, 0, 31);
  }
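
  // Worked example (illustrative): ubfx(r0, r1, 8, 4) is ubfm(r0, r1, 8, 11)
  // and computes r0 = (r1 >> 8) & 0xf, while bfi(r0, r1, 16, 8) inserts the
  // low 8 bits of r1 into r0 at bit 16, leaving the other bits of r0 intact.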

  inline void cmnw(Register Rn, Register Rm) {
    addsw(zr, Rn, Rm);
  }
  inline void cmn(Register Rn, Register Rm) {
    adds(zr, Rn, Rm);
  }

  inline void cmpw(Register Rn, Register Rm) {
    subsw(zr, Rn, Rm);
  }
  inline void cmp(Register Rn, Register Rm) {
    subs(zr, Rn, Rm);
  }

  inline void negw(Register Rd, Register Rn) {
    subw(Rd, zr, Rn);
  }

  inline void neg(Register Rd, Register Rn) {
    sub(Rd, zr, Rn);
  }

  inline void negsw(Register Rd, Register Rn) {
    subsw(Rd, zr, Rn);
  }

  inline void negs(Register Rd, Register Rn) {
    subs(Rd, zr, Rn);
  }

  inline void cmnw(Register Rn, Register Rm, enum shift_kind kind, unsigned shift = 0) {
    addsw(zr, Rn, Rm, kind, shift);
  }
  inline void cmn(Register Rn, Register Rm, enum shift_kind kind, unsigned shift = 0) {
    adds(zr, Rn, Rm, kind, shift);
  }

  inline void cmpw(Register Rn, Register Rm, enum shift_kind kind, unsigned shift = 0) {
    subsw(zr, Rn, Rm, kind, shift);
  }
  inline void cmp(Register Rn, Register Rm, enum shift_kind kind, unsigned shift = 0) {
    subs(zr, Rn, Rm, kind, shift);
  }

  inline void negw(Register Rd, Register Rn, enum shift_kind kind, unsigned shift = 0) {
    subw(Rd, zr, Rn, kind, shift);
  }

  inline void neg(Register Rd, Register Rn, enum shift_kind kind, unsigned shift = 0) {
    sub(Rd, zr, Rn, kind, shift);
  }

  inline void negsw(Register Rd, Register Rn, enum shift_kind kind, unsigned shift = 0) {
    subsw(Rd, zr, Rn, kind, shift);
  }

  inline void negs(Register Rd, Register Rn, enum shift_kind kind, unsigned shift = 0) {
    subs(Rd, zr, Rn, kind, shift);
  }

  inline void mnegw(Register Rd, Register Rn, Register Rm) {
    msubw(Rd, Rn, Rm, zr);
  }
  inline void mneg(Register Rd, Register Rn, Register Rm) {
    msub(Rd, Rn, Rm, zr);
  }

  inline void mulw(Register Rd, Register Rn, Register Rm) {
    maddw(Rd, Rn, Rm, zr);
  }
  inline void mul(Register Rd, Register Rn, Register Rm) {
    madd(Rd, Rn, Rm, zr);
  }

  inline void smnegl(Register Rd, Register Rn, Register Rm) {
    smsubl(Rd, Rn, Rm, zr);
  }
  inline void smull(Register Rd, Register Rn, Register Rm) {
    smaddl(Rd, Rn, Rm, zr);
  }

  inline void umnegl(Register Rd, Register Rn, Register Rm) {
    umsubl(Rd, Rn, Rm, zr);
  }
  inline void umull(Register Rd, Register Rn, Register Rm) {
    umaddl(Rd, Rn, Rm, zr);
  }

  // On CPUs with the A53MAC quirk, insert a NOP before a multiply-accumulate
  // that uses a real accumulator register.
#define WRAP(INSN)                                                      \
  void INSN(Register Rd, Register Rn, Register Rm, Register Ra) {      \
    if ((VM_Version::features() & VM_Version::CPU_A53MAC) && Ra != zr) \
      nop();                                                            \
    Assembler::INSN(Rd, Rn, Rm, Ra);                                   \
  }

  WRAP(madd) WRAP(msub) WRAP(maddw) WRAP(msubw)
  WRAP(smaddl) WRAP(smsubl) WRAP(umaddl) WRAP(umsubl)
#undef WRAP


  // macro assembly operations needed for aarch64

  // first two private routines for loading 32 bit or 64 bit constants
private:

  void mov_immediate64(Register dst, u_int64_t imm64);
  void mov_immediate32(Register dst, u_int32_t imm32);

  int push(unsigned int bitset, Register stack);
  int pop(unsigned int bitset, Register stack);

  void mov(Register dst, Address a);

public:
  void push(RegSet regs, Register stack) { if (regs.bits()) push(regs.bits(), stack); }
  void pop(RegSet regs, Register stack) { if (regs.bits()) pop(regs.bits(), stack); }
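
  // Usage sketch (illustrative, assuming RegSet::of): spill and restore a
  // pair of temporaries around a code region, e.g.
  //
  //   push(RegSet::of(r10, r11), sp);
  //   // ... code that clobbers r10/r11 ...
  //   pop(RegSet::of(r10, r11), sp);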

  // Push and pop everything that might be clobbered by a native
  // runtime call except rscratch1 and rscratch2. (They are always
  // scratch, so we don't have to protect them.) Only save the lower
  // 64 bits of each vector register.
  void push_call_clobbered_registers();
  void pop_call_clobbered_registers();

  // now mov instructions for loading absolute addresses and 32 or
  // 64 bit integers

  inline void mov(Register dst, address addr)
  {
    mov_immediate64(dst, (u_int64_t)addr);
  }

  inline void mov(Register dst, u_int64_t imm64)
  {
    mov_immediate64(dst, imm64);
  }

  inline void movw(Register dst, u_int32_t imm32)
  {
    mov_immediate32(dst, imm32);
  }

  inline void mov(Register dst, long l)
  {
    mov(dst, (u_int64_t)l);
  }

  inline void mov(Register dst, int i)
  {
    mov(dst, (long)i);
  }

  void mov(Register dst, RegisterOrConstant src) {
    if (src.is_register())
      mov(dst, src.as_register());
    else
      mov(dst, src.as_constant());
  }

  void movptr(Register r, uintptr_t imm64);

  void mov(FloatRegister Vd, SIMD_Arrangement T, u_int32_t imm32);

  void mov(FloatRegister Vd, SIMD_Arrangement T, FloatRegister Vn) {
    orr(Vd, T, Vn, Vn);
  }

public:

  // Generalized Test Bit And Branch, including a "far" variety which
  // spans more than 32KiB.
  void tbr(Condition cond, Register Rt, int bitpos, Label &dest, bool far = false) {
    assert(cond == EQ || cond == NE, "must be");

    if (far)
      cond = ~cond;

    void (Assembler::* branch)(Register Rt, int bitpos, Label &L);
    if (cond == Assembler::EQ)
      branch = &Assembler::tbz;
    else
      branch = &Assembler::tbnz;

    if (far) {
      Label L;
      (this->*branch)(Rt, bitpos, L);
      b(dest);
      bind(L);
    } else {
      (this->*branch)(Rt, bitpos, dest);
    }
  }
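
  // For a far target (illustrative): tbr(EQ, r0, 3, dest, /*far*/true)
  // inverts the test and emits
  //
  //   tbnz r0, #3, L
  //   b    dest
  //   L:
  //
  // because tbz/tbnz themselves only reach +/-32KiB.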

  // macro instructions for accessing and updating floating point
  // status register
  //
  // FPSR : op1 == 011
  //        CRn == 0100
  //        CRm == 0100
  //        op2 == 001

  inline void get_fpsr(Register reg)
  {
    mrs(0b11, 0b0100, 0b0100, 0b001, reg);
  }

  inline void set_fpsr(Register reg)
  {
    msr(0b011, 0b0100, 0b0100, 0b001, reg);
  }

  inline void clear_fpsr()
  {
    msr(0b011, 0b0100, 0b0100, 0b001, zr);
  }

  // DCZID_EL0: op1 == 011
  //            CRn == 0000
  //            CRm == 0000
  //            op2 == 111
  inline void get_dczid_el0(Register reg)
  {
    mrs(0b011, 0b0000, 0b0000, 0b111, reg);
  }

  // CTR_EL0:   op1 == 011
  //            CRn == 0000
  //            CRm == 0000
  //            op2 == 001
  inline void get_ctr_el0(Register reg)
  {
    mrs(0b011, 0b0000, 0b0000, 0b001, reg);
  }

  // idiv variant which deals with MINLONG as dividend and -1 as divisor
  int corrected_idivl(Register result, Register ra, Register rb,
                      bool want_remainder, Register tmp = rscratch1);
  int corrected_idivq(Register result, Register ra, Register rb,
                      bool want_remainder, Register tmp = rscratch1);

  // Support for NULL-checks
  //
  // Generates code that causes a NULL OS exception if the content of reg is NULL.
  // If the accessed location is M[reg + offset] and the offset is known, provide the
  // offset. No explicit code generation is needed if the offset is within a certain
  // range (0 <= offset <= page_size).

  virtual void null_check(Register reg, int offset = -1);
  static bool needs_explicit_null_check(intptr_t offset);

  static address target_addr_for_insn(address insn_addr, unsigned insn);
  static address target_addr_for_insn(address insn_addr) {
    unsigned insn = *(unsigned*)insn_addr;
    return target_addr_for_insn(insn_addr, insn);
  }

  // Required platform-specific helpers for Label::patch_instructions.
  // They _shadow_ the declarations in AbstractAssembler, which are undefined.
  static int pd_patch_instruction_size(address branch, address target);
  static void pd_patch_instruction(address branch, address target) {
    pd_patch_instruction_size(branch, target);
  }
  static address pd_call_destination(address branch) {
    return target_addr_for_insn(branch);
  }
#ifndef PRODUCT
  static void pd_print_patched_instruction(address branch);
#endif

  static int patch_oop(address insn_addr, address o);
  static int patch_narrow_klass(address insn_addr, narrowKlass n);

  address emit_trampoline_stub(int insts_call_instruction_offset, address target);

  // The following 4 methods return the offset of the appropriate move instruction

  // Support for fast byte/short loading with zero extension (depending on particular CPU)
  int load_unsigned_byte(Register dst, Address src);
  int load_unsigned_short(Register dst, Address src);

  // Support for fast byte/short loading with sign extension (depending on particular CPU)
  int load_signed_byte(Register dst, Address src);
  int load_signed_short(Register dst, Address src);

  int load_signed_byte32(Register dst, Address src);
  int load_signed_short32(Register dst, Address src);

  // Support for sign-extension (hi:lo = extend_sign(lo))
  void extend_sign(Register hi, Register lo);

  // Load and store values by size and signed-ness
  void load_sized_value(Register dst, Address src, size_t size_in_bytes, bool is_signed, Register dst2 = noreg);
  void store_sized_value(Address dst, Register src, size_t size_in_bytes, Register src2 = noreg);

  // Support for inc/dec with optimal instruction selection depending on value

  // x86_64 aliases an unqualified register/address increment and
  // decrement to call incrementq and decrementq but also supports
  // explicitly sized calls to incrementq/decrementq or
  // incrementl/decrementl

  // for aarch64 the proper convention would be to use
  // increment/decrement for 64 bit operations and
  // incrementw/decrementw for 32 bit operations. so when porting
  // x86_64 code we can leave calls to increment/decrement as is,
  // replace incrementq/decrementq with increment/decrement and
  // replace incrementl/decrementl with incrementw/decrementw.

  // n.b. increment/decrement calls with an Address destination will
  // need to use a scratch register to load the value to be
  // incremented. increment/decrement calls which add or subtract a
  // constant value greater than 2^12 will need to use a 2nd scratch
  // register to hold the constant. so, a register increment/decrement
  // may trash rscratch2 and an address increment/decrement may trash
  // rscratch1 and rscratch2.
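
  // Porting sketch (illustrative): x86_64 code maps as
  //   incrementq(reg) -> increment(reg)
  //   incrementl(reg) -> incrementw(reg)
  // and an Address form such as increment(Address(rthread, off)) goes
  // through rscratch1/rscratch2 as described above.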

  void decrementw(Address dst, int value = 1);
  void decrementw(Register reg, int value = 1);

  void decrement(Register reg, int value = 1);
  void decrement(Address dst, int value = 1);

  void incrementw(Address dst, int value = 1);
  void incrementw(Register reg, int value = 1);

  void increment(Register reg, int value = 1);
  void increment(Address dst, int value = 1);


  // Alignment
  void align(int modulus);

  // Stack frame creation/removal
  void enter()
  {
    stp(rfp, lr, Address(pre(sp, -2 * wordSize)));
    mov(rfp, sp);
  }
  void leave()
  {
    mov(sp, rfp);
    ldp(rfp, lr, Address(post(sp, 2 * wordSize)));
  }

  // Support for getting the JavaThread pointer (i.e., a reference to thread-local information)
  // The pointer will be loaded into the thread register.
  void get_thread(Register thread);


  // Support for VM calls
  //
  // It is imperative that all calls into the VM are handled via the call_VM macros.
  // They make sure that the stack linkage is setup correctly. call_VM's correspond
  // to ENTRY/ENTRY_X entry points while call_VM_leaf's correspond to LEAF entry points.


  void call_VM(Register oop_result,
               address entry_point,
               bool check_exceptions = true);
  void call_VM(Register oop_result,
               address entry_point,
               Register arg_1,
               bool check_exceptions = true);
  void call_VM(Register oop_result,
               address entry_point,
               Register arg_1, Register arg_2,
               bool check_exceptions = true);
  void call_VM(Register oop_result,
               address entry_point,
               Register arg_1, Register arg_2, Register arg_3,
               bool check_exceptions = true);

  // Overloadings with last_Java_sp
  void call_VM(Register oop_result,
               Register last_java_sp,
               address entry_point,
               int number_of_arguments = 0,
               bool check_exceptions = true);
  void call_VM(Register oop_result,
               Register last_java_sp,
               address entry_point,
               Register arg_1,
               bool check_exceptions = true);
  void call_VM(Register oop_result,
               Register last_java_sp,
               address entry_point,
               Register arg_1, Register arg_2,
               bool check_exceptions = true);
  void call_VM(Register oop_result,
               Register last_java_sp,
               address entry_point,
               Register arg_1, Register arg_2, Register arg_3,
               bool check_exceptions = true);
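
  // Typical use (illustrative; the entry point shown is arbitrary):
  //
  //   call_VM(r0,
  //           CAST_FROM_FN_PTR(address, InterpreterRuntime::create_exception),
  //           c_rarg1, c_rarg2);
  //
  // leaves the oop result in r0 and checks for a pending exception on return.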

  void get_vm_result  (Register oop_result, Register thread);
  void get_vm_result_2(Register metadata_result, Register thread);

  // These always tightly bind to MacroAssembler::call_VM_base
  // bypassing the virtual implementation
  void super_call_VM(Register oop_result, Register last_java_sp, address entry_point, int number_of_arguments = 0, bool check_exceptions = true);
  void super_call_VM(Register oop_result, Register last_java_sp, address entry_point, Register arg_1, bool check_exceptions = true);
  void super_call_VM(Register oop_result, Register last_java_sp, address entry_point, Register arg_1, Register arg_2, bool check_exceptions = true);
  void super_call_VM(Register oop_result, Register last_java_sp, address entry_point, Register arg_1, Register arg_2, Register arg_3, bool check_exceptions = true);
  void super_call_VM(Register oop_result, Register last_java_sp, address entry_point, Register arg_1, Register arg_2, Register arg_3, Register arg_4, bool check_exceptions = true);

  void call_VM_leaf(address entry_point,
                    int number_of_arguments = 0);
  void call_VM_leaf(address entry_point,
                    Register arg_1);
  void call_VM_leaf(address entry_point,
                    Register arg_1, Register arg_2);
  void call_VM_leaf(address entry_point,
                    Register arg_1, Register arg_2, Register arg_3);

  // These always tightly bind to MacroAssembler::call_VM_leaf_base
  // bypassing the virtual implementation
  void super_call_VM_leaf(address entry_point);
  void super_call_VM_leaf(address entry_point, Register arg_1);
  void super_call_VM_leaf(address entry_point, Register arg_1, Register arg_2);
  void super_call_VM_leaf(address entry_point, Register arg_1, Register arg_2, Register arg_3);
  void super_call_VM_leaf(address entry_point, Register arg_1, Register arg_2, Register arg_3, Register arg_4);

  // last Java Frame (fills frame anchor)
  void set_last_Java_frame(Register last_java_sp,
                           Register last_java_fp,
                           address  last_java_pc,
                           Register scratch);

  void set_last_Java_frame(Register last_java_sp,
                           Register last_java_fp,
                           Label   &last_java_pc,
                           Register scratch);

  void set_last_Java_frame(Register last_java_sp,
                           Register last_java_fp,
                           Register last_java_pc,
                           Register scratch);

  void reset_last_Java_frame(Register thread);

  // thread in the default location (rthread)
  void reset_last_Java_frame(bool clear_fp);
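
  // Stub sketch (illustrative): bracketing a VM call made from a stub,
  //
  //   Label retaddr;
  //   set_last_Java_frame(sp, rfp, retaddr, rscratch1);
  //   // ... make the call, binding retaddr at the return point ...
  //   reset_last_Java_frame(/*clear_fp*/ true);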

  // Stores
  void store_check(Register obj);              // store check for obj - register is destroyed afterwards
  void store_check(Register obj, Address dst); // same as above, dst is exact store location (reg. is destroyed)

  void resolve_jobject(Register value, Register thread, Register tmp);

#if INCLUDE_ALL_GCS

  void g1_write_barrier_pre(Register obj,
                            Register pre_val,
                            Register thread,
                            Register tmp,
                            bool tosca_live,
                            bool expand_call);

  void g1_write_barrier_post(Register store_addr,
                             Register new_val,
                             Register thread,
                             Register tmp,
                             Register tmp2);

#endif // INCLUDE_ALL_GCS

  // oop manipulations
  void load_klass(Register dst, Register src);
  void store_klass(Register dst, Register src);
  void cmp_klass(Register oop, Register trial_klass, Register tmp);

  void resolve_oop_handle(Register result, Register tmp = r5);
  void load_mirror(Register dst, Register method, Register tmp = r5);

  void load_heap_oop(Register dst, Address src);

  void load_heap_oop_not_null(Register dst, Address src);
  void store_heap_oop(Address dst, Register src);

  // currently unimplemented
  // Used for storing NULL. All other oop constants should be
  // stored using routines that take a jobject.
  void store_heap_oop_null(Address dst);

  void load_prototype_header(Register dst, Register src);

  void store_klass_gap(Register dst, Register src);

  // This dummy is to prevent a call to store_heap_oop from
  // converting a zero (like NULL) into a Register by giving
  // the compiler two choices it can't resolve

  void store_heap_oop(Address dst, void* dummy);

  void encode_heap_oop(Register d, Register s);
  void encode_heap_oop(Register r) { encode_heap_oop(r, r); }
  void decode_heap_oop(Register d, Register s);
  void decode_heap_oop(Register r) { decode_heap_oop(r, r); }
  void encode_heap_oop_not_null(Register r);
  void decode_heap_oop_not_null(Register r);
  void encode_heap_oop_not_null(Register dst, Register src);
  void decode_heap_oop_not_null(Register dst, Register src);

  void set_narrow_oop(Register dst, jobject obj);

  void encode_klass_not_null(Register r);
  void decode_klass_not_null(Register r);
  void encode_klass_not_null(Register dst, Register src);
  void decode_klass_not_null(Register dst, Register src);

  void set_narrow_klass(Register dst, Klass* k);

  // if heap base register is used - reinit it with the correct value
  void reinit_heapbase();

  DEBUG_ONLY(void verify_heapbase(const char* msg);)

  void push_CPU_state(bool save_vectors = false);
  void pop_CPU_state(bool restore_vectors = false);

  // Round up to a power of two
  void round_to(Register reg, int modulus);

  // allocation
  void eden_allocate(
    Register obj,               // result: pointer to object after successful allocation
    Register var_size_in_bytes, // object size in bytes if unknown at compile time; invalid otherwise
    int      con_size_in_bytes, // object size in bytes if known at compile time
    Register t1,                // temp register
    Label&   slow_case          // continuation point if fast allocation fails
  );
  void tlab_allocate(
    Register obj,               // result: pointer to object after successful allocation
    Register var_size_in_bytes, // object size in bytes if unknown at compile time; invalid otherwise
    int      con_size_in_bytes, // object size in bytes if known at compile time
    Register t1,                // temp register
    Register t2,                // temp register
    Label&   slow_case          // continuation point if fast allocation fails
  );
  void zero_memory(Register addr, Register len, Register t1);
  void verify_tlab();
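
  // Fast-path sketch (illustrative; instance_size is a hypothetical
  // compile-time constant):
  //
  //   Label slow;
  //   tlab_allocate(obj, noreg, instance_size, t1, t2, slow);
  //   // ... initialize header and fields ...
  //   bind(slow);  // fall back to a runtime allocation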

  void incr_allocated_bytes(Register thread,
                            Register var_size_in_bytes, int con_size_in_bytes,
                            Register t1 = noreg);

  // interface method calling
  void lookup_interface_method(Register recv_klass,
                               Register intf_klass,
                               RegisterOrConstant itable_index,
                               Register method_result,
                               Register scan_temp,
                               Label& no_such_interface,
                               bool return_method = true);

  // virtual method calling
  // n.b. x86 allows RegisterOrConstant for vtable_index
  void lookup_virtual_method(Register recv_klass,
                             RegisterOrConstant vtable_index,
                             Register method_result);

  // Test sub_klass against super_klass, with fast and slow paths.

  // The fast path produces a tri-state answer: yes / no / maybe-slow.
  // One of the three labels can be NULL, meaning take the fall-through.
  // If super_check_offset is -1, the value is loaded up from super_klass.
  // No registers are killed, except temp_reg.
  void check_klass_subtype_fast_path(Register sub_klass,
                                     Register super_klass,
                                     Register temp_reg,
                                     Label* L_success,
                                     Label* L_failure,
                                     Label* L_slow_path,
                                     RegisterOrConstant super_check_offset = RegisterOrConstant(-1));

  // The rest of the type check; must be wired to a corresponding fast path.
  // It does not repeat the fast path logic, so don't use it standalone.
  // The temp_reg and temp2_reg can be noreg, if no temps are available.
  // Updates the sub's secondary super cache as necessary.
  // If set_cond_codes, condition codes will be Z on success, NZ on failure.
  void check_klass_subtype_slow_path(Register sub_klass,
                                     Register super_klass,
                                     Register temp_reg,
                                     Register temp2_reg,
                                     Label* L_success,
                                     Label* L_failure,
                                     bool set_cond_codes = false);

  // Simplified, combined version, good for typical uses.
  // Falls through on failure.
  void check_klass_subtype(Register sub_klass,
                           Register super_klass,
                           Register temp_reg,
                           Label& L_success);
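
  // Combined use (illustrative):
  //
  //   Label ok;
  //   check_klass_subtype(sub, super, tmp, ok);  // falls through on failure
  //   stop("unexpected subtype");
  //   bind(ok);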

  Address argument_address(RegisterOrConstant arg_slot, int extra_slot_offset = 0);


  // Debugging

  // only if +VerifyOops
  void verify_oop(Register reg, const char* s = "broken oop");
  void verify_oop_addr(Address addr, const char* s = "broken oop addr");

  // TODO: verify method and klass metadata (compare against vptr?)
  void _verify_method_ptr(Register reg, const char* msg, const char* file, int line) {}
  void _verify_klass_ptr(Register reg, const char* msg, const char* file, int line) {}

#define verify_method_ptr(reg) _verify_method_ptr(reg, "broken method " #reg, __FILE__, __LINE__)
#define verify_klass_ptr(reg) _verify_klass_ptr(reg, "broken klass " #reg, __FILE__, __LINE__)

  // only if +VerifyFPU
  void verify_FPU(int stack_depth, const char* s = "illegal FPU state");

  // prints msg, dumps registers and stops execution
  void stop(const char* msg);

  // prints msg and continues
  void warn(const char* msg);

  static void debug64(char* msg, int64_t pc, int64_t regs[]);

  void untested() { stop("untested"); }

  void unimplemented(const char* what = "");

  void should_not_reach_here() { stop("should not reach here"); }

  // Stack overflow checking
  void bang_stack_with_offset(int offset) {
    // stack grows down, caller passes positive offset
    assert(offset > 0, "must bang with negative offset");
    sub(rscratch2, sp, offset);
    str(zr, Address(rscratch2));
  }

  // Writes to stack successive pages until offset reached to check for
  // stack overflow + shadow pages. Also, clobbers tmp
  void bang_stack_size(Register size, Register tmp);

  // Check for reserved stack access in method being exited (for JIT)
  void reserved_stack_check();

  virtual RegisterOrConstant delayed_value_impl(intptr_t* delayed_value_addr,
                                                Register tmp,
                                                int offset);

  // Support for serializing memory accesses between threads
  void serialize_memory(Register thread, Register tmp);

  // Arithmetics

  void addptr(const Address &dst, int32_t src);
  void cmpptr(Register src1, Address src2);

  // Various forms of CAS

  void cmpxchg_obj_header(Register oldv, Register newv, Register obj, Register tmp,
                          Label &succeed, Label *fail);
  void cmpxchgptr(Register oldv, Register newv, Register addr, Register tmp,
                  Label &succeed, Label *fail);

  void cmpxchgw(Register oldv, Register newv, Register addr, Register tmp,
                Label &succeed, Label *fail);

  void atomic_add(Register prev, RegisterOrConstant incr, Register addr);
  void atomic_addw(Register prev, RegisterOrConstant incr, Register addr);
  void atomic_addal(Register prev, RegisterOrConstant incr, Register addr);
  void atomic_addalw(Register prev, RegisterOrConstant incr, Register addr);

  void atomic_xchg(Register prev, Register newv, Register addr);
  void atomic_xchgw(Register prev, Register newv, Register addr);
  void atomic_xchgal(Register prev, Register newv, Register addr);
  void atomic_xchgalw(Register prev, Register newv, Register addr);

  void orptr(Address adr, RegisterOrConstant src) {
    ldr(rscratch1, adr);
    if (src.is_register())
      orr(rscratch1, rscratch1, src.as_register());
    else
      orr(rscratch1, rscratch1, src.as_constant());
    str(rscratch1, adr);
  }

  // A generic CAS; success or failure is in the EQ flag.
  // Clobbers rscratch1
  void cmpxchg(Register addr, Register expected, Register new_val,
               enum operand_size size,
               bool acquire, bool release, bool weak,
               Register result);
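
  // Usage sketch (illustrative): a sequentially consistent 64-bit CAS whose
  // outcome is tested via the flags,
  //
  //   cmpxchg(addr, expected, new_val, Assembler::xword,
  //           /*acquire*/ true, /*release*/ true, /*weak*/ false, rscratch1);
  //   br(Assembler::NE, cas_failed);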

  // Calls

  address trampoline_call(Address entry, CodeBuffer *cbuf = NULL);

  static bool far_branches() {
    return ReservedCodeCacheSize > branch_range;
  }

  // Jumps that can reach anywhere in the code cache.
  // Trashes tmp.
  void far_call(Address entry, CodeBuffer *cbuf = NULL, Register tmp = rscratch1);
  void far_jump(Address entry, CodeBuffer *cbuf = NULL, Register tmp = rscratch1);

  static int far_branch_size() {
    if (far_branches()) {
      return 3 * 4;  // adrp, add, br
    } else {
      return 4;
    }
  }

  // Emit the CompiledIC call idiom
  address ic_call(address entry, jint method_index = 0);

public:

  // Data

  void mov_metadata(Register dst, Metadata* obj);
  Address allocate_metadata_address(Metadata* obj);
  Address constant_oop_address(jobject obj);

  void movoop(Register dst, jobject obj, bool immediate = false);

  // CRC32 code for java.util.zip.CRC32::updateBytes() intrinsic.
  void kernel_crc32(Register crc, Register buf, Register len,
        Register table0, Register table1, Register table2, Register table3,
        Register tmp, Register tmp2, Register tmp3);
  // CRC32 code for java.util.zip.CRC32C::updateBytes() intrinsic.
  void kernel_crc32c(Register crc, Register buf, Register len,
        Register table0, Register table1, Register table2, Register table3,
        Register tmp, Register tmp2, Register tmp3);

  // Stack push and pop individual 64 bit registers
  void push(Register src);
  void pop(Register dst);

  // push all registers onto the stack
  void pusha();
  void popa();

  void repne_scan(Register addr, Register value, Register count,
                  Register scratch);
  void repne_scanw(Register addr, Register value, Register count,
                   Register scratch);

  typedef void (MacroAssembler::* add_sub_imm_insn)(Register Rd, Register Rn, unsigned imm);
  typedef void (MacroAssembler::* add_sub_reg_insn)(Register Rd, Register Rn, Register Rm, enum shift_kind kind, unsigned shift);

  // If a constant does not fit in an immediate field, generate some
  // number of MOV instructions and then perform the operation
  void wrap_add_sub_imm_insn(Register Rd, Register Rn, unsigned imm,
                             add_sub_imm_insn insn1,
                             add_sub_reg_insn insn2);
  // Separate version which sets the flags
  void wrap_adds_subs_imm_insn(Register Rd, Register Rn, unsigned imm,
                               add_sub_imm_insn insn1,
                               add_sub_reg_insn insn2);

#define WRAP(INSN)                                                      \
  void INSN(Register Rd, Register Rn, unsigned imm) {                   \
    wrap_add_sub_imm_insn(Rd, Rn, imm, &Assembler::INSN, &Assembler::INSN); \
  }                                                                     \
                                                                        \
  void INSN(Register Rd, Register Rn, Register Rm,                      \
            enum shift_kind kind, unsigned shift = 0) {                 \
    Assembler::INSN(Rd, Rn, Rm, kind, shift);                           \
  }                                                                     \
                                                                        \
  void INSN(Register Rd, Register Rn, Register Rm) {                    \
    Assembler::INSN(Rd, Rn, Rm);                                        \
  }                                                                     \
                                                                        \
  void INSN(Register Rd, Register Rn, Register Rm,                      \
            ext::operation option, int amount = 0) {                    \
    Assembler::INSN(Rd, Rn, Rm, option, amount);                        \
  }

  WRAP(add) WRAP(addw) WRAP(sub) WRAP(subw)

#undef WRAP
#define WRAP(INSN)                                                      \
  void INSN(Register Rd, Register Rn, unsigned imm) {                   \
    wrap_adds_subs_imm_insn(Rd, Rn, imm, &Assembler::INSN, &Assembler::INSN); \
  }                                                                     \
                                                                        \
  void INSN(Register Rd, Register Rn, Register Rm,                      \
            enum shift_kind kind, unsigned shift = 0) {                 \
    Assembler::INSN(Rd, Rn, Rm, kind, shift);                           \
  }                                                                     \
                                                                        \
  void INSN(Register Rd, Register Rn, Register Rm) {                    \
    Assembler::INSN(Rd, Rn, Rm);                                        \
  }                                                                     \
                                                                        \
  void INSN(Register Rd, Register Rn, Register Rm,                      \
            ext::operation option, int amount = 0) {                    \
    Assembler::INSN(Rd, Rn, Rm, option, amount);                        \
  }

  WRAP(adds) WRAP(addsw) WRAP(subs) WRAP(subsw)
#undef WRAP

  void add(Register Rd, Register Rn, RegisterOrConstant increment);
  void addw(Register Rd, Register Rn, RegisterOrConstant increment);
  void sub(Register Rd, Register Rn, RegisterOrConstant decrement);
  void subw(Register Rd, Register Rn, RegisterOrConstant decrement);

  void adrp(Register reg1, const Address &dest, unsigned long &byte_offset);

  void tableswitch(Register index, jint lowbound, jint highbound,
                   Label &jumptable, Label &jumptable_end, int stride = 1) {
    adr(rscratch1, jumptable);
    subsw(rscratch2, index, lowbound);
    subsw(zr, rscratch2, highbound - lowbound);
    br(Assembler::HS, jumptable_end);
    add(rscratch1, rscratch1, rscratch2,
        ext::sxtw, exact_log2(stride * Assembler::instruction_size));
    br(rscratch1);
  }
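
  // Example (illustrative): dispatching on an index known to lie in 10..13,
  //
  //   Label table, table_end;
  //   tableswitch(r2, 10, 13, table, table_end);
  //   bind(table);
  //   // ... four jump-table slots, one instruction each (stride == 1) ...
  //   bind(table_end);
  //
  // Out-of-range indices branch straight to table_end.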

  // Form an address from base + offset in Rd. Rd may or may not
  // actually be used: you must use the Address that is returned. It
  // is up to you to ensure that the shift provided matches the size
  // of your data.
  Address form_address(Register Rd, Register base, long byte_offset, int shift);

  // Return true iff an address is within the 48-bit AArch64 address
  // space.
  bool is_valid_AArch64_address(address a) {
    return ((uint64_t)a >> 48) == 0;
  }
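
  // For example (illustrative), for an 8-byte access:
  //
  //   Address a = form_address(rscratch1, base, byte_offset, 3);
  //   ldr(dest, a);
  //
  // where the shift (3 == log2(8)) matches the operand size.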

  // Load the base of the cardtable byte map into reg.
  void load_byte_map_base(Register reg);

  // Prolog generator routines to support switch between x86 code and
  // generated ARM code

  // routine to generate an x86 prolog for a stub function which
  // bootstraps into the generated ARM code which directly follows the
  // stub
  //

public:
  // enum used for aarch64--x86 linkage to define return type of x86 function
  enum ret_type { ret_type_void, ret_type_integral, ret_type_float, ret_type_double };

#ifdef BUILTIN_SIM
  void c_stub_prolog(int gp_arg_count, int fp_arg_count, int ret_type, address *prolog_ptr = NULL);
#else
  void c_stub_prolog(int gp_arg_count, int fp_arg_count, int ret_type) { }
#endif

  // special version of call_VM_leaf_base needed for aarch64 simulator
  // where we need to specify both the gp and fp arg counts and the
  // return type so that the linkage routine from aarch64 to x86 and
  // back knows which aarch64 registers to copy to x86 registers and
  // which x86 result register to copy back to an aarch64 register

  void call_VM_leaf_base1(
    address  entry_point,            // the entry point
    int      number_of_gp_arguments, // the number of gp reg arguments to pass
    int      number_of_fp_arguments, // the number of fp reg arguments to pass
    ret_type type,                   // the return type for the call
    Label*   retaddr = NULL
  );

  void ldr_constant(Register dest, const Address &const_addr) {
    if (NearCpool) {
      ldr(dest, const_addr);
    } else {
      unsigned long offset;
      adrp(dest, InternalAddress(const_addr.target()), offset);
      ldr(dest, Address(dest, offset));
    }
  }

  address read_polling_page(Register r, address page, relocInfo::relocType rtype);
  address read_polling_page(Register r, relocInfo::relocType rtype);
  void get_polling_page(Register dest, address page, relocInfo::relocType rtype);

  // CRC32 code for java.util.zip.CRC32::updateBytes() intrinsic.
  void update_byte_crc32(Register crc, Register val, Register table);
  void update_word_crc32(Register crc, Register v, Register tmp,
        Register table0, Register table1, Register table2, Register table3,
        bool upper = false);

  void string_compare(Register str1, Register str2,
                      Register cnt1, Register cnt2, Register result,
                      Register tmp1,
                      FloatRegister vtmp, FloatRegister vtmpZ, int ae);

  void has_negatives(Register ary1, Register len, Register result);

  void arrays_equals(Register a1, Register a2, Register result, Register cnt1,
                     Register tmp1, Register tmp2, Register tmp3, int elem_size);

  void string_equals(Register a1, Register a2, Register result, Register cnt1,
                     int elem_size);

  void fill_words(Register base, Register cnt, Register value);
  void zero_words(Register base, u_int64_t cnt);
  void zero_words(Register ptr, Register cnt);
  void zero_dcache_blocks(Register base, Register cnt);

  static const int zero_words_block_size;

  void byte_array_inflate(Register src, Register dst, Register len,
                          FloatRegister vtmp1, FloatRegister vtmp2,
                          FloatRegister vtmp3, Register tmp4);

  void char_array_compress(Register src, Register dst, Register len,
                           FloatRegister tmp1Reg, FloatRegister tmp2Reg,
                           FloatRegister tmp3Reg, FloatRegister tmp4Reg,
                           Register result);

  void encode_iso_array(Register src, Register dst,
                        Register len, Register result,
                        FloatRegister Vtmp1, FloatRegister Vtmp2,
                        FloatRegister Vtmp3, FloatRegister Vtmp4);
  void string_indexof(Register str1, Register str2,
                      Register cnt1, Register cnt2,
                      Register tmp1, Register tmp2,
                      Register tmp3, Register tmp4,
                      int int_cnt1, Register result, int ae);
  void string_indexof_char(Register str1, Register cnt1,
                           Register ch, Register result,
                           Register tmp1, Register tmp2, Register tmp3);
private:
  void add2_with_carry(Register final_dest_hi, Register dest_hi, Register dest_lo,
                       Register src1, Register src2);
  void add2_with_carry(Register dest_hi, Register dest_lo, Register src1, Register src2) {
    add2_with_carry(dest_hi, dest_hi, dest_lo, src1, src2);
  }
  void multiply_64_x_64_loop(Register x, Register xstart, Register x_xstart,
                             Register y, Register y_idx, Register z,
                             Register carry, Register product,
                             Register idx, Register kdx);
  void multiply_128_x_128_loop(Register y, Register z,
                               Register carry, Register carry2,
                               Register idx, Register jdx,
                               Register yz_idx1, Register yz_idx2,
                               Register tmp, Register tmp3, Register tmp4,
                               Register tmp7, Register product_hi);
  void kernel_crc32_using_crc32(Register crc, Register buf,
        Register len, Register tmp0, Register tmp1, Register tmp2,
        Register tmp3);
  void kernel_crc32c_using_crc32c(Register crc, Register buf,
        Register len, Register tmp0, Register tmp1, Register tmp2,
        Register tmp3);
public:
  void multiply_to_len(Register x, Register xlen, Register y, Register ylen, Register z,
                       Register zlen, Register tmp1, Register tmp2, Register tmp3,
                       Register tmp4, Register tmp5, Register tmp6, Register tmp7);
  void mul_add(Register out, Register in, Register offs, Register len, Register k);
  // ISB may be needed because of a safepoint
  void maybe_isb() { isb(); }

private:
  // Return the effective address r + (r1 << ext) + offset.
  // Uses rscratch2.
  Address offsetted_address(Register r, Register r1, Address::extend ext,
                            int offset, int size);

private:
  // Returns an address on the stack which is reachable with a ldr/str of size.
  // Uses rscratch2 if the address is not directly reachable.
  Address spill_address(int size, int offset, Register tmp=rscratch2);

  bool merge_alignment_check(Register base, size_t size, long cur_offset, long prev_offset) const;

  // Check whether two loads/stores can be merged into ldp/stp.
  bool ldst_can_merge(Register rx, const Address &adr, size_t cur_size_in_bytes, bool is_store) const;

  // Merge current load/store with previous load/store into ldp/stp.
  void merge_ldst(Register rx, const Address &adr, size_t cur_size_in_bytes, bool is_store);

  // Try to merge two loads/stores into ldp/stp. If success, returns true else false.
  bool try_merge_ldst(Register rt, const Address &adr, size_t cur_size_in_bytes, bool is_store);

public:
  void spill(Register Rx, bool is64, int offset) {
    if (is64) {
      str(Rx, spill_address(8, offset));
    } else {
      strw(Rx, spill_address(4, offset));
    }
  }
  void spill(FloatRegister Vx, SIMD_RegVariant T, int offset) {
    str(Vx, T, spill_address(1 << (int)T, offset));
  }
  void unspill(Register Rx, bool is64, int offset) {
    if (is64) {
      ldr(Rx, spill_address(8, offset));
    } else {
      ldrw(Rx, spill_address(4, offset));
    }
  }
  void unspill(FloatRegister Vx, SIMD_RegVariant T, int offset) {
    ldr(Vx, T, spill_address(1 << (int)T, offset));
  }
  void spill_copy128(int src_offset, int dst_offset,
                     Register tmp1=rscratch1, Register tmp2=rscratch2) {
    if (src_offset < 512 && (src_offset & 7) == 0 &&
        dst_offset < 512 && (dst_offset & 7) == 0) {
      ldp(tmp1, tmp2, Address(sp, src_offset));
      stp(tmp1, tmp2, Address(sp, dst_offset));
    } else {
      unspill(tmp1, true, src_offset);
      spill(tmp1, true, dst_offset);
      unspill(tmp1, true, src_offset+8);
      spill(tmp1, true, dst_offset+8);
    }
  }
};

#ifdef ASSERT
inline bool AbstractAssembler::pd_check_instruction_mark() { return false; }
#endif

/**
 * class SkipIfEqual:
 *
 * Instantiating this class will result in assembly code being output that will
 * jump around any code emitted between the creation of the instance and its
 * automatic destruction at the end of a scope block, depending on the value of
 * the flag passed to the constructor, which will be checked at run-time.
 */
class SkipIfEqual {
 private:
  MacroAssembler* _masm;
  Label _label;

 public:
  SkipIfEqual(MacroAssembler*, const bool* flag_addr, bool value);
  ~SkipIfEqual();
};

struct tableswitch {
  Register _reg;
  int _insn_index; jint _first_key; jint _last_key;
  Label _after;
  Label _branches;
};

#endif // CPU_AARCH64_VM_MACROASSEMBLER_AARCH64_HPP