/*
 * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2014, 2015, Red Hat Inc. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef CPU_AARCH64_VM_MACROASSEMBLER_AARCH64_HPP
#define CPU_AARCH64_VM_MACROASSEMBLER_AARCH64_HPP

#include "asm/assembler.hpp"

// MacroAssembler extends Assembler by frequently used macros.
//
// Instructions for which a 'better' code sequence exists depending
// on arguments should also go in here.

class MacroAssembler: public Assembler {
  friend class LIR_Assembler;

  using Assembler::mov;
  using Assembler::movi;

 protected:

  // Support for VM calls
  //
  // This is the base routine called by the different versions of call_VM_leaf. The interpreter
  // may customize this version by overriding it for its purposes (e.g., to save/restore
  // additional registers when doing a VM call).
#ifdef CC_INTERP
  // The C++ interpreter never wants to use the interp_masm version of call_VM.
#define VIRTUAL
#else
#define VIRTUAL virtual
#endif

  VIRTUAL void call_VM_leaf_base(
    address entry_point,          // the entry point
    int     number_of_arguments,  // the number of arguments to pop after the call
    Label*  retaddr = NULL
  );

  VIRTUAL void call_VM_leaf_base(
    address entry_point,          // the entry point
    int     number_of_arguments,  // the number of arguments to pop after the call
    Label&  retaddr) {
    call_VM_leaf_base(entry_point, number_of_arguments, &retaddr);
  }

  // This is the base routine called by the different versions of call_VM. The interpreter
  // may customize this version by overriding it for its purposes (e.g., to save/restore
  // additional registers when doing a VM call).
  //
  // If no java_thread register is specified (noreg) then rthread will be used instead. call_VM_base
  // returns the register which contains the thread upon return. If a thread register has been
  // specified, the return value will correspond to that register. If no last_java_sp is specified
  // (noreg) then rsp will be used instead.
  VIRTUAL void call_VM_base(      // returns the register containing the thread upon return
    Register oop_result,          // where an oop-result ends up if any; use noreg otherwise
    Register java_thread,         // the thread if computed before; use noreg otherwise
    Register last_java_sp,        // to set up last_Java_frame in stubs; use noreg otherwise
    address  entry_point,         // the entry point
    int      number_of_arguments, // the number of arguments (w/o thread) to pop after the call
    bool     check_exceptions     // whether to check for pending exceptions after return
  );

  // These routines should emit JVMTI PopFrame and ForceEarlyReturn handling code.
  // The implementation is only non-empty for the InterpreterMacroAssembler,
  // as only the interpreter handles PopFrame and ForceEarlyReturn requests.
  virtual void check_and_handle_popframe(Register java_thread);
  virtual void check_and_handle_earlyret(Register java_thread);

  void call_VM_helper(Register oop_result, address entry_point, int number_of_arguments, bool check_exceptions = true);

  // True if an XOR of the narrow klass base can be used to encode/decode
  // compressed class pointers; set in the constructor below from the
  // Metaspace compressed-class-space layout.
  uint64_t use_XOR_for_compressed_class_base;

 public:
  MacroAssembler(CodeBuffer* code) : Assembler(code) {
    use_XOR_for_compressed_class_base
      = (operand_valid_for_logical_immediate(false /*is32*/,
                                             (uint64_t)Universe::narrow_klass_base())
         && ((uint64_t)Universe::narrow_klass_base()
             > (1u << log2_intptr(CompressedClassSpaceSize))));
  }

  // Biased locking support
  // lock_reg and obj_reg must be loaded up with the appropriate values.
  // swap_reg is killed.
  // tmp_reg is optional. If it is supplied (i.e., != noreg) it will
  // be killed; if not supplied, push/pop will be used internally to
  // allocate a temporary (inefficient, avoid if possible).
  // The optional slow case is for implementations (interpreter and C1) which branch to
  // the slow case directly. Leaves condition codes set for C2's Fast_Lock node.
  // Returns the offset of the first potentially-faulting instruction for null
  // check info (currently consumed only by C1). If
  // swap_reg_contains_mark is true then it returns -1 as it is assumed
  // the calling code has already passed any potential faults.
  int biased_locking_enter(Register lock_reg, Register obj_reg,
                           Register swap_reg, Register tmp_reg,
                           bool swap_reg_contains_mark,
                           Label& done, Label* slow_case = NULL,
                           BiasedLockingCounters* counters = NULL);
  void biased_locking_exit(Register obj_reg, Register temp_reg, Label& done);


  // Helper functions for statistics gathering.
  // Unconditional atomic increment.
  void atomic_incw(Register counter_addr, Register tmp);
  void atomic_incw(Address counter_addr, Register tmp1, Register tmp2) {
    lea(tmp1, counter_addr);
    atomic_incw(tmp1, tmp2);
  }
  // Load Effective Address
  void lea(Register r, const Address &a) {
    InstructionMark im(this);
    code_section()->relocate(inst_mark(), a.rspec());
    a.lea(this, r);
  }

  void addmw(Address a, Register incr, Register scratch) {
    ldrw(scratch, a);
    addw(scratch, scratch, incr);
    strw(scratch, a);
  }

  // Add constant to memory word
  void addmw(Address a, int imm, Register scratch) {
    ldrw(scratch, a);
    if (imm > 0)
      addw(scratch, scratch, (unsigned)imm);
    else
      subw(scratch, scratch, (unsigned)-imm);
    strw(scratch, a);
  }
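  // Illustrative use of the memory-word helpers above (editor's sketch;
  // 'counter_addr' names an arbitrary counter location, not part of this
  // header):
  //
  //   __ atomic_incw(counter_addr, rscratch1, rscratch2); // contended counter
  //   __ addmw(counter_addr, 16, rscratch1);              // racy add of 16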
  // Frame creation and destruction shared between JITs.
  void build_frame(int framesize);
  void remove_frame(int framesize);

  virtual void _call_Unimplemented(address call_site) {
    mov(rscratch2, call_site);
    haltsim();
  }

#define call_Unimplemented() _call_Unimplemented((address)__PRETTY_FUNCTION__)

  virtual void notify(int type);

  // aliases defined in the AARCH64 spec

  template<class T>
  inline void cmpw(Register Rd, T imm) { subsw(zr, Rd, imm); }
  inline void cmp(Register Rd, unsigned imm) { subs(zr, Rd, imm); }

  inline void cmnw(Register Rd, unsigned imm) { addsw(zr, Rd, imm); }
  inline void cmn(Register Rd, unsigned imm) { adds(zr, Rd, imm); }

  void cset(Register Rd, Assembler::Condition cond) {
    csinc(Rd, zr, zr, ~cond);
  }
  void csetw(Register Rd, Assembler::Condition cond) {
    csincw(Rd, zr, zr, ~cond);
  }

  void cneg(Register Rd, Register Rn, Assembler::Condition cond) {
    csneg(Rd, Rn, Rn, ~cond);
  }
  void cnegw(Register Rd, Register Rn, Assembler::Condition cond) {
    csnegw(Rd, Rn, Rn, ~cond);
  }

  inline void movw(Register Rd, Register Rn) {
    if (Rd == sp || Rn == sp) {
      addw(Rd, Rn, 0U);
    } else {
      orrw(Rd, zr, Rn);
    }
  }
  inline void mov(Register Rd, Register Rn) {
    assert(Rd != r31_sp && Rn != r31_sp, "should be");
    if (Rd == Rn) {
    } else if (Rd == sp || Rn == sp) {
      add(Rd, Rn, 0U);
    } else {
      orr(Rd, zr, Rn);
    }
  }

  inline void moviw(Register Rd, unsigned imm) { orrw(Rd, zr, imm); }
  inline void movi(Register Rd, unsigned imm) { orr(Rd, zr, imm); }

  inline void tstw(Register Rd, unsigned imm) { andsw(zr, Rd, imm); }
  inline void tst(Register Rd, unsigned imm) { ands(zr, Rd, imm); }

  inline void bfiw(Register Rd, Register Rn, unsigned lsb, unsigned width) {
    bfmw(Rd, Rn, ((32 - lsb) & 31), (width - 1));
  }
  inline void bfi(Register Rd, Register Rn, unsigned lsb, unsigned width) {
    bfm(Rd, Rn, ((64 - lsb) & 63), (width - 1));
  }

  inline void bfxilw(Register Rd, Register Rn, unsigned lsb, unsigned width) {
    bfmw(Rd, Rn, lsb, (lsb + width - 1));
  }
  inline void bfxil(Register Rd, Register Rn, unsigned lsb, unsigned width) {
    bfm(Rd, Rn, lsb, (lsb + width - 1));
  }

  inline void sbfizw(Register Rd, Register Rn, unsigned lsb, unsigned width) {
    sbfmw(Rd, Rn, ((32 - lsb) & 31), (width - 1));
  }
  inline void sbfiz(Register Rd, Register Rn, unsigned lsb, unsigned width) {
    sbfm(Rd, Rn, ((64 - lsb) & 63), (width - 1));
  }

  inline void sbfxw(Register Rd, Register Rn, unsigned lsb, unsigned width) {
    sbfmw(Rd, Rn, lsb, (lsb + width - 1));
  }
  inline void sbfx(Register Rd, Register Rn, unsigned lsb, unsigned width) {
    sbfm(Rd, Rn, lsb, (lsb + width - 1));
  }

  inline void ubfizw(Register Rd, Register Rn, unsigned lsb, unsigned width) {
    ubfmw(Rd, Rn, ((32 - lsb) & 31), (width - 1));
  }
  inline void ubfiz(Register Rd, Register Rn, unsigned lsb, unsigned width) {
    ubfm(Rd, Rn, ((64 - lsb) & 63), (width - 1));
  }

  inline void ubfxw(Register Rd, Register Rn, unsigned lsb, unsigned width) {
    ubfmw(Rd, Rn, lsb, (lsb + width - 1));
  }
  inline void ubfx(Register Rd, Register Rn, unsigned lsb, unsigned width) {
    ubfm(Rd, Rn, lsb, (lsb + width - 1));
  }
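  // Example of the bitfield aliases above (editor's illustration):
  // extract bits [15:8] of r1 into r0, then insert them at bit 24 of r2.
  //
  //   __ ubfx(r0, r1, 8, 8);   // r0 = (r1 >> 8) & 0xff
  //   __ bfi(r2, r0, 24, 8);   // r2[31:24] = r0[7:0], other bits unchanged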
  inline void asrw(Register Rd, Register Rn, unsigned imm) {
    sbfmw(Rd, Rn, imm, 31);
  }

  inline void asr(Register Rd, Register Rn, unsigned imm) {
    sbfm(Rd, Rn, imm, 63);
  }

  inline void lslw(Register Rd, Register Rn, unsigned imm) {
    ubfmw(Rd, Rn, ((32 - imm) & 31), (31 - imm));
  }

  inline void lsl(Register Rd, Register Rn, unsigned imm) {
    ubfm(Rd, Rn, ((64 - imm) & 63), (63 - imm));
  }

  inline void lsrw(Register Rd, Register Rn, unsigned imm) {
    ubfmw(Rd, Rn, imm, 31);
  }

  inline void lsr(Register Rd, Register Rn, unsigned imm) {
    ubfm(Rd, Rn, imm, 63);
  }

  inline void rorw(Register Rd, Register Rn, unsigned imm) {
    extrw(Rd, Rn, Rn, imm);
  }

  inline void ror(Register Rd, Register Rn, unsigned imm) {
    extr(Rd, Rn, Rn, imm);
  }

  inline void sxtbw(Register Rd, Register Rn) {
    sbfmw(Rd, Rn, 0, 7);
  }
  inline void sxthw(Register Rd, Register Rn) {
    sbfmw(Rd, Rn, 0, 15);
  }
  inline void sxtb(Register Rd, Register Rn) {
    sbfm(Rd, Rn, 0, 7);
  }
  inline void sxth(Register Rd, Register Rn) {
    sbfm(Rd, Rn, 0, 15);
  }
  inline void sxtw(Register Rd, Register Rn) {
    sbfm(Rd, Rn, 0, 31);
  }

  inline void uxtbw(Register Rd, Register Rn) {
    ubfmw(Rd, Rn, 0, 7);
  }
  inline void uxthw(Register Rd, Register Rn) {
    ubfmw(Rd, Rn, 0, 15);
  }
  inline void uxtb(Register Rd, Register Rn) {
    ubfm(Rd, Rn, 0, 7);
  }
  inline void uxth(Register Rd, Register Rn) {
    ubfm(Rd, Rn, 0, 15);
  }
  inline void uxtw(Register Rd, Register Rn) {
    ubfm(Rd, Rn, 0, 31);
  }

  inline void cmnw(Register Rn, Register Rm) {
    addsw(zr, Rn, Rm);
  }
  inline void cmn(Register Rn, Register Rm) {
    adds(zr, Rn, Rm);
  }

  inline void cmpw(Register Rn, Register Rm) {
    subsw(zr, Rn, Rm);
  }
  inline void cmp(Register Rn, Register Rm) {
    subs(zr, Rn, Rm);
  }

  inline void negw(Register Rd, Register Rn) {
    subw(Rd, zr, Rn);
  }

  inline void neg(Register Rd, Register Rn) {
    sub(Rd, zr, Rn);
  }

  inline void negsw(Register Rd, Register Rn) {
    subsw(Rd, zr, Rn);
  }

  inline void negs(Register Rd, Register Rn) {
    subs(Rd, zr, Rn);
  }

  inline void cmnw(Register Rn, Register Rm, enum shift_kind kind, unsigned shift = 0) {
    addsw(zr, Rn, Rm, kind, shift);
  }
  inline void cmn(Register Rn, Register Rm, enum shift_kind kind, unsigned shift = 0) {
    adds(zr, Rn, Rm, kind, shift);
  }

  inline void cmpw(Register Rn, Register Rm, enum shift_kind kind, unsigned shift = 0) {
    subsw(zr, Rn, Rm, kind, shift);
  }
  inline void cmp(Register Rn, Register Rm, enum shift_kind kind, unsigned shift = 0) {
    subs(zr, Rn, Rm, kind, shift);
  }

  inline void negw(Register Rd, Register Rn, enum shift_kind kind, unsigned shift = 0) {
    subw(Rd, zr, Rn, kind, shift);
  }

  inline void neg(Register Rd, Register Rn, enum shift_kind kind, unsigned shift = 0) {
    sub(Rd, zr, Rn, kind, shift);
  }

  inline void negsw(Register Rd, Register Rn, enum shift_kind kind, unsigned shift = 0) {
    subsw(Rd, zr, Rn, kind, shift);
  }

  inline void negs(Register Rd, Register Rn, enum shift_kind kind, unsigned shift = 0) {
    subs(Rd, zr, Rn, kind, shift);
  }

  inline void mnegw(Register Rd, Register Rn, Register Rm) {
    msubw(Rd, Rn, Rm, zr);
  }
  inline void mneg(Register Rd, Register Rn, Register Rm) {
    msub(Rd, Rn, Rm, zr);
  }

  inline void mulw(Register Rd, Register Rn, Register Rm) {
    maddw(Rd, Rn, Rm, zr);
  }
  inline void mul(Register Rd, Register Rn, Register Rm) {
    madd(Rd, Rn, Rm, zr);
  }
  inline void smnegl(Register Rd, Register Rn, Register Rm) {
    smsubl(Rd, Rn, Rm, zr);
  }
  inline void smull(Register Rd, Register Rn, Register Rm) {
    smaddl(Rd, Rn, Rm, zr);
  }

  inline void umnegl(Register Rd, Register Rn, Register Rm) {
    umsubl(Rd, Rn, Rm, zr);
  }
  inline void umull(Register Rd, Register Rn, Register Rm) {
    umaddl(Rd, Rn, Rm, zr);
  }

#define WRAP(INSN)                                                      \
  void INSN(Register Rd, Register Rn, Register Rm, Register Ra) {      \
    if ((VM_Version::cpu_cpuFeatures() & VM_Version::CPU_A53MAC) && Ra != zr) \
      nop();                                                            \
    Assembler::INSN(Rd, Rn, Rm, Ra);                                   \
  }

  WRAP(madd) WRAP(msub) WRAP(maddw) WRAP(msubw)
  WRAP(smaddl) WRAP(smsubl) WRAP(umaddl) WRAP(umsubl)
#undef WRAP


  // macro assembly operations needed for aarch64

  // first two private routines for loading 32 bit or 64 bit constants
 private:

  void mov_immediate64(Register dst, u_int64_t imm64);
  void mov_immediate32(Register dst, u_int32_t imm32);

  int push(unsigned int bitset, Register stack);
  int pop(unsigned int bitset, Register stack);

  void mov(Register dst, Address a);

 public:
  void push(RegSet regs, Register stack) { if (regs.bits()) push(regs.bits(), stack); }
  void pop(RegSet regs, Register stack) { if (regs.bits()) pop(regs.bits(), stack); }

  // now mov instructions for loading absolute addresses and 32 or
  // 64 bit integers

  inline void mov(Register dst, address addr)
  {
    mov_immediate64(dst, (u_int64_t)addr);
  }

  inline void mov(Register dst, u_int64_t imm64)
  {
    mov_immediate64(dst, imm64);
  }

  inline void movw(Register dst, u_int32_t imm32)
  {
    mov_immediate32(dst, imm32);
  }

  inline void mov(Register dst, long l)
  {
    mov(dst, (u_int64_t)l);
  }

  inline void mov(Register dst, int i)
  {
    mov(dst, (long)i);
  }

  void movptr(Register r, uintptr_t imm64);

  // Macro to move a replicated immediate to a vector register.
  // Where imm32 == hex abcdefgh, Vd will get the following values
  // for the different arrangements in T:
  //   T8B:  Vd = ghghghghghghghgh
  //   T16B: Vd = ghghghghghghghghghghghghghghghgh
  //   T4H:  Vd = efghefghefghefgh
  //   T8H:  Vd = efghefghefghefghefghefghefghefgh
  //   T2S:  Vd = abcdefghabcdefgh
  //   T4S:  Vd = abcdefghabcdefghabcdefghabcdefgh
  //   T1D/T2D: invalid
  void mov(FloatRegister Vd, SIMD_Arrangement T, u_int32_t imm32);

  // macro instructions for accessing and updating the floating point
  // status register
  //
  // FPSR : op1 == 011
  //        CRn == 0100
  //        CRm == 0100
  //        op2 == 001

  inline void get_fpsr(Register reg)
  {
    mrs(0b11, 0b0100, 0b0100, 0b001, reg);
  }

  inline void set_fpsr(Register reg)
  {
    msr(0b011, 0b0100, 0b0100, 0b001, reg);
  }

  inline void clear_fpsr()
  {
    msr(0b011, 0b0100, 0b0100, 0b001, zr);
  }

  // idiv variants which deal with MINLONG as dividend and -1 as divisor
  int corrected_idivl(Register result, Register ra, Register rb,
                      bool want_remainder, Register tmp = rscratch1);
  int corrected_idivq(Register result, Register ra, Register rb,
                      bool want_remainder, Register tmp = rscratch1);
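  // Editor's note on the correction: Java semantics require
  //   min_jint  / -1 == min_jint,  min_jint  % -1 == 0
  // (and likewise for min_jlong), with no trap on overflow. AArch64
  // sdiv already wraps on this case rather than trapping, so a plain
  // sdiv (plus msub for the remainder) suffices; the int return value
  // is the code offset of the divide, kept for implicit-exception
  // bookkeeping.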
  // Support for NULL-checks
  //
  // Generates code that causes a NULL OS exception if the content of reg is NULL.
  // If the accessed location is M[reg + offset] and the offset is known, provide the
  // offset. No explicit code generation is needed if the offset is within a certain
  // range (0 <= offset <= page_size).

  virtual void null_check(Register reg, int offset = -1);
  static bool needs_explicit_null_check(intptr_t offset);

  static address target_addr_for_insn(address insn_addr, unsigned insn);
  static address target_addr_for_insn(address insn_addr) {
    unsigned insn = *(unsigned*)insn_addr;
    return target_addr_for_insn(insn_addr, insn);
  }

  // Required platform-specific helpers for Label::patch_instructions.
  // They _shadow_ the declarations in AbstractAssembler, which are undefined.
  static int pd_patch_instruction_size(address branch, address target);
  static void pd_patch_instruction(address branch, address target) {
    pd_patch_instruction_size(branch, target);
  }
  static address pd_call_destination(address branch) {
    return target_addr_for_insn(branch);
  }
#ifndef PRODUCT
  static void pd_print_patched_instruction(address branch);
#endif

  static int patch_oop(address insn_addr, address o);

  void emit_trampoline_stub(int insts_call_instruction_offset, address target);

  // The following four methods return the offset of the appropriate move instruction.

  // Support for fast byte/short loading with zero extension (depending on particular CPU)
  int load_unsigned_byte(Register dst, Address src);
  int load_unsigned_short(Register dst, Address src);

  // Support for fast byte/short loading with sign extension (depending on particular CPU)
  int load_signed_byte(Register dst, Address src);
  int load_signed_short(Register dst, Address src);

  int load_signed_byte32(Register dst, Address src);
  int load_signed_short32(Register dst, Address src);

  // Support for sign-extension (hi:lo = extend_sign(lo))
  void extend_sign(Register hi, Register lo);

  // Load and store values by size and signed-ness
  void load_sized_value(Register dst, Address src, size_t size_in_bytes, bool is_signed, Register dst2 = noreg);
  void store_sized_value(Address dst, Register src, size_t size_in_bytes, Register src2 = noreg);

  // Support for inc/dec with optimal instruction selection depending on value.

  // x86_64 aliases an unqualified register/address increment and
  // decrement to call incrementq and decrementq, but also supports
  // explicitly sized calls to incrementq/decrementq or
  // incrementl/decrementl.

  // For aarch64 the proper convention is to use
  // increment/decrement for 64 bit operations and
  // incrementw/decrementw for 32 bit operations. So when porting
  // x86_64 code we can leave calls to increment/decrement as is,
  // replace incrementq/decrementq with increment/decrement and
  // replace incrementl/decrementl with incrementw/decrementw.

  // n.b. increment/decrement calls with an Address destination will
  // need to use a scratch register to load the value to be
  // incremented. increment/decrement calls which add or subtract a
  // constant value greater than 2^12 will need a second scratch
  // register to hold the constant. So a register increment/decrement
  // may trash rscratch2, and an address increment/decrement may trash
  // rscratch1 and rscratch2.
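  // Porting table implied by the comment above (editor's illustration):
  //
  //   x86_64 call                 aarch64 replacement
  //   increment/decrement         unchanged
  //   incrementq/decrementq   ->  increment/decrement    (64 bit)
  //   incrementl/decrementl   ->  incrementw/decrementw  (32 bit)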
  void decrementw(Address dst, int value = 1);
  void decrementw(Register reg, int value = 1);

  void decrement(Register reg, int value = 1);
  void decrement(Address dst, int value = 1);

  void incrementw(Address dst, int value = 1);
  void incrementw(Register reg, int value = 1);

  void increment(Register reg, int value = 1);
  void increment(Address dst, int value = 1);


  // Alignment
  void align(int modulus);

  // Stack frame creation/removal
  void enter()
  {
    stp(rfp, lr, Address(pre(sp, -2 * wordSize)));
    mov(rfp, sp);
  }
  void leave()
  {
    mov(sp, rfp);
    ldp(rfp, lr, Address(post(sp, 2 * wordSize)));
  }

  // Support for getting the JavaThread pointer (i.e., a reference to thread-local information).
  // The pointer will be loaded into the thread register.
  void get_thread(Register thread);


  // Support for VM calls
  //
  // It is imperative that all calls into the VM are handled via the call_VM macros.
  // They make sure that the stack linkage is set up correctly. call_VM's correspond
  // to ENTRY/ENTRY_X entry points while call_VM_leaf's correspond to LEAF entry points.


  void call_VM(Register oop_result,
               address entry_point,
               bool check_exceptions = true);
  void call_VM(Register oop_result,
               address entry_point,
               Register arg_1,
               bool check_exceptions = true);
  void call_VM(Register oop_result,
               address entry_point,
               Register arg_1, Register arg_2,
               bool check_exceptions = true);
  void call_VM(Register oop_result,
               address entry_point,
               Register arg_1, Register arg_2, Register arg_3,
               bool check_exceptions = true);

  // Overloadings with last_Java_sp
  void call_VM(Register oop_result,
               Register last_java_sp,
               address entry_point,
               int number_of_arguments = 0,
               bool check_exceptions = true);
  void call_VM(Register oop_result,
               Register last_java_sp,
               address entry_point,
               Register arg_1,
               bool check_exceptions = true);
  void call_VM(Register oop_result,
               Register last_java_sp,
               address entry_point,
               Register arg_1, Register arg_2,
               bool check_exceptions = true);
  void call_VM(Register oop_result,
               Register last_java_sp,
               address entry_point,
               Register arg_1, Register arg_2, Register arg_3,
               bool check_exceptions = true);

  void get_vm_result  (Register oop_result, Register thread);
  void get_vm_result_2(Register metadata_result, Register thread);

  // These always tightly bind to MacroAssembler::call_VM_base,
  // bypassing the virtual implementation.
  void super_call_VM(Register oop_result, Register last_java_sp, address entry_point, int number_of_arguments = 0, bool check_exceptions = true);
  void super_call_VM(Register oop_result, Register last_java_sp, address entry_point, Register arg_1, bool check_exceptions = true);
  void super_call_VM(Register oop_result, Register last_java_sp, address entry_point, Register arg_1, Register arg_2, bool check_exceptions = true);
  void super_call_VM(Register oop_result, Register last_java_sp, address entry_point, Register arg_1, Register arg_2, Register arg_3, bool check_exceptions = true);
  void super_call_VM(Register oop_result, Register last_java_sp, address entry_point, Register arg_1, Register arg_2, Register arg_3, Register arg_4, bool check_exceptions = true);
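  // Typical call_VM use (editor's sketch; the particular runtime entry
  // point is only an example of the CAST_FROM_FN_PTR idiom, not mandated
  // by this header):
  //
  //   __ call_VM(r0 /*oop result*/,
  //              CAST_FROM_FN_PTR(address, InterpreterRuntime::create_exception),
  //              c_rarg1, c_rarg2);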
  void call_VM_leaf(address entry_point,
                    int number_of_arguments = 0);
  void call_VM_leaf(address entry_point,
                    Register arg_1);
  void call_VM_leaf(address entry_point,
                    Register arg_1, Register arg_2);
  void call_VM_leaf(address entry_point,
                    Register arg_1, Register arg_2, Register arg_3);

  // These always tightly bind to MacroAssembler::call_VM_leaf_base,
  // bypassing the virtual implementation.
  void super_call_VM_leaf(address entry_point);
  void super_call_VM_leaf(address entry_point, Register arg_1);
  void super_call_VM_leaf(address entry_point, Register arg_1, Register arg_2);
  void super_call_VM_leaf(address entry_point, Register arg_1, Register arg_2, Register arg_3);
  void super_call_VM_leaf(address entry_point, Register arg_1, Register arg_2, Register arg_3, Register arg_4);

  // last Java Frame (fills frame anchor)
  void set_last_Java_frame(Register last_java_sp,
                           Register last_java_fp,
                           address last_java_pc,
                           Register scratch);

  void set_last_Java_frame(Register last_java_sp,
                           Register last_java_fp,
                           Label &last_java_pc,
                           Register scratch);

  void set_last_Java_frame(Register last_java_sp,
                           Register last_java_fp,
                           Register last_java_pc,
                           Register scratch);

  void reset_last_Java_frame(Register thread, bool clearfp, bool clear_pc);

  // thread in the default location (rthread)
  void reset_last_Java_frame(bool clear_fp, bool clear_pc);

  // Stores
  void store_check(Register obj);               // store check for obj - register is destroyed afterwards
  void store_check(Register obj, Address dst);  // same as above, dst is exact store location (reg. is destroyed)

#if INCLUDE_ALL_GCS

  void g1_write_barrier_pre(Register obj,
                            Register pre_val,
                            Register thread,
                            Register tmp,
                            bool tosca_live,
                            bool expand_call);

  void g1_write_barrier_post(Register store_addr,
                             Register new_val,
                             Register thread,
                             Register tmp,
                             Register tmp2);

#endif // INCLUDE_ALL_GCS

  // split store_check(Register obj) to enhance instruction interleaving
  void store_check_part_1(Register obj);
  void store_check_part_2(Register obj);

  // oop manipulations
  void load_klass(Register dst, Register src);
  void store_klass(Register dst, Register src);
  void cmp_klass(Register oop, Register trial_klass, Register tmp);

  void load_heap_oop(Register dst, Address src);

  void load_heap_oop_not_null(Register dst, Address src);
  void store_heap_oop(Address dst, Register src);
  // currently unimplemented
  // Used for storing NULL. All other oop constants should be
  // stored using routines that take a jobject.
  void store_heap_oop_null(Address dst);

  void load_prototype_header(Register dst, Register src);

  void store_klass_gap(Register dst, Register src);

  // This dummy is to prevent a call to store_heap_oop from
  // converting a zero (like NULL) into a Register by giving
  // the compiler two choices it can't resolve.

  void store_heap_oop(Address dst, void* dummy);

  void encode_heap_oop(Register d, Register s);
  void encode_heap_oop(Register r) { encode_heap_oop(r, r); }
  void decode_heap_oop(Register d, Register s);
  void decode_heap_oop(Register r) { decode_heap_oop(r, r); }
  void encode_heap_oop_not_null(Register r);
  void decode_heap_oop_not_null(Register r);
  void encode_heap_oop_not_null(Register dst, Register src);
  void decode_heap_oop_not_null(Register dst, Register src);

  void set_narrow_oop(Register dst, jobject obj);

  void encode_klass_not_null(Register r);
  void decode_klass_not_null(Register r);
  void encode_klass_not_null(Register dst, Register src);
  void decode_klass_not_null(Register dst, Register src);

  void set_narrow_klass(Register dst, Klass* k);

  // if the heap base register is used, reinit it with the correct value
  void reinit_heapbase();

  DEBUG_ONLY(void verify_heapbase(const char* msg);)

  void push_CPU_state();
  void pop_CPU_state();

  // Round up to a power of two
  void round_to(Register reg, int modulus);

  // allocation
  void eden_allocate(
    Register obj,               // result: pointer to object after successful allocation
    Register var_size_in_bytes, // object size in bytes if unknown at compile time; invalid otherwise
    int      con_size_in_bytes, // object size in bytes if known at compile time
    Register t1,                // temp register
    Label&   slow_case          // continuation point if fast allocation fails
  );
  void tlab_allocate(
    Register obj,               // result: pointer to object after successful allocation
    Register var_size_in_bytes, // object size in bytes if unknown at compile time; invalid otherwise
    int      con_size_in_bytes, // object size in bytes if known at compile time
    Register t1,                // temp register
    Register t2,                // temp register
    Label&   slow_case          // continuation point if fast allocation fails
  );
  Register tlab_refill(Label& retry_tlab, Label& try_eden, Label& slow_case); // returns TLS address
  void verify_tlab();

  void incr_allocated_bytes(Register thread,
                            Register var_size_in_bytes, int con_size_in_bytes,
                            Register t1 = noreg);

  // interface method calling
  void lookup_interface_method(Register recv_klass,
                               Register intf_klass,
                               RegisterOrConstant itable_index,
                               Register method_result,
                               Register scan_temp,
                               Label& no_such_interface);

  // virtual method calling
  // n.b. x86 allows RegisterOrConstant for vtable_index
  void lookup_virtual_method(Register recv_klass,
                             RegisterOrConstant vtable_index,
                             Register method_result);
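  // Typical fast-path use of tlab_allocate above (editor's sketch;
  // 'obj_size_in_bytes' is a hypothetical compile-time constant):
  //
  //   Label slow;
  //   __ tlab_allocate(r0, noreg, obj_size_in_bytes, r1, r2, slow);
  //   // ... install mark word and klass, zero the fields ...
  //   __ bind(slow);   // fall back to the runtime allocation path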
  // Test sub_klass against super_klass, with fast and slow paths.

  // The fast path produces a tri-state answer: yes / no / maybe-slow.
  // One of the three labels can be NULL, meaning take the fall-through.
  // If super_check_offset is -1, the value is loaded up from super_klass.
  // No registers are killed, except temp_reg.
  void check_klass_subtype_fast_path(Register sub_klass,
                                     Register super_klass,
                                     Register temp_reg,
                                     Label* L_success,
                                     Label* L_failure,
                                     Label* L_slow_path,
                                     RegisterOrConstant super_check_offset = RegisterOrConstant(-1));

  // The rest of the type check; must be wired to a corresponding fast path.
  // It does not repeat the fast path logic, so don't use it standalone.
  // The temp_reg and temp2_reg can be noreg, if no temps are available.
  // Updates the sub's secondary super cache as necessary.
  // If set_cond_codes, condition codes will be Z on success, NZ on failure.
  void check_klass_subtype_slow_path(Register sub_klass,
                                     Register super_klass,
                                     Register temp_reg,
                                     Register temp2_reg,
                                     Label* L_success,
                                     Label* L_failure,
                                     bool set_cond_codes = false);

  // Simplified, combined version, good for typical uses.
  // Falls through on failure.
  void check_klass_subtype(Register sub_klass,
                           Register super_klass,
                           Register temp_reg,
                           Label& L_success);

  Address argument_address(RegisterOrConstant arg_slot, int extra_slot_offset = 0);


  // Debugging

  // only if +VerifyOops
  void verify_oop(Register reg, const char* s = "broken oop");
  void verify_oop_addr(Address addr, const char* s = "broken oop addr");

  // TODO: verify method and klass metadata (compare against vptr?)
  void _verify_method_ptr(Register reg, const char* msg, const char* file, int line) {}
  void _verify_klass_ptr(Register reg, const char* msg, const char* file, int line) {}

#define verify_method_ptr(reg) _verify_method_ptr(reg, "broken method " #reg, __FILE__, __LINE__)
#define verify_klass_ptr(reg) _verify_klass_ptr(reg, "broken klass " #reg, __FILE__, __LINE__)

  // only if +VerifyFPU
  void verify_FPU(int stack_depth, const char* s = "illegal FPU state");

  // prints msg, dumps registers and stops execution
  void stop(const char* msg);

  // prints msg and continues
  void warn(const char* msg);

  static void debug64(char* msg, int64_t pc, int64_t regs[]);

  void untested() { stop("untested"); }

  void unimplemented(const char* what = "") {
    char* b = new char[1024];
    jio_snprintf(b, 1024, "unimplemented: %s", what);
    stop(b);
  }

  void should_not_reach_here() { stop("should not reach here"); }

  // Stack overflow checking
  void bang_stack_with_offset(int offset) {
    // stack grows down, caller passes positive offset
    assert(offset > 0, "must bang with positive offset");
    mov(rscratch2, -offset);
    str(zr, Address(sp, rscratch2));
  }
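  // e.g. bang_stack_with_offset(os::vm_page_size()) emits, in sketch form:
  //
  //   mov rscratch2, #-page_size
  //   str zr, [sp, rscratch2]   // touch the page 'page_size' below sp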
  // Writes to stack successive pages until offset reached to check for
  // stack overflow + shadow pages. Also clobbers tmp.
  void bang_stack_size(Register size, Register tmp);

  virtual RegisterOrConstant delayed_value_impl(intptr_t* delayed_value_addr,
                                                Register tmp,
                                                int offset);

  // Support for serializing memory accesses between threads
  void serialize_memory(Register thread, Register tmp);

  // Arithmetics

  void addptr(Address dst, int32_t src) {
    lea(rscratch2, dst);
    ldr(rscratch1, Address(rscratch2));
    add(rscratch1, rscratch1, src);
    str(rscratch1, Address(rscratch2));
  }

  void cmpptr(Register src1, Address src2);

  void cmpxchgptr(Register oldv, Register newv, Register addr, Register tmp,
                  Label &succeed, Label *fail);

  void cmpxchgw(Register oldv, Register newv, Register addr, Register tmp,
                Label &succeed, Label *fail);

  void atomic_add(Register prev, RegisterOrConstant incr, Register addr);
  void atomic_addw(Register prev, RegisterOrConstant incr, Register addr);

  void atomic_xchg(Register prev, Register newv, Register addr);
  void atomic_xchgw(Register prev, Register newv, Register addr);

  void orptr(Address adr, RegisterOrConstant src) {
    ldr(rscratch2, adr);
    if (src.is_register())
      orr(rscratch2, rscratch2, src.as_register());
    else
      orr(rscratch2, rscratch2, src.as_constant());
    str(rscratch2, adr);
  }

  // Calls

  void trampoline_call(Address entry, CodeBuffer *cbuf = NULL);

  static bool far_branches() {
    return ReservedCodeCacheSize > branch_range;
  }

  // Jumps that can reach anywhere in the code cache.
  // Trashes tmp.
  void far_call(Address entry, CodeBuffer *cbuf = NULL, Register tmp = rscratch1);
  void far_jump(Address entry, CodeBuffer *cbuf = NULL, Register tmp = rscratch1);

  static int far_branch_size() {
    if (far_branches()) {
      return 3 * 4;  // adrp, add, br
    } else {
      return 4;
    }
  }

  // Emit the CompiledIC call idiom
  void ic_call(address entry);

 public:

  // Data

  void mov_metadata(Register dst, Metadata* obj);
  Address allocate_metadata_address(Metadata* obj);
  Address constant_oop_address(jobject obj);

  void movoop(Register dst, jobject obj, bool immediate = false);
  // CRC32 code for java.util.zip.CRC32::updateBytes() intrinsic.
  void kernel_crc32(Register crc, Register buf, Register len,
                    Register table0, Register table1, Register table2, Register table3,
                    Register tmp, Register tmp2, Register tmp3);

#undef VIRTUAL

  // Stack push and pop individual 64 bit registers
  void push(Register src);
  void pop(Register dst);

  // push all registers onto the stack
  void pusha();
  void popa();

  void repne_scan(Register addr, Register value, Register count,
                  Register scratch);
  void repne_scanw(Register addr, Register value, Register count,
                   Register scratch);

  typedef void (MacroAssembler::* add_sub_imm_insn)(Register Rd, Register Rn, unsigned imm);
  typedef void (MacroAssembler::* add_sub_reg_insn)(Register Rd, Register Rn, Register Rm, enum shift_kind kind, unsigned shift);

  // If a constant does not fit in an immediate field, generate some
  // number of MOV instructions and then perform the operation.
  void wrap_add_sub_imm_insn(Register Rd, Register Rn, unsigned imm,
                             add_sub_imm_insn insn1,
                             add_sub_reg_insn insn2);
  // Separate version which sets the flags.
  void wrap_adds_subs_imm_insn(Register Rd, Register Rn, unsigned imm,
                               add_sub_imm_insn insn1,
                               add_sub_reg_insn insn2);

#define WRAP(INSN)                                                      \
  void INSN(Register Rd, Register Rn, unsigned imm) {                  \
    wrap_add_sub_imm_insn(Rd, Rn, imm, &Assembler::INSN, &Assembler::INSN); \
  }                                                                     \
                                                                        \
  void INSN(Register Rd, Register Rn, Register Rm,                     \
            enum shift_kind kind, unsigned shift = 0) {                \
    Assembler::INSN(Rd, Rn, Rm, kind, shift);                          \
  }                                                                     \
                                                                        \
  void INSN(Register Rd, Register Rn, Register Rm) {                   \
    Assembler::INSN(Rd, Rn, Rm);                                       \
  }                                                                     \
                                                                        \
  void INSN(Register Rd, Register Rn, Register Rm,                     \
            ext::operation option, int amount = 0) {                   \
    Assembler::INSN(Rd, Rn, Rm, option, amount);                       \
  }

  WRAP(add) WRAP(addw) WRAP(sub) WRAP(subw)

#undef WRAP
#define WRAP(INSN)                                                      \
  void INSN(Register Rd, Register Rn, unsigned imm) {                  \
    wrap_adds_subs_imm_insn(Rd, Rn, imm, &Assembler::INSN, &Assembler::INSN); \
  }                                                                     \
                                                                        \
  void INSN(Register Rd, Register Rn, Register Rm,                     \
            enum shift_kind kind, unsigned shift = 0) {                \
    Assembler::INSN(Rd, Rn, Rm, kind, shift);                          \
  }                                                                     \
                                                                        \
  void INSN(Register Rd, Register Rn, Register Rm) {                   \
    Assembler::INSN(Rd, Rn, Rm);                                       \
  }                                                                     \
                                                                        \
  void INSN(Register Rd, Register Rn, Register Rm,                     \
            ext::operation option, int amount = 0) {                   \
    Assembler::INSN(Rd, Rn, Rm, option, amount);                       \
  }

  WRAP(adds) WRAP(addsw) WRAP(subs) WRAP(subsw)

  void add(Register Rd, Register Rn, RegisterOrConstant increment);
  void addw(Register Rd, Register Rn, RegisterOrConstant increment);

  void adrp(Register reg1, const Address &dest, unsigned long &byte_offset);

  void tableswitch(Register index, jint lowbound, jint highbound,
                   Label &jumptable, Label &jumptable_end, int stride = 1) {
    adr(rscratch1, jumptable);
    subsw(rscratch2, index, lowbound);
    subsw(zr, rscratch2, highbound - lowbound);
    br(Assembler::HS, jumptable_end);
    add(rscratch1, rscratch1, rscratch2,
        ext::sxtw, exact_log2(stride * Assembler::instruction_size));
    br(rscratch1);
  }
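  // Sketch of a dispatch built with tableswitch (editor's illustration,
  // one instruction per key, i.e. stride == 1; case_0..case_2 are
  // hypothetical labels):
  //
  //   Label table, table_end;
  //   __ tableswitch(r0, 0, 3, table, table_end); // exits if r0 not in [0, 3)
  //   __ bind(table);
  //   __ b(case_0); __ b(case_1); __ b(case_2);
  //   __ bind(table_end);   // also the out-of-range target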
  // Form an address from base + offset in Rd. Rd may or may not
  // actually be used: you must use the Address that is returned. It
  // is up to you to ensure that the shift provided matches the size
  // of your data.
  Address form_address(Register Rd, Register base, long byte_offset, int shift);

  // Prolog generator routines to support switch between x86 code and
  // generated ARM code

  // routine to generate an x86 prolog for a stub function which
  // bootstraps into the generated ARM code which directly follows the
  // stub
  //

 public:
  // enum used for aarch64-to-x86 linkage to define the return type of an x86 function
  enum ret_type { ret_type_void, ret_type_integral, ret_type_float, ret_type_double };

#ifdef BUILTIN_SIM
  void c_stub_prolog(int gp_arg_count, int fp_arg_count, int ret_type, address *prolog_ptr = NULL);
#else
  void c_stub_prolog(int gp_arg_count, int fp_arg_count, int ret_type) { }
#endif

  // special version of call_VM_leaf_base needed for the aarch64 simulator,
  // where we need to specify both the gp and fp arg counts and the
  // return type so that the linkage routine from aarch64 to x86 and
  // back knows which aarch64 registers to copy to x86 registers and
  // which x86 result register to copy back to an aarch64 register

  void call_VM_leaf_base1(
    address  entry_point,            // the entry point
    int      number_of_gp_arguments, // the number of gp reg arguments to pass
    int      number_of_fp_arguments, // the number of fp reg arguments to pass
    ret_type type,                   // the return type for the call
    Label*   retaddr = NULL
  );

  void ldr_constant(Register dest, const Address &const_addr) {
    if (NearCpool) {
      ldr(dest, const_addr);
    } else {
      unsigned long offset;
      adrp(dest, InternalAddress(const_addr.target()), offset);
      ldr(dest, Address(dest, offset));
    }
  }

  address read_polling_page(Register r, address page, relocInfo::relocType rtype);
  address read_polling_page(Register r, relocInfo::relocType rtype);
  // CRC32 code for java.util.zip.CRC32::updateBytes() intrinsic.
  void update_byte_crc32(Register crc, Register val, Register table);
  void update_word_crc32(Register crc, Register v, Register tmp,
                         Register table0, Register table1, Register table2, Register table3,
                         bool upper = false);

  void string_compare(Register str1, Register str2,
                      Register cnt1, Register cnt2, Register result,
                      Register tmp1);
  void string_equals(Register str1, Register str2,
                     Register cnt, Register result,
                     Register tmp1);
  void char_arrays_equals(Register ary1, Register ary2,
                          Register result, Register tmp1);
  void encode_iso_array(Register src, Register dst,
                        Register len, Register result,
                        FloatRegister Vtmp1, FloatRegister Vtmp2,
                        FloatRegister Vtmp3, FloatRegister Vtmp4);
  void string_indexof(Register str1, Register str2,
                      Register cnt1, Register cnt2,
                      Register tmp1, Register tmp2,
                      Register tmp3, Register tmp4,
                      int int_cnt1, Register result);
 private:
  void add2_with_carry(Register final_dest_hi, Register dest_hi, Register dest_lo,
                       Register src1, Register src2);
  void add2_with_carry(Register dest_hi, Register dest_lo, Register src1, Register src2) {
    add2_with_carry(dest_hi, dest_hi, dest_lo, src1, src2);
  }
  void multiply_64_x_64_loop(Register x, Register xstart, Register x_xstart,
                             Register y, Register y_idx, Register z,
                             Register carry, Register product,
                             Register idx, Register kdx);
  void multiply_128_x_128_loop(Register y, Register z,
                               Register carry, Register carry2,
                               Register idx, Register jdx,
                               Register yz_idx1, Register yz_idx2,
                               Register tmp, Register tmp3, Register tmp4,
                               Register tmp7, Register product_hi);
 public:
  void multiply_to_len(Register x, Register xlen, Register y, Register ylen, Register z,
                       Register zlen, Register tmp1, Register tmp2, Register tmp3,
                       Register tmp4, Register tmp5, Register tmp6, Register tmp7);
  // An ISB may be needed because of a safepoint.
  void maybe_isb() { isb(); }

 private:
  // Return the effective address r + (r1 << ext) + offset.
  // Uses rscratch2.
  Address offsetted_address(Register r, Register r1, Address::extend ext,
                            int offset, int size);
};

#ifdef ASSERT
inline bool AbstractAssembler::pd_check_instruction_mark() { return false; }
#endif

/**
 * class SkipIfEqual:
 *
 * Instantiating this class will result in assembly code being output that will
 * jump around any code emitted between the creation of the instance and its
 * automatic destruction at the end of a scope block, depending on the value of
 * the flag passed to the constructor, which will be checked at run-time.
 */
class SkipIfEqual {
 private:
  MacroAssembler* _masm;
  Label _label;

 public:
  SkipIfEqual(MacroAssembler*, const bool* flag_addr, bool value);
  ~SkipIfEqual();
};

struct tableswitch {
  Register _reg;
  int _insn_index;
  jint _first_key;
  jint _last_key;
  Label _after;
  Label _branches;
};

#endif // CPU_AARCH64_VM_MACROASSEMBLER_AARCH64_HPP