/*
 * Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "jvm.h"
#include "asm/macroAssembler.inline.hpp"
#include "compiler/disassembler.hpp"
#include "gc/shared/collectedHeap.inline.hpp"
#include "gc/shared/barrierSet.hpp"
#include "gc/shared/barrierSetAssembler.hpp"
#include "interpreter/interpreter.hpp"
#include "memory/resourceArea.hpp"
#include "memory/universe.hpp"
#include "oops/accessDecorators.hpp"
#include "oops/compressedOops.hpp"
#include "oops/klass.inline.hpp"
#include "prims/methodHandles.hpp"
#include "runtime/biasedLocking.hpp"
#include "runtime/flags/flagSetting.hpp"
#include "runtime/interfaceSupport.inline.hpp"
#include "runtime/jniHandles.inline.hpp"
#include "runtime/objectMonitor.hpp"
#include "runtime/os.inline.hpp"
#include "runtime/safepoint.hpp"
#include "runtime/safepointMechanism.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubRoutines.hpp"
#include "utilities/align.hpp"
#include "utilities/macros.hpp"
#ifdef COMPILER2
#include "opto/intrinsicnode.hpp"
#endif

#ifdef PRODUCT
#define BLOCK_COMMENT(str) /* nothing */
#define STOP(error) stop(error)
#else
#define BLOCK_COMMENT(str) block_comment(str)
#define STOP(error) block_comment(error); stop(error)
#endif

// Convert the raw encoding form into the form expected by the
// constructor for Address.
Address Address::make_raw(int base, int index, int scale, int disp, relocInfo::relocType disp_reloc) {
  assert(scale == 0, "not supported");
  RelocationHolder rspec;
  if (disp_reloc != relocInfo::none) {
    rspec = Relocation::spec_simple(disp_reloc);
  }

  Register rindex = as_Register(index);
  if (rindex != G0) {
    Address madr(as_Register(base), rindex);
    madr._rspec = rspec;
    return madr;
  } else {
    Address madr(as_Register(base), disp);
    madr._rspec = rspec;
    return madr;
  }
}

Address Argument::address_in_frame() const {
  // Warning: In LP64 mode disp will occupy more than 10 bits, but
  //          opcodes such as ld or ldx only access disp() to get
  //          their simm13 argument.
  int disp = ((_number - Argument::n_register_parameters + frame::memory_parameter_word_sp_offset) * BytesPerWord) + STACK_BIAS;
  if (is_in())
    return Address(FP, disp); // In argument.
  else
    return Address(SP, disp); // Out argument.
}
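// Worked example (illustrative only): for the first stack-passed out
// argument, _number == Argument::n_register_parameters, so the expression
// reduces to frame::memory_parameter_word_sp_offset * BytesPerWord, i.e.
// the first memory-parameter slot above SP, plus STACK_BIAS to undo the
// V9 stack bias. Each further argument adds one BytesPerWord-sized slot.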
static const char* argumentNames[][2] = {
  {"A0","P0"}, {"A1","P1"}, {"A2","P2"}, {"A3","P3"}, {"A4","P4"},
  {"A5","P5"}, {"A6","P6"}, {"A7","P7"}, {"A8","P8"}, {"A9","P9"},
  {"A(n>9)","P(n>9)"}
};

const char* Argument::name() const {
  int nofArgs = sizeof argumentNames / sizeof argumentNames[0];
  int num = number();
  if (num >= nofArgs)  num = nofArgs - 1;
  return argumentNames[num][is_in() ? 1 : 0];
}

#ifdef ASSERT
// On RISC, there's no benefit to verifying instruction boundaries.
bool AbstractAssembler::pd_check_instruction_mark() { return false; }
#endif

// Patch instruction inst at offset inst_pos to refer to dest_pos
// and return the resulting instruction.
// We should have pcs, not offsets, but since all is relative, it will work out
// OK.
int MacroAssembler::patched_branch(int dest_pos, int inst, int inst_pos) {
  int m; // mask for displacement field
  int v; // new value for displacement field
  const int word_aligned_ones = -4;
  switch (inv_op(inst)) {
  default: ShouldNotReachHere();
  case call_op:    m = wdisp(word_aligned_ones, 0, 30);  v = wdisp(dest_pos, inst_pos, 30); break;
  case branch_op:
    switch (inv_op2(inst)) {
      case fbp_op2:  m = wdisp(word_aligned_ones, 0, 19);  v = wdisp(dest_pos, inst_pos, 19); break;
      case bp_op2:   m = wdisp(word_aligned_ones, 0, 19);  v = wdisp(dest_pos, inst_pos, 19); break;
      case fb_op2:   m = wdisp(word_aligned_ones, 0, 22);  v = wdisp(dest_pos, inst_pos, 22); break;
      case br_op2:   m = wdisp(word_aligned_ones, 0, 22);  v = wdisp(dest_pos, inst_pos, 22); break;
      case bpr_op2: {
        if (is_cbcond(inst)) {
          m = wdisp10(word_aligned_ones, 0);
          v = wdisp10(dest_pos, inst_pos);
        } else {
          m = wdisp16(word_aligned_ones, 0);
          v = wdisp16(dest_pos, inst_pos);
        }
        break;
      }
      default: ShouldNotReachHere();
    }
  }
  return inst & ~m | v;
}

// Return the offset of the branch destination of instruction inst
// at offset pos.
// Should have pcs, but since all is relative, it works out.
int MacroAssembler::branch_destination(int inst, int pos) {
  int r;
  switch (inv_op(inst)) {
  default: ShouldNotReachHere();
  case call_op:    r = inv_wdisp(inst, pos, 30); break;
  case branch_op:
    switch (inv_op2(inst)) {
      case fbp_op2:  r = inv_wdisp(inst, pos, 19); break;
      case bp_op2:   r = inv_wdisp(inst, pos, 19); break;
      case fb_op2:   r = inv_wdisp(inst, pos, 22); break;
      case br_op2:   r = inv_wdisp(inst, pos, 22); break;
      case bpr_op2: {
        if (is_cbcond(inst)) {
          r = inv_wdisp10(inst, pos);
        } else {
          r = inv_wdisp16(inst, pos);
        }
        break;
      }
      default: ShouldNotReachHere();
    }
  }
  return r;
}
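// Worked example (illustrative only): patching a 19-bit BPcc branch at
// offset 0x10 to target offset 0x40 clears the old displacement with ~m
// and ors in wdisp(0x40, 0x10, 19), i.e. the word distance
// (0x40 - 0x10) >> 2 == 12, encoded in the low 19 bits of the instruction.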
void MacroAssembler::resolve_jobject(Register value, Register tmp) {
  Label done, not_weak;
  br_null(value, false, Assembler::pn, done); // Use NULL as-is.
  delayed()->andcc(value, JNIHandles::weak_tag_mask, G0); // Test for jweak
  brx(Assembler::zero, true, Assembler::pt, not_weak);
  delayed()->nop();
  access_load_at(T_OBJECT, IN_NATIVE | ON_PHANTOM_OOP_REF,
                 Address(value, -JNIHandles::weak_tag_value), value, tmp);
  verify_oop(value);
  br(Assembler::always, true, Assembler::pt, done);
  delayed()->nop();
  bind(not_weak);
  access_load_at(T_OBJECT, IN_NATIVE, Address(value, 0), value, tmp);
  verify_oop(value);
  bind(done);
}

void MacroAssembler::null_check(Register reg, int offset) {
  if (needs_explicit_null_check((intptr_t)offset)) {
    // provoke OS NULL exception if reg = NULL by
    // accessing M[reg] w/o changing any registers
    ld_ptr(reg, 0, G0);
  }
  else {
    // nothing to do, (later) access of M[reg + offset]
    // will provoke OS NULL exception if reg = NULL
  }
}

// Ring buffer jumps


void MacroAssembler::jmp2(Register r1, Register r2, const char* file, int line) {
  assert_not_delayed();
  jmpl(r1, r2, G0);
}
void MacroAssembler::jmp(Register r1, int offset, const char* file, int line) {
  assert_not_delayed();
  jmp(r1, offset);
}

// This code sequence is relocatable to any address, even on LP64.
void MacroAssembler::jumpl(const AddressLiteral& addrlit, Register temp, Register d, int offset, const char* file, int line) {
  assert_not_delayed();
  // Force fixed length sethi because NativeJump and NativeFarCall don't handle
  // variable length instruction streams.
  patchable_sethi(addrlit, temp);
  Address a(temp, addrlit.low10() + offset); // Add the offset to the displacement.
  jmpl(a.base(), a.disp(), d);
}

void MacroAssembler::jump(const AddressLiteral& addrlit, Register temp, int offset, const char* file, int line) {
  jumpl(addrlit, temp, G0, offset, file, line);
}


// Conditional breakpoint (for assertion checks in assembly code)
void MacroAssembler::breakpoint_trap(Condition c, CC cc) {
  trap(c, cc, G0, ST_RESERVED_FOR_USER_0);
}

// We want to use ST_BREAKPOINT here, but the debugger is confused by it.
void MacroAssembler::breakpoint_trap() {
  trap(ST_RESERVED_FOR_USER_0);
}
void MacroAssembler::safepoint_poll(Label& slow_path, bool a, Register thread_reg, Register temp_reg) {
  if (SafepointMechanism::uses_thread_local_poll()) {
    ldx(Address(thread_reg, Thread::polling_page_offset()), temp_reg, 0);
    // Armed page has poll bit set.
    and3(temp_reg, SafepointMechanism::poll_bit(), temp_reg);
    br_notnull(temp_reg, a, Assembler::pn, slow_path);
  } else {
    AddressLiteral sync_state(SafepointSynchronize::address_of_state());

    load_contents(sync_state, temp_reg);
    cmp(temp_reg, SafepointSynchronize::_not_synchronized);
    br(Assembler::notEqual, a, Assembler::pn, slow_path);
  }
}

void MacroAssembler::enter() {
  Unimplemented();
}

void MacroAssembler::leave() {
  Unimplemented();
}

// Calls to C land

#ifdef ASSERT
// a hook for debugging
static Thread* reinitialize_thread() {
  return Thread::current();
}
#else
#define reinitialize_thread Thread::current
#endif

#ifdef ASSERT
address last_get_thread = NULL;
#endif

// call this when G2_thread is not known to be valid
void MacroAssembler::get_thread() {
  save_frame(0);                // to avoid clobbering O0
  mov(G1, L0);                  // avoid clobbering G1
  mov(G5_method, L1);           // avoid clobbering G5
  mov(G3, L2);                  // avoid clobbering G3 also
  mov(G4, L5);                  // avoid clobbering G4
#ifdef ASSERT
  AddressLiteral last_get_thread_addrlit(&last_get_thread);
  set(last_get_thread_addrlit, L3);
  rdpc(L4);
  inc(L4, 3 * BytesPerInstWord); // skip rdpc + inc + st_ptr to point L4 at call
  st_ptr(L4, L3, 0);
#endif
  call(CAST_FROM_FN_PTR(address, reinitialize_thread), relocInfo::runtime_call_type);
  delayed()->nop();
  mov(L0, G1);
  mov(L1, G5_method);
  mov(L2, G3);
  mov(L5, G4);
  restore(O0, 0, G2_thread);
}

static Thread* verify_thread_subroutine(Thread* gthread_value) {
  Thread* correct_value = Thread::current();
  guarantee(gthread_value == correct_value, "G2_thread value must be the thread");
  return correct_value;
}
void MacroAssembler::verify_thread() {
  if (VerifyThread) {
    // NOTE: this chops off the heads of the 64-bit O registers.
    // make sure G2_thread contains the right value
    save_frame_and_mov(0, Lmethod, Lmethod);   // to avoid clobbering O0 (and propagate Lmethod)
    mov(G1, L1);                // avoid clobbering G1
    // G2 saved below
    mov(G3, L3);                // avoid clobbering G3
    mov(G4, L4);                // avoid clobbering G4
    mov(G5_method, L5);         // avoid clobbering G5_method
    call(CAST_FROM_FN_PTR(address, verify_thread_subroutine), relocInfo::runtime_call_type);
    delayed()->mov(G2_thread, O0);

    mov(L1, G1);                // Restore G1
    // G2 restored below
    mov(L3, G3);                // restore G3
    mov(L4, G4);                // restore G4
    mov(L5, G5_method);         // restore G5_method
    restore(O0, 0, G2_thread);
  }
}


void MacroAssembler::save_thread(const Register thread_cache) {
  verify_thread();
  if (thread_cache->is_valid()) {
    assert(thread_cache->is_local() || thread_cache->is_in(), "bad volatile");
    mov(G2_thread, thread_cache);
  }
  if (VerifyThread) {
    // smash G2_thread, as if the VM were about to anyway
    set(0x67676767, G2_thread);
  }
}


void MacroAssembler::restore_thread(const Register thread_cache) {
  if (thread_cache->is_valid()) {
    assert(thread_cache->is_local() || thread_cache->is_in(), "bad volatile");
    mov(thread_cache, G2_thread);
    verify_thread();
  } else {
    // do it the slow way
    get_thread();
  }
}
// %%% maybe get rid of [re]set_last_Java_frame
void MacroAssembler::set_last_Java_frame(Register last_java_sp, Register last_Java_pc) {
  assert_not_delayed();
  Address flags(G2_thread, JavaThread::frame_anchor_offset() +
                           JavaFrameAnchor::flags_offset());
  Address pc_addr(G2_thread, JavaThread::last_Java_pc_offset());

  // Always set last_Java_pc and flags first because once last_Java_sp
  // is visible has_last_Java_frame is true and users will look at the
  // rest of the fields. (Note: flags should always be zero before we
  // get here so it doesn't need to be set.)

#ifdef ASSERT
  // Verify that last_Java_pc was zeroed on return to Java
  Label PcOk;
  save_frame(0);                // to avoid clobbering O0
  ld_ptr(pc_addr, L0);
  br_null_short(L0, Assembler::pt, PcOk);
  STOP("last_Java_pc not zeroed before leaving Java");
  bind(PcOk);

  // Verify that flags was zeroed on return to Java
  Label FlagsOk;
  ld(flags, L0);
  tst(L0);
  br(Assembler::zero, false, Assembler::pt, FlagsOk);
  delayed()->restore();
  STOP("flags not zeroed before leaving Java");
  bind(FlagsOk);
#endif /* ASSERT */
  //
  // When returning from calling out from Java mode the frame anchor's
  // last_Java_pc will always be set to NULL. It is set here so that if
  // we are doing a call to native (not VM) that we capture the known pc
  // and don't have to rely on the native call having a standard frame
  // linkage where we can find the pc.

  if (last_Java_pc->is_valid()) {
    st_ptr(last_Java_pc, pc_addr);
  }

#ifdef ASSERT
  // Make sure that we have an odd stack
  Label StackOk;
  andcc(last_java_sp, 0x01, G0);
  br(Assembler::notZero, false, Assembler::pt, StackOk);
  delayed()->nop();
  STOP("Stack Not Biased in set_last_Java_frame");
  bind(StackOk);
#endif // ASSERT
  assert(last_java_sp != G4_scratch, "bad register usage in set_last_Java_frame");
  add(last_java_sp, STACK_BIAS, G4_scratch);
  st_ptr(G4_scratch, G2_thread, JavaThread::last_Java_sp_offset());
}

void MacroAssembler::reset_last_Java_frame(void) {
  assert_not_delayed();

  Address sp_addr(G2_thread, JavaThread::last_Java_sp_offset());
  Address pc_addr(G2_thread, JavaThread::frame_anchor_offset() + JavaFrameAnchor::last_Java_pc_offset());
  Address flags  (G2_thread, JavaThread::frame_anchor_offset() + JavaFrameAnchor::flags_offset());

#ifdef ASSERT
  // check that it WAS previously set
  save_frame_and_mov(0, Lmethod, Lmethod);     // Propagate Lmethod to helper frame
  ld_ptr(sp_addr, L0);
  tst(L0);
  breakpoint_trap(Assembler::zero, Assembler::ptr_cc);
  restore();
#endif // ASSERT

  st_ptr(G0, sp_addr);
  // Always return last_Java_pc to zero
  st_ptr(G0, pc_addr);
  // Always null flags after return to Java
  st(G0, flags);
}
void MacroAssembler::call_VM_base(
    Register        oop_result,
    Register        thread_cache,
    Register        last_java_sp,
    address         entry_point,
    int             number_of_arguments,
    bool            check_exceptions)
{
  assert_not_delayed();

  // determine last_java_sp register
  if (!last_java_sp->is_valid()) {
    last_java_sp = SP;
  }
  // debugging support
  assert(number_of_arguments >= 0, "cannot have negative number of arguments");

  // 64-bit last_java_sp is biased!
  set_last_Java_frame(last_java_sp, noreg);
  if (VerifyThread) mov(G2_thread, O0); // about to be smashed; pass early
  save_thread(thread_cache);
  // do the call
  call(entry_point, relocInfo::runtime_call_type);
  if (!VerifyThread)
    delayed()->mov(G2_thread, O0);  // pass thread as first argument
  else
    delayed()->nop();               // (thread already passed)
  restore_thread(thread_cache);
  reset_last_Java_frame();

  // check for pending exceptions. use Gtemp as scratch register.
  if (check_exceptions) {
    check_and_forward_exception(Gtemp);
  }

#ifdef ASSERT
  set(badHeapWordVal, G3);
  set(badHeapWordVal, G4);
  set(badHeapWordVal, G5);
#endif

  // get oop result if there is one and reset the value in the thread
  if (oop_result->is_valid()) {
    get_vm_result(oop_result);
  }
}

void MacroAssembler::check_and_forward_exception(Register scratch_reg)
{
  Label L;

  check_and_handle_popframe(scratch_reg);
  check_and_handle_earlyret(scratch_reg);

  Address exception_addr(G2_thread, Thread::pending_exception_offset());
  ld_ptr(exception_addr, scratch_reg);
  br_null_short(scratch_reg, pt, L);
  // we use O7 linkage so that forward_exception_entry has the issuing PC
  call(StubRoutines::forward_exception_entry(), relocInfo::runtime_call_type);
  delayed()->nop();
  bind(L);
}


void MacroAssembler::check_and_handle_popframe(Register scratch_reg) {
}


void MacroAssembler::check_and_handle_earlyret(Register scratch_reg) {
}


void MacroAssembler::call_VM(Register oop_result, address entry_point, int number_of_arguments, bool check_exceptions) {
  call_VM_base(oop_result, noreg, noreg, entry_point, number_of_arguments, check_exceptions);
}


void MacroAssembler::call_VM(Register oop_result, address entry_point, Register arg_1, bool check_exceptions) {
  // O0 is reserved for the thread
  mov(arg_1, O1);
  call_VM(oop_result, entry_point, 1, check_exceptions);
}


void MacroAssembler::call_VM(Register oop_result, address entry_point, Register arg_1, Register arg_2, bool check_exceptions) {
  // O0 is reserved for the thread
  mov(arg_1, O1);
  mov(arg_2, O2); assert(arg_2 != O1, "smashed argument");
  call_VM(oop_result, entry_point, 2, check_exceptions);
}


void MacroAssembler::call_VM(Register oop_result, address entry_point, Register arg_1, Register arg_2, Register arg_3, bool check_exceptions) {
  // O0 is reserved for the thread
  mov(arg_1, O1);
  mov(arg_2, O2); assert(arg_2 != O1, "smashed argument");
  mov(arg_3, O3); assert(arg_3 != O1 && arg_3 != O2, "smashed argument");
  call_VM(oop_result, entry_point, 3, check_exceptions);
}
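// Hypothetical usage sketch (not from this file): a caller passes its Java
// arguments in O1..O3 and lets call_VM put G2_thread in O0, e.g.
//   call_VM(Otos_i, CAST_FROM_FN_PTR(address, some_runtime_entry), Otos_i);
// where 'some_runtime_entry' is a placeholder for a real runtime routine
// taking (JavaThread*, oop).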
// Note: The following call_VM overloadings are useful when a "save"
// has already been performed by a stub, and the last Java frame is
// the previous one. In that case, last_java_sp must be passed as FP
// instead of SP.


void MacroAssembler::call_VM(Register oop_result, Register last_java_sp, address entry_point, int number_of_arguments, bool check_exceptions) {
  call_VM_base(oop_result, noreg, last_java_sp, entry_point, number_of_arguments, check_exceptions);
}


void MacroAssembler::call_VM(Register oop_result, Register last_java_sp, address entry_point, Register arg_1, bool check_exceptions) {
  // O0 is reserved for the thread
  mov(arg_1, O1);
  call_VM(oop_result, last_java_sp, entry_point, 1, check_exceptions);
}


void MacroAssembler::call_VM(Register oop_result, Register last_java_sp, address entry_point, Register arg_1, Register arg_2, bool check_exceptions) {
  // O0 is reserved for the thread
  mov(arg_1, O1);
  mov(arg_2, O2); assert(arg_2 != O1, "smashed argument");
  call_VM(oop_result, last_java_sp, entry_point, 2, check_exceptions);
}


void MacroAssembler::call_VM(Register oop_result, Register last_java_sp, address entry_point, Register arg_1, Register arg_2, Register arg_3, bool check_exceptions) {
  // O0 is reserved for the thread
  mov(arg_1, O1);
  mov(arg_2, O2); assert(arg_2 != O1, "smashed argument");
  mov(arg_3, O3); assert(arg_3 != O1 && arg_3 != O2, "smashed argument");
  call_VM(oop_result, last_java_sp, entry_point, 3, check_exceptions);
}



void MacroAssembler::call_VM_leaf_base(Register thread_cache, address entry_point, int number_of_arguments) {
  assert_not_delayed();
  save_thread(thread_cache);
  // do the call
  call(entry_point, relocInfo::runtime_call_type);
  delayed()->nop();
  restore_thread(thread_cache);
#ifdef ASSERT
  set(badHeapWordVal, G3);
  set(badHeapWordVal, G4);
  set(badHeapWordVal, G5);
#endif
}


void MacroAssembler::call_VM_leaf(Register thread_cache, address entry_point, int number_of_arguments) {
  call_VM_leaf_base(thread_cache, entry_point, number_of_arguments);
}


void MacroAssembler::call_VM_leaf(Register thread_cache, address entry_point, Register arg_1) {
  mov(arg_1, O0);
  call_VM_leaf(thread_cache, entry_point, 1);
}


void MacroAssembler::call_VM_leaf(Register thread_cache, address entry_point, Register arg_1, Register arg_2) {
  mov(arg_1, O0);
  mov(arg_2, O1); assert(arg_2 != O0, "smashed argument");
  call_VM_leaf(thread_cache, entry_point, 2);
}


void MacroAssembler::call_VM_leaf(Register thread_cache, address entry_point, Register arg_1, Register arg_2, Register arg_3) {
  mov(arg_1, O0);
  mov(arg_2, O1); assert(arg_2 != O0, "smashed argument");
  mov(arg_3, O2); assert(arg_3 != O0 && arg_3 != O1, "smashed argument");
  call_VM_leaf(thread_cache, entry_point, 3);
}


void MacroAssembler::get_vm_result(Register oop_result) {
  verify_thread();
  Address vm_result_addr(G2_thread, JavaThread::vm_result_offset());
  ld_ptr(vm_result_addr, oop_result);
  st_ptr(G0, vm_result_addr);
  verify_oop(oop_result);
}


void MacroAssembler::get_vm_result_2(Register metadata_result) {
  verify_thread();
  Address vm_result_addr_2(G2_thread, JavaThread::vm_result_2_offset());
  ld_ptr(vm_result_addr_2, metadata_result);
  st_ptr(G0, vm_result_addr_2);
}
// We require that C code which does not return a value in vm_result will
// leave it undisturbed.
void MacroAssembler::set_vm_result(Register oop_result) {
  verify_thread();
  Address vm_result_addr(G2_thread, JavaThread::vm_result_offset());
  verify_oop(oop_result);

# ifdef ASSERT
  // Check that we are not overwriting any other oop.
  save_frame_and_mov(0, Lmethod, Lmethod);     // Propagate Lmethod
  ld_ptr(vm_result_addr, L0);
  tst(L0);
  restore();
  breakpoint_trap(notZero, Assembler::ptr_cc);
# endif

  st_ptr(oop_result, vm_result_addr);
}


void MacroAssembler::ic_call(address entry, bool emit_delay, jint method_index) {
  RelocationHolder rspec = virtual_call_Relocation::spec(pc(), method_index);
  patchable_set((intptr_t)Universe::non_oop_word(), G5_inline_cache_reg);
  relocate(rspec);
  call(entry, relocInfo::none);
  if (emit_delay) {
    delayed()->nop();
  }
}
void MacroAssembler::internal_sethi(const AddressLiteral& addrlit, Register d, bool ForceRelocatable) {
  address save_pc;
  int shiftcnt;
#ifdef VALIDATE_PIPELINE
  assert_no_delay("Cannot put two instructions in delay-slot.");
#endif
  v9_dep();
  save_pc = pc();

  int msb32 = (int) (addrlit.value() >> 32);
  int lsb32 = (int) (addrlit.value());

  if (msb32 == 0 && lsb32 >= 0) {
    Assembler::sethi(lsb32, d, addrlit.rspec());
  }
  else if (msb32 == -1) {
    Assembler::sethi(~lsb32, d, addrlit.rspec());
    xor3(d, ~low10(~0), d);
  }
  else {
    Assembler::sethi(msb32, d, addrlit.rspec()); // msb 22-bits
    if (msb32 & 0x3ff)                           // Any bits?
      or3(d, msb32 & 0x3ff, d);                  // msb 32-bits are now in lsb 32
    if (lsb32 & 0xFFFFFC00) {                    // done?
      if ((lsb32 >> 20) & 0xfff) {               // Any bits set?
        sllx(d, 12, d);                          // Make room for next 12 bits
        or3(d, (lsb32 >> 20) & 0xfff, d);        // Or in next 12
        shiftcnt = 0;                            // We already shifted
      }
      else
        shiftcnt = 12;
      if ((lsb32 >> 10) & 0x3ff) {
        sllx(d, shiftcnt + 10, d);               // Make room for last 10 bits
        or3(d, (lsb32 >> 10) & 0x3ff, d);        // Or in next 10
        shiftcnt = 0;
      }
      else
        shiftcnt = 10;
      sllx(d, shiftcnt + 10, d);                 // Shift leaving disp field 0'd
    }
    else
      sllx(d, 32, d);
  }
  // Pad out the instruction sequence so it can be patched later.
  if (ForceRelocatable || (addrlit.rtype() != relocInfo::none &&
                           addrlit.rtype() != relocInfo::runtime_call_type)) {
    while (pc() < (save_pc + (7 * BytesPerInstWord)))
      nop();
  }
}


void MacroAssembler::sethi(const AddressLiteral& addrlit, Register d) {
  internal_sethi(addrlit, d, false);
}


void MacroAssembler::patchable_sethi(const AddressLiteral& addrlit, Register d) {
  internal_sethi(addrlit, d, true);
}


int MacroAssembler::insts_for_sethi(address a, bool worst_case) {
  if (worst_case)  return 7;
  intptr_t iaddr = (intptr_t) a;
  int msb32 = (int) (iaddr >> 32);
  int lsb32 = (int) (iaddr);
  int count;
  if (msb32 == 0 && lsb32 >= 0)
    count = 1;
  else if (msb32 == -1)
    count = 2;
  else {
    count = 2;
    if (msb32 & 0x3ff)
      count++;
    if (lsb32 & 0xFFFFFC00) {
      if ((lsb32 >> 20) & 0xfff)  count += 2;
      if ((lsb32 >> 10) & 0x3ff)  count += 2;
    }
  }
  return count;
}

int MacroAssembler::worst_case_insts_for_set() {
  return insts_for_sethi(NULL, true) + 1;
}


// Keep in sync with MacroAssembler::insts_for_internal_set
void MacroAssembler::internal_set(const AddressLiteral& addrlit, Register d, bool ForceRelocatable) {
  intptr_t value = addrlit.value();

  if (!ForceRelocatable && addrlit.rspec().type() == relocInfo::none) {
    // can optimize
    if (-4096 <= value && value <= 4095) {
      or3(G0, value, d); // setsw (this leaves upper 32 bits sign-extended)
      return;
    }
    if (inv_hi22(hi22(value)) == value) {
      sethi(addrlit, d);
      return;
    }
  }
  assert_no_delay("Cannot put two instructions in delay-slot.");
  internal_sethi(addrlit, d, ForceRelocatable);
  if (ForceRelocatable || addrlit.rspec().type() != relocInfo::none || addrlit.low10() != 0) {
    add(d, addrlit.low10(), d, addrlit.rspec());
  }
}

// Keep in sync with MacroAssembler::internal_set
int MacroAssembler::insts_for_internal_set(intptr_t value) {
  // can optimize
  if (-4096 <= value && value <= 4095) {
    return 1;
  }
  if (inv_hi22(hi22(value)) == value) {
    return insts_for_sethi((address) value);
  }
  int count = insts_for_sethi((address) value);
  AddressLiteral al(value);
  if (al.low10() != 0) {
    count++;
  }
  return count;
}

void MacroAssembler::set(const AddressLiteral& al, Register d) {
  internal_set(al, d, false);
}

void MacroAssembler::set(intptr_t value, Register d) {
  AddressLiteral al(value);
  internal_set(al, d, false);
}

void MacroAssembler::set(address addr, Register d, RelocationHolder const& rspec) {
  AddressLiteral al(addr, rspec);
  internal_set(al, d, false);
}

void MacroAssembler::patchable_set(const AddressLiteral& al, Register d) {
  internal_set(al, d, true);
}

void MacroAssembler::patchable_set(intptr_t value, Register d) {
  AddressLiteral al(value);
  internal_set(al, d, true);
}
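// Worked example (illustrative only): for a full 64-bit constant with all
// bit groups populated, internal_sethi emits sethi + or3 for the high 32
// bits, then two sllx/or3 pairs and a final sllx for the low 32 bits:
// 7 instructions, matching insts_for_sethi(a, true). internal_set may add
// one trailing 'add' for the low 10 bits, which is why
// worst_case_insts_for_set() returns 7 + 1.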
void MacroAssembler::set64(jlong value, Register d, Register tmp) {
  assert_not_delayed();
  v9_dep();

  int hi = (int)(value >> 32);
  int lo = (int)(value & ~0);
  int bits_33to2 = (int)((value >> 2) & ~0);
  // (Matcher::isSimpleConstant64 knows about the following optimizations.)
  if (Assembler::is_simm13(lo) && value == lo) {
    or3(G0, lo, d);
  } else if (hi == 0) {
    Assembler::sethi(lo, d);   // hardware version zero-extends to upper 32
    if (low10(lo) != 0)
      or3(d, low10(lo), d);
  }
  else if ((hi >> 2) == 0) {
    Assembler::sethi(bits_33to2, d);  // hardware version zero-extends to upper 32
    sllx(d, 2, d);
    if (low12(lo) != 0)
      or3(d, low12(lo), d);
  }
  else if (hi == -1) {
    Assembler::sethi(~lo, d);  // hardware version zero-extends to upper 32
    xor3(d, low10(lo) ^ ~low10(~0), d);
  }
  else if (lo == 0) {
    if (Assembler::is_simm13(hi)) {
      or3(G0, hi, d);
    } else {
      Assembler::sethi(hi, d); // hardware version zero-extends to upper 32
      if (low10(hi) != 0)
        or3(d, low10(hi), d);
    }
    sllx(d, 32, d);
  }
  else {
    Assembler::sethi(hi, tmp);
    Assembler::sethi(lo, d);   // macro assembler version sign-extends
    if (low10(hi) != 0)
      or3(tmp, low10(hi), tmp);
    if (low10(lo) != 0)
      or3(d, low10(lo), d);
    sllx(tmp, 32, tmp);
    or3(d, tmp, d);
  }
}

int MacroAssembler::insts_for_set64(jlong value) {
  v9_dep();

  int hi = (int) (value >> 32);
  int lo = (int) (value & ~0);
  int count = 0;

  // (Matcher::isSimpleConstant64 knows about the following optimizations.)
  if (Assembler::is_simm13(lo) && value == lo) {
    count++;
  } else if (hi == 0) {
    count++;
    if (low10(lo) != 0)
      count++;
  }
  else if (hi == -1) {
    count += 2;
  }
  else if (lo == 0) {
    if (Assembler::is_simm13(hi)) {
      count++;
    } else {
      count++;
      if (low10(hi) != 0)
        count++;
    }
    count++;
  }
  else {
    count += 2;
    if (low10(hi) != 0)
      count++;
    if (low10(lo) != 0)
      count++;
    count += 2;
  }
  return count;
}

// compute size in bytes of sparc frame, given
// number of extraWords
int MacroAssembler::total_frame_size_in_bytes(int extraWords) {

  int nWords = frame::memory_parameter_word_sp_offset;

  nWords += extraWords;

  if (nWords & 1) ++nWords; // round up to double-word

  return nWords * BytesPerWord;
}


// save_frame: given number of "extra" words in frame,
// issue approp. save instruction (p 200, v8 manual)

void MacroAssembler::save_frame(int extraWords) {
  int delta = -total_frame_size_in_bytes(extraWords);
  if (is_simm13(delta)) {
    save(SP, delta, SP);
  } else {
    set(delta, G3_scratch);
    save(SP, G3_scratch, SP);
  }
}


void MacroAssembler::save_frame_c1(int size_in_bytes) {
  if (is_simm13(-size_in_bytes)) {
    save(SP, -size_in_bytes, SP);
  } else {
    set(-size_in_bytes, G3_scratch);
    save(SP, G3_scratch, SP);
  }
}
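// Worked example (illustrative only): with extraWords == 2 and a
// memory_parameter_word_sp_offset of, say, 23 words (the actual value
// comes from frame_sparc.hpp), nWords == 25 rounds up to 26 for
// double-word alignment, so the frame is 26 * BytesPerWord == 208 bytes
// on a 64-bit build and 'save' gets -208.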
void MacroAssembler::save_frame_and_mov(int extraWords,
                                        Register s1, Register d1,
                                        Register s2, Register d2) {
  assert_not_delayed();

  // The trick here is to use precisely the same memory word
  // that trap handlers also use to save the register.
  // This word cannot be used for any other purpose, but
  // it works fine to save the register's value, whether or not
  // an interrupt flushes register windows at any given moment!
  Address s1_addr;
  if (s1->is_valid() && (s1->is_in() || s1->is_local())) {
    s1_addr = s1->address_in_saved_window();
    st_ptr(s1, s1_addr);
  }

  Address s2_addr;
  if (s2->is_valid() && (s2->is_in() || s2->is_local())) {
    s2_addr = s2->address_in_saved_window();
    st_ptr(s2, s2_addr);
  }

  save_frame(extraWords);

  if (s1_addr.base() == SP) {
    ld_ptr(s1_addr.after_save(), d1);
  } else if (s1->is_valid()) {
    mov(s1->after_save(), d1);
  }

  if (s2_addr.base() == SP) {
    ld_ptr(s2_addr.after_save(), d2);
  } else if (s2->is_valid()) {
    mov(s2->after_save(), d2);
  }
}


AddressLiteral MacroAssembler::allocate_metadata_address(Metadata* obj) {
  assert(oop_recorder() != NULL, "this assembler needs a Recorder");
  int index = oop_recorder()->allocate_metadata_index(obj);
  RelocationHolder rspec = metadata_Relocation::spec(index);
  return AddressLiteral((address)obj, rspec);
}

AddressLiteral MacroAssembler::constant_metadata_address(Metadata* obj) {
  assert(oop_recorder() != NULL, "this assembler needs a Recorder");
  int index = oop_recorder()->find_index(obj);
  RelocationHolder rspec = metadata_Relocation::spec(index);
  return AddressLiteral((address)obj, rspec);
}


AddressLiteral MacroAssembler::constant_oop_address(jobject obj) {
#ifdef ASSERT
  {
    ThreadInVMfromUnknown tiv;
    assert(oop_recorder() != NULL, "this assembler needs an OopRecorder");
    assert(Universe::heap()->is_in_reserved(JNIHandles::resolve(obj)), "not an oop");
  }
#endif
  int oop_index = oop_recorder()->find_index(obj);
  return AddressLiteral(obj, oop_Relocation::spec(oop_index));
}

void MacroAssembler::set_narrow_oop(jobject obj, Register d) {
  assert(oop_recorder() != NULL, "this assembler needs an OopRecorder");
  int oop_index = oop_recorder()->find_index(obj);
  RelocationHolder rspec = oop_Relocation::spec(oop_index);

  assert_not_delayed();
  // Relocation with special format (see relocInfo_sparc.hpp).
  relocate(rspec, 1);
  // Assembler::sethi(0x3fffff, d);
  emit_int32(op(branch_op) | rd(d) | op2(sethi_op2) | hi22(0x3fffff));
  // Don't add relocation for 'add'. Do patching during 'sethi' processing.
  add(d, 0x3ff, d);
}
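// Worked example (illustrative only): a 32-bit narrow value such as
// 0x12345678 splits into hi22 == 0x12345678 >> 10 == 0x48d15 for the
// sethi and low10 == 0x12345678 & 0x3ff == 0x278 for the add;
// (0x48d15 << 10) | 0x278 reassembles the original value. The 0x3fffff
// and 0x3ff operands above are all-ones placeholders patched later.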
void MacroAssembler::set_narrow_klass(Klass* k, Register d) {
  assert(oop_recorder() != NULL, "this assembler needs an OopRecorder");
  int klass_index = oop_recorder()->find_index(k);
  RelocationHolder rspec = metadata_Relocation::spec(klass_index);
  narrowOop encoded_k = CompressedKlassPointers::encode(k);

  assert_not_delayed();
  // Relocation with special format (see relocInfo_sparc.hpp).
  relocate(rspec, 1);
  // Assembler::sethi(encoded_k, d);
  emit_int32(op(branch_op) | rd(d) | op2(sethi_op2) | hi22(encoded_k));
  // Don't add relocation for 'add'. Do patching during 'sethi' processing.
  add(d, low10(encoded_k), d);
}

void MacroAssembler::align(int modulus) {
  while (offset() % modulus != 0) nop();
}

void RegistersForDebugging::print(outputStream* s) {
  FlagSetting fs(Debugging, true);
  int j;
  for (j = 0; j < 8; ++j) {
    if (j != 6) { s->print("i%d = ", j); os::print_location(s, i[j]); }
    else        { s->print("fp = "   ); os::print_location(s, i[j]); }
  }
  s->cr();

  for (j = 0; j < 8; ++j) {
    s->print("l%d = ", j); os::print_location(s, l[j]);
  }
  s->cr();

  for (j = 0; j < 8; ++j) {
    if (j != 6) { s->print("o%d = ", j); os::print_location(s, o[j]); }
    else        { s->print("sp = "   ); os::print_location(s, o[j]); }
  }
  s->cr();

  for (j = 0; j < 8; ++j) {
    s->print("g%d = ", j); os::print_location(s, g[j]);
  }
  s->cr();

  // print out floats with compression
  for (j = 0; j < 32; ) {
    jfloat val = f[j];
    int last = j;
    for ( ; last + 1 < 32; ++last) {
      char b1[1024], b2[1024];
      sprintf(b1, "%f", val);
      sprintf(b2, "%f", f[last + 1]);
      if (strcmp(b1, b2))
        break;
    }
    s->print("f%d", j);
    if (j != last) s->print(" - f%d", last);
    s->print(" = %f", val);
    s->fill_to(25);
    s->print_cr(" (0x%x)", *(int*)&val);
    j = last + 1;
  }
  s->cr();

  // and doubles (evens only)
  for (j = 0; j < 32; ) {
    jdouble val = d[j];
    int last = j;
    for ( ; last + 1 < 32; ++last) {
      char b1[1024], b2[1024];
      sprintf(b1, "%f", val);
      sprintf(b2, "%f", d[last + 1]);
      if (strcmp(b1, b2))
        break;
    }
    s->print("d%d", 2 * j);
    if (j != last) s->print(" - d%d", last);
    s->print(" = %f", val);
    s->fill_to(30);
    s->print("(0x%x)", *(int*)&val);
    s->fill_to(42);
    s->print_cr("(0x%x)", *(1 + (int*)&val));
    j = last + 1;
  }
  s->cr();
}

void RegistersForDebugging::save_registers(MacroAssembler* a) {
  a->sub(FP, align_up(sizeof(RegistersForDebugging), sizeof(jdouble)) - STACK_BIAS, O0);
  a->flushw();
  int i;
  for (i = 0; i < 8; ++i) {
    a->ld_ptr(as_iRegister(i)->address_in_saved_window().after_save(), L1);  a->st_ptr(L1, O0, i_offset(i));
    a->ld_ptr(as_lRegister(i)->address_in_saved_window().after_save(), L1);  a->st_ptr(L1, O0, l_offset(i));
    a->st_ptr(as_oRegister(i)->after_save(), O0, o_offset(i));
    a->st_ptr(as_gRegister(i)->after_save(), O0, g_offset(i));
  }
  for (i = 0; i < 32; ++i) {
    a->stf(FloatRegisterImpl::S, as_FloatRegister(i), O0, f_offset(i));
  }
  for (i = 0; i < 64; i += 2) {
    a->stf(FloatRegisterImpl::D, as_FloatRegister(i), O0, d_offset(i));
  }
}

void RegistersForDebugging::restore_registers(MacroAssembler* a, Register r) {
  for (int i = 1; i < 8; ++i) {
    a->ld_ptr(r, g_offset(i), as_gRegister(i));
  }
  for (int j = 0; j < 32; ++j) {
    a->ldf(FloatRegisterImpl::S, O0, f_offset(j), as_FloatRegister(j));
  }
  for (int k = 0; k < 64; k += 2) {
    a->ldf(FloatRegisterImpl::D, O0, d_offset(k), as_FloatRegister(k));
  }
}


// pushes double TOS element of FPU stack on CPU stack; pops from FPU stack
void MacroAssembler::push_fTOS() {
  // %%%%%% need to implement this
}

// pops double TOS element from CPU stack and pushes on FPU stack
void MacroAssembler::pop_fTOS() {
  // %%%%%% need to implement this
}
void MacroAssembler::empty_FPU_stack() {
  // %%%%%% need to implement this
}

void MacroAssembler::_verify_oop(Register reg, const char* msg, const char* file, int line) {
  // plausibility check for oops
  if (!VerifyOops) return;

  if (reg == G0) return; // always NULL, which is always an oop

  BLOCK_COMMENT("verify_oop {");
  char buffer[64];
#ifdef COMPILER1
  if (CommentedAssembly) {
    snprintf(buffer, sizeof(buffer), "verify_oop at %d", offset());
    block_comment(buffer);
  }
#endif

  const char* real_msg = NULL;
  {
    ResourceMark rm;
    stringStream ss;
    ss.print("%s at offset %d (%s:%d)", msg, offset(), file, line);
    real_msg = code_string(ss.as_string());
  }

  // Call indirectly to solve generation ordering problem
  AddressLiteral a(StubRoutines::verify_oop_subroutine_entry_address());

  // Make some space on stack above the current register window.
  // Enough to hold 8 64-bit registers.
  add(SP, -8*8, SP);

  // Save some 64-bit registers; a normal 'save' chops the heads off
  // of 64-bit longs in the 32-bit build.
  stx(O0, SP, frame::register_save_words*wordSize+STACK_BIAS+0*8);
  stx(O1, SP, frame::register_save_words*wordSize+STACK_BIAS+1*8);
  mov(reg, O0); // Move arg into O0; arg might be in O7 which is about to be crushed
  stx(O7, SP, frame::register_save_words*wordSize+STACK_BIAS+7*8);

  // Size of set() should stay the same
  patchable_set((intptr_t)real_msg, O1);
  // Load address to call to into O7
  load_ptr_contents(a, O7);
  // Register call to verify_oop_subroutine
  callr(O7, G0);
  delayed()->nop();
  // recover frame size
  add(SP, 8*8, SP);
  BLOCK_COMMENT("} verify_oop");
}

void MacroAssembler::_verify_oop_addr(Address addr, const char* msg, const char* file, int line) {
  // plausibility check for oops
  if (!VerifyOops) return;

  const char* real_msg = NULL;
  {
    ResourceMark rm;
    stringStream ss;
    ss.print("%s at SP+%d (%s:%d)", msg, addr.disp(), file, line);
    real_msg = code_string(ss.as_string());
  }

  // Call indirectly to solve generation ordering problem
  AddressLiteral a(StubRoutines::verify_oop_subroutine_entry_address());

  // Make some space on stack above the current register window.
  // Enough to hold 8 64-bit registers.
  add(SP, -8*8, SP);

  // Save some 64-bit registers; a normal 'save' chops the heads off
  // of 64-bit longs in the 32-bit build.
  stx(O0, SP, frame::register_save_words*wordSize+STACK_BIAS+0*8);
  stx(O1, SP, frame::register_save_words*wordSize+STACK_BIAS+1*8);
  ld_ptr(addr.base(), addr.disp() + 8*8, O0); // Load arg into O0; arg might be in O7 which is about to be crushed
  stx(O7, SP, frame::register_save_words*wordSize+STACK_BIAS+7*8);

  // Size of set() should stay the same
  patchable_set((intptr_t)real_msg, O1);
  // Load address to call to into O7
  load_ptr_contents(a, O7);
  // Register call to verify_oop_subroutine
  callr(O7, G0);
  delayed()->nop();
  // recover frame size
  add(SP, 8*8, SP);
}
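// Layout note (informal): the 64 bytes reserved above are addressed as
// slot n at [SP + frame::register_save_words*wordSize + STACK_BIAS + n*8],
// i.e. just past the mandatory register-save area of the current window;
// O0, O1 and O7 occupy slots 0, 1 and 7, and the subroutine below uses
// slots 2..5 for O2..O5.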
// side-door communication with signalHandler in os_solaris.cpp
address MacroAssembler::_verify_oop_implicit_branch[3] = { NULL };

// This macro is expanded just once; it creates shared code. Contract:
// receives an oop in O0. Must restore O0 & O7 from TLS. Must not smash ANY
// registers, including flags. May not use a register 'save', as this blows
// the high bits of the O-regs if they contain Long values. Acts as a 'leaf'
// call.
void MacroAssembler::verify_oop_subroutine() {
  // Leaf call; no frame.
  Label succeed, fail, null_or_fail;

  // O0 and O7 were saved already (O0 in O0's TLS home, O7 in O5's TLS home).
  // O0 is now the oop to be checked. O7 is the return address.
  Register O0_obj = O0;

  // Save some more registers for temps.
  stx(O2, SP, frame::register_save_words*wordSize+STACK_BIAS+2*8);
  stx(O3, SP, frame::register_save_words*wordSize+STACK_BIAS+3*8);
  stx(O4, SP, frame::register_save_words*wordSize+STACK_BIAS+4*8);
  stx(O5, SP, frame::register_save_words*wordSize+STACK_BIAS+5*8);

  // Save flags
  Register O5_save_flags = O5;
  rdccr(O5_save_flags);

  { // count number of verifies
    Register O2_adr   = O2;
    Register O3_accum = O3;
    inc_counter(StubRoutines::verify_oop_count_addr(), O2_adr, O3_accum);
  }

  Register O2_mask = O2;
  Register O3_bits = O3;
  Register O4_temp = O4;

  // mark lower end of faulting range
  assert(_verify_oop_implicit_branch[0] == NULL, "set once");
  _verify_oop_implicit_branch[0] = pc();

  // We can't check the mark oop because it could be in the process of
  // locking or unlocking while this is running.
  set(Universe::verify_oop_mask(), O2_mask);
  set(Universe::verify_oop_bits(), O3_bits);

  // assert((obj & oop_mask) == oop_bits);
  and3(O0_obj, O2_mask, O4_temp);
  cmp_and_brx_short(O4_temp, O3_bits, notEqual, pn, null_or_fail);

  if ((NULL_WORD & Universe::verify_oop_mask()) == Universe::verify_oop_bits()) {
    // the null_or_fail case is useless; must test for null separately
    br_null_short(O0_obj, pn, succeed);
  }

  // Check the Klass* of this object for being in the right area of memory.
  // Cannot do the load in the delay slot above in case O0 is null
  load_klass(O0_obj, O0_obj);
  // assert(klass != NULL)
  br_null_short(O0_obj, pn, fail);

  wrccr(O5_save_flags); // Restore CCR's

  // mark upper end of faulting range
  _verify_oop_implicit_branch[1] = pc();

  //-----------------------
  // all tests pass
  bind(succeed);

  // Restore prior 64-bit registers
  ldx(SP, frame::register_save_words*wordSize+STACK_BIAS+0*8, O0);
  ldx(SP, frame::register_save_words*wordSize+STACK_BIAS+1*8, O1);
  ldx(SP, frame::register_save_words*wordSize+STACK_BIAS+2*8, O2);
  ldx(SP, frame::register_save_words*wordSize+STACK_BIAS+3*8, O3);
  ldx(SP, frame::register_save_words*wordSize+STACK_BIAS+4*8, O4);
  ldx(SP, frame::register_save_words*wordSize+STACK_BIAS+5*8, O5);

  retl(); // Leaf return; restore prior O7 in delay slot
  delayed()->ldx(SP, frame::register_save_words*wordSize+STACK_BIAS+7*8, O7);

  //-----------------------
  bind(null_or_fail); // nulls are less common but OK
  br_null(O0_obj, false, pt, succeed);
  delayed()->wrccr(O5_save_flags); // Restore CCR's

  //-----------------------
  // report failure:
  bind(fail);
  _verify_oop_implicit_branch[2] = pc();

  wrccr(O5_save_flags); // Restore CCR's

  save_frame(align_up(sizeof(RegistersForDebugging) / BytesPerWord, 2));

  // stop_subroutine expects message pointer in I1.
  mov(I1, O1);

  // Restore prior 64-bit registers
  ldx(FP, frame::register_save_words*wordSize+STACK_BIAS+0*8, I0);
  ldx(FP, frame::register_save_words*wordSize+STACK_BIAS+1*8, I1);
  ldx(FP, frame::register_save_words*wordSize+STACK_BIAS+2*8, I2);
  ldx(FP, frame::register_save_words*wordSize+STACK_BIAS+3*8, I3);
  ldx(FP, frame::register_save_words*wordSize+STACK_BIAS+4*8, I4);
  ldx(FP, frame::register_save_words*wordSize+STACK_BIAS+5*8, I5);

  // factor long stop-sequence into subroutine to save space
  assert(StubRoutines::Sparc::stop_subroutine_entry_address(), "hasn't been generated yet");

  // call indirectly to solve generation ordering problem
  AddressLiteral al(StubRoutines::Sparc::stop_subroutine_entry_address());
  load_ptr_contents(al, O5);
  jmpl(O5, 0, O7);
  delayed()->nop();
}


void MacroAssembler::stop(const char* msg) {
  // save frame first to get O7 for return address
  // add one word to size in case struct is odd number of words long
  // It must be doubleword-aligned for storing doubles into it.

  save_frame(align_up(sizeof(RegistersForDebugging) / BytesPerWord, 2));

  // stop_subroutine expects message pointer in I1.
  // Size of set() should stay the same
  patchable_set((intptr_t)msg, O1);

  // factor long stop-sequence into subroutine to save space
  assert(StubRoutines::Sparc::stop_subroutine_entry_address(), "hasn't been generated yet");

  // call indirectly to solve generation ordering problem
  AddressLiteral a(StubRoutines::Sparc::stop_subroutine_entry_address());
  load_ptr_contents(a, O5);
  jmpl(O5, 0, O7);
  delayed()->nop();

  breakpoint_trap(); // make stop actually stop rather than writing
                     // unnoticeable results in the output files.

  // restore(); done in callee to save space!
}


void MacroAssembler::warn(const char* msg) {
  save_frame(align_up(sizeof(RegistersForDebugging) / BytesPerWord, 2));
  RegistersForDebugging::save_registers(this);
  mov(O0, L0);
  // Size of set() should stay the same
  patchable_set((intptr_t)msg, O0);
  call(CAST_FROM_FN_PTR(address, warning));
  delayed()->nop();
//  ret();
//  delayed()->restore();
  RegistersForDebugging::restore_registers(this, L0);
  restore();
}


void MacroAssembler::untested(const char* what) {
  // We must be able to turn interactive prompting off
  // in order to run automated test scripts on the VM
  // Use the flag ShowMessageBoxOnError

  const char* b = NULL;
  {
    ResourceMark rm;
    stringStream ss;
    ss.print("untested: %s", what);
    b = code_string(ss.as_string());
  }
  if (ShowMessageBoxOnError) { STOP(b); }
  else                       { warn(b); }
}


void MacroAssembler::unimplemented(const char* what) {
  const char* buf = NULL;
  {
    ResourceMark rm;
    stringStream ss;
    ss.print("unimplemented: %s", what);
    buf = code_string(ss.as_string());
  }
  stop(buf);
}


void MacroAssembler::stop_subroutine() {
  RegistersForDebugging::save_registers(this);

  // for the sake of the debugger, stick a PC on the current frame
  // (this assumes that the caller has performed an extra "save")
  mov(I7, L7);
  add(O7, -7 * BytesPerInt, I7);

  save_frame();    // one more save to free up another O7 register
  mov(I0, O1);     // addr of reg save area

  // We expect pointer to message in I1. Caller must set it up in O1
  mov(I1, O0);     // get msg
  call(CAST_FROM_FN_PTR(address, MacroAssembler::debug), relocInfo::runtime_call_type);
  delayed()->nop();

  restore();

  RegistersForDebugging::restore_registers(this, O0);

  save_frame(0);
  call(CAST_FROM_FN_PTR(address, breakpoint));
  delayed()->nop();
  restore();

  mov(L7, I7);
  retl();
  delayed()->restore(); // see stop above
}


void MacroAssembler::debug(char* msg, RegistersForDebugging* regs) {
  if (ShowMessageBoxOnError) {
    JavaThread* thread = JavaThread::current();
    JavaThreadState saved_state = thread->thread_state();
    thread->set_thread_state(_thread_in_vm);
    {
      // In order to get locks to work, we need to fake an in_VM state
      ttyLocker ttyl;
      ::tty->print_cr("EXECUTION STOPPED: %s\n", msg);
      if (CountBytecodes || TraceBytecodes || StopInterpreterAt) {
        BytecodeCounter::print();
      }
      if (os::message_box(msg, "Execution stopped, print registers?"))
        regs->print(::tty);
    }
    BREAKPOINT;
    ThreadStateTransition::transition(JavaThread::current(), _thread_in_vm, saved_state);
  }
  else {
    ::tty->print_cr("=============== DEBUG MESSAGE: %s ================\n", msg);
  }
  assert(false, "DEBUG MESSAGE: %s", msg);
}
void MacroAssembler::calc_mem_param_words(Register Rparam_words, Register Rresult) {
  subcc(Rparam_words, Argument::n_register_parameters, Rresult); // how many mem words?
  Label no_extras;
  br(negative, true, pt, no_extras); // if neg, clear reg
  delayed()->set(0, Rresult);        // annulled, so only if taken
  bind(no_extras);
}


void MacroAssembler::calc_frame_size(Register Rextra_words, Register Rresult) {
  add(Rextra_words, frame::memory_parameter_word_sp_offset, Rresult);
  bclr(1, Rresult);
  sll(Rresult, LogBytesPerWord, Rresult); // Rresult has total frame bytes
}


void MacroAssembler::calc_frame_size_and_save(Register Rextra_words, Register Rresult) {
  calc_frame_size(Rextra_words, Rresult);
  neg(Rresult);
  save(SP, Rresult, SP);
}


// ---------------------------------------------------------
Assembler::RCondition cond2rcond(Assembler::Condition c) {
  switch (c) {
    /*case zero: */
    case Assembler::equal:        return Assembler::rc_z;
    case Assembler::lessEqual:    return Assembler::rc_lez;
    case Assembler::less:         return Assembler::rc_lz;
    /*case notZero:*/
    case Assembler::notEqual:     return Assembler::rc_nz;
    case Assembler::greater:      return Assembler::rc_gz;
    case Assembler::greaterEqual: return Assembler::rc_gez;
  }
  ShouldNotReachHere();
  return Assembler::rc_z;
}

// compares (32 bit) register with zero and branches. NOT FOR USE WITH 64-bit POINTERS
void MacroAssembler::cmp_zero_and_br(Condition c, Register s1, Label& L, bool a, Predict p) {
  tst(s1);
  br(c, a, p, L);
}

// Compares a pointer register with zero and branches on null.
// Does a test & branch on 32-bit systems and a register-branch on 64-bit.
void MacroAssembler::br_null(Register s1, bool a, Predict p, Label& L) {
  assert_not_delayed();
  bpr(rc_z, a, p, s1, L);
}

void MacroAssembler::br_notnull(Register s1, bool a, Predict p, Label& L) {
  assert_not_delayed();
  bpr(rc_nz, a, p, s1, L);
}

// Compare registers and branch with nop in delay slot or cbcond without delay slot.

// Compare integer (32 bit) values (icc only).
void MacroAssembler::cmp_and_br_short(Register s1, Register s2, Condition c,
                                      Predict p, Label& L) {
  assert_not_delayed();
  if (use_cbcond(L)) {
    Assembler::cbcond(c, icc, s1, s2, L);
  } else {
    cmp(s1, s2);
    br(c, false, p, L);
    delayed()->nop();
  }
}
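// Note on the cbcond forms (informal): use_cbcond(L) checks that the
// target is close enough for cbcond's short word displacement, and the
// immediate variants below additionally require is_simm(simm13a, 5), i.e.
// a constant in [-16, 15], since cbcond only encodes a 5-bit immediate.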
// Compare integer (32 bit) values (icc only).
void MacroAssembler::cmp_and_br_short(Register s1, int simm13a, Condition c,
                                      Predict p, Label& L) {
  assert_not_delayed();
  if (is_simm(simm13a, 5) && use_cbcond(L)) {
    Assembler::cbcond(c, icc, s1, simm13a, L);
  } else {
    cmp(s1, simm13a);
    br(c, false, p, L);
    delayed()->nop();
  }
}

// Branch that tests xcc in LP64 and icc in !LP64
void MacroAssembler::cmp_and_brx_short(Register s1, Register s2, Condition c,
                                       Predict p, Label& L) {
  assert_not_delayed();
  if (use_cbcond(L)) {
    Assembler::cbcond(c, ptr_cc, s1, s2, L);
  } else {
    cmp(s1, s2);
    brx(c, false, p, L);
    delayed()->nop();
  }
}

// Branch that tests xcc in LP64 and icc in !LP64
void MacroAssembler::cmp_and_brx_short(Register s1, int simm13a, Condition c,
                                       Predict p, Label& L) {
  assert_not_delayed();
  if (is_simm(simm13a, 5) && use_cbcond(L)) {
    Assembler::cbcond(c, ptr_cc, s1, simm13a, L);
  } else {
    cmp(s1, simm13a);
    brx(c, false, p, L);
    delayed()->nop();
  }
}

// Short branch versions for comparing a pointer with zero.

void MacroAssembler::br_null_short(Register s1, Predict p, Label& L) {
  assert_not_delayed();
  if (use_cbcond(L)) {
    Assembler::cbcond(zero, ptr_cc, s1, 0, L);
  } else {
    br_null(s1, false, p, L);
    delayed()->nop();
  }
}

void MacroAssembler::br_notnull_short(Register s1, Predict p, Label& L) {
  assert_not_delayed();
  if (use_cbcond(L)) {
    Assembler::cbcond(notZero, ptr_cc, s1, 0, L);
  } else {
    br_notnull(s1, false, p, L);
    delayed()->nop();
  }
}

// Unconditional short branch
void MacroAssembler::ba_short(Label& L) {
  assert_not_delayed();
  if (use_cbcond(L)) {
    Assembler::cbcond(equal, icc, G0, G0, L);
  } else {
    br(always, false, pt, L);
    delayed()->nop();
  }
}

// Branch if 'icc' says zero or not (i.e. icc.z == 1|0).

void MacroAssembler::br_icc_zero(bool iszero, Predict p, Label &L) {
  assert_not_delayed();
  Condition cf = (iszero ? Assembler::zero : Assembler::notZero);
  br(cf, false, p, L);
  delayed()->nop();
}
// instruction sequences factored across compiler & interpreter


void MacroAssembler::lcmp(Register Ra_hi, Register Ra_low,
                          Register Rb_hi, Register Rb_low,
                          Register Rresult) {

  Label check_low_parts, done;

  cmp(Ra_hi, Rb_hi);               // compare hi parts
  br(equal, true, pt, check_low_parts);
  delayed()->cmp(Ra_low, Rb_low);  // test low parts

  // And, with an unsigned comparison, it does not matter if the numbers
  // are negative or not.
  // E.g., -2 cmp -1: the low parts are 0xfffffffe and 0xffffffff.
  // The second one is bigger (unsignedly).

  // Other notes: The first move in each triplet can be unconditional
  // (and therefore probably prefetchable).
  // And the equals case for the high part does not need testing,
  // since that triplet is reached only after finding the high halves differ.

  mov(-1, Rresult);
  ba(done);
  delayed()->movcc(greater, false, icc, 1, Rresult);

  bind(check_low_parts);

  mov(                               -1, Rresult);
  movcc(equal,           false, icc,  0, Rresult);
  movcc(greaterUnsigned, false, icc,  1, Rresult);

  bind(done);
}

void MacroAssembler::lneg(Register Rhi, Register Rlow) {
  subcc(G0, Rlow, Rlow);
  subc (G0, Rhi,  Rhi);
}

void MacroAssembler::lshl(Register Rin_high, Register Rin_low,
                          Register Rcount,
                          Register Rout_high, Register Rout_low,
                          Register Rtemp) {


  Register Ralt_count = Rtemp;
  Register Rxfer_bits = Rtemp;

  assert(Ralt_count != Rin_high
      && Ralt_count != Rin_low
      && Ralt_count != Rcount
      && Rxfer_bits != Rin_low
      && Rxfer_bits != Rin_high
      && Rxfer_bits != Rcount
      && Rxfer_bits != Rout_low
      && Rout_low   != Rin_high,
        "register alias checks");

  Label big_shift, done;

  // This code can be optimized to use the 64 bit shifts in V9.
  // Here we use the 32 bit shifts.

  and3(Rcount, 0x3f, Rcount);  // take least significant 6 bits
  subcc(Rcount, 31, Ralt_count);
  br(greater, true, pn, big_shift);
  delayed()->dec(Ralt_count);

  // shift < 32 bits, Ralt_count = Rcount-31

  // We get the transfer bits by shifting right by 32-count the low
  // register. This is done by shifting right by 31-count and then by one
  // more to take care of the special (rare) case where count is zero
  // (shifting by 32 would not work).

  neg(Ralt_count);

  // The order of the next two instructions is critical in the case where
  // Rin and Rout are the same and should not be reversed.

  srl(Rin_low, Ralt_count, Rxfer_bits); // shift right by 31-count
  if (Rcount != Rout_low) {
    sll(Rin_low, Rcount, Rout_low); // low half
  }
  sll(Rin_high, Rcount, Rout_high);
  if (Rcount == Rout_low) {
    sll(Rin_low, Rcount, Rout_low); // low half
  }
  srl(Rxfer_bits, 1, Rxfer_bits);  // shift right by one more
  ba(done);
  delayed()->or3(Rout_high, Rxfer_bits, Rout_high); // new hi value: or in shifted old hi part and xfer from low

  // shift >= 32 bits, Ralt_count = Rcount-32
  bind(big_shift);
  sll(Rin_low, Ralt_count, Rout_high);
  clr(Rout_low);

  bind(done);
}
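// Worked example (illustrative only): for a left shift by Rcount == 8,
// Ralt_count ends up as 31 - 8 == 23, so Rxfer_bits == (Rin_low >> 23) >> 1
// == Rin_low >> 24, exactly the 8 bits that cross into the high word:
// Rout_high == (Rin_high << 8) | (Rin_low >> 24).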
This is done by shifting left by 31-count and then by one 1765 // more to take care of the special (rare) case where count is zero 1766 // (shifting by 32 would not work). 1767 1768 neg(Ralt_count); 1769 if (Rcount != Rout_low) { 1770 srl(Rin_low, Rcount, Rout_low); 1771 } 1772 1773 // The order of the next two instructions is critical in the case where 1774 // Rin and Rout are the same and should not be reversed. 1775 1776 sll(Rin_high, Ralt_count, Rxfer_bits); // shift left by 31-count 1777 sra(Rin_high, Rcount, Rout_high ); // high half 1778 sll(Rxfer_bits, 1, Rxfer_bits); // shift left by one more 1779 if (Rcount == Rout_low) { 1780 srl(Rin_low, Rcount, Rout_low); 1781 } 1782 ba(done); 1783 delayed()->or3(Rout_low, Rxfer_bits, Rout_low); // new low value: or shifted old low part and xfer from high 1784 1785 // shift >= 32 bits, Ralt_count = Rcount-32 1786 bind(big_shift); 1787 1788 sra(Rin_high, Ralt_count, Rout_low); 1789 sra(Rin_high, 31, Rout_high); // sign into hi 1790 1791 bind( done ); 1792 } 1793 1794 1795 1796 void MacroAssembler::lushr( Register Rin_high, Register Rin_low, 1797 Register Rcount, 1798 Register Rout_high, Register Rout_low, 1799 Register Rtemp ) { 1800 1801 Register Ralt_count = Rtemp; 1802 Register Rxfer_bits = Rtemp; 1803 1804 assert( Ralt_count != Rin_high 1805 && Ralt_count != Rin_low 1806 && Ralt_count != Rcount 1807 && Rxfer_bits != Rin_low 1808 && Rxfer_bits != Rin_high 1809 && Rxfer_bits != Rcount 1810 && Rxfer_bits != Rout_high 1811 && Rout_high != Rin_low, 1812 "register alias checks"); 1813 1814 Label big_shift, done; 1815 1816 // This code can be optimized to use the 64 bit shifts in V9. 1817 // Here we use the 32 bit shifts. 1818 1819 and3( Rcount, 0x3f, Rcount); // take least significant 6 bits 1820 subcc(Rcount, 31, Ralt_count); 1821 br(greater, true, pn, big_shift); 1822 delayed()->dec(Ralt_count); 1823 1824 // shift < 32 bits, Ralt_count = Rcount-31 1825 1826 // We get the transfer bits by shifting left by 32-count the high 1827 // register. This is done by shifting left by 31-count and then by one 1828 // more to take care of the special (rare) case where count is zero 1829 // (shifting by 32 would not work). 1830 1831 neg(Ralt_count); 1832 if (Rcount != Rout_low) { 1833 srl(Rin_low, Rcount, Rout_low); 1834 } 1835 1836 // The order of the next two instructions is critical in the case where 1837 // Rin and Rout are the same and should not be reversed. 1838 1839 sll(Rin_high, Ralt_count, Rxfer_bits); // shift left by 31-count 1840 srl(Rin_high, Rcount, Rout_high ); // high half 1841 sll(Rxfer_bits, 1, Rxfer_bits); // shift left by one more 1842 if (Rcount == Rout_low) { 1843 srl(Rin_low, Rcount, Rout_low); 1844 } 1845 ba(done); 1846 delayed()->or3(Rout_low, Rxfer_bits, Rout_low); // new low value: or shifted old low part and xfer from high 1847 1848 // shift >= 32 bits, Ralt_count = Rcount-32 1849 bind(big_shift); 1850 1851 srl(Rin_high, Ralt_count, Rout_low); 1852 clr(Rout_high); 1853 1854 bind( done ); 1855 } 1856 1857 void MacroAssembler::lcmp( Register Ra, Register Rb, Register Rresult) { 1858 cmp(Ra, Rb); 1859 mov(-1, Rresult); 1860 movcc(equal, false, xcc, 0, Rresult); 1861 movcc(greater, false, xcc, 1, Rresult); 1862 } 1863 1864 1865 void MacroAssembler::load_sized_value(Address src, Register dst, size_t size_in_bytes, bool is_signed) { 1866 switch (size_in_bytes) { 1867 case 8: ld_long(src, dst); break; 1868 case 4: ld( src, dst); break; 1869 case 2: is_signed ? ldsh(src, dst) : lduh(src, dst); break; 1870 case 1: is_signed ? 
ldsb(src, dst) : ldub(src, dst); break; 1871 default: ShouldNotReachHere(); 1872 } 1873 } 1874 1875 void MacroAssembler::store_sized_value(Register src, Address dst, size_t size_in_bytes) { 1876 switch (size_in_bytes) { 1877 case 8: st_long(src, dst); break; 1878 case 4: st( src, dst); break; 1879 case 2: sth( src, dst); break; 1880 case 1: stb( src, dst); break; 1881 default: ShouldNotReachHere(); 1882 } 1883 } 1884 1885 1886 void MacroAssembler::float_cmp( bool is_float, int unordered_result, 1887 FloatRegister Fa, FloatRegister Fb, 1888 Register Rresult) { 1889 if (is_float) { 1890 fcmp(FloatRegisterImpl::S, fcc0, Fa, Fb); 1891 } else { 1892 fcmp(FloatRegisterImpl::D, fcc0, Fa, Fb); 1893 } 1894 1895 if (unordered_result == 1) { 1896 mov( -1, Rresult); 1897 movcc(f_equal, true, fcc0, 0, Rresult); 1898 movcc(f_unorderedOrGreater, true, fcc0, 1, Rresult); 1899 } else { 1900 mov( -1, Rresult); 1901 movcc(f_equal, true, fcc0, 0, Rresult); 1902 movcc(f_greater, true, fcc0, 1, Rresult); 1903 } 1904 } 1905 1906 1907 void MacroAssembler::save_all_globals_into_locals() { 1908 mov(G1,L1); 1909 mov(G2,L2); 1910 mov(G3,L3); 1911 mov(G4,L4); 1912 mov(G5,L5); 1913 mov(G6,L6); 1914 mov(G7,L7); 1915 } 1916 1917 void MacroAssembler::restore_globals_from_locals() { 1918 mov(L1,G1); 1919 mov(L2,G2); 1920 mov(L3,G3); 1921 mov(L4,G4); 1922 mov(L5,G5); 1923 mov(L6,G6); 1924 mov(L7,G7); 1925 } 1926 1927 RegisterOrConstant MacroAssembler::delayed_value_impl(intptr_t* delayed_value_addr, 1928 Register tmp, 1929 int offset) { 1930 intptr_t value = *delayed_value_addr; 1931 if (value != 0) 1932 return RegisterOrConstant(value + offset); 1933 1934 // load indirectly to solve generation ordering problem 1935 AddressLiteral a(delayed_value_addr); 1936 load_ptr_contents(a, tmp); 1937 1938 #ifdef ASSERT 1939 tst(tmp); 1940 breakpoint_trap(zero, xcc); 1941 #endif 1942 1943 if (offset != 0) 1944 add(tmp, offset, tmp); 1945 1946 return RegisterOrConstant(tmp); 1947 } 1948 1949 1950 RegisterOrConstant MacroAssembler::regcon_andn_ptr(RegisterOrConstant s1, RegisterOrConstant s2, RegisterOrConstant d, Register temp) { 1951 assert(d.register_or_noreg() != G0, "lost side effect"); 1952 if ((s2.is_constant() && s2.as_constant() == 0) || 1953 (s2.is_register() && s2.as_register() == G0)) { 1954 // Do nothing, just move value. 1955 if (s1.is_register()) { 1956 if (d.is_constant()) d = temp; 1957 mov(s1.as_register(), d.as_register()); 1958 return d; 1959 } else { 1960 return s1; 1961 } 1962 } 1963 1964 if (s1.is_register()) { 1965 assert_different_registers(s1.as_register(), temp); 1966 if (d.is_constant()) d = temp; 1967 andn(s1.as_register(), ensure_simm13_or_reg(s2, temp), d.as_register()); 1968 return d; 1969 } else { 1970 if (s2.is_register()) { 1971 assert_different_registers(s2.as_register(), temp); 1972 if (d.is_constant()) d = temp; 1973 set(s1.as_constant(), temp); 1974 andn(temp, s2.as_register(), d.as_register()); 1975 return d; 1976 } else { 1977 intptr_t res = s1.as_constant() & ~s2.as_constant(); 1978 return res; 1979 } 1980 } 1981 } 1982 1983 RegisterOrConstant MacroAssembler::regcon_inc_ptr(RegisterOrConstant s1, RegisterOrConstant s2, RegisterOrConstant d, Register temp) { 1984 assert(d.register_or_noreg() != G0, "lost side effect"); 1985 if ((s2.is_constant() && s2.as_constant() == 0) || 1986 (s2.is_register() && s2.as_register() == G0)) { 1987 // Do nothing, just move value. 
1988 if (s1.is_register()) { 1989 if (d.is_constant()) d = temp; 1990 mov(s1.as_register(), d.as_register()); 1991 return d; 1992 } else { 1993 return s1; 1994 } 1995 } 1996 1997 if (s1.is_register()) { 1998 assert_different_registers(s1.as_register(), temp); 1999 if (d.is_constant()) d = temp; 2000 add(s1.as_register(), ensure_simm13_or_reg(s2, temp), d.as_register()); 2001 return d; 2002 } else { 2003 if (s2.is_register()) { 2004 assert_different_registers(s2.as_register(), temp); 2005 if (d.is_constant()) d = temp; 2006 add(s2.as_register(), ensure_simm13_or_reg(s1, temp), d.as_register()); 2007 return d; 2008 } else { 2009 intptr_t res = s1.as_constant() + s2.as_constant(); 2010 return res; 2011 } 2012 } 2013 } 2014 2015 RegisterOrConstant MacroAssembler::regcon_sll_ptr(RegisterOrConstant s1, RegisterOrConstant s2, RegisterOrConstant d, Register temp) { 2016 assert(d.register_or_noreg() != G0, "lost side effect"); 2017 if (!is_simm13(s2.constant_or_zero())) 2018 s2 = (s2.as_constant() & 0xFF); 2019 if ((s2.is_constant() && s2.as_constant() == 0) || 2020 (s2.is_register() && s2.as_register() == G0)) { 2021 // Do nothing, just move value. 2022 if (s1.is_register()) { 2023 if (d.is_constant()) d = temp; 2024 mov(s1.as_register(), d.as_register()); 2025 return d; 2026 } else { 2027 return s1; 2028 } 2029 } 2030 2031 if (s1.is_register()) { 2032 assert_different_registers(s1.as_register(), temp); 2033 if (d.is_constant()) d = temp; 2034 sll_ptr(s1.as_register(), ensure_simm13_or_reg(s2, temp), d.as_register()); 2035 return d; 2036 } else { 2037 if (s2.is_register()) { 2038 assert_different_registers(s2.as_register(), temp); 2039 if (d.is_constant()) d = temp; 2040 set(s1.as_constant(), temp); 2041 sll_ptr(temp, s2.as_register(), d.as_register()); 2042 return d; 2043 } else { 2044 intptr_t res = s1.as_constant() << s2.as_constant(); 2045 return res; 2046 } 2047 } 2048 } 2049 2050 2051 // Look up the method for a megamorphic invokeinterface call. 2052 // The target method is determined by <intf_klass, itable_index>. 2053 // The receiver klass is in recv_klass. 2054 // On success, the result will be in method_result, and execution falls through. 2055 // On failure, execution transfers to the given label. 2056 void MacroAssembler::lookup_interface_method(Register recv_klass, 2057 Register intf_klass, 2058 RegisterOrConstant itable_index, 2059 Register method_result, 2060 Register scan_temp, 2061 Register sethi_temp, 2062 Label& L_no_such_interface, 2063 bool return_method) { 2064 assert_different_registers(recv_klass, intf_klass, method_result, scan_temp); 2065 assert(!return_method || itable_index.is_constant() || itable_index.as_register() == method_result, 2066 "caller must use same register for non-constant itable index as for method"); 2067 2068 Label L_no_such_interface_restore; 2069 bool did_save = false; 2070 if (scan_temp == noreg || sethi_temp == noreg) { 2071 Register recv_2 = recv_klass->is_global() ? recv_klass : L0; 2072 Register intf_2 = intf_klass->is_global() ? 
intf_klass : L1; 2073 assert(method_result->is_global(), "must be able to return value"); 2074 scan_temp = L2; 2075 sethi_temp = L3; 2076 save_frame_and_mov(0, recv_klass, recv_2, intf_klass, intf_2); 2077 recv_klass = recv_2; 2078 intf_klass = intf_2; 2079 did_save = true; 2080 } 2081 2082 // Compute start of first itableOffsetEntry (which is at the end of the vtable) 2083 int vtable_base = in_bytes(Klass::vtable_start_offset()); 2084 int scan_step = itableOffsetEntry::size() * wordSize; 2085 int vte_size = vtableEntry::size_in_bytes(); 2086 2087 lduw(recv_klass, in_bytes(Klass::vtable_length_offset()), scan_temp); 2088 // %%% We should store the aligned, prescaled offset in the klassoop. 2089 // Then the next several instructions would fold away. 2090 2091 int itb_offset = vtable_base; 2092 int itb_scale = exact_log2(vtableEntry::size_in_bytes()); 2093 sll(scan_temp, itb_scale, scan_temp); 2094 add(scan_temp, itb_offset, scan_temp); 2095 add(recv_klass, scan_temp, scan_temp); 2096 2097 if (return_method) { 2098 // Adjust recv_klass by scaled itable_index, so we can free itable_index. 2099 RegisterOrConstant itable_offset = itable_index; 2100 itable_offset = regcon_sll_ptr(itable_index, exact_log2(itableMethodEntry::size() * wordSize), itable_offset); 2101 itable_offset = regcon_inc_ptr(itable_offset, itableMethodEntry::method_offset_in_bytes(), itable_offset); 2102 add(recv_klass, ensure_simm13_or_reg(itable_offset, sethi_temp), recv_klass); 2103 } 2104 2105 // for (scan = klass->itable(); scan->interface() != NULL; scan += scan_step) { 2106 // if (scan->interface() == intf) { 2107 // result = (klass + scan->offset() + itable_index); 2108 // } 2109 // } 2110 Label L_search, L_found_method; 2111 2112 for (int peel = 1; peel >= 0; peel--) { 2113 // %%%% Could load both offset and interface in one ldx, if they were 2114 // in the opposite order. This would save a load. 2115 ld_ptr(scan_temp, itableOffsetEntry::interface_offset_in_bytes(), method_result); 2116 2117 // Check that this entry is non-null. A null entry means that 2118 // the receiver class doesn't implement the interface, and wasn't the 2119 // same as when the caller was compiled. 2120 bpr(Assembler::rc_z, false, Assembler::pn, method_result, did_save ? L_no_such_interface_restore : L_no_such_interface); 2121 delayed()->cmp(method_result, intf_klass); 2122 2123 if (peel) { 2124 brx(Assembler::equal, false, Assembler::pt, L_found_method); 2125 } else { 2126 brx(Assembler::notEqual, false, Assembler::pn, L_search); 2127 // (invert the test to fall through to found_method...) 2128 } 2129 delayed()->add(scan_temp, scan_step, scan_temp); 2130 2131 if (!peel) break; 2132 2133 bind(L_search); 2134 } 2135 2136 bind(L_found_method); 2137 2138 if (return_method) { 2139 // Got a hit. 
2140 int ito_offset = itableOffsetEntry::offset_offset_in_bytes(); 2141 // scan_temp[-scan_step] points to the vtable offset we need 2142 ito_offset -= scan_step; 2143 lduw(scan_temp, ito_offset, scan_temp); 2144 ld_ptr(recv_klass, scan_temp, method_result); 2145 } 2146 2147 if (did_save) { 2148 Label L_done; 2149 ba(L_done); 2150 delayed()->restore(); 2151 2152 bind(L_no_such_interface_restore); 2153 ba(L_no_such_interface); 2154 delayed()->restore(); 2155 2156 bind(L_done); 2157 } 2158 } 2159 2160 2161 // virtual method calling 2162 void MacroAssembler::lookup_virtual_method(Register recv_klass, 2163 RegisterOrConstant vtable_index, 2164 Register method_result) { 2165 assert_different_registers(recv_klass, method_result, vtable_index.register_or_noreg()); 2166 Register sethi_temp = method_result; 2167 const int base = in_bytes(Klass::vtable_start_offset()) + 2168 // method pointer offset within the vtable entry: 2169 vtableEntry::method_offset_in_bytes(); 2170 RegisterOrConstant vtable_offset = vtable_index; 2171 // Each of the following three lines potentially generates an instruction. 2172 // But the total number of address formation instructions will always be 2173 // at most two, and will often be zero. In any case, it will be optimal. 2174 // If vtable_index is a register, we will have (sll_ptr N,x; inc_ptr B,x; ld_ptr k,x). 2175 // If vtable_index is a constant, we will have at most (set B+X<<N,t; ld_ptr k,t). 2176 vtable_offset = regcon_sll_ptr(vtable_index, exact_log2(vtableEntry::size_in_bytes()), vtable_offset); 2177 vtable_offset = regcon_inc_ptr(vtable_offset, base, vtable_offset, sethi_temp); 2178 Address vtable_entry_addr(recv_klass, ensure_simm13_or_reg(vtable_offset, sethi_temp)); 2179 ld_ptr(vtable_entry_addr, method_result); 2180 } 2181 2182 2183 void MacroAssembler::check_klass_subtype(Register sub_klass, 2184 Register super_klass, 2185 Register temp_reg, 2186 Register temp2_reg, 2187 Label& L_success) { 2188 Register sub_2 = sub_klass; 2189 Register sup_2 = super_klass; 2190 if (!sub_2->is_global()) sub_2 = L0; 2191 if (!sup_2->is_global()) sup_2 = L1; 2192 bool did_save = false; 2193 if (temp_reg == noreg || temp2_reg == noreg) { 2194 temp_reg = L2; 2195 temp2_reg = L3; 2196 save_frame_and_mov(0, sub_klass, sub_2, super_klass, sup_2); 2197 sub_klass = sub_2; 2198 super_klass = sup_2; 2199 did_save = true; 2200 } 2201 Label L_failure, L_pop_to_failure, L_pop_to_success; 2202 check_klass_subtype_fast_path(sub_klass, super_klass, 2203 temp_reg, temp2_reg, 2204 (did_save ? &L_pop_to_success : &L_success), 2205 (did_save ? 
&L_pop_to_failure : &L_failure), NULL); 2206 2207 if (!did_save) 2208 save_frame_and_mov(0, sub_klass, sub_2, super_klass, sup_2); 2209 check_klass_subtype_slow_path(sub_2, sup_2, 2210 L2, L3, L4, L5, 2211 NULL, &L_pop_to_failure); 2212 2213 // on success: 2214 bind(L_pop_to_success); 2215 restore(); 2216 ba_short(L_success); 2217 2218 // on failure: 2219 bind(L_pop_to_failure); 2220 restore(); 2221 bind(L_failure); 2222 } 2223 2224 2225 void MacroAssembler::check_klass_subtype_fast_path(Register sub_klass, 2226 Register super_klass, 2227 Register temp_reg, 2228 Register temp2_reg, 2229 Label* L_success, 2230 Label* L_failure, 2231 Label* L_slow_path, 2232 RegisterOrConstant super_check_offset) { 2233 int sc_offset = in_bytes(Klass::secondary_super_cache_offset()); 2234 int sco_offset = in_bytes(Klass::super_check_offset_offset()); 2235 2236 bool must_load_sco = (super_check_offset.constant_or_zero() == -1); 2237 bool need_slow_path = (must_load_sco || 2238 super_check_offset.constant_or_zero() == sco_offset); 2239 2240 assert_different_registers(sub_klass, super_klass, temp_reg); 2241 if (super_check_offset.is_register()) { 2242 assert_different_registers(sub_klass, super_klass, temp_reg, 2243 super_check_offset.as_register()); 2244 } else if (must_load_sco) { 2245 assert(temp2_reg != noreg, "supply either a temp or a register offset"); 2246 } 2247 2248 Label L_fallthrough; 2249 int label_nulls = 0; 2250 if (L_success == NULL) { L_success = &L_fallthrough; label_nulls++; } 2251 if (L_failure == NULL) { L_failure = &L_fallthrough; label_nulls++; } 2252 if (L_slow_path == NULL) { L_slow_path = &L_fallthrough; label_nulls++; } 2253 assert(label_nulls <= 1 || 2254 (L_slow_path == &L_fallthrough && label_nulls <= 2 && !need_slow_path), 2255 "at most one NULL in the batch, usually"); 2256 2257 // If the pointers are equal, we are done (e.g., String[] elements). 2258 // This self-check enables sharing of secondary supertype arrays among 2259 // non-primary types such as array-of-interface. Otherwise, each such 2260 // type would need its own customized SSA. 2261 // We move this check to the front of the fast path because many 2262 // type checks are in fact trivially successful in this manner, 2263 // so we get a nicely predicted branch right at the start of the check. 2264 cmp(super_klass, sub_klass); 2265 brx(Assembler::equal, false, Assembler::pn, *L_success); 2266 delayed()->nop(); 2267 2268 // Check the supertype display: 2269 if (must_load_sco) { 2270 // The super check offset is always positive... 2271 lduw(super_klass, sco_offset, temp2_reg); 2272 super_check_offset = RegisterOrConstant(temp2_reg); 2273 // super_check_offset is register. 2274 assert_different_registers(sub_klass, super_klass, temp_reg, super_check_offset.as_register()); 2275 } 2276 ld_ptr(sub_klass, super_check_offset, temp_reg); 2277 cmp(super_klass, temp_reg); 2278 2279 // This check has worked decisively for primary supers. 2280 // Secondary supers are sought in the super_cache ('super_cache_addr'). 2281 // (Secondary supers are interfaces and very deeply nested subtypes.) 2282 // This works in the same check above because of a tricky aliasing 2283 // between the super_cache and the primary super display elements. 2284 // (The 'super_check_addr' can address either, as the case requires.) 2285 // Note that the cache is updated below if it does not help us find 2286 // what we need immediately. 2287 // So if it was a primary super, we can just fail immediately. 
2288 // Otherwise, it's the slow path for us (no success at this point). 2289 2290 // Hacked ba(), which may only be used just before L_fallthrough. 2291 #define FINAL_JUMP(label) \ 2292 if (&(label) != &L_fallthrough) { \ 2293 ba(label); delayed()->nop(); \ 2294 } 2295 2296 if (super_check_offset.is_register()) { 2297 brx(Assembler::equal, false, Assembler::pn, *L_success); 2298 delayed()->cmp(super_check_offset.as_register(), sc_offset); 2299 2300 if (L_failure == &L_fallthrough) { 2301 brx(Assembler::equal, false, Assembler::pt, *L_slow_path); 2302 delayed()->nop(); 2303 } else { 2304 brx(Assembler::notEqual, false, Assembler::pn, *L_failure); 2305 delayed()->nop(); 2306 FINAL_JUMP(*L_slow_path); 2307 } 2308 } else if (super_check_offset.as_constant() == sc_offset) { 2309 // Need a slow path; fast failure is impossible. 2310 if (L_slow_path == &L_fallthrough) { 2311 brx(Assembler::equal, false, Assembler::pt, *L_success); 2312 delayed()->nop(); 2313 } else { 2314 brx(Assembler::notEqual, false, Assembler::pn, *L_slow_path); 2315 delayed()->nop(); 2316 FINAL_JUMP(*L_success); 2317 } 2318 } else { 2319 // No slow path; it's a fast decision. 2320 if (L_failure == &L_fallthrough) { 2321 brx(Assembler::equal, false, Assembler::pt, *L_success); 2322 delayed()->nop(); 2323 } else { 2324 brx(Assembler::notEqual, false, Assembler::pn, *L_failure); 2325 delayed()->nop(); 2326 FINAL_JUMP(*L_success); 2327 } 2328 } 2329 2330 bind(L_fallthrough); 2331 2332 #undef FINAL_JUMP 2333 } 2334 2335 2336 void MacroAssembler::check_klass_subtype_slow_path(Register sub_klass, 2337 Register super_klass, 2338 Register count_temp, 2339 Register scan_temp, 2340 Register scratch_reg, 2341 Register coop_reg, 2342 Label* L_success, 2343 Label* L_failure) { 2344 assert_different_registers(sub_klass, super_klass, 2345 count_temp, scan_temp, scratch_reg, coop_reg); 2346 2347 Label L_fallthrough, L_loop; 2348 int label_nulls = 0; 2349 if (L_success == NULL) { L_success = &L_fallthrough; label_nulls++; } 2350 if (L_failure == NULL) { L_failure = &L_fallthrough; label_nulls++; } 2351 assert(label_nulls <= 1, "at most one NULL in the batch"); 2352 2353 // a couple of useful fields in sub_klass: 2354 int ss_offset = in_bytes(Klass::secondary_supers_offset()); 2355 int sc_offset = in_bytes(Klass::secondary_super_cache_offset()); 2356 2357 // Do a linear scan of the secondary super-klass chain. 2358 // This code is rarely used, so simplicity is a virtue here. 2359 2360 #ifndef PRODUCT 2361 int* pst_counter = &SharedRuntime::_partial_subtype_ctr; 2362 inc_counter((address) pst_counter, count_temp, scan_temp); 2363 #endif 2364 2365 // We will consult the secondary-super array. 2366 ld_ptr(sub_klass, ss_offset, scan_temp); 2367 2368 Register search_key = super_klass; 2369 2370 // Load the array length. (Positive movl does right thing on LP64.) 2371 lduw(scan_temp, Array<Klass*>::length_offset_in_bytes(), count_temp); 2372 2373 // Check for empty secondary super list 2374 tst(count_temp); 2375 2376 // In the array of super classes elements are pointer sized. 2377 int element_size = wordSize; 2378 2379 // Top of search loop 2380 bind(L_loop); 2381 br(Assembler::equal, false, Assembler::pn, *L_failure); 2382 delayed()->add(scan_temp, element_size, scan_temp); 2383 2384 // Skip the array header in all array accesses. 
2385 int elem_offset = Array<Klass*>::base_offset_in_bytes(); 2386 elem_offset -= element_size; // the scan pointer was pre-incremented also 2387 2388 // Load next super to check 2389 ld_ptr( scan_temp, elem_offset, scratch_reg ); 2390 2391 // Look for Rsuper_klass on Rsub_klass's secondary super-class-overflow list 2392 cmp(scratch_reg, search_key); 2393 2394 // A miss means we are NOT a subtype and need to keep looping 2395 brx(Assembler::notEqual, false, Assembler::pn, L_loop); 2396 delayed()->deccc(count_temp); // decrement trip counter in delay slot 2397 2398 // Success. Cache the super we found and proceed in triumph. 2399 st_ptr(super_klass, sub_klass, sc_offset); 2400 2401 if (L_success != &L_fallthrough) { 2402 ba(*L_success); 2403 delayed()->nop(); 2404 } 2405 2406 bind(L_fallthrough); 2407 } 2408 2409 2410 RegisterOrConstant MacroAssembler::argument_offset(RegisterOrConstant arg_slot, 2411 Register temp_reg, 2412 int extra_slot_offset) { 2413 // cf. TemplateTable::prepare_invoke(), if (load_receiver). 2414 int stackElementSize = Interpreter::stackElementSize; 2415 int offset = extra_slot_offset * stackElementSize; 2416 if (arg_slot.is_constant()) { 2417 offset += arg_slot.as_constant() * stackElementSize; 2418 return offset; 2419 } else { 2420 assert(temp_reg != noreg, "must specify"); 2421 sll_ptr(arg_slot.as_register(), exact_log2(stackElementSize), temp_reg); 2422 if (offset != 0) 2423 add(temp_reg, offset, temp_reg); 2424 return temp_reg; 2425 } 2426 } 2427 2428 2429 Address MacroAssembler::argument_address(RegisterOrConstant arg_slot, 2430 Register temp_reg, 2431 int extra_slot_offset) { 2432 return Address(Gargs, argument_offset(arg_slot, temp_reg, extra_slot_offset)); 2433 } 2434 2435 2436 void MacroAssembler::biased_locking_enter(Register obj_reg, Register mark_reg, 2437 Register temp_reg, 2438 Label& done, Label* slow_case, 2439 BiasedLockingCounters* counters) { 2440 assert(UseBiasedLocking, "why call this otherwise?"); 2441 2442 if (PrintBiasedLockingStatistics) { 2443 assert_different_registers(obj_reg, mark_reg, temp_reg, O7); 2444 if (counters == NULL) 2445 counters = BiasedLocking::counters(); 2446 } 2447 2448 Label cas_label; 2449 2450 // Biased locking 2451 // See whether the lock is currently biased toward our thread and 2452 // whether the epoch is still valid 2453 // Note that the runtime guarantees sufficient alignment of JavaThread 2454 // pointers to allow age to be placed into low bits 2455 assert(markWord::age_shift == markWord::lock_bits + markWord::biased_lock_bits, "biased locking makes assumptions about bit layout"); 2456 and3(mark_reg, markWord::biased_lock_mask_in_place, temp_reg); 2457 cmp_and_brx_short(temp_reg, markWord::biased_lock_pattern, Assembler::notEqual, Assembler::pn, cas_label); 2458 2459 load_klass(obj_reg, temp_reg); 2460 ld_ptr(Address(temp_reg, Klass::prototype_header_offset()), temp_reg); 2461 or3(G2_thread, temp_reg, temp_reg); 2462 xor3(mark_reg, temp_reg, temp_reg); 2463 andcc(temp_reg, ~((int) markWord::age_mask_in_place), temp_reg); 2464 if (counters != NULL) { 2465 cond_inc(Assembler::equal, (address) counters->biased_lock_entry_count_addr(), mark_reg, temp_reg); 2466 // Reload mark_reg as we may need it later 2467 ld_ptr(Address(obj_reg, oopDesc::mark_offset_in_bytes()), mark_reg); 2468 } 2469 brx(Assembler::equal, true, Assembler::pt, done); 2470 delayed()->nop(); 2471 2472 Label try_revoke_bias; 2473 Label try_rebias; 2474 Address mark_addr = Address(obj_reg, oopDesc::mark_offset_in_bytes()); 2475 assert(mark_addr.disp() 
== 0, "cas must take a zero displacement"); 2476 2477 // At this point we know that the header has the bias pattern and 2478 // that we are not the bias owner in the current epoch. We need to 2479 // figure out more details about the state of the header in order to 2480 // know what operations can be legally performed on the object's 2481 // header. 2482 2483 // If the low three bits in the xor result aren't clear, that means 2484 // the prototype header is no longer biased and we have to revoke 2485 // the bias on this object. 2486 btst(markWord::biased_lock_mask_in_place, temp_reg); 2487 brx(Assembler::notZero, false, Assembler::pn, try_revoke_bias); 2488 2489 // Biasing is still enabled for this data type. See whether the 2490 // epoch of the current bias is still valid, meaning that the epoch 2491 // bits of the mark word are equal to the epoch bits of the 2492 // prototype header. (Note that the prototype header's epoch bits 2493 // only change at a safepoint.) If not, attempt to rebias the object 2494 // toward the current thread. Note that we must be absolutely sure 2495 // that the current epoch is invalid in order to do this because 2496 // otherwise the manipulations it performs on the mark word are 2497 // illegal. 2498 delayed()->btst(markWord::epoch_mask_in_place, temp_reg); 2499 brx(Assembler::notZero, false, Assembler::pn, try_rebias); 2500 2501 // The epoch of the current bias is still valid but we know nothing 2502 // about the owner; it might be set or it might be clear. Try to 2503 // acquire the bias of the object using an atomic operation. If this 2504 // fails we will go in to the runtime to revoke the object's bias. 2505 // Note that we first construct the presumed unbiased header so we 2506 // don't accidentally blow away another thread's valid bias. 2507 delayed()->and3(mark_reg, 2508 markWord::biased_lock_mask_in_place | markWord::age_mask_in_place | markWord::epoch_mask_in_place, 2509 mark_reg); 2510 or3(G2_thread, mark_reg, temp_reg); 2511 cas_ptr(mark_addr.base(), mark_reg, temp_reg); 2512 // If the biasing toward our thread failed, this means that 2513 // another thread succeeded in biasing it toward itself and we 2514 // need to revoke that bias. The revocation will occur in the 2515 // interpreter runtime in the slow case. 2516 cmp(mark_reg, temp_reg); 2517 if (counters != NULL) { 2518 cond_inc(Assembler::zero, (address) counters->anonymously_biased_lock_entry_count_addr(), mark_reg, temp_reg); 2519 } 2520 if (slow_case != NULL) { 2521 brx(Assembler::notEqual, true, Assembler::pn, *slow_case); 2522 delayed()->nop(); 2523 } 2524 ba_short(done); 2525 2526 bind(try_rebias); 2527 // At this point we know the epoch has expired, meaning that the 2528 // current "bias owner", if any, is actually invalid. Under these 2529 // circumstances _only_, we are allowed to use the current header's 2530 // value as the comparison value when doing the cas to acquire the 2531 // bias in the current epoch. In other words, we allow transfer of 2532 // the bias from one thread to another directly in this situation. 2533 // 2534 // FIXME: due to a lack of registers we currently blow away the age 2535 // bits in this situation. Should attempt to preserve them. 
2536 load_klass(obj_reg, temp_reg); 2537 ld_ptr(Address(temp_reg, Klass::prototype_header_offset()), temp_reg); 2538 or3(G2_thread, temp_reg, temp_reg); 2539 cas_ptr(mark_addr.base(), mark_reg, temp_reg); 2540 // If the biasing toward our thread failed, this means that 2541 // another thread succeeded in biasing it toward itself and we 2542 // need to revoke that bias. The revocation will occur in the 2543 // interpreter runtime in the slow case. 2544 cmp(mark_reg, temp_reg); 2545 if (counters != NULL) { 2546 cond_inc(Assembler::zero, (address) counters->rebiased_lock_entry_count_addr(), mark_reg, temp_reg); 2547 } 2548 if (slow_case != NULL) { 2549 brx(Assembler::notEqual, true, Assembler::pn, *slow_case); 2550 delayed()->nop(); 2551 } 2552 ba_short(done); 2553 2554 bind(try_revoke_bias); 2555 // The prototype mark in the klass doesn't have the bias bit set any 2556 // more, indicating that objects of this data type are not supposed 2557 // to be biased any more. We are going to try to reset the mark of 2558 // this object to the prototype value and fall through to the 2559 // CAS-based locking scheme. Note that if our CAS fails, it means 2560 // that another thread raced us for the privilege of revoking the 2561 // bias of this particular object, so it's okay to continue in the 2562 // normal locking code. 2563 // 2564 // FIXME: due to a lack of registers we currently blow away the age 2565 // bits in this situation. Should attempt to preserve them. 2566 load_klass(obj_reg, temp_reg); 2567 ld_ptr(Address(temp_reg, Klass::prototype_header_offset()), temp_reg); 2568 cas_ptr(mark_addr.base(), mark_reg, temp_reg); 2569 // Fall through to the normal CAS-based lock, because no matter what 2570 // the result of the above CAS, some thread must have succeeded in 2571 // removing the bias bit from the object's header. 2572 if (counters != NULL) { 2573 cmp(mark_reg, temp_reg); 2574 cond_inc(Assembler::zero, (address) counters->revoked_lock_entry_count_addr(), mark_reg, temp_reg); 2575 } 2576 2577 bind(cas_label); 2578 } 2579 2580 void MacroAssembler::biased_locking_exit (Address mark_addr, Register temp_reg, Label& done, 2581 bool allow_delay_slot_filling) { 2582 // Check for biased locking unlock case, which is a no-op 2583 // Note: we do not have to check the thread ID for two reasons. 2584 // First, the interpreter checks for IllegalMonitorStateException at 2585 // a higher level. Second, if the bias was revoked while we held the 2586 // lock, the object could not be rebiased toward another thread, so 2587 // the bias bit would be clear. 2588 ld_ptr(mark_addr, temp_reg); 2589 and3(temp_reg, markWord::biased_lock_mask_in_place, temp_reg); 2590 cmp(temp_reg, markWord::biased_lock_pattern); 2591 brx(Assembler::equal, allow_delay_slot_filling, Assembler::pt, done); 2592 delayed(); 2593 if (!allow_delay_slot_filling) { 2594 nop(); 2595 } 2596 } 2597 2598 2599 // compiler_lock_object() and compiler_unlock_object() are direct transliterations 2600 // of i486.ad fast_lock() and fast_unlock(). See those methods for detailed comments. 2601 // The code could be tightened up considerably. 2602 // 2603 // box->dhw disposition - post-conditions at DONE_LABEL. 2604 // - Successful inflated lock: box->dhw != 0. 2605 // Any non-zero value suffices. 2606 // Consider G2_thread, rsp, boxReg, or markWord::unused_mark() 2607 // - Successful Stack-lock: box->dhw == mark. 2608 // box->dhw must contain the displaced mark word value 2609 // - Failure -- icc.ZFlag == 0 and box->dhw is undefined. 
2610 // The slow-path fast_enter() and slow_enter() operators 2611 // are responsible for setting box->dhw = NonZero (typically markWord::unused_mark()). 2612 // - Biased: box->dhw is undefined 2613 // 2614 // SPARC refworkload performance - specifically jetstream and scimark - is 2615 // extremely sensitive to the size of the code emitted by compiler_lock_object 2616 // and compiler_unlock_object. Critically, the key factor is code size, not path 2617 // length. (Simple experiments to pad CLO with unexecuted NOPs demonstrate the 2618 // effect). 2619 2620 2621 void MacroAssembler::compiler_lock_object(Register Roop, Register Rmark, 2622 Register Rbox, Register Rscratch, 2623 BiasedLockingCounters* counters, 2624 bool try_bias) { 2625 Address mark_addr(Roop, oopDesc::mark_offset_in_bytes()); 2626 2627 verify_oop(Roop); 2628 Label done ; 2629 2630 if (counters != NULL) { 2631 inc_counter((address) counters->total_entry_count_addr(), Rmark, Rscratch); 2632 } 2633 2634 // Aggressively avoid the Store-before-CAS penalty 2635 // Defer the store into box->dhw until after the CAS 2636 Label IsInflated, Recursive ; 2637 2638 // Anticipate CAS -- Avoid RTS->RTO upgrade 2639 // prefetch (mark_addr, Assembler::severalWritesAndPossiblyReads); 2640 2641 ld_ptr(mark_addr, Rmark); // fetch obj->mark 2642 // Triage: biased, stack-locked, neutral, inflated 2643 2644 if (try_bias) { 2645 biased_locking_enter(Roop, Rmark, Rscratch, done, NULL, counters); 2646 // Invariant: if control reaches this point in the emitted stream 2647 // then Rmark has not been modified. 2648 } 2649 andcc(Rmark, 2, G0); 2650 brx(Assembler::notZero, false, Assembler::pn, IsInflated); 2651 delayed()-> // Beware - dangling delay-slot 2652 2653 // Try stack-lock acquisition. 2654 // Transiently install BUSY (0) encoding in the mark word. 2655 // if the CAS of 0 into the mark was successful then we execute: 2656 // ST box->dhw = mark -- save fetched mark in on-stack basiclock box 2657 // ST obj->mark = box -- overwrite transient 0 value 2658 // This presumes TSO, of course. 2659 2660 mov(0, Rscratch); 2661 or3(Rmark, markWord::unlocked_value, Rmark); 2662 assert(mark_addr.disp() == 0, "cas must take a zero displacement"); 2663 cas_ptr(mark_addr.base(), Rmark, Rscratch); 2664 // prefetch (mark_addr, Assembler::severalWritesAndPossiblyReads); 2665 cmp(Rscratch, Rmark); 2666 brx(Assembler::notZero, false, Assembler::pn, Recursive); 2667 delayed()->st_ptr(Rmark, Rbox, BasicLock::displaced_header_offset_in_bytes()); 2668 if (counters != NULL) { 2669 cond_inc(Assembler::equal, (address) counters->fast_path_entry_count_addr(), Rmark, Rscratch); 2670 } 2671 ba(done); 2672 delayed()->st_ptr(Rbox, mark_addr); 2673 2674 bind(Recursive); 2675 // Stack-lock attempt failed - check for recursive stack-lock. 2676 // Tests show that we can remove the recursive case with no impact 2677 // on refworkload 0.83. If we need to reduce the size of the code 2678 // emitted by compiler_lock_object() the recursive case is a perfect 2679 // candidate. 2680 // 2681 // A more extreme idea is to always inflate on stack-lock recursion. 2682 // This lets us eliminate the recursive checks in compiler_lock_object 2683 // and compiler_unlock_object and the (box->dhw == 0) encoding. 2684 // A brief experiment - requiring changes to synchronizer.cpp and the 2685 // interpreter - showed a performance *increase*.
In the same experiment I eliminated 2686 // the fast-path stack-lock code from the interpreter and always passed 2687 // control to the "slow" operators in synchronizer.cpp. 2688 2689 // RScratch contains the fetched obj->mark value from the failed CAS. 2690 sub(Rscratch, STACK_BIAS, Rscratch); 2691 sub(Rscratch, SP, Rscratch); 2692 assert(os::vm_page_size() > 0xfff, "page size too small - change the constant"); 2693 andcc(Rscratch, 0xfffff003, Rscratch); 2694 if (counters != NULL) { 2695 // Accounting needs the Rscratch register 2696 st_ptr(Rscratch, Rbox, BasicLock::displaced_header_offset_in_bytes()); 2697 cond_inc(Assembler::equal, (address) counters->fast_path_entry_count_addr(), Rmark, Rscratch); 2698 ba_short(done); 2699 } else { 2700 ba(done); 2701 delayed()->st_ptr(Rscratch, Rbox, BasicLock::displaced_header_offset_in_bytes()); 2702 } 2703 2704 bind (IsInflated); 2705 2706 // Try to CAS m->owner from null to Self 2707 // Invariant: if we acquire the lock then _recursions should be 0. 2708 add(Rmark, OM_OFFSET_NO_MONITOR_VALUE_TAG(owner), Rmark); 2709 mov(G2_thread, Rscratch); 2710 cas_ptr(Rmark, G0, Rscratch); 2711 andcc(Rscratch, Rscratch, G0); // set ICCs for done: icc.zf iff success 2712 // set icc.zf : 1=success 0=failure 2713 // ST box->displaced_header = NonZero. 2714 // Any non-zero value suffices: 2715 // markWord::unused_mark(), G2_thread, RBox, RScratch, rsp, etc. 2716 st_ptr(Rbox, Rbox, BasicLock::displaced_header_offset_in_bytes()); 2717 // Intentional fall-through into done 2718 2719 bind (done); 2720 } 2721 2722 void MacroAssembler::compiler_unlock_object(Register Roop, Register Rmark, 2723 Register Rbox, Register Rscratch, 2724 bool try_bias) { 2725 Address mark_addr(Roop, oopDesc::mark_offset_in_bytes()); 2726 2727 Label done ; 2728 2729 // Beware ... If the aggregate size of the code emitted by CLO and CUO is 2730 // too large, performance rolls abruptly off a cliff. 2731 // This could be related to inlining policies, code cache management, or 2732 // I$ effects. 2733 Label LStacked ; 2734 2735 if (try_bias) { 2736 // TODO: eliminate redundant LDs of obj->mark 2737 biased_locking_exit(mark_addr, Rscratch, done); 2738 } 2739 2740 ld_ptr(Roop, oopDesc::mark_offset_in_bytes(), Rmark); 2741 ld_ptr(Rbox, BasicLock::displaced_header_offset_in_bytes(), Rscratch); 2742 andcc(Rscratch, Rscratch, G0); 2743 brx(Assembler::zero, false, Assembler::pn, done); 2744 delayed()->nop(); // consider: relocate fetch of mark, above, into this DS 2745 andcc(Rmark, 2, G0); 2746 brx(Assembler::zero, false, Assembler::pt, LStacked); 2747 delayed()->nop(); 2748 2749 // It's inflated 2750 // Conceptually we need a #loadstore|#storestore "release" MEMBAR before 2751 // the ST of 0 into _owner which releases the lock. This prevents loads 2752 // and stores within the critical section from reordering (floating) 2753 // past the store that releases the lock. But TSO is a strong memory model 2754 // and that particular flavor of barrier is a noop, so we can safely elide it. 2755 // Note that we use 1-0 locking by default for the inflated case. We 2756 // close the resultant (and rare) race by having contended threads in 2757 // monitorenter periodically poll _owner. 2758 2759 // 1-0 form : avoids CAS and MEMBAR in the common case 2760 // Do not bother to ratify that m->Owner == Self.
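  // The 1-0 release that follows, sketched in pseudocode (an illustrative
  // sketch only; the field names are the ObjectMonitor fields the emitted
  // code touches):
  //   if (m->_recursions != 0) goto done;       // recursive enter: leave as-is
  //   w = m->_cxq | m->_EntryList;              // sample the waiter lists
  //   st G0 -> m->_owner;                       // release with a plain ST (TSO)
  //   if (w == 0) goto done;                    // no waiters: fully done
  //   membar #StoreLoad;
  //   if (m->_succ != 0) goto done;             // a successor will re-acquire
  //   if (CAS(&m->_owner, 0, Self) == 0)        // re-acquired: must hand off,
  //     fall to the slow path (icc.zf == 0);    // so signal failure to caller
  //   else goto done;                           // another thread owns it now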
2761 ld_ptr(Address(Rmark, OM_OFFSET_NO_MONITOR_VALUE_TAG(recursions)), Rbox); 2762 orcc(Rbox, G0, G0); 2763 brx(Assembler::notZero, false, Assembler::pn, done); 2764 delayed()-> 2765 ld_ptr(Address(Rmark, OM_OFFSET_NO_MONITOR_VALUE_TAG(EntryList)), Rscratch); 2766 ld_ptr(Address(Rmark, OM_OFFSET_NO_MONITOR_VALUE_TAG(cxq)), Rbox); 2767 orcc(Rbox, Rscratch, G0); 2768 brx(Assembler::zero, false, Assembler::pt, done); 2769 delayed()-> 2770 st_ptr(G0, Address(Rmark, OM_OFFSET_NO_MONITOR_VALUE_TAG(owner))); 2771 2772 membar(StoreLoad); 2773 // Check that _succ is (or remains) non-zero 2774 ld_ptr(Address(Rmark, OM_OFFSET_NO_MONITOR_VALUE_TAG(succ)), Rscratch); 2775 andcc(Rscratch, Rscratch, G0); 2776 brx(Assembler::notZero, false, Assembler::pt, done); 2777 delayed()->andcc(G0, G0, G0); 2778 add(Rmark, OM_OFFSET_NO_MONITOR_VALUE_TAG(owner), Rmark); 2779 mov(G2_thread, Rscratch); 2780 cas_ptr(Rmark, G0, Rscratch); 2781 cmp(Rscratch, G0); 2782 // invert icc.zf and goto done 2783 // A slightly better v8+/v9 idiom would be the following: 2784 // movrnz Rscratch,1,Rscratch 2785 // ba done 2786 // xorcc Rscratch,1,G0 2787 // In v8+ mode the idiom would be valid IFF Rscratch was a G or O register 2788 brx(Assembler::notZero, false, Assembler::pt, done); 2789 delayed()->cmp(G0, G0); 2790 br(Assembler::always, false, Assembler::pt, done); 2791 delayed()->cmp(G0, 1); 2792 2793 bind (LStacked); 2794 // Consider: we could replace the expensive CAS in the exit 2795 // path with a simple ST of the displaced mark value fetched from 2796 // the on-stack basiclock box. That admits a race where a thread T2 2797 // in the slow lock path -- inflating with monitor M -- could race a 2798 // thread T1 in the fast unlock path, resulting in a missed wakeup for T2. 2799 // More precisely T1 in the stack-lock unlock path could "stomp" the 2800 // inflated mark value M installed by T2, resulting in an orphan 2801 // object monitor M and T2 becoming stranded. We can remedy that situation 2802 // by having T2 periodically poll the object's mark word using timed wait 2803 // operations. If T2 discovers that a stomp has occurred it vacates 2804 // the monitor M and wakes any other threads stranded on the now-orphan M. 2805 // In addition the monitor scavenger, which performs deflation, 2806 // would also need to check for orphan monitors and stranded threads. 2807 // 2808 // Finally, inflation is also used when T2 needs to assign a hashCode 2809 // to O and O is stack-locked by T1. The "stomp" race could cause 2810 // an assigned hashCode value to be lost. We can avoid that condition 2811 // and provide the necessary hashCode stability invariants by ensuring 2812 // that hashCode generation is idempotent between copying GCs. 2813 // For example we could compute the hashCode of an object O as 2814 // O's heap address XOR some high quality RNG value that is refreshed 2815 // at GC-time. The monitor scavenger would install the hashCode 2816 // found in any orphan monitors. Again, the mechanism admits a 2817 // lost-update "stomp" WAW race but detects and recovers as needed. 2818 // 2819 // A prototype implementation showed excellent results, although 2820 // the scavenger and timeout code was rather involved. 2821 2822 cas_ptr(mark_addr.base(), Rbox, Rscratch); 2823 cmp(Rbox, Rscratch); 2824 // Intentional fall through into done ...
2825 2826 bind(done); 2827 } 2828 2829 2830 2831 void MacroAssembler::print_CPU_state() { 2832 // %%%%% need to implement this 2833 } 2834 2835 void MacroAssembler::verify_FPU(int stack_depth, const char* s) { 2836 // %%%%% need to implement this 2837 } 2838 2839 void MacroAssembler::push_IU_state() { 2840 // %%%%% need to implement this 2841 } 2842 2843 2844 void MacroAssembler::pop_IU_state() { 2845 // %%%%% need to implement this 2846 } 2847 2848 2849 void MacroAssembler::push_FPU_state() { 2850 // %%%%% need to implement this 2851 } 2852 2853 2854 void MacroAssembler::pop_FPU_state() { 2855 // %%%%% need to implement this 2856 } 2857 2858 2859 void MacroAssembler::push_CPU_state() { 2860 // %%%%% need to implement this 2861 } 2862 2863 2864 void MacroAssembler::pop_CPU_state() { 2865 // %%%%% need to implement this 2866 } 2867 2868 2869 2870 void MacroAssembler::verify_tlab() { 2871 #ifdef ASSERT 2872 if (UseTLAB && VerifyOops) { 2873 Label next, next2, ok; 2874 Register t1 = L0; 2875 Register t2 = L1; 2876 Register t3 = L2; 2877 2878 save_frame(0); 2879 ld_ptr(G2_thread, in_bytes(JavaThread::tlab_top_offset()), t1); 2880 ld_ptr(G2_thread, in_bytes(JavaThread::tlab_start_offset()), t2); 2881 or3(t1, t2, t3); 2882 cmp_and_br_short(t1, t2, Assembler::greaterEqual, Assembler::pn, next); 2883 STOP("assert(top >= start)"); 2884 should_not_reach_here(); 2885 2886 bind(next); 2887 ld_ptr(G2_thread, in_bytes(JavaThread::tlab_top_offset()), t1); 2888 ld_ptr(G2_thread, in_bytes(JavaThread::tlab_end_offset()), t2); 2889 or3(t3, t2, t3); 2890 cmp_and_br_short(t1, t2, Assembler::lessEqual, Assembler::pn, next2); 2891 STOP("assert(top <= end)"); 2892 should_not_reach_here(); 2893 2894 bind(next2); 2895 and3(t3, MinObjAlignmentInBytesMask, t3); 2896 cmp_and_br_short(t3, 0, Assembler::lessEqual, Assembler::pn, ok); 2897 STOP("assert(aligned)"); 2898 should_not_reach_here(); 2899 2900 bind(ok); 2901 restore(); 2902 } 2903 #endif 2904 } 2905 2906 2907 void MacroAssembler::eden_allocate( 2908 Register obj, // result: pointer to object after successful allocation 2909 Register var_size_in_bytes, // object size in bytes if unknown at compile time; invalid otherwise 2910 int con_size_in_bytes, // object size in bytes if known at compile time 2911 Register t1, // temp register 2912 Register t2, // temp register 2913 Label& slow_case // continuation point if fast allocation fails 2914 ){ 2915 // make sure arguments make sense 2916 assert_different_registers(obj, var_size_in_bytes, t1, t2); 2917 assert(0 <= con_size_in_bytes && Assembler::is_simm13(con_size_in_bytes), "illegal object size"); 2918 assert((con_size_in_bytes & MinObjAlignmentInBytesMask) == 0, "object size is not multiple of alignment"); 2919 2920 if (!Universe::heap()->supports_inline_contig_alloc()) { 2921 // No allocation in the shared eden. 2922 ba(slow_case); 2923 delayed()->nop(); 2924 } else { 2925 // get eden boundaries 2926 // note: we need both top & top_addr! 
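    // The fast path below is a classic CAS-bump allocation loop; in
    // pseudocode (an illustrative sketch only):
    //   retry:
    //     obj = *eden_top;  end = *eden_end;
    //     if ((unsigned)(end - obj) < size) goto slow_case;   // not enough room
    //     if (CAS(eden_top, obj, obj + size) != obj) goto retry;  // lost the race
    //   // obj now points at the freshly allocated space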
2927 const Register top_addr = t1; 2928 const Register end = t2; 2929 2930 CollectedHeap* ch = Universe::heap(); 2931 set((intx)ch->top_addr(), top_addr); 2932 intx delta = (intx)ch->end_addr() - (intx)ch->top_addr(); 2933 ld_ptr(top_addr, delta, end); 2934 ld_ptr(top_addr, 0, obj); 2935 2936 // try to allocate 2937 Label retry; 2938 bind(retry); 2939 #ifdef ASSERT 2940 // make sure eden top is properly aligned 2941 { 2942 Label L; 2943 btst(MinObjAlignmentInBytesMask, obj); 2944 br(Assembler::zero, false, Assembler::pt, L); 2945 delayed()->nop(); 2946 STOP("eden top is not properly aligned"); 2947 bind(L); 2948 } 2949 #endif // ASSERT 2950 const Register free = end; 2951 sub(end, obj, free); // compute amount of free space 2952 if (var_size_in_bytes->is_valid()) { 2953 // size is unknown at compile time 2954 cmp(free, var_size_in_bytes); 2955 brx(Assembler::lessUnsigned, false, Assembler::pn, slow_case); // if there is not enough space go to the slow case 2956 delayed()->add(obj, var_size_in_bytes, end); 2957 } else { 2958 // size is known at compile time 2959 cmp(free, con_size_in_bytes); 2960 brx(Assembler::lessUnsigned, false, Assembler::pn, slow_case); // if there is not enough space go to the slow case 2961 delayed()->add(obj, con_size_in_bytes, end); 2962 } 2963 // Compare obj with the value at top_addr; if still equal, swap the value of 2964 // end with the value at top_addr. If not equal, read the value at top_addr 2965 // into end. 2966 cas_ptr(top_addr, obj, end); 2967 // if someone beat us on the allocation, try again, otherwise continue 2968 cmp(obj, end); 2969 brx(Assembler::notEqual, false, Assembler::pn, retry); 2970 delayed()->mov(end, obj); // nop if successful since obj == end 2971 2972 #ifdef ASSERT 2973 // make sure eden top is properly aligned 2974 { 2975 Label L; 2976 const Register top_addr = t1; 2977 2978 set((intx)ch->top_addr(), top_addr); 2979 ld_ptr(top_addr, 0, top_addr); 2980 btst(MinObjAlignmentInBytesMask, top_addr); 2981 br(Assembler::zero, false, Assembler::pt, L); 2982 delayed()->nop(); 2983 STOP("eden top is not properly aligned"); 2984 bind(L); 2985 } 2986 #endif // ASSERT 2987 } 2988 } 2989 2990 2991 void MacroAssembler::tlab_allocate( 2992 Register obj, // result: pointer to object after successful allocation 2993 Register var_size_in_bytes, // object size in bytes if unknown at compile time; invalid otherwise 2994 int con_size_in_bytes, // object size in bytes if known at compile time 2995 Register t1, // temp register 2996 Label& slow_case // continuation point if fast allocation fails 2997 ){ 2998 // make sure arguments make sense 2999 assert_different_registers(obj, var_size_in_bytes, t1); 3000 assert(0 <= con_size_in_bytes && is_simm13(con_size_in_bytes), "illegal object size"); 3001 assert((con_size_in_bytes & MinObjAlignmentInBytesMask) == 0, "object size is not multiple of alignment"); 3002 3003 const Register free = t1; 3004 3005 verify_tlab(); 3006 3007 ld_ptr(G2_thread, in_bytes(JavaThread::tlab_top_offset()), obj); 3008 3009 // calculate amount of free space 3010 ld_ptr(G2_thread, in_bytes(JavaThread::tlab_end_offset()), free); 3011 sub(free, obj, free); 3012 3013 Label done; 3014 if (var_size_in_bytes == noreg) { 3015 cmp(free, con_size_in_bytes); 3016 } else { 3017 cmp(free, var_size_in_bytes); 3018 } 3019 br(Assembler::less, false, Assembler::pn, slow_case); 3020 // calculate the new top pointer 3021 if (var_size_in_bytes == noreg) { 3022 delayed()->add(obj, con_size_in_bytes, free); 3023 } else { 3024 delayed()->add(obj, var_size_in_bytes,
free); 3025 } 3026 3027 bind(done); 3028 3029 #ifdef ASSERT 3030 // make sure new free pointer is properly aligned 3031 { 3032 Label L; 3033 btst(MinObjAlignmentInBytesMask, free); 3034 br(Assembler::zero, false, Assembler::pt, L); 3035 delayed()->nop(); 3036 STOP("updated TLAB free is not properly aligned"); 3037 bind(L); 3038 } 3039 #endif // ASSERT 3040 3041 // update the tlab top pointer 3042 st_ptr(free, G2_thread, in_bytes(JavaThread::tlab_top_offset())); 3043 verify_tlab(); 3044 } 3045 3046 void MacroAssembler::zero_memory(Register base, Register index) { 3047 assert_different_registers(base, index); 3048 Label loop; 3049 bind(loop); 3050 subcc(index, HeapWordSize, index); 3051 brx(Assembler::greaterEqual, true, Assembler::pt, loop); 3052 delayed()->st_ptr(G0, base, index); 3053 } 3054 3055 void MacroAssembler::incr_allocated_bytes(RegisterOrConstant size_in_bytes, 3056 Register t1, Register t2) { 3057 // Bump total bytes allocated by this thread 3058 assert(t1->is_global(), "must be global reg"); // so all 64 bits are saved on a context switch 3059 assert_different_registers(size_in_bytes.register_or_noreg(), t1, t2); 3060 // v8 support has gone the way of the dodo 3061 ldx(G2_thread, in_bytes(JavaThread::allocated_bytes_offset()), t1); 3062 add(t1, ensure_simm13_or_reg(size_in_bytes, t2), t1); 3063 stx(t1, G2_thread, in_bytes(JavaThread::allocated_bytes_offset())); 3064 } 3065 3066 Assembler::Condition MacroAssembler::negate_condition(Assembler::Condition cond) { 3067 switch (cond) { 3068 // Note some conditions are synonyms for others 3069 case Assembler::never: return Assembler::always; 3070 case Assembler::zero: return Assembler::notZero; 3071 case Assembler::lessEqual: return Assembler::greater; 3072 case Assembler::less: return Assembler::greaterEqual; 3073 case Assembler::lessEqualUnsigned: return Assembler::greaterUnsigned; 3074 case Assembler::lessUnsigned: return Assembler::greaterEqualUnsigned; 3075 case Assembler::negative: return Assembler::positive; 3076 case Assembler::overflowSet: return Assembler::overflowClear; 3077 case Assembler::always: return Assembler::never; 3078 case Assembler::notZero: return Assembler::zero; 3079 case Assembler::greater: return Assembler::lessEqual; 3080 case Assembler::greaterEqual: return Assembler::less; 3081 case Assembler::greaterUnsigned: return Assembler::lessEqualUnsigned; 3082 case Assembler::greaterEqualUnsigned: return Assembler::lessUnsigned; 3083 case Assembler::positive: return Assembler::negative; 3084 case Assembler::overflowClear: return Assembler::overflowSet; 3085 } 3086 3087 ShouldNotReachHere(); return Assembler::overflowClear; 3088 } 3089 3090 void MacroAssembler::cond_inc(Assembler::Condition cond, address counter_ptr, 3091 Register Rtmp1, Register Rtmp2 /*, Register Rtmp3, Register Rtmp4 */) { 3092 Condition negated_cond = negate_condition(cond); 3093 Label L; 3094 brx(negated_cond, false, Assembler::pt, L); 3095 delayed()->nop(); 3096 inc_counter(counter_ptr, Rtmp1, Rtmp2); 3097 bind(L); 3098 } 3099 3100 void MacroAssembler::inc_counter(address counter_addr, Register Rtmp1, Register Rtmp2) { 3101 AddressLiteral addrlit(counter_addr); 3102 sethi(addrlit, Rtmp1); // Move hi22 bits into temporary register. 3103 Address addr(Rtmp1, addrlit.low10()); // Build an address with low10 bits. 
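  // sethi/low10 is the usual SPARC pattern for forming a 32-bit absolute
  // address: sethi supplies bits 31..10 and the low 10 bits travel in the
  // load/store displacement. For a counter at, say, 0x12345678 (an
  // illustrative address) the pair above and below assembles to:
  //   sethi %hi(0x12345678), Rtmp1
  //   ld    [Rtmp1 + %lo(0x12345678)], Rtmp2
  // Note the load-increment-store that follows is not atomic; these are
  // statistics counters only.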
3104 ld(addr, Rtmp2); 3105 inc(Rtmp2); 3106 st(Rtmp2, addr); 3107 } 3108 3109 void MacroAssembler::inc_counter(int* counter_addr, Register Rtmp1, Register Rtmp2) { 3110 inc_counter((address) counter_addr, Rtmp1, Rtmp2); 3111 } 3112 3113 SkipIfEqual::SkipIfEqual( 3114 MacroAssembler* masm, Register temp, const bool* flag_addr, 3115 Assembler::Condition condition) { 3116 _masm = masm; 3117 AddressLiteral flag(flag_addr); 3118 _masm->sethi(flag, temp); 3119 _masm->ldub(temp, flag.low10(), temp); 3120 _masm->tst(temp); 3121 _masm->br(condition, false, Assembler::pt, _label); 3122 _masm->delayed()->nop(); 3123 } 3124 3125 SkipIfEqual::~SkipIfEqual() { 3126 _masm->bind(_label); 3127 } 3128 3129 void MacroAssembler::bang_stack_with_offset(int offset) { 3130 // stack grows down, caller passes positive offset 3131 assert(offset > 0, "must bang with negative offset"); 3132 set((-offset)+STACK_BIAS, G3_scratch); 3133 st(G0, SP, G3_scratch); 3134 } 3135 3136 // Writes to stack successive pages until offset reached to check for 3137 // stack overflow + shadow pages. This clobbers tsp and scratch. 3138 void MacroAssembler::bang_stack_size(Register Rsize, Register Rtsp, 3139 Register Rscratch) { 3140 // Use stack pointer in temp stack pointer 3141 mov(SP, Rtsp); 3142 3143 // Bang stack for total size given plus stack shadow page size. 3144 // Bang one page at a time because a large size can overflow yellow and 3145 // red zones (the bang will fail but stack overflow handling can't tell that 3146 // it was a stack overflow bang vs a regular segv). 3147 int offset = os::vm_page_size(); 3148 Register Roffset = Rscratch; 3149 3150 Label loop; 3151 bind(loop); 3152 set((-offset)+STACK_BIAS, Rscratch); 3153 st(G0, Rtsp, Rscratch); 3154 set(offset, Roffset); 3155 sub(Rsize, Roffset, Rsize); 3156 cmp(Rsize, G0); 3157 br(Assembler::greater, false, Assembler::pn, loop); 3158 delayed()->sub(Rtsp, Roffset, Rtsp); 3159 3160 // Bang down shadow pages too. 3161 // At this point, (tmp-0) is the last address touched, so don't 3162 // touch it again. (It was touched as (tmp-pagesize) but then tmp 3163 // was post-decremented.) Skip this address by starting at i=1, and 3164 // touch a few more pages below. N.B. It is important to touch all 3165 // the way down to and including i=StackShadowPages. 3166 for (int i = 1; i < JavaThread::stack_shadow_zone_size() / os::vm_page_size(); i++) { 3167 set((-i*offset)+STACK_BIAS, Rscratch); 3168 st(G0, Rtsp, Rscratch); 3169 } 3170 } 3171 3172 void MacroAssembler::reserved_stack_check() { 3173 // testing if reserved zone needs to be enabled 3174 Label no_reserved_zone_enabling; 3175 3176 ld_ptr(G2_thread, JavaThread::reserved_stack_activation_offset(), G4_scratch); 3177 cmp_and_brx_short(SP, G4_scratch, Assembler::lessUnsigned, Assembler::pt, no_reserved_zone_enabling); 3178 3179 call_VM_leaf(L0, CAST_FROM_FN_PTR(address, SharedRuntime::enable_stack_reserved_zone), G2_thread); 3180 3181 AddressLiteral stub(StubRoutines::throw_delayed_StackOverflowError_entry()); 3182 jump_to(stub, G4_scratch); 3183 delayed()->restore(); 3184 3185 should_not_reach_here(); 3186 3187 bind(no_reserved_zone_enabling); 3188 } 3189 // ((OopHandle)result).resolve(); 3190 void MacroAssembler::resolve_oop_handle(Register result, Register tmp) { 3191 // OopHandle::resolve is an indirection. 
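  // i.e., modulo the GC load barrier this is a single dereference
  // (illustrative):
  //   result = *(oop*)result;   // IN_NATIVE: the handle lives outside the heap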
3192 access_load_at(T_OBJECT, IN_NATIVE, Address(result, 0), result, tmp); 3193 } 3194 3195 void MacroAssembler::load_mirror(Register mirror, Register method, Register tmp) { 3196 const int mirror_offset = in_bytes(Klass::java_mirror_offset()); 3197 ld_ptr(method, in_bytes(Method::const_offset()), mirror); 3198 ld_ptr(mirror, in_bytes(ConstMethod::constants_offset()), mirror); 3199 ld_ptr(mirror, ConstantPool::pool_holder_offset_in_bytes(), mirror); 3200 ld_ptr(mirror, mirror_offset, mirror); 3201 resolve_oop_handle(mirror, tmp); 3202 } 3203 3204 void MacroAssembler::load_klass(Register src_oop, Register klass) { 3205 // The number of bytes in this code is used by 3206 // MachCallDynamicJavaNode::ret_addr_offset() 3207 // if this changes, change that. 3208 if (UseCompressedClassPointers) { 3209 lduw(src_oop, oopDesc::klass_offset_in_bytes(), klass); 3210 decode_klass_not_null(klass); 3211 } else { 3212 ld_ptr(src_oop, oopDesc::klass_offset_in_bytes(), klass); 3213 } 3214 } 3215 3216 void MacroAssembler::store_klass(Register klass, Register dst_oop) { 3217 if (UseCompressedClassPointers) { 3218 assert(dst_oop != klass, "not enough registers"); 3219 encode_klass_not_null(klass); 3220 st(klass, dst_oop, oopDesc::klass_offset_in_bytes()); 3221 } else { 3222 st_ptr(klass, dst_oop, oopDesc::klass_offset_in_bytes()); 3223 } 3224 } 3225 3226 void MacroAssembler::store_klass_gap(Register s, Register d) { 3227 if (UseCompressedClassPointers) { 3228 assert(s != d, "not enough registers"); 3229 st(s, d, oopDesc::klass_gap_offset_in_bytes()); 3230 } 3231 } 3232 3233 void MacroAssembler::access_store_at(BasicType type, DecoratorSet decorators, 3234 Register src, Address dst, Register tmp) { 3235 BarrierSetAssembler* bs = BarrierSet::barrier_set()->barrier_set_assembler(); 3236 decorators = AccessInternal::decorator_fixup(decorators); 3237 bool as_raw = (decorators & AS_RAW) != 0; 3238 if (as_raw) { 3239 bs->BarrierSetAssembler::store_at(this, decorators, type, src, dst, tmp); 3240 } else { 3241 bs->store_at(this, decorators, type, src, dst, tmp); 3242 } 3243 } 3244 3245 void MacroAssembler::access_load_at(BasicType type, DecoratorSet decorators, 3246 Address src, Register dst, Register tmp) { 3247 BarrierSetAssembler* bs = BarrierSet::barrier_set()->barrier_set_assembler(); 3248 decorators = AccessInternal::decorator_fixup(decorators); 3249 bool as_raw = (decorators & AS_RAW) != 0; 3250 if (as_raw) { 3251 bs->BarrierSetAssembler::load_at(this, decorators, type, src, dst, tmp); 3252 } else { 3253 bs->load_at(this, decorators, type, src, dst, tmp); 3254 } 3255 } 3256 3257 void MacroAssembler::load_heap_oop(const Address& s, Register d, Register tmp, DecoratorSet decorators) { 3258 access_load_at(T_OBJECT, IN_HEAP | decorators, s, d, tmp); 3259 } 3260 3261 void MacroAssembler::load_heap_oop(Register s1, Register s2, Register d, Register tmp, DecoratorSet decorators) { 3262 access_load_at(T_OBJECT, IN_HEAP | decorators, Address(s1, s2), d, tmp); 3263 } 3264 3265 void MacroAssembler::load_heap_oop(Register s1, int simm13a, Register d, Register tmp, DecoratorSet decorators) { 3266 access_load_at(T_OBJECT, IN_HEAP | decorators, Address(s1, simm13a), d, tmp); 3267 } 3268 3269 void MacroAssembler::load_heap_oop(Register s1, RegisterOrConstant s2, Register d, Register tmp, DecoratorSet decorators) { 3270 if (s2.is_constant()) { 3271 access_load_at(T_OBJECT, IN_HEAP | decorators, Address(s1, s2.as_constant()), d, tmp); 3272 } else { 3273 access_load_at(T_OBJECT, IN_HEAP | decorators, Address(s1, s2.as_register()), 
void MacroAssembler::store_heap_oop(Register d, Register s1, Register s2, Register tmp, DecoratorSet decorators) {
  access_store_at(T_OBJECT, IN_HEAP | decorators, d, Address(s1, s2), tmp);
}

void MacroAssembler::store_heap_oop(Register d, Register s1, int simm13a, Register tmp, DecoratorSet decorators) {
  access_store_at(T_OBJECT, IN_HEAP | decorators, d, Address(s1, simm13a), tmp);
}

void MacroAssembler::store_heap_oop(Register d, const Address& a, int offset, Register tmp, DecoratorSet decorators) {
  if (a.has_index()) {
    assert(!a.has_disp(), "not supported yet");
    assert(offset == 0, "not supported yet");
    access_store_at(T_OBJECT, IN_HEAP | decorators, d, Address(a.base(), a.index()), tmp);
  } else {
    access_store_at(T_OBJECT, IN_HEAP | decorators, d, Address(a.base(), a.disp() + offset), tmp);
  }
}


void MacroAssembler::encode_heap_oop(Register src, Register dst) {
  assert (UseCompressedOops, "must be compressed");
  assert (Universe::heap() != NULL, "java heap should be initialized");
  assert (LogMinObjAlignmentInBytes == CompressedOops::shift(), "decode alg wrong");
  verify_oop(src);
  if (CompressedOops::base() == NULL) {
    srlx(src, LogMinObjAlignmentInBytes, dst);
    return;
  }
  Label done;
  if (src == dst) {
    // optimize for frequent case src == dst
    bpr(rc_nz, true, Assembler::pt, src, done);
    delayed()->sub(src, G6_heapbase, dst); // annulled if not taken
    bind(done);
    srlx(src, LogMinObjAlignmentInBytes, dst);
  } else {
    bpr(rc_z, false, Assembler::pn, src, done);
    delayed()->mov(G0, dst);
    // The sub could be moved before the branch with an annulled delay slot,
    // but that may add unneeded work when encoding null.
    sub(src, G6_heapbase, dst);
    srlx(dst, LogMinObjAlignmentInBytes, dst);
    bind(done);
  }
}


void MacroAssembler::encode_heap_oop_not_null(Register r) {
  assert (UseCompressedOops, "must be compressed");
  assert (Universe::heap() != NULL, "java heap should be initialized");
  assert (LogMinObjAlignmentInBytes == CompressedOops::shift(), "decode alg wrong");
  verify_oop(r);
  if (CompressedOops::base() != NULL)
    sub(r, G6_heapbase, r);
  srlx(r, LogMinObjAlignmentInBytes, r);
}

void MacroAssembler::encode_heap_oop_not_null(Register src, Register dst) {
  assert (UseCompressedOops, "must be compressed");
  assert (Universe::heap() != NULL, "java heap should be initialized");
  assert (LogMinObjAlignmentInBytes == CompressedOops::shift(), "decode alg wrong");
  verify_oop(src);
  if (CompressedOops::base() == NULL) {
    srlx(src, LogMinObjAlignmentInBytes, dst);
  } else {
    sub(src, G6_heapbase, dst);
    srlx(dst, LogMinObjAlignmentInBytes, dst);
  }
}
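
// The encoders above compute, in C terms (a sketch of what
// CompressedOops::encode does, with base/shift as configured):
//
//   narrowOop encode(oop o) {
//     if (o == NULL) return 0;
//     return (narrowOop)(((uintptr_t)o - (uintptr_t)CompressedOops::base())
//                        >> CompressedOops::shift());
//   }
//
// When the base is NULL (zero-based compressed oops) the subtraction is
// omitted entirely and a plain srlx suffices, which is why that case
// returns early.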
// Same algorithm as oops.inline.hpp decode_heap_oop.
void MacroAssembler::decode_heap_oop(Register src, Register dst) {
  assert (UseCompressedOops, "must be compressed");
  assert (Universe::heap() != NULL, "java heap should be initialized");
  assert (LogMinObjAlignmentInBytes == CompressedOops::shift(), "decode alg wrong");
  sllx(src, LogMinObjAlignmentInBytes, dst);
  if (CompressedOops::base() != NULL) {
    Label done;
    bpr(rc_nz, true, Assembler::pt, dst, done);
    delayed()->add(dst, G6_heapbase, dst); // annulled if not taken
    bind(done);
  }
  verify_oop(dst);
}

void MacroAssembler::decode_heap_oop_not_null(Register r) {
  // Do not add assert code to this unless you change vtableStubs_sparc.cpp
  // pd_code_size_limit.
  // Also do not verify_oop as this is called by verify_oop.
  assert (UseCompressedOops, "must be compressed");
  assert (Universe::heap() != NULL, "java heap should be initialized");
  assert (LogMinObjAlignmentInBytes == CompressedOops::shift(), "decode alg wrong");
  sllx(r, LogMinObjAlignmentInBytes, r);
  if (CompressedOops::base() != NULL)
    add(r, G6_heapbase, r);
}

void MacroAssembler::decode_heap_oop_not_null(Register src, Register dst) {
  // Do not add assert code to this unless you change vtableStubs_sparc.cpp
  // pd_code_size_limit.
  // Also do not verify_oop as this is called by verify_oop.
  assert (UseCompressedOops, "must be compressed");
  assert (LogMinObjAlignmentInBytes == CompressedOops::shift(), "decode alg wrong");
  sllx(src, LogMinObjAlignmentInBytes, dst);
  if (CompressedOops::base() != NULL)
    add(dst, G6_heapbase, dst);
}

void MacroAssembler::encode_klass_not_null(Register r) {
  assert (UseCompressedClassPointers, "must be compressed");
  if (CompressedKlassPointers::base() != NULL) {
    assert(r != G6_heapbase, "bad register choice");
    set((intptr_t)CompressedKlassPointers::base(), G6_heapbase);
    sub(r, G6_heapbase, r);
    if (CompressedKlassPointers::shift() != 0) {
      assert (LogKlassAlignmentInBytes == CompressedKlassPointers::shift(), "decode alg wrong");
      srlx(r, LogKlassAlignmentInBytes, r);
    }
    reinit_heapbase();
  } else {
    assert (LogKlassAlignmentInBytes == CompressedKlassPointers::shift() || CompressedKlassPointers::shift() == 0, "decode alg wrong");
    srlx(r, CompressedKlassPointers::shift(), r);
  }
}

void MacroAssembler::encode_klass_not_null(Register src, Register dst) {
  if (src == dst) {
    encode_klass_not_null(src);
  } else {
    assert (UseCompressedClassPointers, "must be compressed");
    if (CompressedKlassPointers::base() != NULL) {
      set((intptr_t)CompressedKlassPointers::base(), dst);
      sub(src, dst, dst);
      if (CompressedKlassPointers::shift() != 0) {
        srlx(dst, LogKlassAlignmentInBytes, dst);
      }
    } else {
      // shift src into dst
      assert (LogKlassAlignmentInBytes == CompressedKlassPointers::shift() || CompressedKlassPointers::shift() == 0, "decode alg wrong");
      srlx(src, CompressedKlassPointers::shift(), dst);
    }
  }
}
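
// Klass pointer compression follows the same scheme with its own base and
// shift; as a sketch (mirroring the CompressedKlassPointers encoding):
//
//   narrowKlass encode_klass(Klass* k) {
//     return (narrowKlass)(((uintptr_t)k - (uintptr_t)CompressedKlassPointers::base())
//                          >> CompressedKlassPointers::shift());
//   }
//
// Because no register is permanently reserved for the klass base,
// G6_heapbase is borrowed to hold it and must be restored with
// reinit_heapbase() before any compressed-oop code runs again.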
// Function instr_size_for_decode_klass_not_null() counts the instructions
// generated by decode_klass_not_null() and reinit_heapbase(). Hence, if
// the instructions they generate change, then this method needs to be updated.
int MacroAssembler::instr_size_for_decode_klass_not_null() {
  assert (UseCompressedClassPointers, "only for compressed klass ptrs");
  int num_instrs = 1; // shift src,dst or add
  if (CompressedKlassPointers::base() != NULL) {
    // set + add + set
    num_instrs += insts_for_internal_set((intptr_t)CompressedKlassPointers::base()) +
                  insts_for_internal_set((intptr_t)CompressedOops::ptrs_base());
    if (CompressedKlassPointers::shift() != 0) {
      num_instrs += 1; // sllx
    }
  }
  return num_instrs * BytesPerInstWord;
}

// !!! If the instructions that get generated here change then function
// instr_size_for_decode_klass_not_null() needs to get updated.
void MacroAssembler::decode_klass_not_null(Register r) {
  // Do not add assert code to this unless you change vtableStubs_sparc.cpp
  // pd_code_size_limit.
  assert (UseCompressedClassPointers, "must be compressed");
  if (CompressedKlassPointers::base() != NULL) {
    assert(r != G6_heapbase, "bad register choice");
    set((intptr_t)CompressedKlassPointers::base(), G6_heapbase);
    if (CompressedKlassPointers::shift() != 0)
      sllx(r, LogKlassAlignmentInBytes, r);
    add(r, G6_heapbase, r);
    reinit_heapbase();
  } else {
    assert (LogKlassAlignmentInBytes == CompressedKlassPointers::shift() || CompressedKlassPointers::shift() == 0, "decode alg wrong");
    sllx(r, CompressedKlassPointers::shift(), r);
  }
}

void MacroAssembler::decode_klass_not_null(Register src, Register dst) {
  if (src == dst) {
    decode_klass_not_null(src);
  } else {
    // Do not add assert code to this unless you change vtableStubs_sparc.cpp
    // pd_code_size_limit.
    assert (UseCompressedClassPointers, "must be compressed");
    if (CompressedKlassPointers::base() != NULL) {
      if (CompressedKlassPointers::shift() != 0) {
        assert((src != G6_heapbase) && (dst != G6_heapbase), "bad register choice");
        set((intptr_t)CompressedKlassPointers::base(), G6_heapbase);
        sllx(src, LogKlassAlignmentInBytes, dst);
        add(dst, G6_heapbase, dst);
        reinit_heapbase();
      } else {
        set((intptr_t)CompressedKlassPointers::base(), dst);
        add(src, dst, dst);
      }
    } else {
      // shift/mov src into dst.
      assert (LogKlassAlignmentInBytes == CompressedKlassPointers::shift() || CompressedKlassPointers::shift() == 0, "decode alg wrong");
      sllx(src, CompressedKlassPointers::shift(), dst);
    }
  }
}

void MacroAssembler::reinit_heapbase() {
  if (UseCompressedOops || UseCompressedClassPointers) {
    if (Universe::heap() != NULL) {
      set((intptr_t)CompressedOops::ptrs_base(), G6_heapbase);
    } else {
      AddressLiteral base(CompressedOops::ptrs_base_addr());
      load_ptr_contents(base, G6_heapbase);
    }
  }
}
#ifdef COMPILER2

// Compress char[] to byte[] by compressing 16 bytes at once. Sets 'result'
// to 0 on failure; otherwise 'result' holds the original count.
void MacroAssembler::string_compress_16(Register src, Register dst, Register cnt, Register result,
                                        Register tmp1, Register tmp2, Register tmp3, Register tmp4,
                                        FloatRegister ftmp1, FloatRegister ftmp2, FloatRegister ftmp3, Label& Ldone) {
  Label Lloop, Lslow;
  assert(UseVIS >= 3, "VIS3 is required");
  assert_different_registers(src, dst, cnt, tmp1, tmp2, tmp3, tmp4, result);
  assert_different_registers(ftmp1, ftmp2, ftmp3);

  // Check if cnt >= 8 (= 16 bytes)
  cmp(cnt, 8);
  br(Assembler::less, false, Assembler::pn, Lslow);
  delayed()->mov(cnt, result); // copy count

  // Check for 8-byte alignment of src and dst
  or3(src, dst, tmp1);
  andcc(tmp1, 7, G0);
  br(Assembler::notZero, false, Assembler::pn, Lslow);
  delayed()->nop();

  // Set mask for bshuffle instruction
  Register mask = tmp4;
  set(0x13579bdf, mask);
  bmask(mask, G0, G0);

  // Set mask to 0xff00 ff00 ff00 ff00 to check for non-latin1 characters
  Assembler::sethi(0xff00fc00, mask); // mask = 0x0000 0000 ff00 fc00
  add(mask, 0x300, mask);             // mask = 0x0000 0000 ff00 ff00
  sllx(mask, 32, tmp1);               // tmp1 = 0xff00 ff00 0000 0000
  or3(mask, tmp1, mask);              // mask = 0xff00 ff00 ff00 ff00

  // Load first 8 bytes
  ldx(src, 0, tmp1);

  bind(Lloop);
  // Load next 8 bytes
  ldx(src, 8, tmp2);

  // Check for non-latin1 character by testing if the most significant byte of a char is set.
  // Although we have to move the data between integer and floating point registers, this is
  // still faster than the corresponding VIS instructions (ford/fand/fcmpd).
  or3(tmp1, tmp2, tmp3);
  btst(tmp3, mask);
  // annul zeroing if branch is not taken to preserve original count
  brx(Assembler::notZero, true, Assembler::pn, Ldone);
  delayed()->mov(G0, result); // 0 - failed

  // Move bytes into float register
  movxtod(tmp1, ftmp1);
  movxtod(tmp2, ftmp2);

  // Compress by copying one byte per char from ftmp1 and ftmp2 to ftmp3
  bshuffle(ftmp1, ftmp2, ftmp3);
  stf(FloatRegisterImpl::D, ftmp3, dst, 0);

  // Increment addresses and decrement count
  inc(src, 16);
  inc(dst, 8);
  dec(cnt, 8);

  cmp(cnt, 8);
  // annul LDX if branch is not taken to prevent access past end of string
  br(Assembler::greaterEqual, true, Assembler::pt, Lloop);
  delayed()->ldx(src, 0, tmp1);

  // Fallback to slow version
  bind(Lslow);
}

// Compress char[] to byte[]. Sets 'result' to 0 on failure.
void MacroAssembler::string_compress(Register src, Register dst, Register cnt, Register result, Register tmp, Label& Ldone) {
  Label Lloop;
  assert_different_registers(src, dst, cnt, tmp, result);

  lduh(src, 0, tmp);

  bind(Lloop);
  inc(src, sizeof(jchar));
  cmp(tmp, 0xff);
  // annul zeroing if branch is not taken to preserve original count
  br(Assembler::greater, true, Assembler::pn, Ldone); // don't check xcc
  delayed()->mov(G0, result); // 0 - failed
  deccc(cnt);
  stb(tmp, dst, 0);
  inc(dst);
  // annul LDUH if branch is not taken to prevent access past end of string
  br(Assembler::notZero, true, Assembler::pt, Lloop);
  delayed()->lduh(src, 0, tmp); // hoisted
}
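
// Both compress routines above implement, per 8-char block, the scalar
// equivalent below (an explanatory sketch; 'compress8' is a hypothetical
// name, not a helper that exists in this file):
//
//   bool compress8(const jchar* src, jbyte* dst) {
//     for (int i = 0; i < 8; i++) {
//       if (src[i] > 0xff) return false;  // non-latin1 char => fail
//       dst[i] = (jbyte)src[i];
//     }
//     return true;
//   }
//
// The VIS3 version tests all eight chars at once against the mask
// 0xff00ff00ff00ff00 and then picks the low byte of each char with a
// single bshuffle.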
// Inflate byte[] to char[] by inflating 16 bytes at once.
void MacroAssembler::string_inflate_16(Register src, Register dst, Register cnt, Register tmp,
                                       FloatRegister ftmp1, FloatRegister ftmp2, FloatRegister ftmp3, FloatRegister ftmp4, Label& Ldone) {
  Label Lloop, Lslow;
  assert(UseVIS >= 3, "VIS3 is required");
  assert_different_registers(src, dst, cnt, tmp);
  assert_different_registers(ftmp1, ftmp2, ftmp3, ftmp4);

  // Check if cnt >= 8 (= 16 bytes)
  cmp(cnt, 8);
  br(Assembler::less, false, Assembler::pn, Lslow);
  delayed()->nop();

  // Check for 8-byte alignment of src and dst
  or3(src, dst, tmp);
  andcc(tmp, 7, G0);
  br(Assembler::notZero, false, Assembler::pn, Lslow);
  // Initialize float register to zero
  FloatRegister zerof = ftmp4;
  delayed()->fzero(FloatRegisterImpl::D, zerof);

  // Load first 8 bytes
  ldf(FloatRegisterImpl::D, src, 0, ftmp1);

  bind(Lloop);
  inc(src, 8);
  dec(cnt, 8);

  // Inflate the string by interleaving each byte from the source array
  // with a zero byte and storing the result in the destination array.
  fpmerge(zerof, ftmp1->successor(), ftmp2);
  stf(FloatRegisterImpl::D, ftmp2, dst, 8);
  fpmerge(zerof, ftmp1, ftmp3);
  stf(FloatRegisterImpl::D, ftmp3, dst, 0);

  inc(dst, 16);

  cmp(cnt, 8);
  // annul LDF if branch is not taken to prevent access past end of string
  br(Assembler::greaterEqual, true, Assembler::pt, Lloop);
  delayed()->ldf(FloatRegisterImpl::D, src, 0, ftmp1);

  // Fallback to slow version
  bind(Lslow);
}

// Inflate byte[] to char[].
void MacroAssembler::string_inflate(Register src, Register dst, Register cnt, Register tmp, Label& Ldone) {
  Label Loop;
  assert_different_registers(src, dst, cnt, tmp);

  ldub(src, 0, tmp);
  bind(Loop);
  inc(src);
  deccc(cnt);
  sth(tmp, dst, 0);
  inc(dst, sizeof(jchar));
  // annul LDUB if branch is not taken to prevent access past end of string
  br(Assembler::notZero, true, Assembler::pt, Loop);
  delayed()->ldub(src, 0, tmp); // hoisted
}
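
// The inflate routines above perform the inverse transform, zero-extending
// each byte to a char; a scalar sketch ('inflate8' is a hypothetical name):
//
//   void inflate8(const jbyte* src, jchar* dst) {
//     for (int i = 0; i < 8; i++) {
//       dst[i] = (jchar)(src[i] & 0xff);
//     }
//   }
//
// fpmerge with a zeroed register achieves this byte/zero interleaving for
// eight source bytes per loop iteration.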
void MacroAssembler::string_compare(Register str1, Register str2,
                                    Register cnt1, Register cnt2,
                                    Register tmp1, Register tmp2,
                                    Register result, int ae) {
  Label Ldone, Lloop;
  assert_different_registers(str1, str2, cnt1, cnt2, tmp1, result);
  int stride1, stride2;

  // Note: Making use of the fact that compareTo(a, b) == -compareTo(b, a)
  // we interchange str1 and str2 in the UL case and negate the result.
  // Like this, str1 is always latin1 encoded, except for the UU case.

  if (ae == StrIntrinsicNode::LU || ae == StrIntrinsicNode::UL) {
    srl(cnt2, 1, cnt2);
  }

  // See if the lengths are different, and calculate min in cnt1.
  // Save diff in case we need it for a tie-breaker.
  Label Lskip;
  Register diff = tmp1;
  subcc(cnt1, cnt2, diff);
  br(Assembler::greater, true, Assembler::pt, Lskip);
  // cnt2 is shorter, so use its count:
  delayed()->mov(cnt2, cnt1);
  bind(Lskip);

  // Rename registers
  Register limit1 = cnt1;
  Register limit2 = limit1;
  Register chr1   = result;
  Register chr2   = cnt2;
  if (ae == StrIntrinsicNode::LU || ae == StrIntrinsicNode::UL) {
    // We need an additional register to keep track of two limits
    assert_different_registers(str1, str2, cnt1, cnt2, tmp1, tmp2, result);
    limit2 = tmp2;
  }

  // Is the minimum length zero?
  cmp(limit1, (int)0); // use cast to resolve overloading ambiguity
  br(Assembler::equal, true, Assembler::pn, Ldone);
  // result is difference in lengths
  if (ae == StrIntrinsicNode::UU) {
    delayed()->sra(diff, 1, result); // Divide by 2 to get number of chars
  } else {
    delayed()->mov(diff, result);
  }

  // Load first characters
  if (ae == StrIntrinsicNode::LL) {
    stride1 = stride2 = sizeof(jbyte);
    ldub(str1, 0, chr1);
    ldub(str2, 0, chr2);
  } else if (ae == StrIntrinsicNode::UU) {
    stride1 = stride2 = sizeof(jchar);
    lduh(str1, 0, chr1);
    lduh(str2, 0, chr2);
  } else {
    stride1 = sizeof(jbyte);
    stride2 = sizeof(jchar);
    ldub(str1, 0, chr1);
    lduh(str2, 0, chr2);
  }

  // Compare first characters
  subcc(chr1, chr2, chr1);
  br(Assembler::notZero, false, Assembler::pt, Ldone);
  assert(chr1 == result, "result must be pre-placed");
  delayed()->nop();

  // Check if the strings start at same location
  cmp(str1, str2);
  brx(Assembler::equal, true, Assembler::pn, Ldone);
  delayed()->mov(G0, result); // result is zero

  // We have no guarantee that on 64 bit the higher half of limit is 0
  signx(limit1);

  // Get limit
  if (ae == StrIntrinsicNode::LU || ae == StrIntrinsicNode::UL) {
    sll(limit1, 1, limit2);
    subcc(limit2, stride2, chr2);
  }
  subcc(limit1, stride1, chr1);
  br(Assembler::zero, true, Assembler::pn, Ldone);
  // result is difference in lengths
  if (ae == StrIntrinsicNode::UU) {
    delayed()->sra(diff, 1, result); // Divide by 2 to get number of chars
  } else {
    delayed()->mov(diff, result);
  }

  // Shift str1 and str2 to the end of the arrays, negate limit
  add(str1, limit1, str1);
  add(str2, limit2, str2);
  neg(chr1, limit1); // limit1 = -(limit1-stride1)
  if (ae == StrIntrinsicNode::LU || ae == StrIntrinsicNode::UL) {
    neg(chr2, limit2); // limit2 = -(limit2-stride2)
  }

  // Compare the rest of the characters
  load_sized_value(Address(str1, limit1), chr1, (ae == StrIntrinsicNode::UU) ? 2 : 1, false);

  bind(Lloop);
  load_sized_value(Address(str2, limit2), chr2, (ae == StrIntrinsicNode::LL) ? 1 : 2, false);

  subcc(chr1, chr2, chr1);
  br(Assembler::notZero, false, Assembler::pt, Ldone);
  assert(chr1 == result, "result must be pre-placed");
  delayed()->inccc(limit1, stride1);
  if (ae == StrIntrinsicNode::LU || ae == StrIntrinsicNode::UL) {
    inccc(limit2, stride2);
  }

  // annul LDUB if branch is not taken to prevent access past end of string
  br(Assembler::notZero, true, Assembler::pt, Lloop);
  delayed()->load_sized_value(Address(str1, limit1), chr1, (ae == StrIntrinsicNode::UU) ? 2 : 1, false);

  // If strings are equal up to min length, return the length difference.
  if (ae == StrIntrinsicNode::UU) {
    // Divide by 2 to get number of chars
    sra(diff, 1, result);
  } else {
    mov(diff, result);
  }

  // Otherwise, return the difference between the first mismatched chars.
  bind(Ldone);
  if (ae == StrIntrinsicNode::UL) {
    // Negate result (see note above)
    neg(result);
  }
}
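
// Summary of the contract implemented above, as a sketch matching
// java.lang.String::compareTo semantics:
//
//   int compare(str1, len1, str2, len2) {
//     for (int i = 0; i < min(len1, len2); i++)
//       if (str1[i] != str2[i]) return str1[i] - str2[i];
//     return len1 - len2;  // in chars; counts are bytes for UTF-16 strings,
//   }                      // hence the sra by 1 in the UU case
//
// The UL case is computed as -compare(str2, str1), which is why str1 can
// always be treated as latin1 outside of UU.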
void MacroAssembler::array_equals(bool is_array_equ, Register ary1, Register ary2,
                                  Register limit, Register tmp, Register result, bool is_byte) {
  Label Ldone, Lloop, Lremaining;
  assert_different_registers(ary1, ary2, limit, tmp, result);

  int length_offset = arrayOopDesc::length_offset_in_bytes();
  int base_offset   = arrayOopDesc::base_offset_in_bytes(is_byte ? T_BYTE : T_CHAR);
  assert(base_offset % 8 == 0, "Base offset must be 8-byte aligned");

  if (is_array_equ) {
    // return true if the same array
    cmp(ary1, ary2);
    brx(Assembler::equal, true, Assembler::pn, Ldone);
    delayed()->mov(1, result); // equal

    br_null(ary1, true, Assembler::pn, Ldone);
    delayed()->clr(result);    // not equal

    br_null(ary2, true, Assembler::pn, Ldone);
    delayed()->clr(result);    // not equal

    // load the lengths of arrays
    ld(Address(ary1, length_offset), limit);
    ld(Address(ary2, length_offset), tmp);

    // return false if the two arrays are not equal length
    cmp(limit, tmp);
    br(Assembler::notEqual, true, Assembler::pn, Ldone);
    delayed()->clr(result);    // not equal
  }

  cmp_zero_and_br(Assembler::zero, limit, Ldone, true, Assembler::pn);
  delayed()->mov(1, result); // zero-length arrays are equal

  if (is_array_equ) {
    // load array addresses
    add(ary1, base_offset, ary1);
    add(ary2, base_offset, ary2);
    // set byte count
    if (!is_byte) {
      sll(limit, exact_log2(sizeof(jchar)), limit);
    }
  } else {
    // We have no guarantee that on 64 bit the higher half of limit is 0
    signx(limit);
  }

#ifdef ASSERT
  // Sanity check for doubleword (8-byte) alignment of ary1 and ary2.
  // Guaranteed on 64-bit systems (see arrayOopDesc::header_size_in_bytes()).
  Label Laligned;
  or3(ary1, ary2, tmp);
  andcc(tmp, 7, tmp);
  br_null_short(tmp, Assembler::pn, Laligned);
  STOP("First array element is not 8-byte aligned.");
  should_not_reach_here();
  bind(Laligned);
#endif

  // Shift ary1 and ary2 to the end of the arrays, negate limit
  add(ary1, limit, ary1);
  add(ary2, limit, ary2);
  neg(limit, limit);

  // MAIN LOOP
  // Load and compare array elements of size 'byte_width' until the elements are not
  // equal or we reached the end of the arrays. If the size of the arrays is not a
  // multiple of 'byte_width', we simply read over the end of the array, bail out and
  // compare the remaining bytes below by skipping the garbage bytes.
  ldx(ary1, limit, result);
  bind(Lloop);
  ldx(ary2, limit, tmp);
  inccc(limit, 8);
  // Bail out if we reached the end (but still do the comparison)
  br(Assembler::positive, false, Assembler::pn, Lremaining);
  delayed()->cmp(result, tmp);
  // Check equality of elements
  brx(Assembler::equal, false, Assembler::pt, target(Lloop));
  delayed()->ldx(ary1, limit, result);

  ba(Ldone);
  delayed()->clr(result); // not equal

  // TAIL COMPARISON
  // We got here because we reached the end of the arrays. 'limit' is the number of
  // garbage bytes we may have compared by reading over the end of the arrays. Shift
  // out the garbage and compare the remaining elements.
  bind(Lremaining);
  // Optimistic shortcut: elements potentially including garbage are equal
  brx(Assembler::equal, true, Assembler::pt, target(Ldone));
  delayed()->mov(1, result); // equal
  // Shift 'limit' bytes to the right and compare
  sll(limit, 3, limit); // bytes to bits
  srlx(result, limit, result);
  srlx(tmp, limit, tmp);
  cmp(result, tmp);
  clr(result);
  movcc(Assembler::equal, false, xcc, 1, result);

  bind(Ldone);
}
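
// Note on the tail comparison above: ldx on SPARC is a big-endian load, so
// the 'limit' garbage bytes read past the end of the arrays land in the
// least significant bits of the doubleword and can be discarded with a
// logical right shift. Sketch of the final check:
//
//   // limit = number of bytes read past the end (0..7)
//   bool tail_equal(uint64_t a, uint64_t b, int limit) {
//     return (a >> (8 * limit)) == (b >> (8 * limit));
//   }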
void MacroAssembler::has_negatives(Register inp, Register size, Register result, Register t2, Register t3, Register t4, Register t5) {

  // test for negative bytes in input string of a given size
  // result 1 if found, 0 otherwise.

  Label Lcore, Ltail, Lreturn, Lcore_rpt;

  assert_different_registers(inp, size, t2, t3, t4, t5, result);

  Register i     = result;  // result used as integer index i until very end
  Register lmask = t2;      // t2 is aliased to lmask

  // INITIALIZATION
  // ===========================================================
  // initialize highbits mask -> lmask = 0x8080808080808080  (8B/64b)
  // compute unaligned offset -> i
  // compute core end index   -> t5
  Assembler::sethi(0x80808000, t2); //! sethi macro fails to emit optimal
  add(t2, 0x80, t2);
  sllx(t2, 32, t3);
  or3(t3, t2, lmask);               // 0x8080808080808080 -> lmask
  sra(size, 0, size);
  andcc(inp, 0x7, i);               // unaligned offset -> i
  br(Assembler::zero, true, Assembler::pn, Lcore); // starts 8B aligned?
  delayed()->add(size, -8, t5);     // (annulled) core end index -> t5

  // ===========================================================

  // UNALIGNED HEAD
  // ===========================================================
  // * unaligned head handling: grab aligned 8B containing unaligned inp(ut)
  // * obliterate (ignore) bytes outside string by shifting off reg ends
  // * compare with bitmask, short circuit return true if one or more high
  //   bits set.
  cmp(size, 0);
  br(Assembler::zero, true, Assembler::pn, Lreturn); // short-circuit?
  delayed()->mov(0, result);   // annulled so i not clobbered for following
  neg(i, t4);
  add(i, size, t5);
  ldx(inp, t4, t3);            // raw aligned 8B containing unaligned head -> t3
  mov(8, t4);
  sub(t4, t5, t4);
  sra(t4, 31, t5);
  andn(t4, t5, t5);
  add(i, t5, t4);
  sll(t5, 3, t5);
  sll(t4, 3, t4);              // # bits to shift right, left -> t5,t4
  srlx(t3, t5, t3);
  sllx(t3, t4, t3);            // bytes outside string in 8B header obliterated -> t3
  andcc(lmask, t3, G0);
  brx(Assembler::notZero, true, Assembler::pn, Lreturn); // short circuit?
  delayed()->mov(1, result);   // annulled so i not clobbered for following
  add(size, -8, t5);           // core end index -> t5
  mov(8, t4);
  sub(t4, i, i);               // # bytes examined in unaligned head (<8) -> i
  // ===========================================================

  // ALIGNED CORE
  // ===========================================================
  // * iterate index i over aligned 8B sections of core, comparing with
  //   bitmask, short circuit return true if one or more high bits set
  // t5 contains core end index/loop limit which is the index
  //    of the MSB of last (unaligned) 8B fully contained in the string.
  // inp   contains address of first byte in string/array
  // lmask contains 8B high bit mask for comparison
  // i     contains next index to be processed (adr. inp+i is on 8B boundary)
  bind(Lcore);
  cmp_and_br_short(i, t5, Assembler::greater, Assembler::pn, Ltail);
  bind(Lcore_rpt);
  ldx(inp, i, t3);
  andcc(t3, lmask, G0);
  brx(Assembler::notZero, true, Assembler::pn, Lreturn);
  delayed()->mov(1, result);   // annulled so i not clobbered for following
  add(i, 8, i);
  cmp_and_br_short(i, t5, Assembler::lessEqual, Assembler::pn, Lcore_rpt);
  // ===========================================================

  // ALIGNED TAIL (<8B)
  // ===========================================================
  // handle aligned tail of 7B or less as complete 8B, obliterating end of
  // string bytes by shifting them off end, compare what's left with bitmask
  // inp   contains address of first byte in string/array
  // lmask contains 8B high bit mask for comparison
  // i     contains next index to be processed (adr. inp+i is on 8B boundary)
  bind(Ltail);
  subcc(size, i, t4);          // # of remaining bytes in string -> t4
  // return 0 if no more remaining bytes
  br(Assembler::lessEqual, true, Assembler::pn, Lreturn);
  delayed()->mov(0, result);   // annulled so i not clobbered for following
  ldx(inp, i, t3);             // load final 8B (aligned) containing tail -> t3
  mov(8, t5);
  sub(t5, t4, t4);
  mov(0, result);              // ** i clobbered at this point
  sll(t4, 3, t4);              // bits beyond end of string -> t4
  srlx(t3, t4, t3);            // bytes beyond end now obliterated -> t3
  andcc(lmask, t3, G0);
  movcc(Assembler::notZero, false, xcc, 1, result);
  bind(Lreturn);
}
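
// The core test above reduces to one AND per 8 bytes. As a scalar sketch:
//
//   bool has_negatives8(uint64_t eight_bytes) {
//     return (eight_bytes & 0x8080808080808080ULL) != 0; // any sign bit set
//   }
//
// The head and tail cases shift the bytes outside the string out of the
// loaded doubleword before applying the same mask.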
#endif


// Use BIS for zeroing (count is in bytes).
void MacroAssembler::bis_zeroing(Register to, Register count, Register temp, Label& Ldone) {
  assert(UseBlockZeroing && VM_Version::has_blk_zeroing(), "only works with BIS zeroing");
  Register end = count;
  int cache_line_size = VM_Version::prefetch_data_size();
  assert(cache_line_size > 0, "cache line size should be known for this code");
  // Minimum count when BIS zeroing can be used since
  // it needs membar which is expensive.
  int block_zero_size = MAX2(cache_line_size*3, (int)BlockZeroingLowLimit);

  Label small_loop;
  // Check if count is negative (dead code) or zero.
  // Note, count uses 64bit in 64 bit VM.
  cmp_and_brx_short(count, 0, Assembler::lessEqual, Assembler::pn, Ldone);

  // Use BIS zeroing only for big arrays since it requires membar.
  if (Assembler::is_simm13(block_zero_size)) { // < 4096
    cmp(count, block_zero_size);
  } else {
    set(block_zero_size, temp);
    cmp(count, temp);
  }
  br(Assembler::lessUnsigned, false, Assembler::pt, small_loop);
  delayed()->add(to, count, end);

  // Note: size is >= three (32 bytes) cache lines.

  // Clean the beginning of space up to next cache line.
  for (int offs = 0; offs < cache_line_size; offs += 8) {
    stx(G0, to, offs);
  }

  // align to next cache line
  add(to, cache_line_size, to);
  and3(to, -cache_line_size, to);

  // Note: size left >= two (32 bytes) cache lines.

  // BIS should not be used to zero tail (64 bytes)
  // to avoid zeroing a header of the following object.
  sub(end, (cache_line_size*2)-8, end);

  Label bis_loop;
  bind(bis_loop);
  stxa(G0, to, G0, Assembler::ASI_ST_BLKINIT_PRIMARY);
  add(to, cache_line_size, to);
  cmp_and_brx_short(to, end, Assembler::lessUnsigned, Assembler::pt, bis_loop);

  // BIS needs membar.
  membar(Assembler::StoreLoad);

  add(end, (cache_line_size*2)-8, end); // restore end
  cmp_and_brx_short(to, end, Assembler::greaterEqualUnsigned, Assembler::pn, Ldone);

  // Clean the tail.
  bind(small_loop);
  stx(G0, to, 0);
  add(to, 8, to);
  cmp_and_brx_short(to, end, Assembler::lessUnsigned, Assembler::pt, small_loop);
  nop(); // Separate short branches
}
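
// Shape of the zeroing performed above, in pseudo-C (a sketch with
// hypothetical helpers 'zero_with_stx', 'block_init_store' and 'align_up';
// 'line' is the prefetch/cache line size):
//
//   void bis_zero(char* to, size_t count) {
//     char* end = to + count;
//     zero_with_stx(to, align_up(to, line));      // head, at most one line
//     for (char* p = align_up(to, line); p < end - 2*line + 8; p += line)
//       block_init_store(p);                      // BIS, no cache-line fill
//     membar_StoreLoad();                         // required after BIS
//     zero_with_stx(p, end);                      // tail, plain 8-byte stores
//   }
//
// The tail is deliberately not BIS-zeroed so a following object's header is
// never touched by a block-initializing store.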
/**
 * Update CRC-32[C] with a byte value according to constants in table
 *
 * @param [in,out]crc   Register containing the crc.
 * @param [in]val       Register containing the byte to fold into the CRC.
 * @param [in]table     Register containing the table of crc constants.
 *
 * uint32_t crc;
 * val = crc_table[(val ^ crc) & 0xFF];
 * crc = val ^ (crc >> 8);
 */
void MacroAssembler::update_byte_crc32(Register crc, Register val, Register table) {
  xor3(val, crc, val);
  and3(val, 0xFF, val);
  sllx(val, 2, val);
  lduw(table, val, val);
  srlx(crc, 8, crc);
  xor3(val, crc, crc);
}

// Reverse byte order of lower 32 bits, assuming upper 32 bits all zeros
void MacroAssembler::reverse_bytes_32(Register src, Register dst, Register tmp) {
  srlx(src, 24, dst);

  sllx(src, 32+8, tmp);
  srlx(tmp, 32+24, tmp);
  sllx(tmp, 8, tmp);
  or3(dst, tmp, dst);

  sllx(src, 32+16, tmp);
  srlx(tmp, 32+24, tmp);
  sllx(tmp, 16, tmp);
  or3(dst, tmp, dst);

  sllx(src, 32+24, tmp);
  srlx(tmp, 32, tmp);
  or3(dst, tmp, dst);
}

void MacroAssembler::movitof_revbytes(Register src, FloatRegister dst, Register tmp1, Register tmp2) {
  reverse_bytes_32(src, tmp1, tmp2);
  movxtod(tmp1, dst);
}

void MacroAssembler::movftoi_revbytes(FloatRegister src, Register dst, Register tmp1, Register tmp2) {
  movdtox(src, tmp1);
  reverse_bytes_32(tmp1, dst, tmp2);
}

void MacroAssembler::fold_128bit_crc32(Register xcrc_hi, Register xcrc_lo, Register xK_hi, Register xK_lo, Register xtmp_hi, Register xtmp_lo, Register buf, int offset) {
  xmulx(xcrc_hi, xK_hi, xtmp_lo);
  xmulxhi(xcrc_hi, xK_hi, xtmp_hi);
  xmulxhi(xcrc_lo, xK_lo, xcrc_hi);
  xmulx(xcrc_lo, xK_lo, xcrc_lo);
  xor3(xcrc_lo, xtmp_lo, xcrc_lo);
  xor3(xcrc_hi, xtmp_hi, xcrc_hi);
  ldxl(buf, G0, xtmp_lo);
  inc(buf, 8);
  ldxl(buf, G0, xtmp_hi);
  inc(buf, 8);
  xor3(xcrc_lo, xtmp_lo, xcrc_lo);
  xor3(xcrc_hi, xtmp_hi, xcrc_hi);
}

void MacroAssembler::fold_128bit_crc32(Register xcrc_hi, Register xcrc_lo, Register xK_hi, Register xK_lo, Register xtmp_hi, Register xtmp_lo, Register xbuf_hi, Register xbuf_lo) {
  mov(xcrc_lo, xtmp_lo);
  mov(xcrc_hi, xtmp_hi);
  xmulx(xtmp_hi, xK_hi, xtmp_lo);
  xmulxhi(xtmp_hi, xK_hi, xtmp_hi);
  xmulxhi(xcrc_lo, xK_lo, xcrc_hi);
  xmulx(xcrc_lo, xK_lo, xcrc_lo);
  xor3(xcrc_lo, xbuf_lo, xcrc_lo);
  xor3(xcrc_hi, xbuf_hi, xcrc_hi);
  xor3(xcrc_lo, xtmp_lo, xcrc_lo);
  xor3(xcrc_hi, xtmp_hi, xcrc_hi);
}

void MacroAssembler::fold_8bit_crc32(Register xcrc, Register table, Register xtmp, Register tmp) {
  and3(xcrc, 0xFF, tmp);
  sllx(tmp, 2, tmp);
  lduw(table, tmp, xtmp);
  srlx(xcrc, 8, xcrc);
  xor3(xtmp, xcrc, xcrc);
}

void MacroAssembler::fold_8bit_crc32(Register crc, Register table, Register tmp) {
  and3(crc, 0xFF, tmp);
  srlx(crc, 8, crc);
  sllx(tmp, 2, tmp);
  lduw(table, tmp, tmp);
  xor3(tmp, crc, crc);
}
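
// The 128-bit folds above use SPARC's carry-less multiply (xmulx produces
// the low 64 bits, xmulxhi the high 64 bits of the 128-bit product). One
// fold step, as a sketch in math notation:
//
//   (hi:lo) <- clmul128(hi, K_hi) ^ clmul128(lo, K_lo) ^ next_128_data_bits
//
// where K_hi/K_lo are x^N mod P(x) constants chosen so that folding keeps
// the running value congruent (mod P(x)) to the CRC of all data consumed
// so far.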
#define CRC32_TMP_REG_NUM 18

#define CRC32_CONST_64  0x163cd6124
#define CRC32_CONST_96  0x0ccaa009e
#define CRC32_CONST_160 0x1751997d0
#define CRC32_CONST_480 0x1c6e41596
#define CRC32_CONST_544 0x154442bd4

void MacroAssembler::kernel_crc32(Register crc, Register buf, Register len, Register table) {

  Label L_cleanup_loop, L_cleanup_check, L_align_loop, L_align_check;
  Label L_main_loop_prologue;
  Label L_fold_512b, L_fold_512b_loop, L_fold_128b;
  Label L_fold_tail, L_fold_tail_loop;
  Label L_8byte_fold_check;

  const Register tmp[CRC32_TMP_REG_NUM] = {L0, L1, L2, L3, L4, L5, L6, G1, I0, I1, I2, I3, I4, I5, I7, O4, O5, G3};

  Register const_64  = tmp[CRC32_TMP_REG_NUM-1];
  Register const_96  = tmp[CRC32_TMP_REG_NUM-1];
  Register const_160 = tmp[CRC32_TMP_REG_NUM-2];
  Register const_480 = tmp[CRC32_TMP_REG_NUM-1];
  Register const_544 = tmp[CRC32_TMP_REG_NUM-2];

  set(ExternalAddress(StubRoutines::crc_table_addr()), table);

  not1(crc);   // ~c
  clruwu(crc); // clear upper 32 bits of crc

  // Check if below cutoff, proceed directly to cleanup code
  mov(31, G4);
  cmp_and_br_short(len, G4, Assembler::lessEqualUnsigned, Assembler::pt, L_cleanup_check);

  // Align buffer to 8-byte boundary
  mov(8, O5);
  and3(buf, 0x7, O4);
  sub(O5, O4, O5);
  and3(O5, 0x7, O5);
  sub(len, O5, len);
  ba(L_align_check);
  delayed()->nop();

  // Alignment loop, table look up method for up to 7 bytes
  bind(L_align_loop);
  ldub(buf, 0, O4);
  inc(buf);
  dec(O5);
  xor3(O4, crc, O4);
  and3(O4, 0xFF, O4);
  sllx(O4, 2, O4);
  lduw(table, O4, O4);
  srlx(crc, 8, crc);
  xor3(O4, crc, crc);
  bind(L_align_check);
  nop();
  cmp_and_br_short(O5, 0, Assembler::notEqual, Assembler::pt, L_align_loop);

  // Aligned on 64-bit (8-byte) boundary at this point
  // Check if still above cutoff (31 bytes)
  mov(31, G4);
  cmp_and_br_short(len, G4, Assembler::lessEqualUnsigned, Assembler::pt, L_cleanup_check);
  // At least 32 bytes left to process

  // Free up registers by storing them to FP registers
  for (int i = 0; i < CRC32_TMP_REG_NUM; i++) {
    movxtod(tmp[i], as_FloatRegister(2*i));
  }

  // Determine which loop to enter
  // Shared prologue
  ldxl(buf, G0, tmp[0]);
  inc(buf, 8);
  ldxl(buf, G0, tmp[1]);
  inc(buf, 8);
  xor3(tmp[0], crc, tmp[0]); // Fold CRC into first few bytes
  and3(crc, 0, crc);         // Clear out the crc register
  // Main loop needs at least 128 bytes
  mov(128, G4);
  mov(64, tmp[2]);
  cmp_and_br_short(len, G4, Assembler::greaterEqualUnsigned, Assembler::pt, L_main_loop_prologue);
  // Less than 64 bytes
  nop();
  cmp_and_br_short(len, tmp[2], Assembler::lessUnsigned, Assembler::pt, L_fold_tail);
  // Between 64 and 127 bytes
  set64(CRC32_CONST_96, const_96, tmp[8]);
  set64(CRC32_CONST_160, const_160, tmp[9]);
  fold_128bit_crc32(tmp[1], tmp[0], const_96, const_160, tmp[2], tmp[3], buf, 0);
  fold_128bit_crc32(tmp[1], tmp[0], const_96, const_160, tmp[4], tmp[5], buf, 16);
  fold_128bit_crc32(tmp[1], tmp[0], const_96, const_160, tmp[6], tmp[7], buf, 32);
  dec(len, 48);
  ba(L_fold_tail);
  delayed()->nop();

  bind(L_main_loop_prologue);
  for (int i = 2; i < 8; i++) {
    ldxl(buf, G0, tmp[i]);
    inc(buf, 8);
  }

  // Fold total 512 bits of polynomial on each iteration,
  // 128 bits per each of 4 parallel streams
  set64(CRC32_CONST_480, const_480, tmp[8]);
  set64(CRC32_CONST_544, const_544, tmp[9]);

  mov(128, G4);
  bind(L_fold_512b_loop);
  fold_128bit_crc32(tmp[1], tmp[0], const_480, const_544, tmp[9],  tmp[8],  buf,  0);
  fold_128bit_crc32(tmp[3], tmp[2], const_480, const_544, tmp[11], tmp[10], buf, 16);
  fold_128bit_crc32(tmp[5], tmp[4], const_480, const_544, tmp[13], tmp[12], buf, 32);
  fold_128bit_crc32(tmp[7], tmp[6], const_480, const_544, tmp[15], tmp[14], buf, 48);
  dec(len, 64);
  cmp_and_br_short(len, G4, Assembler::greaterEqualUnsigned, Assembler::pt, L_fold_512b_loop);

  // Fold 512 bits to 128 bits
  bind(L_fold_512b);
  set64(CRC32_CONST_96, const_96, tmp[8]);
  set64(CRC32_CONST_160, const_160, tmp[9]);

  fold_128bit_crc32(tmp[1], tmp[0], const_96, const_160, tmp[8], tmp[9], tmp[3], tmp[2]);
  fold_128bit_crc32(tmp[1], tmp[0], const_96, const_160, tmp[8], tmp[9], tmp[5], tmp[4]);
  fold_128bit_crc32(tmp[1], tmp[0], const_96, const_160, tmp[8], tmp[9], tmp[7], tmp[6]);
  dec(len, 48);

  // Fold the rest of 128 bits data chunks
  bind(L_fold_tail);
  mov(32, G4);
  cmp_and_br_short(len, G4, Assembler::lessEqualUnsigned, Assembler::pt, L_fold_128b);

  set64(CRC32_CONST_96, const_96, tmp[8]);
  set64(CRC32_CONST_160, const_160, tmp[9]);

  bind(L_fold_tail_loop);
  fold_128bit_crc32(tmp[1], tmp[0], const_96, const_160, tmp[2], tmp[3], buf, 0);
  sub(len, 16, len);
  cmp_and_br_short(len, G4, Assembler::greaterEqualUnsigned, Assembler::pt, L_fold_tail_loop);

  // Fold the 128 bits in tmps 0 - 1 into tmp 1
  bind(L_fold_128b);

  set64(CRC32_CONST_64, const_64, tmp[4]);

  xmulx(const_64, tmp[0], tmp[2]);
  xmulxhi(const_64, tmp[0], tmp[3]);

  srl(tmp[2], G0, tmp[4]);
  xmulx(const_64, tmp[4], tmp[4]);

  srlx(tmp[2], 32, tmp[2]);
  sllx(tmp[3], 32, tmp[3]);
  or3(tmp[2], tmp[3], tmp[2]);

  xor3(tmp[4], tmp[1], tmp[4]);
  xor3(tmp[4], tmp[2], tmp[1]);
  dec(len, 8);

  // Use table lookup for the 8 bytes left in tmp[1]
  dec(len, 8);

  // 8 8-bit folds to compute 32-bit CRC.
  for (int j = 0; j < 4; j++) {
    fold_8bit_crc32(tmp[1], table, tmp[2], tmp[3]);
  }
  srl(tmp[1], G0, crc); // move 32 bits to general register
  for (int j = 0; j < 4; j++) {
    fold_8bit_crc32(crc, table, tmp[3]);
  }

  bind(L_8byte_fold_check);

  // Restore int registers saved in FP registers
  for (int i = 0; i < CRC32_TMP_REG_NUM; i++) {
    movdtox(as_FloatRegister(2*i), tmp[i]);
  }

  ba(L_cleanup_check);
  delayed()->nop();

  // Table look-up method for the remaining few bytes
  bind(L_cleanup_loop);
  ldub(buf, 0, O4);
  inc(buf);
  dec(len);
  xor3(O4, crc, O4);
  and3(O4, 0xFF, O4);
  sllx(O4, 2, O4);
  lduw(table, O4, O4);
  srlx(crc, 8, crc);
  xor3(O4, crc, crc);
  bind(L_cleanup_check);
  nop();
  cmp_and_br_short(len, 0, Assembler::greaterUnsigned, Assembler::pt, L_cleanup_loop);

  not1(crc);
}
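
// The alignment and cleanup loops in kernel_crc32 above are the classic
// byte-at-a-time table-driven CRC; in C (a sketch, same recurrence as the
// update_byte_crc32 doc comment):
//
//   uint32_t crc32_bytes(uint32_t crc, const uint8_t* buf, size_t len,
//                        const uint32_t* table) {
//     while (len--) {
//       crc = table[(crc ^ *buf++) & 0xff] ^ (crc >> 8);
//     }
//     return crc;
//   }
//
// with the not1(crc) calls on entry and exit supplying the standard CRC-32
// pre- and post-inversion.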
#define CHUNK_LEN   128          /* 128 x 8B = 1KB */
#define CHUNK_K1    0x1307a0206  /* reverseBits(pow(x, CHUNK_LEN*8*8*3 - 32) mod P(x)) << 1 */
#define CHUNK_K2    0x1a0f717c4  /* reverseBits(pow(x, CHUNK_LEN*8*8*2 - 32) mod P(x)) << 1 */
#define CHUNK_K3    0x0170076fa  /* reverseBits(pow(x, CHUNK_LEN*8*8*1 - 32) mod P(x)) << 1 */

void MacroAssembler::kernel_crc32c(Register crc, Register buf, Register len, Register table) {

  Label L_crc32c_head, L_crc32c_aligned;
  Label L_crc32c_parallel, L_crc32c_parallel_loop;
  Label L_crc32c_serial, L_crc32c_x32_loop, L_crc32c_x8, L_crc32c_x8_loop;
  Label L_crc32c_done, L_crc32c_tail, L_crc32c_return;

  set(ExternalAddress(StubRoutines::crc32c_table_addr()), table);

  cmp_and_br_short(len, 0, Assembler::lessEqual, Assembler::pn, L_crc32c_return);

  // clear upper 32 bits of crc
  clruwu(crc);

  and3(buf, 7, G4);
  cmp_and_brx_short(G4, 0, Assembler::equal, Assembler::pt, L_crc32c_aligned);

  mov(8, G1);
  sub(G1, G4, G4);

  // ------ process the misaligned head (7 bytes or less) ------
  bind(L_crc32c_head);

  // crc = (crc >>> 8) ^ byteTable[(crc ^ b) & 0xFF];
  ldub(buf, 0, G1);
  update_byte_crc32(crc, G1, table);

  inc(buf);
  dec(len);
  cmp_and_br_short(len, 0, Assembler::equal, Assembler::pn, L_crc32c_return);
  dec(G4);
  cmp_and_br_short(G4, 0, Assembler::greater, Assembler::pt, L_crc32c_head);

  // ------ process the 8-byte-aligned body ------
  bind(L_crc32c_aligned);
  nop();
  cmp_and_br_short(len, 8, Assembler::less, Assembler::pn, L_crc32c_tail);

  // reverse the byte order of lower 32 bits to big endian, and move to FP side
  movitof_revbytes(crc, F0, G1, G3);

  set(CHUNK_LEN*8*4, G4);
  cmp_and_br_short(len, G4, Assembler::less, Assembler::pt, L_crc32c_serial);

  // ------ process four 1KB chunks in parallel ------
  bind(L_crc32c_parallel);

  fzero(FloatRegisterImpl::D, F2);
  fzero(FloatRegisterImpl::D, F4);
  fzero(FloatRegisterImpl::D, F6);

  mov(CHUNK_LEN - 1, G4);
  bind(L_crc32c_parallel_loop);
  // schedule ldf's ahead of crc32c's to hide the load-use latency
  ldf(FloatRegisterImpl::D, buf, 0,            F8);
  ldf(FloatRegisterImpl::D, buf, CHUNK_LEN*8,  F10);
  ldf(FloatRegisterImpl::D, buf, CHUNK_LEN*16, F12);
  ldf(FloatRegisterImpl::D, buf, CHUNK_LEN*24, F14);
  crc32c(F0, F8,  F0);
  crc32c(F2, F10, F2);
  crc32c(F4, F12, F4);
  crc32c(F6, F14, F6);
  inc(buf, 8);
  dec(G4);
  cmp_and_br_short(G4, 0, Assembler::greater, Assembler::pt, L_crc32c_parallel_loop);

  ldf(FloatRegisterImpl::D, buf, 0,            F8);
  ldf(FloatRegisterImpl::D, buf, CHUNK_LEN*8,  F10);
  ldf(FloatRegisterImpl::D, buf, CHUNK_LEN*16, F12);
  crc32c(F0, F8,  F0);
  crc32c(F2, F10, F2);
  crc32c(F4, F12, F4);

  inc(buf, CHUNK_LEN*24);
  ldfl(FloatRegisterImpl::D, buf, G0, F14); // load in little endian
  inc(buf, 8);

  prefetch(buf, 0,            Assembler::severalReads);
  prefetch(buf, CHUNK_LEN*8,  Assembler::severalReads);
  prefetch(buf, CHUNK_LEN*16, Assembler::severalReads);
  prefetch(buf, CHUNK_LEN*24, Assembler::severalReads);

  // move to INT side, and reverse the byte order of lower 32 bits to little endian
  movftoi_revbytes(F0, O4, G1, G4);
  movftoi_revbytes(F2, O5, G1, G4);
  movftoi_revbytes(F4, G5, G1, G4);

  // combine the results of 4 chunks
  set64(CHUNK_K1, G3, G1);
  xmulx(O4, G3, O4);
  set64(CHUNK_K2, G3, G1);
  xmulx(O5, G3, O5);
  set64(CHUNK_K3, G3, G1);
  xmulx(G5, G3, G5);

  movdtox(F14, G4);
  xor3(O4, O5, O5);
  xor3(G5, O5, O5);
  xor3(G4, O5, O5);

  // reverse the byte order to big endian, via stack, and move to FP side
  // TODO: use new revb instruction
  add(SP, -8, G1);
  srlx(G1, 3, G1);
  sllx(G1, 3, G1);
  stx(O5, G1, G0);
  ldfl(FloatRegisterImpl::D, G1, G0, F2); // load in little endian

  crc32c(F6, F2, F0);

  set(CHUNK_LEN*8*4, G4);
  sub(len, G4, len);
  cmp_and_br_short(len, G4, Assembler::greaterEqual, Assembler::pt, L_crc32c_parallel);
  nop();
  cmp_and_br_short(len, 0, Assembler::equal, Assembler::pt, L_crc32c_done);

  bind(L_crc32c_serial);

  mov(32, G4);
  cmp_and_br_short(len, G4, Assembler::less, Assembler::pn, L_crc32c_x8);

  // ------ process 32B chunks ------
  bind(L_crc32c_x32_loop);
  ldf(FloatRegisterImpl::D, buf, 0, F2);
  crc32c(F0, F2, F0);
  ldf(FloatRegisterImpl::D, buf, 8, F2);
  crc32c(F0, F2, F0);
  ldf(FloatRegisterImpl::D, buf, 16, F2);
  crc32c(F0, F2, F0);
  ldf(FloatRegisterImpl::D, buf, 24, F2);
  inc(buf, 32);
  crc32c(F0, F2, F0);
  dec(len, 32);
  cmp_and_br_short(len, G4, Assembler::greaterEqual, Assembler::pt, L_crc32c_x32_loop);

  bind(L_crc32c_x8);
  nop();
  cmp_and_br_short(len, 8, Assembler::less, Assembler::pt, L_crc32c_done);

  // ------ process 8B chunks ------
  bind(L_crc32c_x8_loop);
  ldf(FloatRegisterImpl::D, buf, 0, F2);
  inc(buf, 8);
  crc32c(F0, F2, F0);
  dec(len, 8);
  cmp_and_br_short(len, 8, Assembler::greaterEqual, Assembler::pt, L_crc32c_x8_loop);

  bind(L_crc32c_done);

  // move to INT side, and reverse the byte order of lower 32 bits to little endian
  movftoi_revbytes(F0, crc, G1, G3);

  cmp_and_br_short(len, 0, Assembler::equal, Assembler::pt, L_crc32c_return);

  // ------ process the misaligned tail (7 bytes or less) ------
  bind(L_crc32c_tail);

  // crc = (crc >>> 8) ^ byteTable[(crc ^ b) & 0xFF];
  ldub(buf, 0, G1);
  update_byte_crc32(crc, G1, table);

  inc(buf);
  dec(len);
  cmp_and_br_short(len, 0, Assembler::greater, Assembler::pt, L_crc32c_tail);

  bind(L_crc32c_return);
  nop();
}
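
// How the four parallel streams in kernel_crc32c are combined (a sketch,
// following the CHUNK_Ki comments above): after the loop, stream i holds a
// partial CRC over its 1KB chunk. Carry-less multiplication by
// CHUNK_Ki = reverseBits(x^(bits remaining after chunk i) mod P(x)) << 1
// shifts each partial CRC to its final bit position, so
//
//   crc(A||B||C||D) = clmul(crc_A, K1) ^ clmul(crc_B, K2)
//                   ^ clmul(crc_C, K3) ^ (last 8 bytes of D)
//
// which is then reduced with one more crc32c instruction.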