1 /* 2 * Copyright (c) 2003, 2013, Oracle and/or its affiliates. All rights reserved. 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 4 * 5 * This code is free software; you can redistribute it and/or modify it 6 * under the terms of the GNU General Public License version 2 only, as 7 * published by the Free Software Foundation. 8 * 9 * This code is distributed in the hope that it will be useful, but WITHOUT 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License 12 * version 2 for more details (a copy is included in the LICENSE file that 13 * accompanied this code). 14 * 15 * You should have received a copy of the GNU General Public License version 16 * 2 along with this work; if not, write to the Free Software Foundation, 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 18 * 19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA 20 * or visit www.oracle.com if you need additional information or have any 21 * questions. 22 * 23 */ 24 25 #include "precompiled.hpp" 26 #include "asm/macroAssembler.hpp" 27 #include "asm/macroAssembler.inline.hpp" 28 #include "interpreter/interpreter.hpp" 29 #include "nativeInst_x86.hpp" 30 #include "oops/instanceOop.hpp" 31 #include "oops/method.hpp" 32 #include "oops/objArrayKlass.hpp" 33 #include "oops/oop.inline.hpp" 34 #include "prims/methodHandles.hpp" 35 #include "runtime/frame.inline.hpp" 36 #include "runtime/handles.inline.hpp" 37 #include "runtime/sharedRuntime.hpp" 38 #include "runtime/stubCodeGenerator.hpp" 39 #include "runtime/stubRoutines.hpp" 40 #include "runtime/thread.inline.hpp" 41 #include "utilities/top.hpp" 42 #ifdef COMPILER2 43 #include "opto/runtime.hpp" 44 #endif 45 46 // Declaration and definition of StubGenerator (no .hpp file). 47 // For a more detailed description of the stub routine structure 48 // see the comment in stubRoutines.hpp 49 50 #define __ _masm-> 51 #define TIMES_OOP (UseCompressedOops ? Address::times_4 : Address::times_8) 52 #define a__ ((Assembler*)_masm)-> 53 54 #ifdef PRODUCT 55 #define BLOCK_COMMENT(str) /* nothing */ 56 #else 57 #define BLOCK_COMMENT(str) __ block_comment(str) 58 #endif 59 60 #define BIND(label) bind(label); BLOCK_COMMENT(#label ":") 61 const int MXCSR_MASK = 0xFFC0; // Mask out any pending exceptions 62 63 // Stub Code definitions 64 65 static address handle_unsafe_access() { 66 JavaThread* thread = JavaThread::current(); 67 address pc = thread->saved_exception_pc(); 68 // pc is the instruction which we must emulate 69 // doing a no-op is fine: return garbage from the load 70 // therefore, compute npc 71 address npc = Assembler::locate_next_instruction(pc); 72 73 // request an async exception 74 thread->set_pending_unsafe_access_error(); 75 76 // return address of next instruction to execute 77 return npc; 78 } 79 80 class StubGenerator: public StubCodeGenerator { 81 private: 82 83 #ifdef PRODUCT 84 #define inc_counter_np(counter) ((void)0) 85 #else 86 void inc_counter_np_(int& counter) { 87 // This can destroy rscratch1 if counter is far from the code cache 88 __ incrementl(ExternalAddress((address)&counter)); 89 } 90 #define inc_counter_np(counter) \ 91 BLOCK_COMMENT("inc_counter " #counter); \ 92 inc_counter_np_(counter); 93 #endif 94 95 // Call stubs are used to call Java from C 96 // 97 // Linux Arguments: 98 // c_rarg0: call wrapper address address 99 // c_rarg1: result address 100 // c_rarg2: result type BasicType 101 // c_rarg3: method Method* 102 // c_rarg4: (interpreter) entry point address 103 // c_rarg5: parameters intptr_t* 104 // 16(rbp): parameter size (in words) int 105 // 24(rbp): thread Thread* 106 // 107 // [ return_from_Java ] <--- rsp 108 // [ argument word n ] 109 // ... 110 // -12 [ argument word 1 ] 111 // -11 [ saved r15 ] <--- rsp_after_call 112 // -10 [ saved r14 ] 113 // -9 [ saved r13 ] 114 // -8 [ saved r12 ] 115 // -7 [ saved rbx ] 116 // -6 [ call wrapper ] 117 // -5 [ result ] 118 // -4 [ result type ] 119 // -3 [ method ] 120 // -2 [ entry point ] 121 // -1 [ parameters ] 122 // 0 [ saved rbp ] <--- rbp 123 // 1 [ return address ] 124 // 2 [ parameter size ] 125 // 3 [ thread ] 126 // 127 // Windows Arguments: 128 // c_rarg0: call wrapper address address 129 // c_rarg1: result address 130 // c_rarg2: result type BasicType 131 // c_rarg3: method Method* 132 // 48(rbp): (interpreter) entry point address 133 // 56(rbp): parameters intptr_t* 134 // 64(rbp): parameter size (in words) int 135 // 72(rbp): thread Thread* 136 // 137 // [ return_from_Java ] <--- rsp 138 // [ argument word n ] 139 // ... 140 // -28 [ argument word 1 ] 141 // -27 [ saved xmm15 ] <--- rsp_after_call 142 // [ saved xmm7-xmm14 ] 143 // -9 [ saved xmm6 ] (each xmm register takes 2 slots) 144 // -7 [ saved r15 ] 145 // -6 [ saved r14 ] 146 // -5 [ saved r13 ] 147 // -4 [ saved r12 ] 148 // -3 [ saved rdi ] 149 // -2 [ saved rsi ] 150 // -1 [ saved rbx ] 151 // 0 [ saved rbp ] <--- rbp 152 // 1 [ return address ] 153 // 2 [ call wrapper ] 154 // 3 [ result ] 155 // 4 [ result type ] 156 // 5 [ method ] 157 // 6 [ entry point ] 158 // 7 [ parameters ] 159 // 8 [ parameter size ] 160 // 9 [ thread ] 161 // 162 // Windows reserves the callers stack space for arguments 1-4. 163 // We spill c_rarg0-c_rarg3 to this space. 164 165 // Call stub stack layout word offsets from rbp 166 enum call_stub_layout { 167 #ifdef _WIN64 168 xmm_save_first = 6, // save from xmm6 169 xmm_save_last = 15, // to xmm15 170 xmm_save_base = -9, 171 rsp_after_call_off = xmm_save_base - 2 * (xmm_save_last - xmm_save_first), // -27 172 r15_off = -7, 173 r14_off = -6, 174 r13_off = -5, 175 r12_off = -4, 176 rdi_off = -3, 177 rsi_off = -2, 178 rbx_off = -1, 179 rbp_off = 0, 180 retaddr_off = 1, 181 call_wrapper_off = 2, 182 result_off = 3, 183 result_type_off = 4, 184 method_off = 5, 185 entry_point_off = 6, 186 parameters_off = 7, 187 parameter_size_off = 8, 188 thread_off = 9 189 #else 190 rsp_after_call_off = -12, 191 mxcsr_off = rsp_after_call_off, 192 r15_off = -11, 193 r14_off = -10, 194 r13_off = -9, 195 r12_off = -8, 196 rbx_off = -7, 197 call_wrapper_off = -6, 198 result_off = -5, 199 result_type_off = -4, 200 method_off = -3, 201 entry_point_off = -2, 202 parameters_off = -1, 203 rbp_off = 0, 204 retaddr_off = 1, 205 parameter_size_off = 2, 206 thread_off = 3 207 #endif 208 }; 209 210 #ifdef _WIN64 211 Address xmm_save(int reg) { 212 assert(reg >= xmm_save_first && reg <= xmm_save_last, "XMM register number out of range"); 213 return Address(rbp, (xmm_save_base - (reg - xmm_save_first) * 2) * wordSize); 214 } 215 #endif 216 217 address generate_call_stub(address& return_address) { 218 assert((int)frame::entry_frame_after_call_words == -(int)rsp_after_call_off + 1 && 219 (int)frame::entry_frame_call_wrapper_offset == (int)call_wrapper_off, 220 "adjust this code"); 221 StubCodeMark mark(this, "StubRoutines", "call_stub"); 222 address start = __ pc(); 223 224 // same as in generate_catch_exception()! 225 const Address rsp_after_call(rbp, rsp_after_call_off * wordSize); 226 227 const Address call_wrapper (rbp, call_wrapper_off * wordSize); 228 const Address result (rbp, result_off * wordSize); 229 const Address result_type (rbp, result_type_off * wordSize); 230 const Address method (rbp, method_off * wordSize); 231 const Address entry_point (rbp, entry_point_off * wordSize); 232 const Address parameters (rbp, parameters_off * wordSize); 233 const Address parameter_size(rbp, parameter_size_off * wordSize); 234 235 // same as in generate_catch_exception()! 236 const Address thread (rbp, thread_off * wordSize); 237 238 const Address r15_save(rbp, r15_off * wordSize); 239 const Address r14_save(rbp, r14_off * wordSize); 240 const Address r13_save(rbp, r13_off * wordSize); 241 const Address r12_save(rbp, r12_off * wordSize); 242 const Address rbx_save(rbp, rbx_off * wordSize); 243 244 // stub code 245 __ enter(); 246 __ subptr(rsp, -rsp_after_call_off * wordSize); 247 248 // save register parameters 249 #ifndef _WIN64 250 __ movptr(parameters, c_rarg5); // parameters 251 __ movptr(entry_point, c_rarg4); // entry_point 252 #endif 253 254 __ movptr(method, c_rarg3); // method 255 __ movl(result_type, c_rarg2); // result type 256 __ movptr(result, c_rarg1); // result 257 __ movptr(call_wrapper, c_rarg0); // call wrapper 258 259 // save regs belonging to calling function 260 __ movptr(rbx_save, rbx); 261 __ movptr(r12_save, r12); 262 __ movptr(r13_save, r13); 263 __ movptr(r14_save, r14); 264 __ movptr(r15_save, r15); 265 #ifdef _WIN64 266 for (int i = 6; i <= 15; i++) { 267 __ movdqu(xmm_save(i), as_XMMRegister(i)); 268 } 269 270 const Address rdi_save(rbp, rdi_off * wordSize); 271 const Address rsi_save(rbp, rsi_off * wordSize); 272 273 __ movptr(rsi_save, rsi); 274 __ movptr(rdi_save, rdi); 275 #else 276 const Address mxcsr_save(rbp, mxcsr_off * wordSize); 277 { 278 Label skip_ldmx; 279 __ stmxcsr(mxcsr_save); 280 __ movl(rax, mxcsr_save); 281 __ andl(rax, MXCSR_MASK); // Only check control and mask bits 282 ExternalAddress mxcsr_std(StubRoutines::addr_mxcsr_std()); 283 __ cmp32(rax, mxcsr_std); 284 __ jcc(Assembler::equal, skip_ldmx); 285 __ ldmxcsr(mxcsr_std); 286 __ bind(skip_ldmx); 287 } 288 #endif 289 290 // Load up thread register 291 __ movptr(r15_thread, thread); 292 __ reinit_heapbase(); 293 294 #ifdef ASSERT 295 // make sure we have no pending exceptions 296 { 297 Label L; 298 __ cmpptr(Address(r15_thread, Thread::pending_exception_offset()), (int32_t)NULL_WORD); 299 __ jcc(Assembler::equal, L); 300 __ stop("StubRoutines::call_stub: entered with pending exception"); 301 __ bind(L); 302 } 303 #endif 304 305 // pass parameters if any 306 BLOCK_COMMENT("pass parameters if any"); 307 Label parameters_done; 308 __ movl(c_rarg3, parameter_size); 309 __ testl(c_rarg3, c_rarg3); 310 __ jcc(Assembler::zero, parameters_done); 311 312 Label loop; 313 __ movptr(c_rarg2, parameters); // parameter pointer 314 __ movl(c_rarg1, c_rarg3); // parameter counter is in c_rarg1 315 __ BIND(loop); 316 __ movptr(rax, Address(c_rarg2, 0));// get parameter 317 __ addptr(c_rarg2, wordSize); // advance to next parameter 318 __ decrementl(c_rarg1); // decrement counter 319 __ push(rax); // pass parameter 320 __ jcc(Assembler::notZero, loop); 321 322 // call Java function 323 __ BIND(parameters_done); 324 __ movptr(rbx, method); // get Method* 325 __ movptr(c_rarg1, entry_point); // get entry_point 326 __ mov(r13, rsp); // set sender sp 327 BLOCK_COMMENT("call Java function"); 328 __ call(c_rarg1); 329 330 BLOCK_COMMENT("call_stub_return_address:"); 331 return_address = __ pc(); 332 333 // store result depending on type (everything that is not 334 // T_OBJECT, T_LONG, T_FLOAT or T_DOUBLE is treated as T_INT) 335 __ movptr(c_rarg0, result); 336 Label is_long, is_float, is_double, exit; 337 __ movl(c_rarg1, result_type); 338 __ cmpl(c_rarg1, T_OBJECT); 339 __ jcc(Assembler::equal, is_long); 340 __ cmpl(c_rarg1, T_LONG); 341 __ jcc(Assembler::equal, is_long); 342 __ cmpl(c_rarg1, T_FLOAT); 343 __ jcc(Assembler::equal, is_float); 344 __ cmpl(c_rarg1, T_DOUBLE); 345 __ jcc(Assembler::equal, is_double); 346 347 // handle T_INT case 348 __ movl(Address(c_rarg0, 0), rax); 349 350 __ BIND(exit); 351 352 // pop parameters 353 __ lea(rsp, rsp_after_call); 354 355 #ifdef ASSERT 356 // verify that threads correspond 357 { 358 Label L, S; 359 __ cmpptr(r15_thread, thread); 360 __ jcc(Assembler::notEqual, S); 361 __ get_thread(rbx); 362 __ cmpptr(r15_thread, rbx); 363 __ jcc(Assembler::equal, L); 364 __ bind(S); 365 __ jcc(Assembler::equal, L); 366 __ stop("StubRoutines::call_stub: threads must correspond"); 367 __ bind(L); 368 } 369 #endif 370 371 // restore regs belonging to calling function 372 #ifdef _WIN64 373 for (int i = 15; i >= 6; i--) { 374 __ movdqu(as_XMMRegister(i), xmm_save(i)); 375 } 376 #endif 377 __ movptr(r15, r15_save); 378 __ movptr(r14, r14_save); 379 __ movptr(r13, r13_save); 380 __ movptr(r12, r12_save); 381 __ movptr(rbx, rbx_save); 382 383 #ifdef _WIN64 384 __ movptr(rdi, rdi_save); 385 __ movptr(rsi, rsi_save); 386 #else 387 __ ldmxcsr(mxcsr_save); 388 #endif 389 390 // restore rsp 391 __ addptr(rsp, -rsp_after_call_off * wordSize); 392 393 // return 394 __ pop(rbp); 395 __ ret(0); 396 397 // handle return types different from T_INT 398 __ BIND(is_long); 399 __ movq(Address(c_rarg0, 0), rax); 400 __ jmp(exit); 401 402 __ BIND(is_float); 403 __ movflt(Address(c_rarg0, 0), xmm0); 404 __ jmp(exit); 405 406 __ BIND(is_double); 407 __ movdbl(Address(c_rarg0, 0), xmm0); 408 __ jmp(exit); 409 410 return start; 411 } 412 413 // Return point for a Java call if there's an exception thrown in 414 // Java code. The exception is caught and transformed into a 415 // pending exception stored in JavaThread that can be tested from 416 // within the VM. 417 // 418 // Note: Usually the parameters are removed by the callee. In case 419 // of an exception crossing an activation frame boundary, that is 420 // not the case if the callee is compiled code => need to setup the 421 // rsp. 422 // 423 // rax: exception oop 424 425 address generate_catch_exception() { 426 StubCodeMark mark(this, "StubRoutines", "catch_exception"); 427 address start = __ pc(); 428 429 // same as in generate_call_stub(): 430 const Address rsp_after_call(rbp, rsp_after_call_off * wordSize); 431 const Address thread (rbp, thread_off * wordSize); 432 433 #ifdef ASSERT 434 // verify that threads correspond 435 { 436 Label L, S; 437 __ cmpptr(r15_thread, thread); 438 __ jcc(Assembler::notEqual, S); 439 __ get_thread(rbx); 440 __ cmpptr(r15_thread, rbx); 441 __ jcc(Assembler::equal, L); 442 __ bind(S); 443 __ stop("StubRoutines::catch_exception: threads must correspond"); 444 __ bind(L); 445 } 446 #endif 447 448 // set pending exception 449 __ verify_oop(rax); 450 451 __ movptr(Address(r15_thread, Thread::pending_exception_offset()), rax); 452 __ lea(rscratch1, ExternalAddress((address)__FILE__)); 453 __ movptr(Address(r15_thread, Thread::exception_file_offset()), rscratch1); 454 __ movl(Address(r15_thread, Thread::exception_line_offset()), (int) __LINE__); 455 456 // complete return to VM 457 assert(StubRoutines::_call_stub_return_address != NULL, 458 "_call_stub_return_address must have been generated before"); 459 __ jump(RuntimeAddress(StubRoutines::_call_stub_return_address)); 460 461 return start; 462 } 463 464 // Continuation point for runtime calls returning with a pending 465 // exception. The pending exception check happened in the runtime 466 // or native call stub. The pending exception in Thread is 467 // converted into a Java-level exception. 468 // 469 // Contract with Java-level exception handlers: 470 // rax: exception 471 // rdx: throwing pc 472 // 473 // NOTE: At entry of this stub, exception-pc must be on stack !! 474 475 address generate_forward_exception() { 476 StubCodeMark mark(this, "StubRoutines", "forward exception"); 477 address start = __ pc(); 478 479 // Upon entry, the sp points to the return address returning into 480 // Java (interpreted or compiled) code; i.e., the return address 481 // becomes the throwing pc. 482 // 483 // Arguments pushed before the runtime call are still on the stack 484 // but the exception handler will reset the stack pointer -> 485 // ignore them. A potential result in registers can be ignored as 486 // well. 487 488 #ifdef ASSERT 489 // make sure this code is only executed if there is a pending exception 490 { 491 Label L; 492 __ cmpptr(Address(r15_thread, Thread::pending_exception_offset()), (int32_t) NULL); 493 __ jcc(Assembler::notEqual, L); 494 __ stop("StubRoutines::forward exception: no pending exception (1)"); 495 __ bind(L); 496 } 497 #endif 498 499 // compute exception handler into rbx 500 __ movptr(c_rarg0, Address(rsp, 0)); 501 BLOCK_COMMENT("call exception_handler_for_return_address"); 502 __ call_VM_leaf(CAST_FROM_FN_PTR(address, 503 SharedRuntime::exception_handler_for_return_address), 504 r15_thread, c_rarg0); 505 __ mov(rbx, rax); 506 507 // setup rax & rdx, remove return address & clear pending exception 508 __ pop(rdx); 509 __ movptr(rax, Address(r15_thread, Thread::pending_exception_offset())); 510 __ movptr(Address(r15_thread, Thread::pending_exception_offset()), (int32_t)NULL_WORD); 511 512 #ifdef ASSERT 513 // make sure exception is set 514 { 515 Label L; 516 __ testptr(rax, rax); 517 __ jcc(Assembler::notEqual, L); 518 __ stop("StubRoutines::forward exception: no pending exception (2)"); 519 __ bind(L); 520 } 521 #endif 522 523 // continue at exception handler (return address removed) 524 // rax: exception 525 // rbx: exception handler 526 // rdx: throwing pc 527 __ verify_oop(rax); 528 __ jmp(rbx); 529 530 return start; 531 } 532 533 // Support for jint atomic::xchg(jint exchange_value, volatile jint* dest) 534 // 535 // Arguments : 536 // c_rarg0: exchange_value 537 // c_rarg0: dest 538 // 539 // Result: 540 // *dest <- ex, return (orig *dest) 541 address generate_atomic_xchg() { 542 StubCodeMark mark(this, "StubRoutines", "atomic_xchg"); 543 address start = __ pc(); 544 545 __ movl(rax, c_rarg0); // Copy to eax we need a return value anyhow 546 __ xchgl(rax, Address(c_rarg1, 0)); // automatic LOCK 547 __ ret(0); 548 549 return start; 550 } 551 552 // Support for intptr_t atomic::xchg_ptr(intptr_t exchange_value, volatile intptr_t* dest) 553 // 554 // Arguments : 555 // c_rarg0: exchange_value 556 // c_rarg1: dest 557 // 558 // Result: 559 // *dest <- ex, return (orig *dest) 560 address generate_atomic_xchg_ptr() { 561 StubCodeMark mark(this, "StubRoutines", "atomic_xchg_ptr"); 562 address start = __ pc(); 563 564 __ movptr(rax, c_rarg0); // Copy to eax we need a return value anyhow 565 __ xchgptr(rax, Address(c_rarg1, 0)); // automatic LOCK 566 __ ret(0); 567 568 return start; 569 } 570 571 // Support for jint atomic::atomic_cmpxchg(jint exchange_value, volatile jint* dest, 572 // jint compare_value) 573 // 574 // Arguments : 575 // c_rarg0: exchange_value 576 // c_rarg1: dest 577 // c_rarg2: compare_value 578 // 579 // Result: 580 // if ( compare_value == *dest ) { 581 // *dest = exchange_value 582 // return compare_value; 583 // else 584 // return *dest; 585 address generate_atomic_cmpxchg() { 586 StubCodeMark mark(this, "StubRoutines", "atomic_cmpxchg"); 587 address start = __ pc(); 588 589 __ movl(rax, c_rarg2); 590 if ( os::is_MP() ) __ lock(); 591 __ cmpxchgl(c_rarg0, Address(c_rarg1, 0)); 592 __ ret(0); 593 594 return start; 595 } 596 597 // Support for jint atomic::atomic_cmpxchg_long(jlong exchange_value, 598 // volatile jlong* dest, 599 // jlong compare_value) 600 // Arguments : 601 // c_rarg0: exchange_value 602 // c_rarg1: dest 603 // c_rarg2: compare_value 604 // 605 // Result: 606 // if ( compare_value == *dest ) { 607 // *dest = exchange_value 608 // return compare_value; 609 // else 610 // return *dest; 611 address generate_atomic_cmpxchg_long() { 612 StubCodeMark mark(this, "StubRoutines", "atomic_cmpxchg_long"); 613 address start = __ pc(); 614 615 __ movq(rax, c_rarg2); 616 if ( os::is_MP() ) __ lock(); 617 __ cmpxchgq(c_rarg0, Address(c_rarg1, 0)); 618 __ ret(0); 619 620 return start; 621 } 622 623 // Support for jint atomic::add(jint add_value, volatile jint* dest) 624 // 625 // Arguments : 626 // c_rarg0: add_value 627 // c_rarg1: dest 628 // 629 // Result: 630 // *dest += add_value 631 // return *dest; 632 address generate_atomic_add() { 633 StubCodeMark mark(this, "StubRoutines", "atomic_add"); 634 address start = __ pc(); 635 636 __ movl(rax, c_rarg0); 637 if ( os::is_MP() ) __ lock(); 638 __ xaddl(Address(c_rarg1, 0), c_rarg0); 639 __ addl(rax, c_rarg0); 640 __ ret(0); 641 642 return start; 643 } 644 645 // Support for intptr_t atomic::add_ptr(intptr_t add_value, volatile intptr_t* dest) 646 // 647 // Arguments : 648 // c_rarg0: add_value 649 // c_rarg1: dest 650 // 651 // Result: 652 // *dest += add_value 653 // return *dest; 654 address generate_atomic_add_ptr() { 655 StubCodeMark mark(this, "StubRoutines", "atomic_add_ptr"); 656 address start = __ pc(); 657 658 __ movptr(rax, c_rarg0); // Copy to eax we need a return value anyhow 659 if ( os::is_MP() ) __ lock(); 660 __ xaddptr(Address(c_rarg1, 0), c_rarg0); 661 __ addptr(rax, c_rarg0); 662 __ ret(0); 663 664 return start; 665 } 666 667 // Support for intptr_t OrderAccess::fence() 668 // 669 // Arguments : 670 // 671 // Result: 672 address generate_orderaccess_fence() { 673 StubCodeMark mark(this, "StubRoutines", "orderaccess_fence"); 674 address start = __ pc(); 675 __ membar(Assembler::StoreLoad); 676 __ ret(0); 677 678 return start; 679 } 680 681 // Support for intptr_t get_previous_fp() 682 // 683 // This routine is used to find the previous frame pointer for the 684 // caller (current_frame_guess). This is used as part of debugging 685 // ps() is seemingly lost trying to find frames. 686 // This code assumes that caller current_frame_guess) has a frame. 687 address generate_get_previous_fp() { 688 StubCodeMark mark(this, "StubRoutines", "get_previous_fp"); 689 const Address old_fp(rbp, 0); 690 const Address older_fp(rax, 0); 691 address start = __ pc(); 692 693 __ enter(); 694 __ movptr(rax, old_fp); // callers fp 695 __ movptr(rax, older_fp); // the frame for ps() 696 __ pop(rbp); 697 __ ret(0); 698 699 return start; 700 } 701 702 // Support for intptr_t get_previous_sp() 703 // 704 // This routine is used to find the previous stack pointer for the 705 // caller. 706 address generate_get_previous_sp() { 707 StubCodeMark mark(this, "StubRoutines", "get_previous_sp"); 708 address start = __ pc(); 709 710 __ movptr(rax, rsp); 711 __ addptr(rax, 8); // return address is at the top of the stack. 712 __ ret(0); 713 714 return start; 715 } 716 717 //---------------------------------------------------------------------------------------------------- 718 // Support for void verify_mxcsr() 719 // 720 // This routine is used with -Xcheck:jni to verify that native 721 // JNI code does not return to Java code without restoring the 722 // MXCSR register to our expected state. 723 724 address generate_verify_mxcsr() { 725 StubCodeMark mark(this, "StubRoutines", "verify_mxcsr"); 726 address start = __ pc(); 727 728 const Address mxcsr_save(rsp, 0); 729 730 if (CheckJNICalls) { 731 Label ok_ret; 732 ExternalAddress mxcsr_std(StubRoutines::addr_mxcsr_std()); 733 __ push(rax); 734 __ subptr(rsp, wordSize); // allocate a temp location 735 __ stmxcsr(mxcsr_save); 736 __ movl(rax, mxcsr_save); 737 __ andl(rax, MXCSR_MASK); // Only check control and mask bits 738 __ cmp32(rax, mxcsr_std); 739 __ jcc(Assembler::equal, ok_ret); 740 741 __ warn("MXCSR changed by native JNI code, use -XX:+RestoreMXCSROnJNICall"); 742 743 __ ldmxcsr(mxcsr_std); 744 745 __ bind(ok_ret); 746 __ addptr(rsp, wordSize); 747 __ pop(rax); 748 } 749 750 __ ret(0); 751 752 return start; 753 } 754 755 address generate_f2i_fixup() { 756 StubCodeMark mark(this, "StubRoutines", "f2i_fixup"); 757 Address inout(rsp, 5 * wordSize); // return address + 4 saves 758 759 address start = __ pc(); 760 761 Label L; 762 763 __ push(rax); 764 __ push(c_rarg3); 765 __ push(c_rarg2); 766 __ push(c_rarg1); 767 768 __ movl(rax, 0x7f800000); 769 __ xorl(c_rarg3, c_rarg3); 770 __ movl(c_rarg2, inout); 771 __ movl(c_rarg1, c_rarg2); 772 __ andl(c_rarg1, 0x7fffffff); 773 __ cmpl(rax, c_rarg1); // NaN? -> 0 774 __ jcc(Assembler::negative, L); 775 __ testl(c_rarg2, c_rarg2); // signed ? min_jint : max_jint 776 __ movl(c_rarg3, 0x80000000); 777 __ movl(rax, 0x7fffffff); 778 __ cmovl(Assembler::positive, c_rarg3, rax); 779 780 __ bind(L); 781 __ movptr(inout, c_rarg3); 782 783 __ pop(c_rarg1); 784 __ pop(c_rarg2); 785 __ pop(c_rarg3); 786 __ pop(rax); 787 788 __ ret(0); 789 790 return start; 791 } 792 793 address generate_f2l_fixup() { 794 StubCodeMark mark(this, "StubRoutines", "f2l_fixup"); 795 Address inout(rsp, 5 * wordSize); // return address + 4 saves 796 address start = __ pc(); 797 798 Label L; 799 800 __ push(rax); 801 __ push(c_rarg3); 802 __ push(c_rarg2); 803 __ push(c_rarg1); 804 805 __ movl(rax, 0x7f800000); 806 __ xorl(c_rarg3, c_rarg3); 807 __ movl(c_rarg2, inout); 808 __ movl(c_rarg1, c_rarg2); 809 __ andl(c_rarg1, 0x7fffffff); 810 __ cmpl(rax, c_rarg1); // NaN? -> 0 811 __ jcc(Assembler::negative, L); 812 __ testl(c_rarg2, c_rarg2); // signed ? min_jlong : max_jlong 813 __ mov64(c_rarg3, 0x8000000000000000); 814 __ mov64(rax, 0x7fffffffffffffff); 815 __ cmov(Assembler::positive, c_rarg3, rax); 816 817 __ bind(L); 818 __ movptr(inout, c_rarg3); 819 820 __ pop(c_rarg1); 821 __ pop(c_rarg2); 822 __ pop(c_rarg3); 823 __ pop(rax); 824 825 __ ret(0); 826 827 return start; 828 } 829 830 address generate_d2i_fixup() { 831 StubCodeMark mark(this, "StubRoutines", "d2i_fixup"); 832 Address inout(rsp, 6 * wordSize); // return address + 5 saves 833 834 address start = __ pc(); 835 836 Label L; 837 838 __ push(rax); 839 __ push(c_rarg3); 840 __ push(c_rarg2); 841 __ push(c_rarg1); 842 __ push(c_rarg0); 843 844 __ movl(rax, 0x7ff00000); 845 __ movq(c_rarg2, inout); 846 __ movl(c_rarg3, c_rarg2); 847 __ mov(c_rarg1, c_rarg2); 848 __ mov(c_rarg0, c_rarg2); 849 __ negl(c_rarg3); 850 __ shrptr(c_rarg1, 0x20); 851 __ orl(c_rarg3, c_rarg2); 852 __ andl(c_rarg1, 0x7fffffff); 853 __ xorl(c_rarg2, c_rarg2); 854 __ shrl(c_rarg3, 0x1f); 855 __ orl(c_rarg1, c_rarg3); 856 __ cmpl(rax, c_rarg1); 857 __ jcc(Assembler::negative, L); // NaN -> 0 858 __ testptr(c_rarg0, c_rarg0); // signed ? min_jint : max_jint 859 __ movl(c_rarg2, 0x80000000); 860 __ movl(rax, 0x7fffffff); 861 __ cmov(Assembler::positive, c_rarg2, rax); 862 863 __ bind(L); 864 __ movptr(inout, c_rarg2); 865 866 __ pop(c_rarg0); 867 __ pop(c_rarg1); 868 __ pop(c_rarg2); 869 __ pop(c_rarg3); 870 __ pop(rax); 871 872 __ ret(0); 873 874 return start; 875 } 876 877 address generate_d2l_fixup() { 878 StubCodeMark mark(this, "StubRoutines", "d2l_fixup"); 879 Address inout(rsp, 6 * wordSize); // return address + 5 saves 880 881 address start = __ pc(); 882 883 Label L; 884 885 __ push(rax); 886 __ push(c_rarg3); 887 __ push(c_rarg2); 888 __ push(c_rarg1); 889 __ push(c_rarg0); 890 891 __ movl(rax, 0x7ff00000); 892 __ movq(c_rarg2, inout); 893 __ movl(c_rarg3, c_rarg2); 894 __ mov(c_rarg1, c_rarg2); 895 __ mov(c_rarg0, c_rarg2); 896 __ negl(c_rarg3); 897 __ shrptr(c_rarg1, 0x20); 898 __ orl(c_rarg3, c_rarg2); 899 __ andl(c_rarg1, 0x7fffffff); 900 __ xorl(c_rarg2, c_rarg2); 901 __ shrl(c_rarg3, 0x1f); 902 __ orl(c_rarg1, c_rarg3); 903 __ cmpl(rax, c_rarg1); 904 __ jcc(Assembler::negative, L); // NaN -> 0 905 __ testq(c_rarg0, c_rarg0); // signed ? min_jlong : max_jlong 906 __ mov64(c_rarg2, 0x8000000000000000); 907 __ mov64(rax, 0x7fffffffffffffff); 908 __ cmovq(Assembler::positive, c_rarg2, rax); 909 910 __ bind(L); 911 __ movq(inout, c_rarg2); 912 913 __ pop(c_rarg0); 914 __ pop(c_rarg1); 915 __ pop(c_rarg2); 916 __ pop(c_rarg3); 917 __ pop(rax); 918 919 __ ret(0); 920 921 return start; 922 } 923 924 address generate_fp_mask(const char *stub_name, int64_t mask) { 925 __ align(CodeEntryAlignment); 926 StubCodeMark mark(this, "StubRoutines", stub_name); 927 address start = __ pc(); 928 929 __ emit_data64( mask, relocInfo::none ); 930 __ emit_data64( mask, relocInfo::none ); 931 932 return start; 933 } 934 935 // The following routine generates a subroutine to throw an 936 // asynchronous UnknownError when an unsafe access gets a fault that 937 // could not be reasonably prevented by the programmer. (Example: 938 // SIGBUS/OBJERR.) 939 address generate_handler_for_unsafe_access() { 940 StubCodeMark mark(this, "StubRoutines", "handler_for_unsafe_access"); 941 address start = __ pc(); 942 943 __ push(0); // hole for return address-to-be 944 __ pusha(); // push registers 945 Address next_pc(rsp, RegisterImpl::number_of_registers * BytesPerWord); 946 947 // FIXME: this probably needs alignment logic 948 949 __ subptr(rsp, frame::arg_reg_save_area_bytes); 950 BLOCK_COMMENT("call handle_unsafe_access"); 951 __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, handle_unsafe_access))); 952 __ addptr(rsp, frame::arg_reg_save_area_bytes); 953 954 __ movptr(next_pc, rax); // stuff next address 955 __ popa(); 956 __ ret(0); // jump to next address 957 958 return start; 959 } 960 961 // Non-destructive plausibility checks for oops 962 // 963 // Arguments: 964 // all args on stack! 965 // 966 // Stack after saving c_rarg3: 967 // [tos + 0]: saved c_rarg3 968 // [tos + 1]: saved c_rarg2 969 // [tos + 2]: saved r12 (several TemplateTable methods use it) 970 // [tos + 3]: saved flags 971 // [tos + 4]: return address 972 // * [tos + 5]: error message (char*) 973 // * [tos + 6]: object to verify (oop) 974 // * [tos + 7]: saved rax - saved by caller and bashed 975 // * [tos + 8]: saved r10 (rscratch1) - saved by caller 976 // * = popped on exit 977 address generate_verify_oop() { 978 StubCodeMark mark(this, "StubRoutines", "verify_oop"); 979 address start = __ pc(); 980 981 Label exit, error; 982 983 __ pushf(); 984 __ incrementl(ExternalAddress((address) StubRoutines::verify_oop_count_addr())); 985 986 __ push(r12); 987 988 // save c_rarg2 and c_rarg3 989 __ push(c_rarg2); 990 __ push(c_rarg3); 991 992 enum { 993 // After previous pushes. 994 oop_to_verify = 6 * wordSize, 995 saved_rax = 7 * wordSize, 996 saved_r10 = 8 * wordSize, 997 998 // Before the call to MacroAssembler::debug(), see below. 999 return_addr = 16 * wordSize, 1000 error_msg = 17 * wordSize 1001 }; 1002 1003 // get object 1004 __ movptr(rax, Address(rsp, oop_to_verify)); 1005 1006 // make sure object is 'reasonable' 1007 __ testptr(rax, rax); 1008 __ jcc(Assembler::zero, exit); // if obj is NULL it is OK 1009 // Check if the oop is in the right area of memory 1010 __ movptr(c_rarg2, rax); 1011 __ movptr(c_rarg3, (intptr_t) Universe::verify_oop_mask()); 1012 __ andptr(c_rarg2, c_rarg3); 1013 __ movptr(c_rarg3, (intptr_t) Universe::verify_oop_bits()); 1014 __ cmpptr(c_rarg2, c_rarg3); 1015 __ jcc(Assembler::notZero, error); 1016 1017 // set r12 to heapbase for load_klass() 1018 __ reinit_heapbase(); 1019 1020 // make sure klass is 'reasonable', which is not zero. 1021 __ load_klass(rax, rax); // get klass 1022 __ testptr(rax, rax); 1023 __ jcc(Assembler::zero, error); // if klass is NULL it is broken 1024 1025 // return if everything seems ok 1026 __ bind(exit); 1027 __ movptr(rax, Address(rsp, saved_rax)); // get saved rax back 1028 __ movptr(rscratch1, Address(rsp, saved_r10)); // get saved r10 back 1029 __ pop(c_rarg3); // restore c_rarg3 1030 __ pop(c_rarg2); // restore c_rarg2 1031 __ pop(r12); // restore r12 1032 __ popf(); // restore flags 1033 __ ret(4 * wordSize); // pop caller saved stuff 1034 1035 // handle errors 1036 __ bind(error); 1037 __ movptr(rax, Address(rsp, saved_rax)); // get saved rax back 1038 __ movptr(rscratch1, Address(rsp, saved_r10)); // get saved r10 back 1039 __ pop(c_rarg3); // get saved c_rarg3 back 1040 __ pop(c_rarg2); // get saved c_rarg2 back 1041 __ pop(r12); // get saved r12 back 1042 __ popf(); // get saved flags off stack -- 1043 // will be ignored 1044 1045 __ pusha(); // push registers 1046 // (rip is already 1047 // already pushed) 1048 // debug(char* msg, int64_t pc, int64_t regs[]) 1049 // We've popped the registers we'd saved (c_rarg3, c_rarg2 and flags), and 1050 // pushed all the registers, so now the stack looks like: 1051 // [tos + 0] 16 saved registers 1052 // [tos + 16] return address 1053 // * [tos + 17] error message (char*) 1054 // * [tos + 18] object to verify (oop) 1055 // * [tos + 19] saved rax - saved by caller and bashed 1056 // * [tos + 20] saved r10 (rscratch1) - saved by caller 1057 // * = popped on exit 1058 1059 __ movptr(c_rarg0, Address(rsp, error_msg)); // pass address of error message 1060 __ movptr(c_rarg1, Address(rsp, return_addr)); // pass return address 1061 __ movq(c_rarg2, rsp); // pass address of regs on stack 1062 __ mov(r12, rsp); // remember rsp 1063 __ subptr(rsp, frame::arg_reg_save_area_bytes); // windows 1064 __ andptr(rsp, -16); // align stack as required by ABI 1065 BLOCK_COMMENT("call MacroAssembler::debug"); 1066 __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, MacroAssembler::debug64))); 1067 __ mov(rsp, r12); // restore rsp 1068 __ popa(); // pop registers (includes r12) 1069 __ ret(4 * wordSize); // pop caller saved stuff 1070 1071 return start; 1072 } 1073 1074 // 1075 // Verify that a register contains clean 32-bits positive value 1076 // (high 32-bits are 0) so it could be used in 64-bits shifts. 1077 // 1078 // Input: 1079 // Rint - 32-bits value 1080 // Rtmp - scratch 1081 // 1082 void assert_clean_int(Register Rint, Register Rtmp) { 1083 #ifdef ASSERT 1084 Label L; 1085 assert_different_registers(Rtmp, Rint); 1086 __ movslq(Rtmp, Rint); 1087 __ cmpq(Rtmp, Rint); 1088 __ jcc(Assembler::equal, L); 1089 __ stop("high 32-bits of int value are not 0"); 1090 __ bind(L); 1091 #endif 1092 } 1093 1094 // Generate overlap test for array copy stubs 1095 // 1096 // Input: 1097 // c_rarg0 - from 1098 // c_rarg1 - to 1099 // c_rarg2 - element count 1100 // 1101 // Output: 1102 // rax - &from[element count - 1] 1103 // 1104 void array_overlap_test(address no_overlap_target, Address::ScaleFactor sf) { 1105 assert(no_overlap_target != NULL, "must be generated"); 1106 array_overlap_test(no_overlap_target, NULL, sf); 1107 } 1108 void array_overlap_test(Label& L_no_overlap, Address::ScaleFactor sf) { 1109 array_overlap_test(NULL, &L_no_overlap, sf); 1110 } 1111 void array_overlap_test(address no_overlap_target, Label* NOLp, Address::ScaleFactor sf) { 1112 const Register from = c_rarg0; 1113 const Register to = c_rarg1; 1114 const Register count = c_rarg2; 1115 const Register end_from = rax; 1116 1117 __ cmpptr(to, from); 1118 __ lea(end_from, Address(from, count, sf, 0)); 1119 if (NOLp == NULL) { 1120 ExternalAddress no_overlap(no_overlap_target); 1121 __ jump_cc(Assembler::belowEqual, no_overlap); 1122 __ cmpptr(to, end_from); 1123 __ jump_cc(Assembler::aboveEqual, no_overlap); 1124 } else { 1125 __ jcc(Assembler::belowEqual, (*NOLp)); 1126 __ cmpptr(to, end_from); 1127 __ jcc(Assembler::aboveEqual, (*NOLp)); 1128 } 1129 } 1130 1131 // Shuffle first three arg regs on Windows into Linux/Solaris locations. 1132 // 1133 // Outputs: 1134 // rdi - rcx 1135 // rsi - rdx 1136 // rdx - r8 1137 // rcx - r9 1138 // 1139 // Registers r9 and r10 are used to save rdi and rsi on Windows, which latter 1140 // are non-volatile. r9 and r10 should not be used by the caller. 1141 // 1142 void setup_arg_regs(int nargs = 3) { 1143 const Register saved_rdi = r9; 1144 const Register saved_rsi = r10; 1145 assert(nargs == 3 || nargs == 4, "else fix"); 1146 #ifdef _WIN64 1147 assert(c_rarg0 == rcx && c_rarg1 == rdx && c_rarg2 == r8 && c_rarg3 == r9, 1148 "unexpected argument registers"); 1149 if (nargs >= 4) 1150 __ mov(rax, r9); // r9 is also saved_rdi 1151 __ movptr(saved_rdi, rdi); 1152 __ movptr(saved_rsi, rsi); 1153 __ mov(rdi, rcx); // c_rarg0 1154 __ mov(rsi, rdx); // c_rarg1 1155 __ mov(rdx, r8); // c_rarg2 1156 if (nargs >= 4) 1157 __ mov(rcx, rax); // c_rarg3 (via rax) 1158 #else 1159 assert(c_rarg0 == rdi && c_rarg1 == rsi && c_rarg2 == rdx && c_rarg3 == rcx, 1160 "unexpected argument registers"); 1161 #endif 1162 } 1163 1164 void restore_arg_regs() { 1165 const Register saved_rdi = r9; 1166 const Register saved_rsi = r10; 1167 #ifdef _WIN64 1168 __ movptr(rdi, saved_rdi); 1169 __ movptr(rsi, saved_rsi); 1170 #endif 1171 } 1172 1173 // Generate code for an array write pre barrier 1174 // 1175 // addr - starting address 1176 // count - element count 1177 // tmp - scratch register 1178 // 1179 // Destroy no registers! 1180 // 1181 void gen_write_ref_array_pre_barrier(Register addr, Register count, bool dest_uninitialized) { 1182 BarrierSet* bs = Universe::heap()->barrier_set(); 1183 switch (bs->kind()) { 1184 case BarrierSet::G1SATBCT: 1185 case BarrierSet::G1SATBCTLogging: 1186 // With G1, don't generate the call if we statically know that the target in uninitialized 1187 if (!dest_uninitialized) { 1188 __ pusha(); // push registers 1189 if (count == c_rarg0) { 1190 if (addr == c_rarg1) { 1191 // exactly backwards!! 1192 __ xchgptr(c_rarg1, c_rarg0); 1193 } else { 1194 __ movptr(c_rarg1, count); 1195 __ movptr(c_rarg0, addr); 1196 } 1197 } else { 1198 __ movptr(c_rarg0, addr); 1199 __ movptr(c_rarg1, count); 1200 } 1201 __ call_VM_leaf(CAST_FROM_FN_PTR(address, BarrierSet::static_write_ref_array_pre), 2); 1202 __ popa(); 1203 } 1204 break; 1205 case BarrierSet::CardTableModRef: 1206 case BarrierSet::CardTableExtension: 1207 case BarrierSet::ModRef: 1208 break; 1209 default: 1210 ShouldNotReachHere(); 1211 1212 } 1213 } 1214 1215 // 1216 // Generate code for an array write post barrier 1217 // 1218 // Input: 1219 // start - register containing starting address of destination array 1220 // count - elements count 1221 // scratch - scratch register 1222 // 1223 // The input registers are overwritten. 1224 // 1225 void gen_write_ref_array_post_barrier(Register start, Register count, Register scratch) { 1226 assert_different_registers(start, count, scratch); 1227 BarrierSet* bs = Universe::heap()->barrier_set(); 1228 switch (bs->kind()) { 1229 case BarrierSet::G1SATBCT: 1230 case BarrierSet::G1SATBCTLogging: 1231 { 1232 __ pusha(); // push registers (overkill) 1233 if (c_rarg0 == count) { // On win64 c_rarg0 == rcx 1234 assert_different_registers(c_rarg1, start); 1235 __ mov(c_rarg1, count); 1236 __ mov(c_rarg0, start); 1237 } else { 1238 assert_different_registers(c_rarg0, count); 1239 __ mov(c_rarg0, start); 1240 __ mov(c_rarg1, count); 1241 } 1242 __ call_VM_leaf(CAST_FROM_FN_PTR(address, BarrierSet::static_write_ref_array_post), 2); 1243 __ popa(); 1244 } 1245 break; 1246 case BarrierSet::CardTableModRef: 1247 case BarrierSet::CardTableExtension: 1248 { 1249 CardTableModRefBS* ct = (CardTableModRefBS*)bs; 1250 assert(sizeof(*ct->byte_map_base) == sizeof(jbyte), "adjust this code"); 1251 1252 Label L_loop; 1253 const Register end = count; 1254 1255 __ leaq(end, Address(start, count, TIMES_OOP, 0)); // end == start+count*oop_size 1256 __ subptr(end, BytesPerHeapOop); // end - 1 to make inclusive 1257 __ shrptr(start, CardTableModRefBS::card_shift); 1258 __ shrptr(end, CardTableModRefBS::card_shift); 1259 __ subptr(end, start); // end --> cards count 1260 1261 int64_t disp = (int64_t) ct->byte_map_base; 1262 __ mov64(scratch, disp); 1263 __ addptr(start, scratch); 1264 __ BIND(L_loop); 1265 __ movb(Address(start, count, Address::times_1), 0); 1266 __ decrement(count); 1267 __ jcc(Assembler::greaterEqual, L_loop); 1268 } 1269 break; 1270 default: 1271 ShouldNotReachHere(); 1272 1273 } 1274 } 1275 1276 1277 // Copy big chunks forward 1278 // 1279 // Inputs: 1280 // end_from - source arrays end address 1281 // end_to - destination array end address 1282 // qword_count - 64-bits element count, negative 1283 // to - scratch 1284 // L_copy_bytes - entry label 1285 // L_copy_8_bytes - exit label 1286 // 1287 void copy_bytes_forward(Register end_from, Register end_to, 1288 Register qword_count, Register to, 1289 Label& L_copy_bytes, Label& L_copy_8_bytes) { 1290 DEBUG_ONLY(__ stop("enter at entry label, not here")); 1291 Label L_loop; 1292 __ align(OptoLoopAlignment); 1293 if (UseUnalignedLoadStores) { 1294 Label L_end; 1295 // Copy 64-bytes per iteration 1296 __ BIND(L_loop); 1297 if (UseAVX >= 2) { 1298 __ vmovdqu(xmm0, Address(end_from, qword_count, Address::times_8, -56)); 1299 __ vmovdqu(Address(end_to, qword_count, Address::times_8, -56), xmm0); 1300 __ vmovdqu(xmm1, Address(end_from, qword_count, Address::times_8, -24)); 1301 __ vmovdqu(Address(end_to, qword_count, Address::times_8, -24), xmm1); 1302 } else { 1303 __ movdqu(xmm0, Address(end_from, qword_count, Address::times_8, -56)); 1304 __ movdqu(Address(end_to, qword_count, Address::times_8, -56), xmm0); 1305 __ movdqu(xmm1, Address(end_from, qword_count, Address::times_8, -40)); 1306 __ movdqu(Address(end_to, qword_count, Address::times_8, -40), xmm1); 1307 __ movdqu(xmm2, Address(end_from, qword_count, Address::times_8, -24)); 1308 __ movdqu(Address(end_to, qword_count, Address::times_8, -24), xmm2); 1309 __ movdqu(xmm3, Address(end_from, qword_count, Address::times_8, - 8)); 1310 __ movdqu(Address(end_to, qword_count, Address::times_8, - 8), xmm3); 1311 } 1312 __ BIND(L_copy_bytes); 1313 __ addptr(qword_count, 8); 1314 __ jcc(Assembler::lessEqual, L_loop); 1315 __ subptr(qword_count, 4); // sub(8) and add(4) 1316 __ jccb(Assembler::greater, L_end); 1317 // Copy trailing 32 bytes 1318 if (UseAVX >= 2) { 1319 __ vmovdqu(xmm0, Address(end_from, qword_count, Address::times_8, -24)); 1320 __ vmovdqu(Address(end_to, qword_count, Address::times_8, -24), xmm0); 1321 } else { 1322 __ movdqu(xmm0, Address(end_from, qword_count, Address::times_8, -24)); 1323 __ movdqu(Address(end_to, qword_count, Address::times_8, -24), xmm0); 1324 __ movdqu(xmm1, Address(end_from, qword_count, Address::times_8, - 8)); 1325 __ movdqu(Address(end_to, qword_count, Address::times_8, - 8), xmm1); 1326 } 1327 __ addptr(qword_count, 4); 1328 __ BIND(L_end); 1329 if (UseAVX >= 2) { 1330 // clean upper bits of YMM registers 1331 __ vpxor(xmm0, xmm0); 1332 __ vpxor(xmm1, xmm1); 1333 } 1334 } else { 1335 // Copy 32-bytes per iteration 1336 __ BIND(L_loop); 1337 __ movq(to, Address(end_from, qword_count, Address::times_8, -24)); 1338 __ movq(Address(end_to, qword_count, Address::times_8, -24), to); 1339 __ movq(to, Address(end_from, qword_count, Address::times_8, -16)); 1340 __ movq(Address(end_to, qword_count, Address::times_8, -16), to); 1341 __ movq(to, Address(end_from, qword_count, Address::times_8, - 8)); 1342 __ movq(Address(end_to, qword_count, Address::times_8, - 8), to); 1343 __ movq(to, Address(end_from, qword_count, Address::times_8, - 0)); 1344 __ movq(Address(end_to, qword_count, Address::times_8, - 0), to); 1345 1346 __ BIND(L_copy_bytes); 1347 __ addptr(qword_count, 4); 1348 __ jcc(Assembler::lessEqual, L_loop); 1349 } 1350 __ subptr(qword_count, 4); 1351 __ jcc(Assembler::less, L_copy_8_bytes); // Copy trailing qwords 1352 } 1353 1354 // Copy big chunks backward 1355 // 1356 // Inputs: 1357 // from - source arrays address 1358 // dest - destination array address 1359 // qword_count - 64-bits element count 1360 // to - scratch 1361 // L_copy_bytes - entry label 1362 // L_copy_8_bytes - exit label 1363 // 1364 void copy_bytes_backward(Register from, Register dest, 1365 Register qword_count, Register to, 1366 Label& L_copy_bytes, Label& L_copy_8_bytes) { 1367 DEBUG_ONLY(__ stop("enter at entry label, not here")); 1368 Label L_loop; 1369 __ align(OptoLoopAlignment); 1370 if (UseUnalignedLoadStores) { 1371 Label L_end; 1372 // Copy 64-bytes per iteration 1373 __ BIND(L_loop); 1374 if (UseAVX >= 2) { 1375 __ vmovdqu(xmm0, Address(from, qword_count, Address::times_8, 32)); 1376 __ vmovdqu(Address(dest, qword_count, Address::times_8, 32), xmm0); 1377 __ vmovdqu(xmm1, Address(from, qword_count, Address::times_8, 0)); 1378 __ vmovdqu(Address(dest, qword_count, Address::times_8, 0), xmm1); 1379 } else { 1380 __ movdqu(xmm0, Address(from, qword_count, Address::times_8, 48)); 1381 __ movdqu(Address(dest, qword_count, Address::times_8, 48), xmm0); 1382 __ movdqu(xmm1, Address(from, qword_count, Address::times_8, 32)); 1383 __ movdqu(Address(dest, qword_count, Address::times_8, 32), xmm1); 1384 __ movdqu(xmm2, Address(from, qword_count, Address::times_8, 16)); 1385 __ movdqu(Address(dest, qword_count, Address::times_8, 16), xmm2); 1386 __ movdqu(xmm3, Address(from, qword_count, Address::times_8, 0)); 1387 __ movdqu(Address(dest, qword_count, Address::times_8, 0), xmm3); 1388 } 1389 __ BIND(L_copy_bytes); 1390 __ subptr(qword_count, 8); 1391 __ jcc(Assembler::greaterEqual, L_loop); 1392 1393 __ addptr(qword_count, 4); // add(8) and sub(4) 1394 __ jccb(Assembler::less, L_end); 1395 // Copy trailing 32 bytes 1396 if (UseAVX >= 2) { 1397 __ vmovdqu(xmm0, Address(from, qword_count, Address::times_8, 0)); 1398 __ vmovdqu(Address(dest, qword_count, Address::times_8, 0), xmm0); 1399 } else { 1400 __ movdqu(xmm0, Address(from, qword_count, Address::times_8, 16)); 1401 __ movdqu(Address(dest, qword_count, Address::times_8, 16), xmm0); 1402 __ movdqu(xmm1, Address(from, qword_count, Address::times_8, 0)); 1403 __ movdqu(Address(dest, qword_count, Address::times_8, 0), xmm1); 1404 } 1405 __ subptr(qword_count, 4); 1406 __ BIND(L_end); 1407 if (UseAVX >= 2) { 1408 // clean upper bits of YMM registers 1409 __ vpxor(xmm0, xmm0); 1410 __ vpxor(xmm1, xmm1); 1411 } 1412 } else { 1413 // Copy 32-bytes per iteration 1414 __ BIND(L_loop); 1415 __ movq(to, Address(from, qword_count, Address::times_8, 24)); 1416 __ movq(Address(dest, qword_count, Address::times_8, 24), to); 1417 __ movq(to, Address(from, qword_count, Address::times_8, 16)); 1418 __ movq(Address(dest, qword_count, Address::times_8, 16), to); 1419 __ movq(to, Address(from, qword_count, Address::times_8, 8)); 1420 __ movq(Address(dest, qword_count, Address::times_8, 8), to); 1421 __ movq(to, Address(from, qword_count, Address::times_8, 0)); 1422 __ movq(Address(dest, qword_count, Address::times_8, 0), to); 1423 1424 __ BIND(L_copy_bytes); 1425 __ subptr(qword_count, 4); 1426 __ jcc(Assembler::greaterEqual, L_loop); 1427 } 1428 __ addptr(qword_count, 4); 1429 __ jcc(Assembler::greater, L_copy_8_bytes); // Copy trailing qwords 1430 } 1431 1432 1433 // Arguments: 1434 // aligned - true => Input and output aligned on a HeapWord == 8-byte boundary 1435 // ignored 1436 // name - stub name string 1437 // 1438 // Inputs: 1439 // c_rarg0 - source array address 1440 // c_rarg1 - destination array address 1441 // c_rarg2 - element count, treated as ssize_t, can be zero 1442 // 1443 // If 'from' and/or 'to' are aligned on 4-, 2-, or 1-byte boundaries, 1444 // we let the hardware handle it. The one to eight bytes within words, 1445 // dwords or qwords that span cache line boundaries will still be loaded 1446 // and stored atomically. 1447 // 1448 // Side Effects: 1449 // disjoint_byte_copy_entry is set to the no-overlap entry point 1450 // used by generate_conjoint_byte_copy(). 1451 // 1452 address generate_disjoint_byte_copy(bool aligned, address* entry, const char *name) { 1453 __ align(CodeEntryAlignment); 1454 StubCodeMark mark(this, "StubRoutines", name); 1455 address start = __ pc(); 1456 1457 Label L_copy_bytes, L_copy_8_bytes, L_copy_4_bytes, L_copy_2_bytes; 1458 Label L_copy_byte, L_exit; 1459 const Register from = rdi; // source array address 1460 const Register to = rsi; // destination array address 1461 const Register count = rdx; // elements count 1462 const Register byte_count = rcx; 1463 const Register qword_count = count; 1464 const Register end_from = from; // source array end address 1465 const Register end_to = to; // destination array end address 1466 // End pointers are inclusive, and if count is not zero they point 1467 // to the last unit copied: end_to[0] := end_from[0] 1468 1469 __ enter(); // required for proper stackwalking of RuntimeStub frame 1470 assert_clean_int(c_rarg2, rax); // Make sure 'count' is clean int. 1471 1472 if (entry != NULL) { 1473 *entry = __ pc(); 1474 // caller can pass a 64-bit byte count here (from Unsafe.copyMemory) 1475 BLOCK_COMMENT("Entry:"); 1476 } 1477 1478 setup_arg_regs(); // from => rdi, to => rsi, count => rdx 1479 // r9 and r10 may be used to save non-volatile registers 1480 1481 // 'from', 'to' and 'count' are now valid 1482 __ movptr(byte_count, count); 1483 __ shrptr(count, 3); // count => qword_count 1484 1485 // Copy from low to high addresses. Use 'to' as scratch. 1486 __ lea(end_from, Address(from, qword_count, Address::times_8, -8)); 1487 __ lea(end_to, Address(to, qword_count, Address::times_8, -8)); 1488 __ negptr(qword_count); // make the count negative 1489 __ jmp(L_copy_bytes); 1490 1491 // Copy trailing qwords 1492 __ BIND(L_copy_8_bytes); 1493 __ movq(rax, Address(end_from, qword_count, Address::times_8, 8)); 1494 __ movq(Address(end_to, qword_count, Address::times_8, 8), rax); 1495 __ increment(qword_count); 1496 __ jcc(Assembler::notZero, L_copy_8_bytes); 1497 1498 // Check for and copy trailing dword 1499 __ BIND(L_copy_4_bytes); 1500 __ testl(byte_count, 4); 1501 __ jccb(Assembler::zero, L_copy_2_bytes); 1502 __ movl(rax, Address(end_from, 8)); 1503 __ movl(Address(end_to, 8), rax); 1504 1505 __ addptr(end_from, 4); 1506 __ addptr(end_to, 4); 1507 1508 // Check for and copy trailing word 1509 __ BIND(L_copy_2_bytes); 1510 __ testl(byte_count, 2); 1511 __ jccb(Assembler::zero, L_copy_byte); 1512 __ movw(rax, Address(end_from, 8)); 1513 __ movw(Address(end_to, 8), rax); 1514 1515 __ addptr(end_from, 2); 1516 __ addptr(end_to, 2); 1517 1518 // Check for and copy trailing byte 1519 __ BIND(L_copy_byte); 1520 __ testl(byte_count, 1); 1521 __ jccb(Assembler::zero, L_exit); 1522 __ movb(rax, Address(end_from, 8)); 1523 __ movb(Address(end_to, 8), rax); 1524 1525 __ BIND(L_exit); 1526 restore_arg_regs(); 1527 inc_counter_np(SharedRuntime::_jbyte_array_copy_ctr); // Update counter after rscratch1 is free 1528 __ xorptr(rax, rax); // return 0 1529 __ leave(); // required for proper stackwalking of RuntimeStub frame 1530 __ ret(0); 1531 1532 // Copy in multi-bytes chunks 1533 copy_bytes_forward(end_from, end_to, qword_count, rax, L_copy_bytes, L_copy_8_bytes); 1534 __ jmp(L_copy_4_bytes); 1535 1536 return start; 1537 } 1538 1539 // Arguments: 1540 // aligned - true => Input and output aligned on a HeapWord == 8-byte boundary 1541 // ignored 1542 // name - stub name string 1543 // 1544 // Inputs: 1545 // c_rarg0 - source array address 1546 // c_rarg1 - destination array address 1547 // c_rarg2 - element count, treated as ssize_t, can be zero 1548 // 1549 // If 'from' and/or 'to' are aligned on 4-, 2-, or 1-byte boundaries, 1550 // we let the hardware handle it. The one to eight bytes within words, 1551 // dwords or qwords that span cache line boundaries will still be loaded 1552 // and stored atomically. 1553 // 1554 address generate_conjoint_byte_copy(bool aligned, address nooverlap_target, 1555 address* entry, const char *name) { 1556 __ align(CodeEntryAlignment); 1557 StubCodeMark mark(this, "StubRoutines", name); 1558 address start = __ pc(); 1559 1560 Label L_copy_bytes, L_copy_8_bytes, L_copy_4_bytes, L_copy_2_bytes; 1561 const Register from = rdi; // source array address 1562 const Register to = rsi; // destination array address 1563 const Register count = rdx; // elements count 1564 const Register byte_count = rcx; 1565 const Register qword_count = count; 1566 1567 __ enter(); // required for proper stackwalking of RuntimeStub frame 1568 assert_clean_int(c_rarg2, rax); // Make sure 'count' is clean int. 1569 1570 if (entry != NULL) { 1571 *entry = __ pc(); 1572 // caller can pass a 64-bit byte count here (from Unsafe.copyMemory) 1573 BLOCK_COMMENT("Entry:"); 1574 } 1575 1576 array_overlap_test(nooverlap_target, Address::times_1); 1577 setup_arg_regs(); // from => rdi, to => rsi, count => rdx 1578 // r9 and r10 may be used to save non-volatile registers 1579 1580 // 'from', 'to' and 'count' are now valid 1581 __ movptr(byte_count, count); 1582 __ shrptr(count, 3); // count => qword_count 1583 1584 // Copy from high to low addresses. 1585 1586 // Check for and copy trailing byte 1587 __ testl(byte_count, 1); 1588 __ jcc(Assembler::zero, L_copy_2_bytes); 1589 __ movb(rax, Address(from, byte_count, Address::times_1, -1)); 1590 __ movb(Address(to, byte_count, Address::times_1, -1), rax); 1591 __ decrement(byte_count); // Adjust for possible trailing word 1592 1593 // Check for and copy trailing word 1594 __ BIND(L_copy_2_bytes); 1595 __ testl(byte_count, 2); 1596 __ jcc(Assembler::zero, L_copy_4_bytes); 1597 __ movw(rax, Address(from, byte_count, Address::times_1, -2)); 1598 __ movw(Address(to, byte_count, Address::times_1, -2), rax); 1599 1600 // Check for and copy trailing dword 1601 __ BIND(L_copy_4_bytes); 1602 __ testl(byte_count, 4); 1603 __ jcc(Assembler::zero, L_copy_bytes); 1604 __ movl(rax, Address(from, qword_count, Address::times_8)); 1605 __ movl(Address(to, qword_count, Address::times_8), rax); 1606 __ jmp(L_copy_bytes); 1607 1608 // Copy trailing qwords 1609 __ BIND(L_copy_8_bytes); 1610 __ movq(rax, Address(from, qword_count, Address::times_8, -8)); 1611 __ movq(Address(to, qword_count, Address::times_8, -8), rax); 1612 __ decrement(qword_count); 1613 __ jcc(Assembler::notZero, L_copy_8_bytes); 1614 1615 restore_arg_regs(); 1616 inc_counter_np(SharedRuntime::_jbyte_array_copy_ctr); // Update counter after rscratch1 is free 1617 __ xorptr(rax, rax); // return 0 1618 __ leave(); // required for proper stackwalking of RuntimeStub frame 1619 __ ret(0); 1620 1621 // Copy in multi-bytes chunks 1622 copy_bytes_backward(from, to, qword_count, rax, L_copy_bytes, L_copy_8_bytes); 1623 1624 restore_arg_regs(); 1625 inc_counter_np(SharedRuntime::_jbyte_array_copy_ctr); // Update counter after rscratch1 is free 1626 __ xorptr(rax, rax); // return 0 1627 __ leave(); // required for proper stackwalking of RuntimeStub frame 1628 __ ret(0); 1629 1630 return start; 1631 } 1632 1633 // Arguments: 1634 // aligned - true => Input and output aligned on a HeapWord == 8-byte boundary 1635 // ignored 1636 // name - stub name string 1637 // 1638 // Inputs: 1639 // c_rarg0 - source array address 1640 // c_rarg1 - destination array address 1641 // c_rarg2 - element count, treated as ssize_t, can be zero 1642 // 1643 // If 'from' and/or 'to' are aligned on 4- or 2-byte boundaries, we 1644 // let the hardware handle it. The two or four words within dwords 1645 // or qwords that span cache line boundaries will still be loaded 1646 // and stored atomically. 1647 // 1648 // Side Effects: 1649 // disjoint_short_copy_entry is set to the no-overlap entry point 1650 // used by generate_conjoint_short_copy(). 1651 // 1652 address generate_disjoint_short_copy(bool aligned, address *entry, const char *name) { 1653 __ align(CodeEntryAlignment); 1654 StubCodeMark mark(this, "StubRoutines", name); 1655 address start = __ pc(); 1656 1657 Label L_copy_bytes, L_copy_8_bytes, L_copy_4_bytes,L_copy_2_bytes,L_exit; 1658 const Register from = rdi; // source array address 1659 const Register to = rsi; // destination array address 1660 const Register count = rdx; // elements count 1661 const Register word_count = rcx; 1662 const Register qword_count = count; 1663 const Register end_from = from; // source array end address 1664 const Register end_to = to; // destination array end address 1665 // End pointers are inclusive, and if count is not zero they point 1666 // to the last unit copied: end_to[0] := end_from[0] 1667 1668 __ enter(); // required for proper stackwalking of RuntimeStub frame 1669 assert_clean_int(c_rarg2, rax); // Make sure 'count' is clean int. 1670 1671 if (entry != NULL) { 1672 *entry = __ pc(); 1673 // caller can pass a 64-bit byte count here (from Unsafe.copyMemory) 1674 BLOCK_COMMENT("Entry:"); 1675 } 1676 1677 setup_arg_regs(); // from => rdi, to => rsi, count => rdx 1678 // r9 and r10 may be used to save non-volatile registers 1679 1680 // 'from', 'to' and 'count' are now valid 1681 __ movptr(word_count, count); 1682 __ shrptr(count, 2); // count => qword_count 1683 1684 // Copy from low to high addresses. Use 'to' as scratch. 1685 __ lea(end_from, Address(from, qword_count, Address::times_8, -8)); 1686 __ lea(end_to, Address(to, qword_count, Address::times_8, -8)); 1687 __ negptr(qword_count); 1688 __ jmp(L_copy_bytes); 1689 1690 // Copy trailing qwords 1691 __ BIND(L_copy_8_bytes); 1692 __ movq(rax, Address(end_from, qword_count, Address::times_8, 8)); 1693 __ movq(Address(end_to, qword_count, Address::times_8, 8), rax); 1694 __ increment(qword_count); 1695 __ jcc(Assembler::notZero, L_copy_8_bytes); 1696 1697 // Original 'dest' is trashed, so we can't use it as a 1698 // base register for a possible trailing word copy 1699 1700 // Check for and copy trailing dword 1701 __ BIND(L_copy_4_bytes); 1702 __ testl(word_count, 2); 1703 __ jccb(Assembler::zero, L_copy_2_bytes); 1704 __ movl(rax, Address(end_from, 8)); 1705 __ movl(Address(end_to, 8), rax); 1706 1707 __ addptr(end_from, 4); 1708 __ addptr(end_to, 4); 1709 1710 // Check for and copy trailing word 1711 __ BIND(L_copy_2_bytes); 1712 __ testl(word_count, 1); 1713 __ jccb(Assembler::zero, L_exit); 1714 __ movw(rax, Address(end_from, 8)); 1715 __ movw(Address(end_to, 8), rax); 1716 1717 __ BIND(L_exit); 1718 restore_arg_regs(); 1719 inc_counter_np(SharedRuntime::_jshort_array_copy_ctr); // Update counter after rscratch1 is free 1720 __ xorptr(rax, rax); // return 0 1721 __ leave(); // required for proper stackwalking of RuntimeStub frame 1722 __ ret(0); 1723 1724 // Copy in multi-bytes chunks 1725 copy_bytes_forward(end_from, end_to, qword_count, rax, L_copy_bytes, L_copy_8_bytes); 1726 __ jmp(L_copy_4_bytes); 1727 1728 return start; 1729 } 1730 1731 address generate_fill(BasicType t, bool aligned, const char *name) { 1732 __ align(CodeEntryAlignment); 1733 StubCodeMark mark(this, "StubRoutines", name); 1734 address start = __ pc(); 1735 1736 BLOCK_COMMENT("Entry:"); 1737 1738 const Register to = c_rarg0; // source array address 1739 const Register value = c_rarg1; // value 1740 const Register count = c_rarg2; // elements count 1741 1742 __ enter(); // required for proper stackwalking of RuntimeStub frame 1743 1744 __ generate_fill(t, aligned, to, value, count, rax, xmm0); 1745 1746 __ leave(); // required for proper stackwalking of RuntimeStub frame 1747 __ ret(0); 1748 return start; 1749 } 1750 1751 // Arguments: 1752 // aligned - true => Input and output aligned on a HeapWord == 8-byte boundary 1753 // ignored 1754 // name - stub name string 1755 // 1756 // Inputs: 1757 // c_rarg0 - source array address 1758 // c_rarg1 - destination array address 1759 // c_rarg2 - element count, treated as ssize_t, can be zero 1760 // 1761 // If 'from' and/or 'to' are aligned on 4- or 2-byte boundaries, we 1762 // let the hardware handle it. The two or four words within dwords 1763 // or qwords that span cache line boundaries will still be loaded 1764 // and stored atomically. 1765 // 1766 address generate_conjoint_short_copy(bool aligned, address nooverlap_target, 1767 address *entry, const char *name) { 1768 __ align(CodeEntryAlignment); 1769 StubCodeMark mark(this, "StubRoutines", name); 1770 address start = __ pc(); 1771 1772 Label L_copy_bytes, L_copy_8_bytes, L_copy_4_bytes; 1773 const Register from = rdi; // source array address 1774 const Register to = rsi; // destination array address 1775 const Register count = rdx; // elements count 1776 const Register word_count = rcx; 1777 const Register qword_count = count; 1778 1779 __ enter(); // required for proper stackwalking of RuntimeStub frame 1780 assert_clean_int(c_rarg2, rax); // Make sure 'count' is clean int. 1781 1782 if (entry != NULL) { 1783 *entry = __ pc(); 1784 // caller can pass a 64-bit byte count here (from Unsafe.copyMemory) 1785 BLOCK_COMMENT("Entry:"); 1786 } 1787 1788 array_overlap_test(nooverlap_target, Address::times_2); 1789 setup_arg_regs(); // from => rdi, to => rsi, count => rdx 1790 // r9 and r10 may be used to save non-volatile registers 1791 1792 // 'from', 'to' and 'count' are now valid 1793 __ movptr(word_count, count); 1794 __ shrptr(count, 2); // count => qword_count 1795 1796 // Copy from high to low addresses. Use 'to' as scratch. 1797 1798 // Check for and copy trailing word 1799 __ testl(word_count, 1); 1800 __ jccb(Assembler::zero, L_copy_4_bytes); 1801 __ movw(rax, Address(from, word_count, Address::times_2, -2)); 1802 __ movw(Address(to, word_count, Address::times_2, -2), rax); 1803 1804 // Check for and copy trailing dword 1805 __ BIND(L_copy_4_bytes); 1806 __ testl(word_count, 2); 1807 __ jcc(Assembler::zero, L_copy_bytes); 1808 __ movl(rax, Address(from, qword_count, Address::times_8)); 1809 __ movl(Address(to, qword_count, Address::times_8), rax); 1810 __ jmp(L_copy_bytes); 1811 1812 // Copy trailing qwords 1813 __ BIND(L_copy_8_bytes); 1814 __ movq(rax, Address(from, qword_count, Address::times_8, -8)); 1815 __ movq(Address(to, qword_count, Address::times_8, -8), rax); 1816 __ decrement(qword_count); 1817 __ jcc(Assembler::notZero, L_copy_8_bytes); 1818 1819 restore_arg_regs(); 1820 inc_counter_np(SharedRuntime::_jshort_array_copy_ctr); // Update counter after rscratch1 is free 1821 __ xorptr(rax, rax); // return 0 1822 __ leave(); // required for proper stackwalking of RuntimeStub frame 1823 __ ret(0); 1824 1825 // Copy in multi-bytes chunks 1826 copy_bytes_backward(from, to, qword_count, rax, L_copy_bytes, L_copy_8_bytes); 1827 1828 restore_arg_regs(); 1829 inc_counter_np(SharedRuntime::_jshort_array_copy_ctr); // Update counter after rscratch1 is free 1830 __ xorptr(rax, rax); // return 0 1831 __ leave(); // required for proper stackwalking of RuntimeStub frame 1832 __ ret(0); 1833 1834 return start; 1835 } 1836 1837 // Arguments: 1838 // aligned - true => Input and output aligned on a HeapWord == 8-byte boundary 1839 // ignored 1840 // is_oop - true => oop array, so generate store check code 1841 // name - stub name string 1842 // 1843 // Inputs: 1844 // c_rarg0 - source array address 1845 // c_rarg1 - destination array address 1846 // c_rarg2 - element count, treated as ssize_t, can be zero 1847 // 1848 // If 'from' and/or 'to' are aligned on 4-byte boundaries, we let 1849 // the hardware handle it. The two dwords within qwords that span 1850 // cache line boundaries will still be loaded and stored atomicly. 1851 // 1852 // Side Effects: 1853 // disjoint_int_copy_entry is set to the no-overlap entry point 1854 // used by generate_conjoint_int_oop_copy(). 1855 // 1856 address generate_disjoint_int_oop_copy(bool aligned, bool is_oop, address* entry, 1857 const char *name, bool dest_uninitialized = false) { 1858 __ align(CodeEntryAlignment); 1859 StubCodeMark mark(this, "StubRoutines", name); 1860 address start = __ pc(); 1861 1862 Label L_copy_bytes, L_copy_8_bytes, L_copy_4_bytes, L_exit; 1863 const Register from = rdi; // source array address 1864 const Register to = rsi; // destination array address 1865 const Register count = rdx; // elements count 1866 const Register dword_count = rcx; 1867 const Register qword_count = count; 1868 const Register end_from = from; // source array end address 1869 const Register end_to = to; // destination array end address 1870 const Register saved_to = r11; // saved destination array address 1871 // End pointers are inclusive, and if count is not zero they point 1872 // to the last unit copied: end_to[0] := end_from[0] 1873 1874 __ enter(); // required for proper stackwalking of RuntimeStub frame 1875 assert_clean_int(c_rarg2, rax); // Make sure 'count' is clean int. 1876 1877 if (entry != NULL) { 1878 *entry = __ pc(); 1879 // caller can pass a 64-bit byte count here (from Unsafe.copyMemory) 1880 BLOCK_COMMENT("Entry:"); 1881 } 1882 1883 setup_arg_regs(); // from => rdi, to => rsi, count => rdx 1884 // r9 and r10 may be used to save non-volatile registers 1885 if (is_oop) { 1886 __ movq(saved_to, to); 1887 gen_write_ref_array_pre_barrier(to, count, dest_uninitialized); 1888 } 1889 1890 // 'from', 'to' and 'count' are now valid 1891 __ movptr(dword_count, count); 1892 __ shrptr(count, 1); // count => qword_count 1893 1894 // Copy from low to high addresses. Use 'to' as scratch. 1895 __ lea(end_from, Address(from, qword_count, Address::times_8, -8)); 1896 __ lea(end_to, Address(to, qword_count, Address::times_8, -8)); 1897 __ negptr(qword_count); 1898 __ jmp(L_copy_bytes); 1899 1900 // Copy trailing qwords 1901 __ BIND(L_copy_8_bytes); 1902 __ movq(rax, Address(end_from, qword_count, Address::times_8, 8)); 1903 __ movq(Address(end_to, qword_count, Address::times_8, 8), rax); 1904 __ increment(qword_count); 1905 __ jcc(Assembler::notZero, L_copy_8_bytes); 1906 1907 // Check for and copy trailing dword 1908 __ BIND(L_copy_4_bytes); 1909 __ testl(dword_count, 1); // Only byte test since the value is 0 or 1 1910 __ jccb(Assembler::zero, L_exit); 1911 __ movl(rax, Address(end_from, 8)); 1912 __ movl(Address(end_to, 8), rax); 1913 1914 __ BIND(L_exit); 1915 if (is_oop) { 1916 gen_write_ref_array_post_barrier(saved_to, dword_count, rax); 1917 } 1918 restore_arg_regs(); 1919 inc_counter_np(SharedRuntime::_jint_array_copy_ctr); // Update counter after rscratch1 is free 1920 __ xorptr(rax, rax); // return 0 1921 __ leave(); // required for proper stackwalking of RuntimeStub frame 1922 __ ret(0); 1923 1924 // Copy in multi-bytes chunks 1925 copy_bytes_forward(end_from, end_to, qword_count, rax, L_copy_bytes, L_copy_8_bytes); 1926 __ jmp(L_copy_4_bytes); 1927 1928 return start; 1929 } 1930 1931 // Arguments: 1932 // aligned - true => Input and output aligned on a HeapWord == 8-byte boundary 1933 // ignored 1934 // is_oop - true => oop array, so generate store check code 1935 // name - stub name string 1936 // 1937 // Inputs: 1938 // c_rarg0 - source array address 1939 // c_rarg1 - destination array address 1940 // c_rarg2 - element count, treated as ssize_t, can be zero 1941 // 1942 // If 'from' and/or 'to' are aligned on 4-byte boundaries, we let 1943 // the hardware handle it. The two dwords within qwords that span 1944 // cache line boundaries will still be loaded and stored atomicly. 1945 // 1946 address generate_conjoint_int_oop_copy(bool aligned, bool is_oop, address nooverlap_target, 1947 address *entry, const char *name, 1948 bool dest_uninitialized = false) { 1949 __ align(CodeEntryAlignment); 1950 StubCodeMark mark(this, "StubRoutines", name); 1951 address start = __ pc(); 1952 1953 Label L_copy_bytes, L_copy_8_bytes, L_copy_2_bytes, L_exit; 1954 const Register from = rdi; // source array address 1955 const Register to = rsi; // destination array address 1956 const Register count = rdx; // elements count 1957 const Register dword_count = rcx; 1958 const Register qword_count = count; 1959 1960 __ enter(); // required for proper stackwalking of RuntimeStub frame 1961 assert_clean_int(c_rarg2, rax); // Make sure 'count' is clean int. 1962 1963 if (entry != NULL) { 1964 *entry = __ pc(); 1965 // caller can pass a 64-bit byte count here (from Unsafe.copyMemory) 1966 BLOCK_COMMENT("Entry:"); 1967 } 1968 1969 array_overlap_test(nooverlap_target, Address::times_4); 1970 setup_arg_regs(); // from => rdi, to => rsi, count => rdx 1971 // r9 and r10 may be used to save non-volatile registers 1972 1973 if (is_oop) { 1974 // no registers are destroyed by this call 1975 gen_write_ref_array_pre_barrier(to, count, dest_uninitialized); 1976 } 1977 1978 assert_clean_int(count, rax); // Make sure 'count' is clean int. 1979 // 'from', 'to' and 'count' are now valid 1980 __ movptr(dword_count, count); 1981 __ shrptr(count, 1); // count => qword_count 1982 1983 // Copy from high to low addresses. Use 'to' as scratch. 1984 1985 // Check for and copy trailing dword 1986 __ testl(dword_count, 1); 1987 __ jcc(Assembler::zero, L_copy_bytes); 1988 __ movl(rax, Address(from, dword_count, Address::times_4, -4)); 1989 __ movl(Address(to, dword_count, Address::times_4, -4), rax); 1990 __ jmp(L_copy_bytes); 1991 1992 // Copy trailing qwords 1993 __ BIND(L_copy_8_bytes); 1994 __ movq(rax, Address(from, qword_count, Address::times_8, -8)); 1995 __ movq(Address(to, qword_count, Address::times_8, -8), rax); 1996 __ decrement(qword_count); 1997 __ jcc(Assembler::notZero, L_copy_8_bytes); 1998 1999 if (is_oop) { 2000 __ jmp(L_exit); 2001 } 2002 restore_arg_regs(); 2003 inc_counter_np(SharedRuntime::_jint_array_copy_ctr); // Update counter after rscratch1 is free 2004 __ xorptr(rax, rax); // return 0 2005 __ leave(); // required for proper stackwalking of RuntimeStub frame 2006 __ ret(0); 2007 2008 // Copy in multi-bytes chunks 2009 copy_bytes_backward(from, to, qword_count, rax, L_copy_bytes, L_copy_8_bytes); 2010 2011 __ BIND(L_exit); 2012 if (is_oop) { 2013 gen_write_ref_array_post_barrier(to, dword_count, rax); 2014 } 2015 restore_arg_regs(); 2016 inc_counter_np(SharedRuntime::_jint_array_copy_ctr); // Update counter after rscratch1 is free 2017 __ xorptr(rax, rax); // return 0 2018 __ leave(); // required for proper stackwalking of RuntimeStub frame 2019 __ ret(0); 2020 2021 return start; 2022 } 2023 2024 // Arguments: 2025 // aligned - true => Input and output aligned on a HeapWord boundary == 8 bytes 2026 // ignored 2027 // is_oop - true => oop array, so generate store check code 2028 // name - stub name string 2029 // 2030 // Inputs: 2031 // c_rarg0 - source array address 2032 // c_rarg1 - destination array address 2033 // c_rarg2 - element count, treated as ssize_t, can be zero 2034 // 2035 // Side Effects: 2036 // disjoint_oop_copy_entry or disjoint_long_copy_entry is set to the 2037 // no-overlap entry point used by generate_conjoint_long_oop_copy(). 2038 // 2039 address generate_disjoint_long_oop_copy(bool aligned, bool is_oop, address *entry, 2040 const char *name, bool dest_uninitialized = false) { 2041 __ align(CodeEntryAlignment); 2042 StubCodeMark mark(this, "StubRoutines", name); 2043 address start = __ pc(); 2044 2045 Label L_copy_bytes, L_copy_8_bytes, L_exit; 2046 const Register from = rdi; // source array address 2047 const Register to = rsi; // destination array address 2048 const Register qword_count = rdx; // elements count 2049 const Register end_from = from; // source array end address 2050 const Register end_to = rcx; // destination array end address 2051 const Register saved_to = to; 2052 const Register saved_count = r11; 2053 // End pointers are inclusive, and if count is not zero they point 2054 // to the last unit copied: end_to[0] := end_from[0] 2055 2056 __ enter(); // required for proper stackwalking of RuntimeStub frame 2057 // Save no-overlap entry point for generate_conjoint_long_oop_copy() 2058 assert_clean_int(c_rarg2, rax); // Make sure 'count' is clean int. 2059 2060 if (entry != NULL) { 2061 *entry = __ pc(); 2062 // caller can pass a 64-bit byte count here (from Unsafe.copyMemory) 2063 BLOCK_COMMENT("Entry:"); 2064 } 2065 2066 setup_arg_regs(); // from => rdi, to => rsi, count => rdx 2067 // r9 and r10 may be used to save non-volatile registers 2068 // 'from', 'to' and 'qword_count' are now valid 2069 if (is_oop) { 2070 // Save to and count for store barrier 2071 __ movptr(saved_count, qword_count); 2072 // no registers are destroyed by this call 2073 gen_write_ref_array_pre_barrier(to, qword_count, dest_uninitialized); 2074 } 2075 2076 // Copy from low to high addresses. Use 'to' as scratch. 2077 __ lea(end_from, Address(from, qword_count, Address::times_8, -8)); 2078 __ lea(end_to, Address(to, qword_count, Address::times_8, -8)); 2079 __ negptr(qword_count); 2080 __ jmp(L_copy_bytes); 2081 2082 // Copy trailing qwords 2083 __ BIND(L_copy_8_bytes); 2084 __ movq(rax, Address(end_from, qword_count, Address::times_8, 8)); 2085 __ movq(Address(end_to, qword_count, Address::times_8, 8), rax); 2086 __ increment(qword_count); 2087 __ jcc(Assembler::notZero, L_copy_8_bytes); 2088 2089 if (is_oop) { 2090 __ jmp(L_exit); 2091 } else { 2092 restore_arg_regs(); 2093 inc_counter_np(SharedRuntime::_jlong_array_copy_ctr); // Update counter after rscratch1 is free 2094 __ xorptr(rax, rax); // return 0 2095 __ leave(); // required for proper stackwalking of RuntimeStub frame 2096 __ ret(0); 2097 } 2098 2099 // Copy in multi-bytes chunks 2100 copy_bytes_forward(end_from, end_to, qword_count, rax, L_copy_bytes, L_copy_8_bytes); 2101 2102 if (is_oop) { 2103 __ BIND(L_exit); 2104 gen_write_ref_array_post_barrier(saved_to, saved_count, rax); 2105 } 2106 restore_arg_regs(); 2107 if (is_oop) { 2108 inc_counter_np(SharedRuntime::_oop_array_copy_ctr); // Update counter after rscratch1 is free 2109 } else { 2110 inc_counter_np(SharedRuntime::_jlong_array_copy_ctr); // Update counter after rscratch1 is free 2111 } 2112 __ xorptr(rax, rax); // return 0 2113 __ leave(); // required for proper stackwalking of RuntimeStub frame 2114 __ ret(0); 2115 2116 return start; 2117 } 2118 2119 // Arguments: 2120 // aligned - true => Input and output aligned on a HeapWord boundary == 8 bytes 2121 // ignored 2122 // is_oop - true => oop array, so generate store check code 2123 // name - stub name string 2124 // 2125 // Inputs: 2126 // c_rarg0 - source array address 2127 // c_rarg1 - destination array address 2128 // c_rarg2 - element count, treated as ssize_t, can be zero 2129 // 2130 address generate_conjoint_long_oop_copy(bool aligned, bool is_oop, 2131 address nooverlap_target, address *entry, 2132 const char *name, bool dest_uninitialized = false) { 2133 __ align(CodeEntryAlignment); 2134 StubCodeMark mark(this, "StubRoutines", name); 2135 address start = __ pc(); 2136 2137 Label L_copy_bytes, L_copy_8_bytes, L_exit; 2138 const Register from = rdi; // source array address 2139 const Register to = rsi; // destination array address 2140 const Register qword_count = rdx; // elements count 2141 const Register saved_count = rcx; 2142 2143 __ enter(); // required for proper stackwalking of RuntimeStub frame 2144 assert_clean_int(c_rarg2, rax); // Make sure 'count' is clean int. 2145 2146 if (entry != NULL) { 2147 *entry = __ pc(); 2148 // caller can pass a 64-bit byte count here (from Unsafe.copyMemory) 2149 BLOCK_COMMENT("Entry:"); 2150 } 2151 2152 array_overlap_test(nooverlap_target, Address::times_8); 2153 setup_arg_regs(); // from => rdi, to => rsi, count => rdx 2154 // r9 and r10 may be used to save non-volatile registers 2155 // 'from', 'to' and 'qword_count' are now valid 2156 if (is_oop) { 2157 // Save to and count for store barrier 2158 __ movptr(saved_count, qword_count); 2159 // No registers are destroyed by this call 2160 gen_write_ref_array_pre_barrier(to, saved_count, dest_uninitialized); 2161 } 2162 2163 __ jmp(L_copy_bytes); 2164 2165 // Copy trailing qwords 2166 __ BIND(L_copy_8_bytes); 2167 __ movq(rax, Address(from, qword_count, Address::times_8, -8)); 2168 __ movq(Address(to, qword_count, Address::times_8, -8), rax); 2169 __ decrement(qword_count); 2170 __ jcc(Assembler::notZero, L_copy_8_bytes); 2171 2172 if (is_oop) { 2173 __ jmp(L_exit); 2174 } else { 2175 restore_arg_regs(); 2176 inc_counter_np(SharedRuntime::_jlong_array_copy_ctr); // Update counter after rscratch1 is free 2177 __ xorptr(rax, rax); // return 0 2178 __ leave(); // required for proper stackwalking of RuntimeStub frame 2179 __ ret(0); 2180 } 2181 2182 // Copy in multi-bytes chunks 2183 copy_bytes_backward(from, to, qword_count, rax, L_copy_bytes, L_copy_8_bytes); 2184 2185 if (is_oop) { 2186 __ BIND(L_exit); 2187 gen_write_ref_array_post_barrier(to, saved_count, rax); 2188 } 2189 restore_arg_regs(); 2190 if (is_oop) { 2191 inc_counter_np(SharedRuntime::_oop_array_copy_ctr); // Update counter after rscratch1 is free 2192 } else { 2193 inc_counter_np(SharedRuntime::_jlong_array_copy_ctr); // Update counter after rscratch1 is free 2194 } 2195 __ xorptr(rax, rax); // return 0 2196 __ leave(); // required for proper stackwalking of RuntimeStub frame 2197 __ ret(0); 2198 2199 return start; 2200 } 2201 2202 2203 // Helper for generating a dynamic type check. 2204 // Smashes no registers. 2205 void generate_type_check(Register sub_klass, 2206 Register super_check_offset, 2207 Register super_klass, 2208 Label& L_success) { 2209 assert_different_registers(sub_klass, super_check_offset, super_klass); 2210 2211 BLOCK_COMMENT("type_check:"); 2212 2213 Label L_miss; 2214 2215 __ check_klass_subtype_fast_path(sub_klass, super_klass, noreg, &L_success, &L_miss, NULL, 2216 super_check_offset); 2217 __ check_klass_subtype_slow_path(sub_klass, super_klass, noreg, noreg, &L_success, NULL); 2218 2219 // Fall through on failure! 2220 __ BIND(L_miss); 2221 } 2222 2223 // 2224 // Generate checkcasting array copy stub 2225 // 2226 // Input: 2227 // c_rarg0 - source array address 2228 // c_rarg1 - destination array address 2229 // c_rarg2 - element count, treated as ssize_t, can be zero 2230 // c_rarg3 - size_t ckoff (super_check_offset) 2231 // not Win64 2232 // c_rarg4 - oop ckval (super_klass) 2233 // Win64 2234 // rsp+40 - oop ckval (super_klass) 2235 // 2236 // Output: 2237 // rax == 0 - success 2238 // rax == -1^K - failure, where K is partial transfer count 2239 // 2240 address generate_checkcast_copy(const char *name, address *entry, 2241 bool dest_uninitialized = false) { 2242 2243 Label L_load_element, L_store_element, L_do_card_marks, L_done; 2244 2245 // Input registers (after setup_arg_regs) 2246 const Register from = rdi; // source array address 2247 const Register to = rsi; // destination array address 2248 const Register length = rdx; // elements count 2249 const Register ckoff = rcx; // super_check_offset 2250 const Register ckval = r8; // super_klass 2251 2252 // Registers used as temps (r13, r14 are save-on-entry) 2253 const Register end_from = from; // source array end address 2254 const Register end_to = r13; // destination array end address 2255 const Register count = rdx; // -(count_remaining) 2256 const Register r14_length = r14; // saved copy of length 2257 // End pointers are inclusive, and if length is not zero they point 2258 // to the last unit copied: end_to[0] := end_from[0] 2259 2260 const Register rax_oop = rax; // actual oop copied 2261 const Register r11_klass = r11; // oop._klass 2262 2263 //--------------------------------------------------------------- 2264 // Assembler stub will be used for this call to arraycopy 2265 // if the two arrays are subtypes of Object[] but the 2266 // destination array type is not equal to or a supertype 2267 // of the source type. Each element must be separately 2268 // checked. 2269 2270 __ align(CodeEntryAlignment); 2271 StubCodeMark mark(this, "StubRoutines", name); 2272 address start = __ pc(); 2273 2274 __ enter(); // required for proper stackwalking of RuntimeStub frame 2275 2276 #ifdef ASSERT 2277 // caller guarantees that the arrays really are different 2278 // otherwise, we would have to make conjoint checks 2279 { Label L; 2280 array_overlap_test(L, TIMES_OOP); 2281 __ stop("checkcast_copy within a single array"); 2282 __ bind(L); 2283 } 2284 #endif //ASSERT 2285 2286 setup_arg_regs(4); // from => rdi, to => rsi, length => rdx 2287 // ckoff => rcx, ckval => r8 2288 // r9 and r10 may be used to save non-volatile registers 2289 #ifdef _WIN64 2290 // last argument (#4) is on stack on Win64 2291 __ movptr(ckval, Address(rsp, 6 * wordSize)); 2292 #endif 2293 2294 // Caller of this entry point must set up the argument registers. 2295 if (entry != NULL) { 2296 *entry = __ pc(); 2297 BLOCK_COMMENT("Entry:"); 2298 } 2299 2300 // allocate spill slots for r13, r14 2301 enum { 2302 saved_r13_offset, 2303 saved_r14_offset, 2304 saved_rbp_offset 2305 }; 2306 __ subptr(rsp, saved_rbp_offset * wordSize); 2307 __ movptr(Address(rsp, saved_r13_offset * wordSize), r13); 2308 __ movptr(Address(rsp, saved_r14_offset * wordSize), r14); 2309 2310 // check that int operands are properly extended to size_t 2311 assert_clean_int(length, rax); 2312 assert_clean_int(ckoff, rax); 2313 2314 #ifdef ASSERT 2315 BLOCK_COMMENT("assert consistent ckoff/ckval"); 2316 // The ckoff and ckval must be mutually consistent, 2317 // even though caller generates both. 2318 { Label L; 2319 int sco_offset = in_bytes(Klass::super_check_offset_offset()); 2320 __ cmpl(ckoff, Address(ckval, sco_offset)); 2321 __ jcc(Assembler::equal, L); 2322 __ stop("super_check_offset inconsistent"); 2323 __ bind(L); 2324 } 2325 #endif //ASSERT 2326 2327 // Loop-invariant addresses. They are exclusive end pointers. 2328 Address end_from_addr(from, length, TIMES_OOP, 0); 2329 Address end_to_addr(to, length, TIMES_OOP, 0); 2330 // Loop-variant addresses. They assume post-incremented count < 0. 2331 Address from_element_addr(end_from, count, TIMES_OOP, 0); 2332 Address to_element_addr(end_to, count, TIMES_OOP, 0); 2333 2334 gen_write_ref_array_pre_barrier(to, count, dest_uninitialized); 2335 2336 // Copy from low to high addresses, indexed from the end of each array. 2337 __ lea(end_from, end_from_addr); 2338 __ lea(end_to, end_to_addr); 2339 __ movptr(r14_length, length); // save a copy of the length 2340 assert(length == count, ""); // else fix next line: 2341 __ negptr(count); // negate and test the length 2342 __ jcc(Assembler::notZero, L_load_element); 2343 2344 // Empty array: Nothing to do. 2345 __ xorptr(rax, rax); // return 0 on (trivial) success 2346 __ jmp(L_done); 2347 2348 // ======== begin loop ======== 2349 // (Loop is rotated; its entry is L_load_element.) 2350 // Loop control: 2351 // for (count = -count; count != 0; count++) 2352 // Base pointers src, dst are biased by 8*(count-1),to last element. 2353 __ align(OptoLoopAlignment); 2354 2355 __ BIND(L_store_element); 2356 __ store_heap_oop(to_element_addr, rax_oop); // store the oop 2357 __ increment(count); // increment the count toward zero 2358 __ jcc(Assembler::zero, L_do_card_marks); 2359 2360 // ======== loop entry is here ======== 2361 __ BIND(L_load_element); 2362 __ load_heap_oop(rax_oop, from_element_addr); // load the oop 2363 __ testptr(rax_oop, rax_oop); 2364 __ jcc(Assembler::zero, L_store_element); 2365 2366 __ load_klass(r11_klass, rax_oop);// query the object klass 2367 generate_type_check(r11_klass, ckoff, ckval, L_store_element); 2368 // ======== end loop ======== 2369 2370 // It was a real error; we must depend on the caller to finish the job. 2371 // Register rdx = -1 * number of *remaining* oops, r14 = *total* oops. 2372 // Emit GC store barriers for the oops we have copied (r14 + rdx), 2373 // and report their number to the caller. 2374 assert_different_registers(rax, r14_length, count, to, end_to, rcx, rscratch1); 2375 Label L_post_barrier; 2376 __ addptr(r14_length, count); // K = (original - remaining) oops 2377 __ movptr(rax, r14_length); // save the value 2378 __ notptr(rax); // report (-1^K) to caller (does not affect flags) 2379 __ jccb(Assembler::notZero, L_post_barrier); 2380 __ jmp(L_done); // K == 0, nothing was copied, skip post barrier 2381 2382 // Come here on success only. 2383 __ BIND(L_do_card_marks); 2384 __ xorptr(rax, rax); // return 0 on success 2385 2386 __ BIND(L_post_barrier); 2387 gen_write_ref_array_post_barrier(to, r14_length, rscratch1); 2388 2389 // Common exit point (success or failure). 2390 __ BIND(L_done); 2391 __ movptr(r13, Address(rsp, saved_r13_offset * wordSize)); 2392 __ movptr(r14, Address(rsp, saved_r14_offset * wordSize)); 2393 restore_arg_regs(); 2394 inc_counter_np(SharedRuntime::_checkcast_array_copy_ctr); // Update counter after rscratch1 is free 2395 __ leave(); // required for proper stackwalking of RuntimeStub frame 2396 __ ret(0); 2397 2398 return start; 2399 } 2400 2401 // 2402 // Generate 'unsafe' array copy stub 2403 // Though just as safe as the other stubs, it takes an unscaled 2404 // size_t argument instead of an element count. 2405 // 2406 // Input: 2407 // c_rarg0 - source array address 2408 // c_rarg1 - destination array address 2409 // c_rarg2 - byte count, treated as ssize_t, can be zero 2410 // 2411 // Examines the alignment of the operands and dispatches 2412 // to a long, int, short, or byte copy loop. 2413 // 2414 address generate_unsafe_copy(const char *name, 2415 address byte_copy_entry, address short_copy_entry, 2416 address int_copy_entry, address long_copy_entry) { 2417 2418 Label L_long_aligned, L_int_aligned, L_short_aligned; 2419 2420 // Input registers (before setup_arg_regs) 2421 const Register from = c_rarg0; // source array address 2422 const Register to = c_rarg1; // destination array address 2423 const Register size = c_rarg2; // byte count (size_t) 2424 2425 // Register used as a temp 2426 const Register bits = rax; // test copy of low bits 2427 2428 __ align(CodeEntryAlignment); 2429 StubCodeMark mark(this, "StubRoutines", name); 2430 address start = __ pc(); 2431 2432 __ enter(); // required for proper stackwalking of RuntimeStub frame 2433 2434 // bump this on entry, not on exit: 2435 inc_counter_np(SharedRuntime::_unsafe_array_copy_ctr); 2436 2437 __ mov(bits, from); 2438 __ orptr(bits, to); 2439 __ orptr(bits, size); 2440 2441 __ testb(bits, BytesPerLong-1); 2442 __ jccb(Assembler::zero, L_long_aligned); 2443 2444 __ testb(bits, BytesPerInt-1); 2445 __ jccb(Assembler::zero, L_int_aligned); 2446 2447 __ testb(bits, BytesPerShort-1); 2448 __ jump_cc(Assembler::notZero, RuntimeAddress(byte_copy_entry)); 2449 2450 __ BIND(L_short_aligned); 2451 __ shrptr(size, LogBytesPerShort); // size => short_count 2452 __ jump(RuntimeAddress(short_copy_entry)); 2453 2454 __ BIND(L_int_aligned); 2455 __ shrptr(size, LogBytesPerInt); // size => int_count 2456 __ jump(RuntimeAddress(int_copy_entry)); 2457 2458 __ BIND(L_long_aligned); 2459 __ shrptr(size, LogBytesPerLong); // size => qword_count 2460 __ jump(RuntimeAddress(long_copy_entry)); 2461 2462 return start; 2463 } 2464 2465 // Perform range checks on the proposed arraycopy. 2466 // Kills temp, but nothing else. 2467 // Also, clean the sign bits of src_pos and dst_pos. 2468 void arraycopy_range_checks(Register src, // source array oop (c_rarg0) 2469 Register src_pos, // source position (c_rarg1) 2470 Register dst, // destination array oo (c_rarg2) 2471 Register dst_pos, // destination position (c_rarg3) 2472 Register length, 2473 Register temp, 2474 Label& L_failed) { 2475 BLOCK_COMMENT("arraycopy_range_checks:"); 2476 2477 // if (src_pos + length > arrayOop(src)->length()) FAIL; 2478 __ movl(temp, length); 2479 __ addl(temp, src_pos); // src_pos + length 2480 __ cmpl(temp, Address(src, arrayOopDesc::length_offset_in_bytes())); 2481 __ jcc(Assembler::above, L_failed); 2482 2483 // if (dst_pos + length > arrayOop(dst)->length()) FAIL; 2484 __ movl(temp, length); 2485 __ addl(temp, dst_pos); // dst_pos + length 2486 __ cmpl(temp, Address(dst, arrayOopDesc::length_offset_in_bytes())); 2487 __ jcc(Assembler::above, L_failed); 2488 2489 // Have to clean up high 32-bits of 'src_pos' and 'dst_pos'. 2490 // Move with sign extension can be used since they are positive. 2491 __ movslq(src_pos, src_pos); 2492 __ movslq(dst_pos, dst_pos); 2493 2494 BLOCK_COMMENT("arraycopy_range_checks done"); 2495 } 2496 2497 // 2498 // Generate generic array copy stubs 2499 // 2500 // Input: 2501 // c_rarg0 - src oop 2502 // c_rarg1 - src_pos (32-bits) 2503 // c_rarg2 - dst oop 2504 // c_rarg3 - dst_pos (32-bits) 2505 // not Win64 2506 // c_rarg4 - element count (32-bits) 2507 // Win64 2508 // rsp+40 - element count (32-bits) 2509 // 2510 // Output: 2511 // rax == 0 - success 2512 // rax == -1^K - failure, where K is partial transfer count 2513 // 2514 address generate_generic_copy(const char *name, 2515 address byte_copy_entry, address short_copy_entry, 2516 address int_copy_entry, address oop_copy_entry, 2517 address long_copy_entry, address checkcast_copy_entry) { 2518 2519 Label L_failed, L_failed_0, L_objArray; 2520 Label L_copy_bytes, L_copy_shorts, L_copy_ints, L_copy_longs; 2521 2522 // Input registers 2523 const Register src = c_rarg0; // source array oop 2524 const Register src_pos = c_rarg1; // source position 2525 const Register dst = c_rarg2; // destination array oop 2526 const Register dst_pos = c_rarg3; // destination position 2527 #ifndef _WIN64 2528 const Register length = c_rarg4; 2529 #else 2530 const Address length(rsp, 6 * wordSize); // elements count is on stack on Win64 2531 #endif 2532 2533 { int modulus = CodeEntryAlignment; 2534 int target = modulus - 5; // 5 = sizeof jmp(L_failed) 2535 int advance = target - (__ offset() % modulus); 2536 if (advance < 0) advance += modulus; 2537 if (advance > 0) __ nop(advance); 2538 } 2539 StubCodeMark mark(this, "StubRoutines", name); 2540 2541 // Short-hop target to L_failed. Makes for denser prologue code. 2542 __ BIND(L_failed_0); 2543 __ jmp(L_failed); 2544 assert(__ offset() % CodeEntryAlignment == 0, "no further alignment needed"); 2545 2546 __ align(CodeEntryAlignment); 2547 address start = __ pc(); 2548 2549 __ enter(); // required for proper stackwalking of RuntimeStub frame 2550 2551 // bump this on entry, not on exit: 2552 inc_counter_np(SharedRuntime::_generic_array_copy_ctr); 2553 2554 //----------------------------------------------------------------------- 2555 // Assembler stub will be used for this call to arraycopy 2556 // if the following conditions are met: 2557 // 2558 // (1) src and dst must not be null. 2559 // (2) src_pos must not be negative. 2560 // (3) dst_pos must not be negative. 2561 // (4) length must not be negative. 2562 // (5) src klass and dst klass should be the same and not NULL. 2563 // (6) src and dst should be arrays. 2564 // (7) src_pos + length must not exceed length of src. 2565 // (8) dst_pos + length must not exceed length of dst. 2566 // 2567 2568 // if (src == NULL) return -1; 2569 __ testptr(src, src); // src oop 2570 size_t j1off = __ offset(); 2571 __ jccb(Assembler::zero, L_failed_0); 2572 2573 // if (src_pos < 0) return -1; 2574 __ testl(src_pos, src_pos); // src_pos (32-bits) 2575 __ jccb(Assembler::negative, L_failed_0); 2576 2577 // if (dst == NULL) return -1; 2578 __ testptr(dst, dst); // dst oop 2579 __ jccb(Assembler::zero, L_failed_0); 2580 2581 // if (dst_pos < 0) return -1; 2582 __ testl(dst_pos, dst_pos); // dst_pos (32-bits) 2583 size_t j4off = __ offset(); 2584 __ jccb(Assembler::negative, L_failed_0); 2585 2586 // The first four tests are very dense code, 2587 // but not quite dense enough to put four 2588 // jumps in a 16-byte instruction fetch buffer. 2589 // That's good, because some branch predicters 2590 // do not like jumps so close together. 2591 // Make sure of this. 2592 guarantee(((j1off ^ j4off) & ~15) != 0, "I$ line of 1st & 4th jumps"); 2593 2594 // registers used as temp 2595 const Register r11_length = r11; // elements count to copy 2596 const Register r10_src_klass = r10; // array klass 2597 2598 // if (length < 0) return -1; 2599 __ movl(r11_length, length); // length (elements count, 32-bits value) 2600 __ testl(r11_length, r11_length); 2601 __ jccb(Assembler::negative, L_failed_0); 2602 2603 __ load_klass(r10_src_klass, src); 2604 #ifdef ASSERT 2605 // assert(src->klass() != NULL); 2606 { 2607 BLOCK_COMMENT("assert klasses not null {"); 2608 Label L1, L2; 2609 __ testptr(r10_src_klass, r10_src_klass); 2610 __ jcc(Assembler::notZero, L2); // it is broken if klass is NULL 2611 __ bind(L1); 2612 __ stop("broken null klass"); 2613 __ bind(L2); 2614 __ load_klass(rax, dst); 2615 __ cmpq(rax, 0); 2616 __ jcc(Assembler::equal, L1); // this would be broken also 2617 BLOCK_COMMENT("} assert klasses not null done"); 2618 } 2619 #endif 2620 2621 // Load layout helper (32-bits) 2622 // 2623 // |array_tag| | header_size | element_type | |log2_element_size| 2624 // 32 30 24 16 8 2 0 2625 // 2626 // array_tag: typeArray = 0x3, objArray = 0x2, non-array = 0x0 2627 // 2628 2629 const int lh_offset = in_bytes(Klass::layout_helper_offset()); 2630 2631 // Handle objArrays completely differently... 2632 const jint objArray_lh = Klass::array_layout_helper(T_OBJECT); 2633 __ cmpl(Address(r10_src_klass, lh_offset), objArray_lh); 2634 __ jcc(Assembler::equal, L_objArray); 2635 2636 // if (src->klass() != dst->klass()) return -1; 2637 __ load_klass(rax, dst); 2638 __ cmpq(r10_src_klass, rax); 2639 __ jcc(Assembler::notEqual, L_failed); 2640 2641 const Register rax_lh = rax; // layout helper 2642 __ movl(rax_lh, Address(r10_src_klass, lh_offset)); 2643 2644 // if (!src->is_Array()) return -1; 2645 __ cmpl(rax_lh, Klass::_lh_neutral_value); 2646 __ jcc(Assembler::greaterEqual, L_failed); 2647 2648 // At this point, it is known to be a typeArray (array_tag 0x3). 2649 #ifdef ASSERT 2650 { 2651 BLOCK_COMMENT("assert primitive array {"); 2652 Label L; 2653 __ cmpl(rax_lh, (Klass::_lh_array_tag_type_value << Klass::_lh_array_tag_shift)); 2654 __ jcc(Assembler::greaterEqual, L); 2655 __ stop("must be a primitive array"); 2656 __ bind(L); 2657 BLOCK_COMMENT("} assert primitive array done"); 2658 } 2659 #endif 2660 2661 arraycopy_range_checks(src, src_pos, dst, dst_pos, r11_length, 2662 r10, L_failed); 2663 2664 // TypeArrayKlass 2665 // 2666 // src_addr = (src + array_header_in_bytes()) + (src_pos << log2elemsize); 2667 // dst_addr = (dst + array_header_in_bytes()) + (dst_pos << log2elemsize); 2668 // 2669 2670 const Register r10_offset = r10; // array offset 2671 const Register rax_elsize = rax_lh; // element size 2672 2673 __ movl(r10_offset, rax_lh); 2674 __ shrl(r10_offset, Klass::_lh_header_size_shift); 2675 __ andptr(r10_offset, Klass::_lh_header_size_mask); // array_offset 2676 __ addptr(src, r10_offset); // src array offset 2677 __ addptr(dst, r10_offset); // dst array offset 2678 BLOCK_COMMENT("choose copy loop based on element size"); 2679 __ andl(rax_lh, Klass::_lh_log2_element_size_mask); // rax_lh -> rax_elsize 2680 2681 // next registers should be set before the jump to corresponding stub 2682 const Register from = c_rarg0; // source array address 2683 const Register to = c_rarg1; // destination array address 2684 const Register count = c_rarg2; // elements count 2685 2686 // 'from', 'to', 'count' registers should be set in such order 2687 // since they are the same as 'src', 'src_pos', 'dst'. 2688 2689 __ BIND(L_copy_bytes); 2690 __ cmpl(rax_elsize, 0); 2691 __ jccb(Assembler::notEqual, L_copy_shorts); 2692 __ lea(from, Address(src, src_pos, Address::times_1, 0));// src_addr 2693 __ lea(to, Address(dst, dst_pos, Address::times_1, 0));// dst_addr 2694 __ movl2ptr(count, r11_length); // length 2695 __ jump(RuntimeAddress(byte_copy_entry)); 2696 2697 __ BIND(L_copy_shorts); 2698 __ cmpl(rax_elsize, LogBytesPerShort); 2699 __ jccb(Assembler::notEqual, L_copy_ints); 2700 __ lea(from, Address(src, src_pos, Address::times_2, 0));// src_addr 2701 __ lea(to, Address(dst, dst_pos, Address::times_2, 0));// dst_addr 2702 __ movl2ptr(count, r11_length); // length 2703 __ jump(RuntimeAddress(short_copy_entry)); 2704 2705 __ BIND(L_copy_ints); 2706 __ cmpl(rax_elsize, LogBytesPerInt); 2707 __ jccb(Assembler::notEqual, L_copy_longs); 2708 __ lea(from, Address(src, src_pos, Address::times_4, 0));// src_addr 2709 __ lea(to, Address(dst, dst_pos, Address::times_4, 0));// dst_addr 2710 __ movl2ptr(count, r11_length); // length 2711 __ jump(RuntimeAddress(int_copy_entry)); 2712 2713 __ BIND(L_copy_longs); 2714 #ifdef ASSERT 2715 { 2716 BLOCK_COMMENT("assert long copy {"); 2717 Label L; 2718 __ cmpl(rax_elsize, LogBytesPerLong); 2719 __ jcc(Assembler::equal, L); 2720 __ stop("must be long copy, but elsize is wrong"); 2721 __ bind(L); 2722 BLOCK_COMMENT("} assert long copy done"); 2723 } 2724 #endif 2725 __ lea(from, Address(src, src_pos, Address::times_8, 0));// src_addr 2726 __ lea(to, Address(dst, dst_pos, Address::times_8, 0));// dst_addr 2727 __ movl2ptr(count, r11_length); // length 2728 __ jump(RuntimeAddress(long_copy_entry)); 2729 2730 // ObjArrayKlass 2731 __ BIND(L_objArray); 2732 // live at this point: r10_src_klass, r11_length, src[_pos], dst[_pos] 2733 2734 Label L_plain_copy, L_checkcast_copy; 2735 // test array classes for subtyping 2736 __ load_klass(rax, dst); 2737 __ cmpq(r10_src_klass, rax); // usual case is exact equality 2738 __ jcc(Assembler::notEqual, L_checkcast_copy); 2739 2740 // Identically typed arrays can be copied without element-wise checks. 2741 arraycopy_range_checks(src, src_pos, dst, dst_pos, r11_length, 2742 r10, L_failed); 2743 2744 __ lea(from, Address(src, src_pos, TIMES_OOP, 2745 arrayOopDesc::base_offset_in_bytes(T_OBJECT))); // src_addr 2746 __ lea(to, Address(dst, dst_pos, TIMES_OOP, 2747 arrayOopDesc::base_offset_in_bytes(T_OBJECT))); // dst_addr 2748 __ movl2ptr(count, r11_length); // length 2749 __ BIND(L_plain_copy); 2750 __ jump(RuntimeAddress(oop_copy_entry)); 2751 2752 __ BIND(L_checkcast_copy); 2753 // live at this point: r10_src_klass, r11_length, rax (dst_klass) 2754 { 2755 // Before looking at dst.length, make sure dst is also an objArray. 2756 __ cmpl(Address(rax, lh_offset), objArray_lh); 2757 __ jcc(Assembler::notEqual, L_failed); 2758 2759 // It is safe to examine both src.length and dst.length. 2760 arraycopy_range_checks(src, src_pos, dst, dst_pos, r11_length, 2761 rax, L_failed); 2762 2763 const Register r11_dst_klass = r11; 2764 __ load_klass(r11_dst_klass, dst); // reload 2765 2766 // Marshal the base address arguments now, freeing registers. 2767 __ lea(from, Address(src, src_pos, TIMES_OOP, 2768 arrayOopDesc::base_offset_in_bytes(T_OBJECT))); 2769 __ lea(to, Address(dst, dst_pos, TIMES_OOP, 2770 arrayOopDesc::base_offset_in_bytes(T_OBJECT))); 2771 __ movl(count, length); // length (reloaded) 2772 Register sco_temp = c_rarg3; // this register is free now 2773 assert_different_registers(from, to, count, sco_temp, 2774 r11_dst_klass, r10_src_klass); 2775 assert_clean_int(count, sco_temp); 2776 2777 // Generate the type check. 2778 const int sco_offset = in_bytes(Klass::super_check_offset_offset()); 2779 __ movl(sco_temp, Address(r11_dst_klass, sco_offset)); 2780 assert_clean_int(sco_temp, rax); 2781 generate_type_check(r10_src_klass, sco_temp, r11_dst_klass, L_plain_copy); 2782 2783 // Fetch destination element klass from the ObjArrayKlass header. 2784 int ek_offset = in_bytes(ObjArrayKlass::element_klass_offset()); 2785 __ movptr(r11_dst_klass, Address(r11_dst_klass, ek_offset)); 2786 __ movl( sco_temp, Address(r11_dst_klass, sco_offset)); 2787 assert_clean_int(sco_temp, rax); 2788 2789 // the checkcast_copy loop needs two extra arguments: 2790 assert(c_rarg3 == sco_temp, "#3 already in place"); 2791 // Set up arguments for checkcast_copy_entry. 2792 setup_arg_regs(4); 2793 __ movptr(r8, r11_dst_klass); // dst.klass.element_klass, r8 is c_rarg4 on Linux/Solaris 2794 __ jump(RuntimeAddress(checkcast_copy_entry)); 2795 } 2796 2797 __ BIND(L_failed); 2798 __ xorptr(rax, rax); 2799 __ notptr(rax); // return -1 2800 __ leave(); // required for proper stackwalking of RuntimeStub frame 2801 __ ret(0); 2802 2803 return start; 2804 } 2805 2806 void generate_arraycopy_stubs() { 2807 address entry; 2808 address entry_jbyte_arraycopy; 2809 address entry_jshort_arraycopy; 2810 address entry_jint_arraycopy; 2811 address entry_oop_arraycopy; 2812 address entry_jlong_arraycopy; 2813 address entry_checkcast_arraycopy; 2814 2815 StubRoutines::_jbyte_disjoint_arraycopy = generate_disjoint_byte_copy(false, &entry, 2816 "jbyte_disjoint_arraycopy"); 2817 StubRoutines::_jbyte_arraycopy = generate_conjoint_byte_copy(false, entry, &entry_jbyte_arraycopy, 2818 "jbyte_arraycopy"); 2819 2820 StubRoutines::_jshort_disjoint_arraycopy = generate_disjoint_short_copy(false, &entry, 2821 "jshort_disjoint_arraycopy"); 2822 StubRoutines::_jshort_arraycopy = generate_conjoint_short_copy(false, entry, &entry_jshort_arraycopy, 2823 "jshort_arraycopy"); 2824 2825 StubRoutines::_jint_disjoint_arraycopy = generate_disjoint_int_oop_copy(false, false, &entry, 2826 "jint_disjoint_arraycopy"); 2827 StubRoutines::_jint_arraycopy = generate_conjoint_int_oop_copy(false, false, entry, 2828 &entry_jint_arraycopy, "jint_arraycopy"); 2829 2830 StubRoutines::_jlong_disjoint_arraycopy = generate_disjoint_long_oop_copy(false, false, &entry, 2831 "jlong_disjoint_arraycopy"); 2832 StubRoutines::_jlong_arraycopy = generate_conjoint_long_oop_copy(false, false, entry, 2833 &entry_jlong_arraycopy, "jlong_arraycopy"); 2834 2835 2836 if (UseCompressedOops) { 2837 StubRoutines::_oop_disjoint_arraycopy = generate_disjoint_int_oop_copy(false, true, &entry, 2838 "oop_disjoint_arraycopy"); 2839 StubRoutines::_oop_arraycopy = generate_conjoint_int_oop_copy(false, true, entry, 2840 &entry_oop_arraycopy, "oop_arraycopy"); 2841 StubRoutines::_oop_disjoint_arraycopy_uninit = generate_disjoint_int_oop_copy(false, true, &entry, 2842 "oop_disjoint_arraycopy_uninit", 2843 /*dest_uninitialized*/true); 2844 StubRoutines::_oop_arraycopy_uninit = generate_conjoint_int_oop_copy(false, true, entry, 2845 NULL, "oop_arraycopy_uninit", 2846 /*dest_uninitialized*/true); 2847 } else { 2848 StubRoutines::_oop_disjoint_arraycopy = generate_disjoint_long_oop_copy(false, true, &entry, 2849 "oop_disjoint_arraycopy"); 2850 StubRoutines::_oop_arraycopy = generate_conjoint_long_oop_copy(false, true, entry, 2851 &entry_oop_arraycopy, "oop_arraycopy"); 2852 StubRoutines::_oop_disjoint_arraycopy_uninit = generate_disjoint_long_oop_copy(false, true, &entry, 2853 "oop_disjoint_arraycopy_uninit", 2854 /*dest_uninitialized*/true); 2855 StubRoutines::_oop_arraycopy_uninit = generate_conjoint_long_oop_copy(false, true, entry, 2856 NULL, "oop_arraycopy_uninit", 2857 /*dest_uninitialized*/true); 2858 } 2859 2860 StubRoutines::_checkcast_arraycopy = generate_checkcast_copy("checkcast_arraycopy", &entry_checkcast_arraycopy); 2861 StubRoutines::_checkcast_arraycopy_uninit = generate_checkcast_copy("checkcast_arraycopy_uninit", NULL, 2862 /*dest_uninitialized*/true); 2863 2864 StubRoutines::_unsafe_arraycopy = generate_unsafe_copy("unsafe_arraycopy", 2865 entry_jbyte_arraycopy, 2866 entry_jshort_arraycopy, 2867 entry_jint_arraycopy, 2868 entry_jlong_arraycopy); 2869 StubRoutines::_generic_arraycopy = generate_generic_copy("generic_arraycopy", 2870 entry_jbyte_arraycopy, 2871 entry_jshort_arraycopy, 2872 entry_jint_arraycopy, 2873 entry_oop_arraycopy, 2874 entry_jlong_arraycopy, 2875 entry_checkcast_arraycopy); 2876 2877 StubRoutines::_jbyte_fill = generate_fill(T_BYTE, false, "jbyte_fill"); 2878 StubRoutines::_jshort_fill = generate_fill(T_SHORT, false, "jshort_fill"); 2879 StubRoutines::_jint_fill = generate_fill(T_INT, false, "jint_fill"); 2880 StubRoutines::_arrayof_jbyte_fill = generate_fill(T_BYTE, true, "arrayof_jbyte_fill"); 2881 StubRoutines::_arrayof_jshort_fill = generate_fill(T_SHORT, true, "arrayof_jshort_fill"); 2882 StubRoutines::_arrayof_jint_fill = generate_fill(T_INT, true, "arrayof_jint_fill"); 2883 2884 // We don't generate specialized code for HeapWord-aligned source 2885 // arrays, so just use the code we've already generated 2886 StubRoutines::_arrayof_jbyte_disjoint_arraycopy = StubRoutines::_jbyte_disjoint_arraycopy; 2887 StubRoutines::_arrayof_jbyte_arraycopy = StubRoutines::_jbyte_arraycopy; 2888 2889 StubRoutines::_arrayof_jshort_disjoint_arraycopy = StubRoutines::_jshort_disjoint_arraycopy; 2890 StubRoutines::_arrayof_jshort_arraycopy = StubRoutines::_jshort_arraycopy; 2891 2892 StubRoutines::_arrayof_jint_disjoint_arraycopy = StubRoutines::_jint_disjoint_arraycopy; 2893 StubRoutines::_arrayof_jint_arraycopy = StubRoutines::_jint_arraycopy; 2894 2895 StubRoutines::_arrayof_jlong_disjoint_arraycopy = StubRoutines::_jlong_disjoint_arraycopy; 2896 StubRoutines::_arrayof_jlong_arraycopy = StubRoutines::_jlong_arraycopy; 2897 2898 StubRoutines::_arrayof_oop_disjoint_arraycopy = StubRoutines::_oop_disjoint_arraycopy; 2899 StubRoutines::_arrayof_oop_arraycopy = StubRoutines::_oop_arraycopy; 2900 2901 StubRoutines::_arrayof_oop_disjoint_arraycopy_uninit = StubRoutines::_oop_disjoint_arraycopy_uninit; 2902 StubRoutines::_arrayof_oop_arraycopy_uninit = StubRoutines::_oop_arraycopy_uninit; 2903 } 2904 2905 void generate_math_stubs() { 2906 { 2907 StubCodeMark mark(this, "StubRoutines", "log"); 2908 StubRoutines::_intrinsic_log = (double (*)(double)) __ pc(); 2909 2910 __ subq(rsp, 8); 2911 __ movdbl(Address(rsp, 0), xmm0); 2912 __ fld_d(Address(rsp, 0)); 2913 __ flog(); 2914 __ fstp_d(Address(rsp, 0)); 2915 __ movdbl(xmm0, Address(rsp, 0)); 2916 __ addq(rsp, 8); 2917 __ ret(0); 2918 } 2919 { 2920 StubCodeMark mark(this, "StubRoutines", "log10"); 2921 StubRoutines::_intrinsic_log10 = (double (*)(double)) __ pc(); 2922 2923 __ subq(rsp, 8); 2924 __ movdbl(Address(rsp, 0), xmm0); 2925 __ fld_d(Address(rsp, 0)); 2926 __ flog10(); 2927 __ fstp_d(Address(rsp, 0)); 2928 __ movdbl(xmm0, Address(rsp, 0)); 2929 __ addq(rsp, 8); 2930 __ ret(0); 2931 } 2932 { 2933 StubCodeMark mark(this, "StubRoutines", "sin"); 2934 StubRoutines::_intrinsic_sin = (double (*)(double)) __ pc(); 2935 2936 __ subq(rsp, 8); 2937 __ movdbl(Address(rsp, 0), xmm0); 2938 __ fld_d(Address(rsp, 0)); 2939 __ trigfunc('s'); 2940 __ fstp_d(Address(rsp, 0)); 2941 __ movdbl(xmm0, Address(rsp, 0)); 2942 __ addq(rsp, 8); 2943 __ ret(0); 2944 } 2945 { 2946 StubCodeMark mark(this, "StubRoutines", "cos"); 2947 StubRoutines::_intrinsic_cos = (double (*)(double)) __ pc(); 2948 2949 __ subq(rsp, 8); 2950 __ movdbl(Address(rsp, 0), xmm0); 2951 __ fld_d(Address(rsp, 0)); 2952 __ trigfunc('c'); 2953 __ fstp_d(Address(rsp, 0)); 2954 __ movdbl(xmm0, Address(rsp, 0)); 2955 __ addq(rsp, 8); 2956 __ ret(0); 2957 } 2958 { 2959 StubCodeMark mark(this, "StubRoutines", "tan"); 2960 StubRoutines::_intrinsic_tan = (double (*)(double)) __ pc(); 2961 2962 __ subq(rsp, 8); 2963 __ movdbl(Address(rsp, 0), xmm0); 2964 __ fld_d(Address(rsp, 0)); 2965 __ trigfunc('t'); 2966 __ fstp_d(Address(rsp, 0)); 2967 __ movdbl(xmm0, Address(rsp, 0)); 2968 __ addq(rsp, 8); 2969 __ ret(0); 2970 } 2971 { 2972 StubCodeMark mark(this, "StubRoutines", "exp"); 2973 StubRoutines::_intrinsic_exp = (double (*)(double)) __ pc(); 2974 2975 __ subq(rsp, 8); 2976 __ movdbl(Address(rsp, 0), xmm0); 2977 __ fld_d(Address(rsp, 0)); 2978 __ exp_with_fallback(0); 2979 __ fstp_d(Address(rsp, 0)); 2980 __ movdbl(xmm0, Address(rsp, 0)); 2981 __ addq(rsp, 8); 2982 __ ret(0); 2983 } 2984 { 2985 StubCodeMark mark(this, "StubRoutines", "pow"); 2986 StubRoutines::_intrinsic_pow = (double (*)(double,double)) __ pc(); 2987 2988 __ subq(rsp, 8); 2989 __ movdbl(Address(rsp, 0), xmm1); 2990 __ fld_d(Address(rsp, 0)); 2991 __ movdbl(Address(rsp, 0), xmm0); 2992 __ fld_d(Address(rsp, 0)); 2993 __ pow_with_fallback(0); 2994 __ fstp_d(Address(rsp, 0)); 2995 __ movdbl(xmm0, Address(rsp, 0)); 2996 __ addq(rsp, 8); 2997 __ ret(0); 2998 } 2999 } 3000 3001 // AES intrinsic stubs 3002 enum {AESBlockSize = 16}; 3003 3004 address generate_key_shuffle_mask() { 3005 __ align(16); 3006 StubCodeMark mark(this, "StubRoutines", "key_shuffle_mask"); 3007 address start = __ pc(); 3008 __ emit_data64( 0x0405060700010203, relocInfo::none ); 3009 __ emit_data64( 0x0c0d0e0f08090a0b, relocInfo::none ); 3010 return start; 3011 } 3012 3013 // Utility routine for loading a 128-bit key word in little endian format 3014 // can optionally specify that the shuffle mask is already in an xmmregister 3015 void load_key(XMMRegister xmmdst, Register key, int offset, XMMRegister xmm_shuf_mask=NULL) { 3016 __ movdqu(xmmdst, Address(key, offset)); 3017 if (xmm_shuf_mask != NULL) { 3018 __ pshufb(xmmdst, xmm_shuf_mask); 3019 } else { 3020 __ pshufb(xmmdst, ExternalAddress(StubRoutines::x86::key_shuffle_mask_addr())); 3021 } 3022 } 3023 3024 // Arguments: 3025 // 3026 // Inputs: 3027 // c_rarg0 - source byte array address 3028 // c_rarg1 - destination byte array address 3029 // c_rarg2 - K (key) in little endian int array 3030 // 3031 address generate_aescrypt_encryptBlock() { 3032 assert(UseAES, "need AES instructions and misaligned SSE support"); 3033 __ align(CodeEntryAlignment); 3034 StubCodeMark mark(this, "StubRoutines", "aescrypt_encryptBlock"); 3035 Label L_doLast; 3036 address start = __ pc(); 3037 3038 const Register from = c_rarg0; // source array address 3039 const Register to = c_rarg1; // destination array address 3040 const Register key = c_rarg2; // key array address 3041 const Register keylen = rax; 3042 3043 const XMMRegister xmm_result = xmm0; 3044 const XMMRegister xmm_key_shuf_mask = xmm1; 3045 // On win64 xmm6-xmm15 must be preserved so don't use them. 3046 const XMMRegister xmm_temp1 = xmm2; 3047 const XMMRegister xmm_temp2 = xmm3; 3048 const XMMRegister xmm_temp3 = xmm4; 3049 const XMMRegister xmm_temp4 = xmm5; 3050 3051 __ enter(); // required for proper stackwalking of RuntimeStub frame 3052 3053 // keylen could be only {11, 13, 15} * 4 = {44, 52, 60} 3054 __ movl(keylen, Address(key, arrayOopDesc::length_offset_in_bytes() - arrayOopDesc::base_offset_in_bytes(T_INT))); 3055 3056 __ movdqu(xmm_key_shuf_mask, ExternalAddress(StubRoutines::x86::key_shuffle_mask_addr())); 3057 __ movdqu(xmm_result, Address(from, 0)); // get 16 bytes of input 3058 3059 // For encryption, the java expanded key ordering is just what we need 3060 // we don't know if the key is aligned, hence not using load-execute form 3061 3062 load_key(xmm_temp1, key, 0x00, xmm_key_shuf_mask); 3063 __ pxor(xmm_result, xmm_temp1); 3064 3065 load_key(xmm_temp1, key, 0x10, xmm_key_shuf_mask); 3066 load_key(xmm_temp2, key, 0x20, xmm_key_shuf_mask); 3067 load_key(xmm_temp3, key, 0x30, xmm_key_shuf_mask); 3068 load_key(xmm_temp4, key, 0x40, xmm_key_shuf_mask); 3069 3070 __ aesenc(xmm_result, xmm_temp1); 3071 __ aesenc(xmm_result, xmm_temp2); 3072 __ aesenc(xmm_result, xmm_temp3); 3073 __ aesenc(xmm_result, xmm_temp4); 3074 3075 load_key(xmm_temp1, key, 0x50, xmm_key_shuf_mask); 3076 load_key(xmm_temp2, key, 0x60, xmm_key_shuf_mask); 3077 load_key(xmm_temp3, key, 0x70, xmm_key_shuf_mask); 3078 load_key(xmm_temp4, key, 0x80, xmm_key_shuf_mask); 3079 3080 __ aesenc(xmm_result, xmm_temp1); 3081 __ aesenc(xmm_result, xmm_temp2); 3082 __ aesenc(xmm_result, xmm_temp3); 3083 __ aesenc(xmm_result, xmm_temp4); 3084 3085 load_key(xmm_temp1, key, 0x90, xmm_key_shuf_mask); 3086 load_key(xmm_temp2, key, 0xa0, xmm_key_shuf_mask); 3087 3088 __ cmpl(keylen, 44); 3089 __ jccb(Assembler::equal, L_doLast); 3090 3091 __ aesenc(xmm_result, xmm_temp1); 3092 __ aesenc(xmm_result, xmm_temp2); 3093 3094 load_key(xmm_temp1, key, 0xb0, xmm_key_shuf_mask); 3095 load_key(xmm_temp2, key, 0xc0, xmm_key_shuf_mask); 3096 3097 __ cmpl(keylen, 52); 3098 __ jccb(Assembler::equal, L_doLast); 3099 3100 __ aesenc(xmm_result, xmm_temp1); 3101 __ aesenc(xmm_result, xmm_temp2); 3102 3103 load_key(xmm_temp1, key, 0xd0, xmm_key_shuf_mask); 3104 load_key(xmm_temp2, key, 0xe0, xmm_key_shuf_mask); 3105 3106 __ BIND(L_doLast); 3107 __ aesenc(xmm_result, xmm_temp1); 3108 __ aesenclast(xmm_result, xmm_temp2); 3109 __ movdqu(Address(to, 0), xmm_result); // store the result 3110 __ xorptr(rax, rax); // return 0 3111 __ leave(); // required for proper stackwalking of RuntimeStub frame 3112 __ ret(0); 3113 3114 return start; 3115 } 3116 3117 3118 // Arguments: 3119 // 3120 // Inputs: 3121 // c_rarg0 - source byte array address 3122 // c_rarg1 - destination byte array address 3123 // c_rarg2 - K (key) in little endian int array 3124 // 3125 address generate_aescrypt_decryptBlock() { 3126 assert(UseAES, "need AES instructions and misaligned SSE support"); 3127 __ align(CodeEntryAlignment); 3128 StubCodeMark mark(this, "StubRoutines", "aescrypt_decryptBlock"); 3129 Label L_doLast; 3130 address start = __ pc(); 3131 3132 const Register from = c_rarg0; // source array address 3133 const Register to = c_rarg1; // destination array address 3134 const Register key = c_rarg2; // key array address 3135 const Register keylen = rax; 3136 3137 const XMMRegister xmm_result = xmm0; 3138 const XMMRegister xmm_key_shuf_mask = xmm1; 3139 // On win64 xmm6-xmm15 must be preserved so don't use them. 3140 const XMMRegister xmm_temp1 = xmm2; 3141 const XMMRegister xmm_temp2 = xmm3; 3142 const XMMRegister xmm_temp3 = xmm4; 3143 const XMMRegister xmm_temp4 = xmm5; 3144 3145 __ enter(); // required for proper stackwalking of RuntimeStub frame 3146 3147 // keylen could be only {11, 13, 15} * 4 = {44, 52, 60} 3148 __ movl(keylen, Address(key, arrayOopDesc::length_offset_in_bytes() - arrayOopDesc::base_offset_in_bytes(T_INT))); 3149 3150 __ movdqu(xmm_key_shuf_mask, ExternalAddress(StubRoutines::x86::key_shuffle_mask_addr())); 3151 __ movdqu(xmm_result, Address(from, 0)); 3152 3153 // for decryption java expanded key ordering is rotated one position from what we want 3154 // so we start from 0x10 here and hit 0x00 last 3155 // we don't know if the key is aligned, hence not using load-execute form 3156 load_key(xmm_temp1, key, 0x10, xmm_key_shuf_mask); 3157 load_key(xmm_temp2, key, 0x20, xmm_key_shuf_mask); 3158 load_key(xmm_temp3, key, 0x30, xmm_key_shuf_mask); 3159 load_key(xmm_temp4, key, 0x40, xmm_key_shuf_mask); 3160 3161 __ pxor (xmm_result, xmm_temp1); 3162 __ aesdec(xmm_result, xmm_temp2); 3163 __ aesdec(xmm_result, xmm_temp3); 3164 __ aesdec(xmm_result, xmm_temp4); 3165 3166 load_key(xmm_temp1, key, 0x50, xmm_key_shuf_mask); 3167 load_key(xmm_temp2, key, 0x60, xmm_key_shuf_mask); 3168 load_key(xmm_temp3, key, 0x70, xmm_key_shuf_mask); 3169 load_key(xmm_temp4, key, 0x80, xmm_key_shuf_mask); 3170 3171 __ aesdec(xmm_result, xmm_temp1); 3172 __ aesdec(xmm_result, xmm_temp2); 3173 __ aesdec(xmm_result, xmm_temp3); 3174 __ aesdec(xmm_result, xmm_temp4); 3175 3176 load_key(xmm_temp1, key, 0x90, xmm_key_shuf_mask); 3177 load_key(xmm_temp2, key, 0xa0, xmm_key_shuf_mask); 3178 load_key(xmm_temp3, key, 0x00, xmm_key_shuf_mask); 3179 3180 __ cmpl(keylen, 44); 3181 __ jccb(Assembler::equal, L_doLast); 3182 3183 __ aesdec(xmm_result, xmm_temp1); 3184 __ aesdec(xmm_result, xmm_temp2); 3185 3186 load_key(xmm_temp1, key, 0xb0, xmm_key_shuf_mask); 3187 load_key(xmm_temp2, key, 0xc0, xmm_key_shuf_mask); 3188 3189 __ cmpl(keylen, 52); 3190 __ jccb(Assembler::equal, L_doLast); 3191 3192 __ aesdec(xmm_result, xmm_temp1); 3193 __ aesdec(xmm_result, xmm_temp2); 3194 3195 load_key(xmm_temp1, key, 0xd0, xmm_key_shuf_mask); 3196 load_key(xmm_temp2, key, 0xe0, xmm_key_shuf_mask); 3197 3198 __ BIND(L_doLast); 3199 __ aesdec(xmm_result, xmm_temp1); 3200 __ aesdec(xmm_result, xmm_temp2); 3201 3202 // for decryption the aesdeclast operation is always on key+0x00 3203 __ aesdeclast(xmm_result, xmm_temp3); 3204 __ movdqu(Address(to, 0), xmm_result); // store the result 3205 __ xorptr(rax, rax); // return 0 3206 __ leave(); // required for proper stackwalking of RuntimeStub frame 3207 __ ret(0); 3208 3209 return start; 3210 } 3211 3212 3213 // Arguments: 3214 // 3215 // Inputs: 3216 // c_rarg0 - source byte array address 3217 // c_rarg1 - destination byte array address 3218 // c_rarg2 - K (key) in little endian int array 3219 // c_rarg3 - r vector byte array address 3220 // c_rarg4 - input length 3221 // 3222 // Output: 3223 // rax - input length 3224 // 3225 address generate_cipherBlockChaining_encryptAESCrypt() { 3226 assert(UseAES, "need AES instructions and misaligned SSE support"); 3227 __ align(CodeEntryAlignment); 3228 StubCodeMark mark(this, "StubRoutines", "cipherBlockChaining_encryptAESCrypt"); 3229 address start = __ pc(); 3230 3231 Label L_exit, L_key_192_256, L_key_256, L_loopTop_128, L_loopTop_192, L_loopTop_256; 3232 const Register from = c_rarg0; // source array address 3233 const Register to = c_rarg1; // destination array address 3234 const Register key = c_rarg2; // key array address 3235 const Register rvec = c_rarg3; // r byte array initialized from initvector array address 3236 // and left with the results of the last encryption block 3237 #ifndef _WIN64 3238 const Register len_reg = c_rarg4; // src len (must be multiple of blocksize 16) 3239 #else 3240 const Address len_mem(rbp, 6 * wordSize); // length is on stack on Win64 3241 const Register len_reg = r10; // pick the first volatile windows register 3242 #endif 3243 const Register pos = rax; 3244 3245 // xmm register assignments for the loops below 3246 const XMMRegister xmm_result = xmm0; 3247 const XMMRegister xmm_temp = xmm1; 3248 // keys 0-10 preloaded into xmm2-xmm12 3249 const int XMM_REG_NUM_KEY_FIRST = 2; 3250 const int XMM_REG_NUM_KEY_LAST = 15; 3251 const XMMRegister xmm_key0 = as_XMMRegister(XMM_REG_NUM_KEY_FIRST); 3252 const XMMRegister xmm_key10 = as_XMMRegister(XMM_REG_NUM_KEY_FIRST+10); 3253 const XMMRegister xmm_key11 = as_XMMRegister(XMM_REG_NUM_KEY_FIRST+11); 3254 const XMMRegister xmm_key12 = as_XMMRegister(XMM_REG_NUM_KEY_FIRST+12); 3255 const XMMRegister xmm_key13 = as_XMMRegister(XMM_REG_NUM_KEY_FIRST+13); 3256 3257 __ enter(); // required for proper stackwalking of RuntimeStub frame 3258 3259 #ifdef _WIN64 3260 // on win64, fill len_reg from stack position 3261 __ movl(len_reg, len_mem); 3262 // save the xmm registers which must be preserved 6-15 3263 __ subptr(rsp, -rsp_after_call_off * wordSize); 3264 for (int i = 6; i <= XMM_REG_NUM_KEY_LAST; i++) { 3265 __ movdqu(xmm_save(i), as_XMMRegister(i)); 3266 } 3267 #else 3268 __ push(len_reg); // Save 3269 #endif 3270 3271 const XMMRegister xmm_key_shuf_mask = xmm_temp; // used temporarily to swap key bytes up front 3272 __ movdqu(xmm_key_shuf_mask, ExternalAddress(StubRoutines::x86::key_shuffle_mask_addr())); 3273 // load up xmm regs xmm2 thru xmm12 with key 0x00 - 0xa0 3274 for (int rnum = XMM_REG_NUM_KEY_FIRST, offset = 0x00; rnum <= XMM_REG_NUM_KEY_FIRST+10; rnum++) { 3275 load_key(as_XMMRegister(rnum), key, offset, xmm_key_shuf_mask); 3276 offset += 0x10; 3277 } 3278 __ movdqu(xmm_result, Address(rvec, 0x00)); // initialize xmm_result with r vec 3279 3280 // now split to different paths depending on the keylen (len in ints of AESCrypt.KLE array (52=192, or 60=256)) 3281 __ movl(rax, Address(key, arrayOopDesc::length_offset_in_bytes() - arrayOopDesc::base_offset_in_bytes(T_INT))); 3282 __ cmpl(rax, 44); 3283 __ jcc(Assembler::notEqual, L_key_192_256); 3284 3285 // 128 bit code follows here 3286 __ movptr(pos, 0); 3287 __ align(OptoLoopAlignment); 3288 3289 __ BIND(L_loopTop_128); 3290 __ movdqu(xmm_temp, Address(from, pos, Address::times_1, 0)); // get next 16 bytes of input 3291 __ pxor (xmm_result, xmm_temp); // xor with the current r vector 3292 __ pxor (xmm_result, xmm_key0); // do the aes rounds 3293 for (int rnum = XMM_REG_NUM_KEY_FIRST + 1; rnum <= XMM_REG_NUM_KEY_FIRST + 9; rnum++) { 3294 __ aesenc(xmm_result, as_XMMRegister(rnum)); 3295 } 3296 __ aesenclast(xmm_result, xmm_key10); 3297 __ movdqu(Address(to, pos, Address::times_1, 0), xmm_result); // store into the next 16 bytes of output 3298 // no need to store r to memory until we exit 3299 __ addptr(pos, AESBlockSize); 3300 __ subptr(len_reg, AESBlockSize); 3301 __ jcc(Assembler::notEqual, L_loopTop_128); 3302 3303 __ BIND(L_exit); 3304 __ movdqu(Address(rvec, 0), xmm_result); // final value of r stored in rvec of CipherBlockChaining object 3305 3306 #ifdef _WIN64 3307 // restore xmm regs belonging to calling function 3308 for (int i = 6; i <= XMM_REG_NUM_KEY_LAST; i++) { 3309 __ movdqu(as_XMMRegister(i), xmm_save(i)); 3310 } 3311 __ movl(rax, len_mem); 3312 #else 3313 __ pop(rax); // return length 3314 #endif 3315 __ leave(); // required for proper stackwalking of RuntimeStub frame 3316 __ ret(0); 3317 3318 __ BIND(L_key_192_256); 3319 // here rax = len in ints of AESCrypt.KLE array (52=192, or 60=256) 3320 load_key(xmm_key11, key, 0xb0, xmm_key_shuf_mask); 3321 load_key(xmm_key12, key, 0xc0, xmm_key_shuf_mask); 3322 __ cmpl(rax, 52); 3323 __ jcc(Assembler::notEqual, L_key_256); 3324 3325 // 192-bit code follows here (could be changed to use more xmm registers) 3326 __ movptr(pos, 0); 3327 __ align(OptoLoopAlignment); 3328 3329 __ BIND(L_loopTop_192); 3330 __ movdqu(xmm_temp, Address(from, pos, Address::times_1, 0)); // get next 16 bytes of input 3331 __ pxor (xmm_result, xmm_temp); // xor with the current r vector 3332 __ pxor (xmm_result, xmm_key0); // do the aes rounds 3333 for (int rnum = XMM_REG_NUM_KEY_FIRST + 1; rnum <= XMM_REG_NUM_KEY_FIRST + 11; rnum++) { 3334 __ aesenc(xmm_result, as_XMMRegister(rnum)); 3335 } 3336 __ aesenclast(xmm_result, xmm_key12); 3337 __ movdqu(Address(to, pos, Address::times_1, 0), xmm_result); // store into the next 16 bytes of output 3338 // no need to store r to memory until we exit 3339 __ addptr(pos, AESBlockSize); 3340 __ subptr(len_reg, AESBlockSize); 3341 __ jcc(Assembler::notEqual, L_loopTop_192); 3342 __ jmp(L_exit); 3343 3344 __ BIND(L_key_256); 3345 // 256-bit code follows here (could be changed to use more xmm registers) 3346 load_key(xmm_key13, key, 0xd0, xmm_key_shuf_mask); 3347 __ movptr(pos, 0); 3348 __ align(OptoLoopAlignment); 3349 3350 __ BIND(L_loopTop_256); 3351 __ movdqu(xmm_temp, Address(from, pos, Address::times_1, 0)); // get next 16 bytes of input 3352 __ pxor (xmm_result, xmm_temp); // xor with the current r vector 3353 __ pxor (xmm_result, xmm_key0); // do the aes rounds 3354 for (int rnum = XMM_REG_NUM_KEY_FIRST + 1; rnum <= XMM_REG_NUM_KEY_FIRST + 13; rnum++) { 3355 __ aesenc(xmm_result, as_XMMRegister(rnum)); 3356 } 3357 load_key(xmm_temp, key, 0xe0); 3358 __ aesenclast(xmm_result, xmm_temp); 3359 __ movdqu(Address(to, pos, Address::times_1, 0), xmm_result); // store into the next 16 bytes of output 3360 // no need to store r to memory until we exit 3361 __ addptr(pos, AESBlockSize); 3362 __ subptr(len_reg, AESBlockSize); 3363 __ jcc(Assembler::notEqual, L_loopTop_256); 3364 __ jmp(L_exit); 3365 3366 return start; 3367 } 3368 3369 // Safefetch stubs. 3370 void generate_safefetch(const char* name, int size, address* entry, 3371 address* fault_pc, address* continuation_pc) { 3372 // safefetch signatures: 3373 // int SafeFetch32(int* adr, int errValue); 3374 // intptr_t SafeFetchN (intptr_t* adr, intptr_t errValue); 3375 // 3376 // arguments: 3377 // c_rarg0 = adr 3378 // c_rarg1 = errValue 3379 // 3380 // result: 3381 // PPC_RET = *adr or errValue 3382 3383 StubCodeMark mark(this, "StubRoutines", name); 3384 3385 // Entry point, pc or function descriptor. 3386 *entry = __ pc(); 3387 3388 // Load *adr into c_rarg1, may fault. 3389 *fault_pc = __ pc(); 3390 switch (size) { 3391 case 4: 3392 // int32_t 3393 __ movl(c_rarg1, Address(c_rarg0, 0)); 3394 break; 3395 case 8: 3396 // int64_t 3397 __ movq(c_rarg1, Address(c_rarg0, 0)); 3398 break; 3399 default: 3400 ShouldNotReachHere(); 3401 } 3402 3403 // return errValue or *adr 3404 *continuation_pc = __ pc(); 3405 __ movq(rax, c_rarg1); 3406 __ ret(0); 3407 } 3408 3409 // This is a version of CBC/AES Decrypt which does 4 blocks in a loop at a time 3410 // to hide instruction latency 3411 // 3412 // Arguments: 3413 // 3414 // Inputs: 3415 // c_rarg0 - source byte array address 3416 // c_rarg1 - destination byte array address 3417 // c_rarg2 - K (key) in little endian int array 3418 // c_rarg3 - r vector byte array address 3419 // c_rarg4 - input length 3420 // 3421 // Output: 3422 // rax - input length 3423 // 3424 3425 address generate_cipherBlockChaining_decryptAESCrypt_Parallel() { 3426 assert(UseAES, "need AES instructions and misaligned SSE support"); 3427 __ align(CodeEntryAlignment); 3428 StubCodeMark mark(this, "StubRoutines", "cipherBlockChaining_decryptAESCrypt"); 3429 address start = __ pc(); 3430 3431 Label L_exit, L_key_192_256, L_key_256; 3432 Label L_singleBlock_loopTop_128, L_multiBlock_loopTop_128; 3433 Label L_singleBlock_loopTop_192, L_singleBlock_loopTop_256; 3434 const Register from = c_rarg0; // source array address 3435 const Register to = c_rarg1; // destination array address 3436 const Register key = c_rarg2; // key array address 3437 const Register rvec = c_rarg3; // r byte array initialized from initvector array address 3438 // and left with the results of the last encryption block 3439 #ifndef _WIN64 3440 const Register len_reg = c_rarg4; // src len (must be multiple of blocksize 16) 3441 #else 3442 const Address len_mem(rbp, 6 * wordSize); // length is on stack on Win64 3443 const Register len_reg = r10; // pick the first volatile windows register 3444 #endif 3445 const Register pos = rax; 3446 3447 // keys 0-10 preloaded into xmm2-xmm12 3448 const int XMM_REG_NUM_KEY_FIRST = 5; 3449 const int XMM_REG_NUM_KEY_LAST = 15; 3450 const XMMRegister xmm_key_first = as_XMMRegister(XMM_REG_NUM_KEY_FIRST); 3451 const XMMRegister xmm_key_last = as_XMMRegister(XMM_REG_NUM_KEY_LAST); 3452 3453 __ enter(); // required for proper stackwalking of RuntimeStub frame 3454 3455 #ifdef _WIN64 3456 // on win64, fill len_reg from stack position 3457 __ movl(len_reg, len_mem); 3458 // save the xmm registers which must be preserved 6-15 3459 __ subptr(rsp, -rsp_after_call_off * wordSize); 3460 for (int i = 6; i <= XMM_REG_NUM_KEY_LAST; i++) { 3461 __ movdqu(xmm_save(i), as_XMMRegister(i)); 3462 } 3463 #else 3464 __ push(len_reg); // Save 3465 #endif 3466 3467 // the java expanded key ordering is rotated one position from what we want 3468 // so we start from 0x10 here and hit 0x00 last 3469 const XMMRegister xmm_key_shuf_mask = xmm1; // used temporarily to swap key bytes up front 3470 __ movdqu(xmm_key_shuf_mask, ExternalAddress(StubRoutines::x86::key_shuffle_mask_addr())); 3471 // load up xmm regs 5 thru 15 with key 0x10 - 0xa0 - 0x00 3472 for (int rnum = XMM_REG_NUM_KEY_FIRST, offset = 0x10; rnum < XMM_REG_NUM_KEY_LAST; rnum++) { 3473 load_key(as_XMMRegister(rnum), key, offset, xmm_key_shuf_mask); 3474 offset += 0x10; 3475 } 3476 load_key(xmm_key_last, key, 0x00, xmm_key_shuf_mask); 3477 3478 const XMMRegister xmm_prev_block_cipher = xmm1; // holds cipher of previous block 3479 3480 // registers holding the four results in the parallelized loop 3481 const XMMRegister xmm_result0 = xmm0; 3482 const XMMRegister xmm_result1 = xmm2; 3483 const XMMRegister xmm_result2 = xmm3; 3484 const XMMRegister xmm_result3 = xmm4; 3485 3486 __ movdqu(xmm_prev_block_cipher, Address(rvec, 0x00)); // initialize with initial rvec 3487 3488 // now split to different paths depending on the keylen (len in ints of AESCrypt.KLE array (52=192, or 60=256)) 3489 __ movl(rax, Address(key, arrayOopDesc::length_offset_in_bytes() - arrayOopDesc::base_offset_in_bytes(T_INT))); 3490 __ cmpl(rax, 44); 3491 __ jcc(Assembler::notEqual, L_key_192_256); 3492 3493 3494 // 128-bit code follows here, parallelized 3495 __ movptr(pos, 0); 3496 __ align(OptoLoopAlignment); 3497 __ BIND(L_multiBlock_loopTop_128); 3498 __ cmpptr(len_reg, 4*AESBlockSize); // see if at least 4 blocks left 3499 __ jcc(Assembler::less, L_singleBlock_loopTop_128); 3500 3501 __ movdqu(xmm_result0, Address(from, pos, Address::times_1, 0*AESBlockSize)); // get next 4 blocks into xmmresult registers 3502 __ movdqu(xmm_result1, Address(from, pos, Address::times_1, 1*AESBlockSize)); 3503 __ movdqu(xmm_result2, Address(from, pos, Address::times_1, 2*AESBlockSize)); 3504 __ movdqu(xmm_result3, Address(from, pos, Address::times_1, 3*AESBlockSize)); 3505 3506 #define DoFour(opc, src_reg) \ 3507 __ opc(xmm_result0, src_reg); \ 3508 __ opc(xmm_result1, src_reg); \ 3509 __ opc(xmm_result2, src_reg); \ 3510 __ opc(xmm_result3, src_reg); 3511 3512 DoFour(pxor, xmm_key_first); 3513 for (int rnum = XMM_REG_NUM_KEY_FIRST + 1; rnum <= XMM_REG_NUM_KEY_LAST - 1; rnum++) { 3514 DoFour(aesdec, as_XMMRegister(rnum)); 3515 } 3516 DoFour(aesdeclast, xmm_key_last); 3517 // for each result, xor with the r vector of previous cipher block 3518 __ pxor(xmm_result0, xmm_prev_block_cipher); 3519 __ movdqu(xmm_prev_block_cipher, Address(from, pos, Address::times_1, 0*AESBlockSize)); 3520 __ pxor(xmm_result1, xmm_prev_block_cipher); 3521 __ movdqu(xmm_prev_block_cipher, Address(from, pos, Address::times_1, 1*AESBlockSize)); 3522 __ pxor(xmm_result2, xmm_prev_block_cipher); 3523 __ movdqu(xmm_prev_block_cipher, Address(from, pos, Address::times_1, 2*AESBlockSize)); 3524 __ pxor(xmm_result3, xmm_prev_block_cipher); 3525 __ movdqu(xmm_prev_block_cipher, Address(from, pos, Address::times_1, 3*AESBlockSize)); // this will carry over to next set of blocks 3526 3527 __ movdqu(Address(to, pos, Address::times_1, 0*AESBlockSize), xmm_result0); // store 4 results into the next 64 bytes of output 3528 __ movdqu(Address(to, pos, Address::times_1, 1*AESBlockSize), xmm_result1); 3529 __ movdqu(Address(to, pos, Address::times_1, 2*AESBlockSize), xmm_result2); 3530 __ movdqu(Address(to, pos, Address::times_1, 3*AESBlockSize), xmm_result3); 3531 3532 __ addptr(pos, 4*AESBlockSize); 3533 __ subptr(len_reg, 4*AESBlockSize); 3534 __ jmp(L_multiBlock_loopTop_128); 3535 3536 // registers used in the non-parallelized loops 3537 // xmm register assignments for the loops below 3538 const XMMRegister xmm_result = xmm0; 3539 const XMMRegister xmm_prev_block_cipher_save = xmm2; 3540 const XMMRegister xmm_key11 = xmm3; 3541 const XMMRegister xmm_key12 = xmm4; 3542 const XMMRegister xmm_temp = xmm4; 3543 3544 __ align(OptoLoopAlignment); 3545 __ BIND(L_singleBlock_loopTop_128); 3546 __ cmpptr(len_reg, 0); // any blocks left?? 3547 __ jcc(Assembler::equal, L_exit); 3548 __ movdqu(xmm_result, Address(from, pos, Address::times_1, 0)); // get next 16 bytes of cipher input 3549 __ movdqa(xmm_prev_block_cipher_save, xmm_result); // save for next r vector 3550 __ pxor (xmm_result, xmm_key_first); // do the aes dec rounds 3551 for (int rnum = XMM_REG_NUM_KEY_FIRST + 1; rnum <= XMM_REG_NUM_KEY_LAST - 1; rnum++) { 3552 __ aesdec(xmm_result, as_XMMRegister(rnum)); 3553 } 3554 __ aesdeclast(xmm_result, xmm_key_last); 3555 __ pxor (xmm_result, xmm_prev_block_cipher); // xor with the current r vector 3556 __ movdqu(Address(to, pos, Address::times_1, 0), xmm_result); // store into the next 16 bytes of output 3557 // no need to store r to memory until we exit 3558 __ movdqa(xmm_prev_block_cipher, xmm_prev_block_cipher_save); // set up next r vector with cipher input from this block 3559 3560 __ addptr(pos, AESBlockSize); 3561 __ subptr(len_reg, AESBlockSize); 3562 __ jmp(L_singleBlock_loopTop_128); 3563 3564 3565 __ BIND(L_exit); 3566 __ movdqu(Address(rvec, 0), xmm_prev_block_cipher); // final value of r stored in rvec of CipherBlockChaining object 3567 #ifdef _WIN64 3568 // restore regs belonging to calling function 3569 for (int i = 6; i <= XMM_REG_NUM_KEY_LAST; i++) { 3570 __ movdqu(as_XMMRegister(i), xmm_save(i)); 3571 } 3572 __ movl(rax, len_mem); 3573 #else 3574 __ pop(rax); // return length 3575 #endif 3576 __ leave(); // required for proper stackwalking of RuntimeStub frame 3577 __ ret(0); 3578 3579 3580 __ BIND(L_key_192_256); 3581 // here rax = len in ints of AESCrypt.KLE array (52=192, or 60=256) 3582 load_key(xmm_key11, key, 0xb0); 3583 __ cmpl(rax, 52); 3584 __ jcc(Assembler::notEqual, L_key_256); 3585 3586 // 192-bit code follows here (could be optimized to use parallelism) 3587 load_key(xmm_key12, key, 0xc0); // 192-bit key goes up to c0 3588 __ movptr(pos, 0); 3589 __ align(OptoLoopAlignment); 3590 3591 __ BIND(L_singleBlock_loopTop_192); 3592 __ movdqu(xmm_result, Address(from, pos, Address::times_1, 0)); // get next 16 bytes of cipher input 3593 __ movdqa(xmm_prev_block_cipher_save, xmm_result); // save for next r vector 3594 __ pxor (xmm_result, xmm_key_first); // do the aes dec rounds 3595 for (int rnum = XMM_REG_NUM_KEY_FIRST + 1; rnum <= XMM_REG_NUM_KEY_LAST - 1; rnum++) { 3596 __ aesdec(xmm_result, as_XMMRegister(rnum)); 3597 } 3598 __ aesdec(xmm_result, xmm_key11); 3599 __ aesdec(xmm_result, xmm_key12); 3600 __ aesdeclast(xmm_result, xmm_key_last); // xmm15 always came from key+0 3601 __ pxor (xmm_result, xmm_prev_block_cipher); // xor with the current r vector 3602 __ movdqu(Address(to, pos, Address::times_1, 0), xmm_result); // store into the next 16 bytes of output 3603 // no need to store r to memory until we exit 3604 __ movdqa(xmm_prev_block_cipher, xmm_prev_block_cipher_save); // set up next r vector with cipher input from this block 3605 __ addptr(pos, AESBlockSize); 3606 __ subptr(len_reg, AESBlockSize); 3607 __ jcc(Assembler::notEqual,L_singleBlock_loopTop_192); 3608 __ jmp(L_exit); 3609 3610 __ BIND(L_key_256); 3611 // 256-bit code follows here (could be optimized to use parallelism) 3612 __ movptr(pos, 0); 3613 __ align(OptoLoopAlignment); 3614 3615 __ BIND(L_singleBlock_loopTop_256); 3616 __ movdqu(xmm_result, Address(from, pos, Address::times_1, 0)); // get next 16 bytes of cipher input 3617 __ movdqa(xmm_prev_block_cipher_save, xmm_result); // save for next r vector 3618 __ pxor (xmm_result, xmm_key_first); // do the aes dec rounds 3619 for (int rnum = XMM_REG_NUM_KEY_FIRST + 1; rnum <= XMM_REG_NUM_KEY_LAST - 1; rnum++) { 3620 __ aesdec(xmm_result, as_XMMRegister(rnum)); 3621 } 3622 __ aesdec(xmm_result, xmm_key11); 3623 load_key(xmm_temp, key, 0xc0); 3624 __ aesdec(xmm_result, xmm_temp); 3625 load_key(xmm_temp, key, 0xd0); 3626 __ aesdec(xmm_result, xmm_temp); 3627 load_key(xmm_temp, key, 0xe0); // 256-bit key goes up to e0 3628 __ aesdec(xmm_result, xmm_temp); 3629 __ aesdeclast(xmm_result, xmm_key_last); // xmm15 came from key+0 3630 __ pxor (xmm_result, xmm_prev_block_cipher); // xor with the current r vector 3631 __ movdqu(Address(to, pos, Address::times_1, 0), xmm_result); // store into the next 16 bytes of output 3632 // no need to store r to memory until we exit 3633 __ movdqa(xmm_prev_block_cipher, xmm_prev_block_cipher_save); // set up next r vector with cipher input from this block 3634 __ addptr(pos, AESBlockSize); 3635 __ subptr(len_reg, AESBlockSize); 3636 __ jcc(Assembler::notEqual,L_singleBlock_loopTop_256); 3637 __ jmp(L_exit); 3638 3639 return start; 3640 } 3641 3642 3643 // byte swap x86 long 3644 address generate_ghash_long_swap_mask() { 3645 __ align(CodeEntryAlignment); 3646 StubCodeMark mark(this, "StubRoutines", "ghash_long_swap_mask"); 3647 address start = __ pc(); 3648 __ emit_data64(0x0f0e0d0c0b0a0908, relocInfo::none ); 3649 __ emit_data64(0x0706050403020100, relocInfo::none ); 3650 return start; 3651 } 3652 3653 // byte swap x86 byte array 3654 address generate_ghash_byte_swap_mask() { 3655 __ align(CodeEntryAlignment); 3656 StubCodeMark mark(this, "StubRoutines", "ghash_byte_swap_mask"); 3657 address start = __ pc(); 3658 __ emit_data64(0x08090a0b0c0d0e0f, relocInfo::none ); 3659 __ emit_data64(0x0001020304050607, relocInfo::none ); 3660 return start; 3661 } 3662 3663 /* Single and multi-block ghash operations */ 3664 address generate_ghash_processBlocks() { 3665 __ align(CodeEntryAlignment); 3666 Label L_ghash_loop, L_exit; 3667 StubCodeMark mark(this, "StubRoutines", "ghash_processBlocks"); 3668 address start = __ pc(); 3669 3670 const Register state = c_rarg0; 3671 const Register subkeyH = c_rarg1; 3672 const Register data = c_rarg2; 3673 const Register blocks = c_rarg3; 3674 3675 #ifdef _WIN64 3676 const int XMM_REG_LAST = 10; 3677 #endif 3678 3679 const XMMRegister xmm_temp0 = xmm0; 3680 const XMMRegister xmm_temp1 = xmm1; 3681 const XMMRegister xmm_temp2 = xmm2; 3682 const XMMRegister xmm_temp3 = xmm3; 3683 const XMMRegister xmm_temp4 = xmm4; 3684 const XMMRegister xmm_temp5 = xmm5; 3685 const XMMRegister xmm_temp6 = xmm6; 3686 const XMMRegister xmm_temp7 = xmm7; 3687 const XMMRegister xmm_temp8 = xmm8; 3688 const XMMRegister xmm_temp9 = xmm9; 3689 const XMMRegister xmm_temp10 = xmm10; 3690 3691 __ enter(); 3692 3693 #ifdef _WIN64 3694 // save the xmm registers which must be preserved 6-10 3695 __ subptr(rsp, -rsp_after_call_off * wordSize); 3696 for (int i = 6; i <= XMM_REG_LAST; i++) { 3697 __ movdqu(xmm_save(i), as_XMMRegister(i)); 3698 } 3699 #endif 3700 3701 __ movdqu(xmm_temp10, ExternalAddress(StubRoutines::x86::ghash_long_swap_mask_addr())); 3702 3703 __ movdqu(xmm_temp0, Address(state, 0)); 3704 __ pshufb(xmm_temp0, xmm_temp10); 3705 3706 3707 __ BIND(L_ghash_loop); 3708 __ movdqu(xmm_temp2, Address(data, 0)); 3709 __ pshufb(xmm_temp2, ExternalAddress(StubRoutines::x86::ghash_byte_swap_mask_addr())); 3710 3711 __ movdqu(xmm_temp1, Address(subkeyH, 0)); 3712 __ pshufb(xmm_temp1, xmm_temp10); 3713 3714 __ pxor(xmm_temp0, xmm_temp2); 3715 3716 // 3717 // Multiply with the hash key 3718 // 3719 __ movdqu(xmm_temp3, xmm_temp0); 3720 __ pclmulqdq(xmm_temp3, xmm_temp1, 0); // xmm3 holds a0*b0 3721 __ movdqu(xmm_temp4, xmm_temp0); 3722 __ pclmulqdq(xmm_temp4, xmm_temp1, 16); // xmm4 holds a0*b1 3723 3724 __ movdqu(xmm_temp5, xmm_temp0); 3725 __ pclmulqdq(xmm_temp5, xmm_temp1, 1); // xmm5 holds a1*b0 3726 __ movdqu(xmm_temp6, xmm_temp0); 3727 __ pclmulqdq(xmm_temp6, xmm_temp1, 17); // xmm6 holds a1*b1 3728 3729 __ pxor(xmm_temp4, xmm_temp5); // xmm4 holds a0*b1 + a1*b0 3730 3731 __ movdqu(xmm_temp5, xmm_temp4); // move the contents of xmm4 to xmm5 3732 __ psrldq(xmm_temp4, 8); // shift by xmm4 64 bits to the right 3733 __ pslldq(xmm_temp5, 8); // shift by xmm5 64 bits to the left 3734 __ pxor(xmm_temp3, xmm_temp5); 3735 __ pxor(xmm_temp6, xmm_temp4); // Register pair <xmm6:xmm3> holds the result 3736 // of the carry-less multiplication of 3737 // xmm0 by xmm1. 3738 3739 // We shift the result of the multiplication by one bit position 3740 // to the left to cope for the fact that the bits are reversed. 3741 __ movdqu(xmm_temp7, xmm_temp3); 3742 __ movdqu(xmm_temp8, xmm_temp6); 3743 __ pslld(xmm_temp3, 1); 3744 __ pslld(xmm_temp6, 1); 3745 __ psrld(xmm_temp7, 31); 3746 __ psrld(xmm_temp8, 31); 3747 __ movdqu(xmm_temp9, xmm_temp7); 3748 __ pslldq(xmm_temp8, 4); 3749 __ pslldq(xmm_temp7, 4); 3750 __ psrldq(xmm_temp9, 12); 3751 __ por(xmm_temp3, xmm_temp7); 3752 __ por(xmm_temp6, xmm_temp8); 3753 __ por(xmm_temp6, xmm_temp9); 3754 3755 // 3756 // First phase of the reduction 3757 // 3758 // Move xmm3 into xmm7, xmm8, xmm9 in order to perform the shifts 3759 // independently. 3760 __ movdqu(xmm_temp7, xmm_temp3); 3761 __ movdqu(xmm_temp8, xmm_temp3); 3762 __ movdqu(xmm_temp9, xmm_temp3); 3763 __ pslld(xmm_temp7, 31); // packed right shift shifting << 31 3764 __ pslld(xmm_temp8, 30); // packed right shift shifting << 30 3765 __ pslld(xmm_temp9, 25); // packed right shift shifting << 25 3766 __ pxor(xmm_temp7, xmm_temp8); // xor the shifted versions 3767 __ pxor(xmm_temp7, xmm_temp9); 3768 __ movdqu(xmm_temp8, xmm_temp7); 3769 __ pslldq(xmm_temp7, 12); 3770 __ psrldq(xmm_temp8, 4); 3771 __ pxor(xmm_temp3, xmm_temp7); // first phase of the reduction complete 3772 3773 // 3774 // Second phase of the reduction 3775 // 3776 // Make 3 copies of xmm3 in xmm2, xmm4, xmm5 for doing these 3777 // shift operations. 3778 __ movdqu(xmm_temp2, xmm_temp3); 3779 __ movdqu(xmm_temp4, xmm_temp3); 3780 __ movdqu(xmm_temp5, xmm_temp3); 3781 __ psrld(xmm_temp2, 1); // packed left shifting >> 1 3782 __ psrld(xmm_temp4, 2); // packed left shifting >> 2 3783 __ psrld(xmm_temp5, 7); // packed left shifting >> 7 3784 __ pxor(xmm_temp2, xmm_temp4); // xor the shifted versions 3785 __ pxor(xmm_temp2, xmm_temp5); 3786 __ pxor(xmm_temp2, xmm_temp8); 3787 __ pxor(xmm_temp3, xmm_temp2); 3788 __ pxor(xmm_temp6, xmm_temp3); // the result is in xmm6 3789 3790 __ decrement(blocks); 3791 __ jcc(Assembler::zero, L_exit); 3792 __ movdqu(xmm_temp0, xmm_temp6); 3793 __ addptr(data, 16); 3794 __ jmp(L_ghash_loop); 3795 3796 __ BIND(L_exit); 3797 __ pshufb(xmm_temp6, xmm_temp10); // Byte swap 16-byte result 3798 __ movdqu(Address(state, 0), xmm_temp6); // store the result 3799 3800 #ifdef _WIN64 3801 // restore xmm regs belonging to calling function 3802 for (int i = 6; i <= XMM_REG_LAST; i++) { 3803 __ movdqu(as_XMMRegister(i), xmm_save(i)); 3804 } 3805 #endif 3806 __ leave(); 3807 __ ret(0); 3808 return start; 3809 } 3810 3811 /** 3812 * Arguments: 3813 * 3814 * Inputs: 3815 * c_rarg0 - int crc 3816 * c_rarg1 - byte* buf 3817 * c_rarg2 - int length 3818 * 3819 * Ouput: 3820 * rax - int crc result 3821 */ 3822 address generate_updateBytesCRC32() { 3823 assert(UseCRC32Intrinsics, "need AVX and CLMUL instructions"); 3824 3825 __ align(CodeEntryAlignment); 3826 StubCodeMark mark(this, "StubRoutines", "updateBytesCRC32"); 3827 3828 address start = __ pc(); 3829 // Win64: rcx, rdx, r8, r9 (c_rarg0, c_rarg1, ...) 3830 // Unix: rdi, rsi, rdx, rcx, r8, r9 (c_rarg0, c_rarg1, ...) 3831 // rscratch1: r10 3832 const Register crc = c_rarg0; // crc 3833 const Register buf = c_rarg1; // source java byte array address 3834 const Register len = c_rarg2; // length 3835 const Register table = c_rarg3; // crc_table address (reuse register) 3836 const Register tmp = r11; 3837 assert_different_registers(crc, buf, len, table, tmp, rax); 3838 3839 BLOCK_COMMENT("Entry:"); 3840 __ enter(); // required for proper stackwalking of RuntimeStub frame 3841 3842 __ kernel_crc32(crc, buf, len, table, tmp); 3843 3844 __ movl(rax, crc); 3845 __ leave(); // required for proper stackwalking of RuntimeStub frame 3846 __ ret(0); 3847 3848 return start; 3849 } 3850 3851 3852 /** 3853 * Arguments: 3854 * 3855 * Input: 3856 * c_rarg0 - x address 3857 * c_rarg1 - x length 3858 * c_rarg2 - y address 3859 * c_rarg3 - y lenth 3860 * not Win64 3861 * c_rarg4 - z address 3862 * c_rarg5 - z length 3863 * Win64 3864 * rsp+40 - z address 3865 * rsp+48 - z length 3866 */ 3867 address generate_multiplyToLen() { 3868 __ align(CodeEntryAlignment); 3869 StubCodeMark mark(this, "StubRoutines", "multiplyToLen"); 3870 3871 address start = __ pc(); 3872 // Win64: rcx, rdx, r8, r9 (c_rarg0, c_rarg1, ...) 3873 // Unix: rdi, rsi, rdx, rcx, r8, r9 (c_rarg0, c_rarg1, ...) 3874 const Register x = rdi; 3875 const Register xlen = rax; 3876 const Register y = rsi; 3877 const Register ylen = rcx; 3878 const Register z = r8; 3879 const Register zlen = r11; 3880 3881 // Next registers will be saved on stack in multiply_to_len(). 3882 const Register tmp1 = r12; 3883 const Register tmp2 = r13; 3884 const Register tmp3 = r14; 3885 const Register tmp4 = r15; 3886 const Register tmp5 = rbx; 3887 3888 BLOCK_COMMENT("Entry:"); 3889 __ enter(); // required for proper stackwalking of RuntimeStub frame 3890 3891 #ifndef _WIN64 3892 __ movptr(zlen, r9); // Save r9 in r11 - zlen 3893 #endif 3894 setup_arg_regs(4); // x => rdi, xlen => rsi, y => rdx 3895 // ylen => rcx, z => r8, zlen => r11 3896 // r9 and r10 may be used to save non-volatile registers 3897 #ifdef _WIN64 3898 // last 2 arguments (#4, #5) are on stack on Win64 3899 __ movptr(z, Address(rsp, 6 * wordSize)); 3900 __ movptr(zlen, Address(rsp, 7 * wordSize)); 3901 #endif 3902 3903 __ movptr(xlen, rsi); 3904 __ movptr(y, rdx); 3905 __ multiply_to_len(x, xlen, y, ylen, z, zlen, tmp1, tmp2, tmp3, tmp4, tmp5); 3906 3907 restore_arg_regs(); 3908 3909 __ leave(); // required for proper stackwalking of RuntimeStub frame 3910 __ ret(0); 3911 3912 return start; 3913 } 3914 3915 /** 3916 * Arguments: 3917 * 3918 // Input: 3919 // c_rarg0 - x address 3920 // c_rarg1 - x length 3921 // c_rarg2 - z address 3922 // c_rarg3 - z lenth 3923 * 3924 */ 3925 address generate_squareToLen() { 3926 3927 __ align(CodeEntryAlignment); 3928 StubCodeMark mark(this, "StubRoutines", "squareToLen"); 3929 3930 address start = __ pc(); 3931 // Win64: rcx, rdx, r8, r9 (c_rarg0, c_rarg1, ...) 3932 // Unix: rdi, rsi, rdx, rcx (c_rarg0, c_rarg1, ...) 3933 const Register x = rdi; 3934 const Register len = rsi; 3935 const Register z = r8; 3936 const Register zlen = rcx; 3937 3938 const Register tmp1 = r12; 3939 const Register tmp2 = r13; 3940 const Register tmp3 = r14; 3941 const Register tmp4 = r15; 3942 const Register tmp5 = rbx; 3943 3944 BLOCK_COMMENT("Entry:"); 3945 __ enter(); // required for proper stackwalking of RuntimeStub frame 3946 3947 setup_arg_regs(4); // x => rdi, len => rsi, z => rdx 3948 // zlen => rcx 3949 // r9 and r10 may be used to save non-volatile registers 3950 __ movptr(r8, rdx); 3951 __ square_to_len(x, len, z, zlen, tmp1, tmp2, tmp3, tmp4, tmp5, rdx, rax); 3952 3953 restore_arg_regs(); 3954 3955 __ leave(); // required for proper stackwalking of RuntimeStub frame 3956 __ ret(0); 3957 3958 return start; 3959 } 3960 3961 /** 3962 * Arguments: 3963 * 3964 * Input: 3965 * c_rarg0 - out address 3966 * c_rarg1 - in address 3967 * c_rarg2 - offset 3968 * c_rarg3 - len 3969 * not Win64 3970 * c_rarg4 - k 3971 * Win64 3972 * rsp+40 - k 3973 */ 3974 address generate_mulAdd() { 3975 __ align(CodeEntryAlignment); 3976 StubCodeMark mark(this, "StubRoutines", "mulAdd"); 3977 3978 address start = __ pc(); 3979 // Win64: rcx, rdx, r8, r9 (c_rarg0, c_rarg1, ...) 3980 // Unix: rdi, rsi, rdx, rcx, r8, r9 (c_rarg0, c_rarg1, ...) 3981 const Register out = rdi; 3982 const Register in = rsi; 3983 const Register offset = r11; 3984 const Register len = rcx; 3985 const Register k = r8; 3986 3987 // Next registers will be saved on stack in mul_add(). 3988 const Register tmp1 = r12; 3989 const Register tmp2 = r13; 3990 const Register tmp3 = r14; 3991 const Register tmp4 = r15; 3992 const Register tmp5 = rbx; 3993 3994 BLOCK_COMMENT("Entry:"); 3995 __ enter(); // required for proper stackwalking of RuntimeStub frame 3996 3997 setup_arg_regs(4); // out => rdi, in => rsi, offset => rdx 3998 // len => rcx, k => r8 3999 // r9 and r10 may be used to save non-volatile registers 4000 #ifdef _WIN64 4001 // last argument is on stack on Win64 4002 __ movl(k, Address(rsp, 6 * wordSize)); 4003 #endif 4004 __ movptr(r11, rdx); // move offset in rdx to offset(r11) 4005 __ mul_add(out, in, offset, len, k, tmp1, tmp2, tmp3, tmp4, tmp5, rdx, rax); 4006 4007 restore_arg_regs(); 4008 4009 __ leave(); // required for proper stackwalking of RuntimeStub frame 4010 __ ret(0); 4011 4012 return start; 4013 } 4014 4015 4016 #undef __ 4017 #define __ masm-> 4018 4019 // Continuation point for throwing of implicit exceptions that are 4020 // not handled in the current activation. Fabricates an exception 4021 // oop and initiates normal exception dispatching in this 4022 // frame. Since we need to preserve callee-saved values (currently 4023 // only for C2, but done for C1 as well) we need a callee-saved oop 4024 // map and therefore have to make these stubs into RuntimeStubs 4025 // rather than BufferBlobs. If the compiler needs all registers to 4026 // be preserved between the fault point and the exception handler 4027 // then it must assume responsibility for that in 4028 // AbstractCompiler::continuation_for_implicit_null_exception or 4029 // continuation_for_implicit_division_by_zero_exception. All other 4030 // implicit exceptions (e.g., NullPointerException or 4031 // AbstractMethodError on entry) are either at call sites or 4032 // otherwise assume that stack unwinding will be initiated, so 4033 // caller saved registers were assumed volatile in the compiler. 4034 address generate_throw_exception(const char* name, 4035 address runtime_entry, 4036 Register arg1 = noreg, 4037 Register arg2 = noreg) { 4038 // Information about frame layout at time of blocking runtime call. 4039 // Note that we only have to preserve callee-saved registers since 4040 // the compilers are responsible for supplying a continuation point 4041 // if they expect all registers to be preserved. 4042 enum layout { 4043 rbp_off = frame::arg_reg_save_area_bytes/BytesPerInt, 4044 rbp_off2, 4045 return_off, 4046 return_off2, 4047 framesize // inclusive of return address 4048 }; 4049 4050 int insts_size = 512; 4051 int locs_size = 64; 4052 4053 CodeBuffer code(name, insts_size, locs_size); 4054 OopMapSet* oop_maps = new OopMapSet(); 4055 MacroAssembler* masm = new MacroAssembler(&code); 4056 4057 address start = __ pc(); 4058 4059 // This is an inlined and slightly modified version of call_VM 4060 // which has the ability to fetch the return PC out of 4061 // thread-local storage and also sets up last_Java_sp slightly 4062 // differently than the real call_VM 4063 4064 __ enter(); // required for proper stackwalking of RuntimeStub frame 4065 4066 assert(is_even(framesize/2), "sp not 16-byte aligned"); 4067 4068 // return address and rbp are already in place 4069 __ subptr(rsp, (framesize-4) << LogBytesPerInt); // prolog 4070 4071 int frame_complete = __ pc() - start; 4072 4073 // Set up last_Java_sp and last_Java_fp 4074 address the_pc = __ pc(); 4075 __ set_last_Java_frame(rsp, rbp, the_pc); 4076 __ andptr(rsp, -(StackAlignmentInBytes)); // Align stack 4077 4078 // Call runtime 4079 if (arg1 != noreg) { 4080 assert(arg2 != c_rarg1, "clobbered"); 4081 __ movptr(c_rarg1, arg1); 4082 } 4083 if (arg2 != noreg) { 4084 __ movptr(c_rarg2, arg2); 4085 } 4086 __ movptr(c_rarg0, r15_thread); 4087 BLOCK_COMMENT("call runtime_entry"); 4088 __ call(RuntimeAddress(runtime_entry)); 4089 4090 // Generate oop map 4091 OopMap* map = new OopMap(framesize, 0); 4092 4093 oop_maps->add_gc_map(the_pc - start, map); 4094 4095 __ reset_last_Java_frame(true); 4096 4097 __ leave(); // required for proper stackwalking of RuntimeStub frame 4098 4099 // check for pending exceptions 4100 #ifdef ASSERT 4101 Label L; 4102 __ cmpptr(Address(r15_thread, Thread::pending_exception_offset()), 4103 (int32_t) NULL_WORD); 4104 __ jcc(Assembler::notEqual, L); 4105 __ should_not_reach_here(); 4106 __ bind(L); 4107 #endif // ASSERT 4108 __ jump(RuntimeAddress(StubRoutines::forward_exception_entry())); 4109 4110 4111 // codeBlob framesize is in words (not VMRegImpl::slot_size) 4112 RuntimeStub* stub = 4113 RuntimeStub::new_runtime_stub(name, 4114 &code, 4115 frame_complete, 4116 (framesize >> (LogBytesPerWord - LogBytesPerInt)), 4117 oop_maps, false); 4118 return stub->entry_point(); 4119 } 4120 4121 void create_control_words() { 4122 // Round to nearest, 53-bit mode, exceptions masked 4123 StubRoutines::_fpu_cntrl_wrd_std = 0x027F; 4124 // Round to zero, 53-bit mode, exception mased 4125 StubRoutines::_fpu_cntrl_wrd_trunc = 0x0D7F; 4126 // Round to nearest, 24-bit mode, exceptions masked 4127 StubRoutines::_fpu_cntrl_wrd_24 = 0x007F; 4128 // Round to nearest, 64-bit mode, exceptions masked 4129 StubRoutines::_fpu_cntrl_wrd_64 = 0x037F; 4130 // Round to nearest, 64-bit mode, exceptions masked 4131 StubRoutines::_mxcsr_std = 0x1F80; 4132 // Note: the following two constants are 80-bit values 4133 // layout is critical for correct loading by FPU. 4134 // Bias for strict fp multiply/divide 4135 StubRoutines::_fpu_subnormal_bias1[0]= 0x00000000; // 2^(-15360) == 0x03ff 8000 0000 0000 0000 4136 StubRoutines::_fpu_subnormal_bias1[1]= 0x80000000; 4137 StubRoutines::_fpu_subnormal_bias1[2]= 0x03ff; 4138 // Un-Bias for strict fp multiply/divide 4139 StubRoutines::_fpu_subnormal_bias2[0]= 0x00000000; // 2^(+15360) == 0x7bff 8000 0000 0000 0000 4140 StubRoutines::_fpu_subnormal_bias2[1]= 0x80000000; 4141 StubRoutines::_fpu_subnormal_bias2[2]= 0x7bff; 4142 } 4143 4144 // Initialization 4145 void generate_initial() { 4146 // Generates all stubs and initializes the entry points 4147 4148 // This platform-specific settings are needed by generate_call_stub() 4149 create_control_words(); 4150 4151 // entry points that exist in all platforms Note: This is code 4152 // that could be shared among different platforms - however the 4153 // benefit seems to be smaller than the disadvantage of having a 4154 // much more complicated generator structure. See also comment in 4155 // stubRoutines.hpp. 4156 4157 StubRoutines::_forward_exception_entry = generate_forward_exception(); 4158 4159 StubRoutines::_call_stub_entry = 4160 generate_call_stub(StubRoutines::_call_stub_return_address); 4161 4162 // is referenced by megamorphic call 4163 StubRoutines::_catch_exception_entry = generate_catch_exception(); 4164 4165 // atomic calls 4166 StubRoutines::_atomic_xchg_entry = generate_atomic_xchg(); 4167 StubRoutines::_atomic_xchg_ptr_entry = generate_atomic_xchg_ptr(); 4168 StubRoutines::_atomic_cmpxchg_entry = generate_atomic_cmpxchg(); 4169 StubRoutines::_atomic_cmpxchg_long_entry = generate_atomic_cmpxchg_long(); 4170 StubRoutines::_atomic_add_entry = generate_atomic_add(); 4171 StubRoutines::_atomic_add_ptr_entry = generate_atomic_add_ptr(); 4172 StubRoutines::_fence_entry = generate_orderaccess_fence(); 4173 4174 StubRoutines::_handler_for_unsafe_access_entry = 4175 generate_handler_for_unsafe_access(); 4176 4177 // platform dependent 4178 StubRoutines::x86::_get_previous_fp_entry = generate_get_previous_fp(); 4179 StubRoutines::x86::_get_previous_sp_entry = generate_get_previous_sp(); 4180 4181 StubRoutines::x86::_verify_mxcsr_entry = generate_verify_mxcsr(); 4182 4183 // Build this early so it's available for the interpreter. 4184 StubRoutines::_throw_StackOverflowError_entry = 4185 generate_throw_exception("StackOverflowError throw_exception", 4186 CAST_FROM_FN_PTR(address, 4187 SharedRuntime:: 4188 throw_StackOverflowError)); 4189 if (UseCRC32Intrinsics) { 4190 // set table address before stub generation which use it 4191 StubRoutines::_crc_table_adr = (address)StubRoutines::x86::_crc_table; 4192 StubRoutines::_updateBytesCRC32 = generate_updateBytesCRC32(); 4193 } 4194 } 4195 4196 void generate_all() { 4197 // Generates all stubs and initializes the entry points 4198 4199 // These entry points require SharedInfo::stack0 to be set up in 4200 // non-core builds and need to be relocatable, so they each 4201 // fabricate a RuntimeStub internally. 4202 StubRoutines::_throw_AbstractMethodError_entry = 4203 generate_throw_exception("AbstractMethodError throw_exception", 4204 CAST_FROM_FN_PTR(address, 4205 SharedRuntime:: 4206 throw_AbstractMethodError)); 4207 4208 StubRoutines::_throw_IncompatibleClassChangeError_entry = 4209 generate_throw_exception("IncompatibleClassChangeError throw_exception", 4210 CAST_FROM_FN_PTR(address, 4211 SharedRuntime:: 4212 throw_IncompatibleClassChangeError)); 4213 4214 StubRoutines::_throw_NullPointerException_at_call_entry = 4215 generate_throw_exception("NullPointerException at call throw_exception", 4216 CAST_FROM_FN_PTR(address, 4217 SharedRuntime:: 4218 throw_NullPointerException_at_call)); 4219 4220 // entry points that are platform specific 4221 StubRoutines::x86::_f2i_fixup = generate_f2i_fixup(); 4222 StubRoutines::x86::_f2l_fixup = generate_f2l_fixup(); 4223 StubRoutines::x86::_d2i_fixup = generate_d2i_fixup(); 4224 StubRoutines::x86::_d2l_fixup = generate_d2l_fixup(); 4225 4226 StubRoutines::x86::_float_sign_mask = generate_fp_mask("float_sign_mask", 0x7FFFFFFF7FFFFFFF); 4227 StubRoutines::x86::_float_sign_flip = generate_fp_mask("float_sign_flip", 0x8000000080000000); 4228 StubRoutines::x86::_double_sign_mask = generate_fp_mask("double_sign_mask", 0x7FFFFFFFFFFFFFFF); 4229 StubRoutines::x86::_double_sign_flip = generate_fp_mask("double_sign_flip", 0x8000000000000000); 4230 4231 // support for verify_oop (must happen after universe_init) 4232 StubRoutines::_verify_oop_subroutine_entry = generate_verify_oop(); 4233 4234 // arraycopy stubs used by compilers 4235 generate_arraycopy_stubs(); 4236 4237 generate_math_stubs(); 4238 4239 // don't bother generating these AES intrinsic stubs unless global flag is set 4240 if (UseAESIntrinsics) { 4241 StubRoutines::x86::_key_shuffle_mask_addr = generate_key_shuffle_mask(); // needed by the others 4242 4243 StubRoutines::_aescrypt_encryptBlock = generate_aescrypt_encryptBlock(); 4244 StubRoutines::_aescrypt_decryptBlock = generate_aescrypt_decryptBlock(); 4245 StubRoutines::_cipherBlockChaining_encryptAESCrypt = generate_cipherBlockChaining_encryptAESCrypt(); 4246 StubRoutines::_cipherBlockChaining_decryptAESCrypt = generate_cipherBlockChaining_decryptAESCrypt_Parallel(); 4247 } 4248 4249 // Generate GHASH intrinsics code 4250 if (UseGHASHIntrinsics) { 4251 StubRoutines::x86::_ghash_long_swap_mask_addr = generate_ghash_long_swap_mask(); 4252 StubRoutines::x86::_ghash_byte_swap_mask_addr = generate_ghash_byte_swap_mask(); 4253 StubRoutines::_ghash_processBlocks = generate_ghash_processBlocks(); 4254 } 4255 4256 // Safefetch stubs. 4257 generate_safefetch("SafeFetch32", sizeof(int), &StubRoutines::_safefetch32_entry, 4258 &StubRoutines::_safefetch32_fault_pc, 4259 &StubRoutines::_safefetch32_continuation_pc); 4260 generate_safefetch("SafeFetchN", sizeof(intptr_t), &StubRoutines::_safefetchN_entry, 4261 &StubRoutines::_safefetchN_fault_pc, 4262 &StubRoutines::_safefetchN_continuation_pc); 4263 #ifdef COMPILER2 4264 if (UseMultiplyToLenIntrinsic) { 4265 StubRoutines::_multiplyToLen = generate_multiplyToLen(); 4266 } 4267 if (UseSquareToLenIntrinsic) { 4268 StubRoutines::_squareToLen = generate_squareToLen(); 4269 } 4270 if (UseMulAddIntrinsic) { 4271 StubRoutines::_mulAdd = generate_mulAdd(); 4272 } 4273 4274 // Visual Studio 2017 (and higher) has the compiler instrinisics required 4275 #if !(defined(_WINDOWS) && _MSC_VER < 1910) 4276 if (UseMontgomeryMultiplyIntrinsic) { 4277 StubRoutines::_montgomeryMultiply 4278 = CAST_FROM_FN_PTR(address, SharedRuntime::montgomery_multiply); 4279 } 4280 if (UseMontgomerySquareIntrinsic) { 4281 StubRoutines::_montgomerySquare 4282 = CAST_FROM_FN_PTR(address, SharedRuntime::montgomery_square); 4283 } 4284 #endif //! VC++ < 2017 4285 #endif // COMPILER2 4286 } 4287 4288 public: 4289 StubGenerator(CodeBuffer* code, bool all) : StubCodeGenerator(code) { 4290 if (all) { 4291 generate_all(); 4292 } else { 4293 generate_initial(); 4294 } 4295 } 4296 }; // end class declaration 4297 4298 void StubGenerator_generate(CodeBuffer* code, bool all) { 4299 StubGenerator g(code, all); 4300 } --- EOF ---