/*
 * Copyright (c) 2003, 2016, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "asm/macroAssembler.hpp"
#include "asm/macroAssembler.inline.hpp"
#include "gc/shenandoah/brooksPointer.hpp"
#include "gc/shenandoah/shenandoahBarrierSet.hpp"
#include "gc/shenandoah/shenandoahHeap.hpp"
#include "gc/shenandoah/shenandoahHeapRegion.hpp"
#include "interpreter/interpreter.hpp"
#include "nativeInst_x86.hpp"
#include "oops/instanceOop.hpp"
#include "oops/method.hpp"
#include "oops/objArrayKlass.hpp"
#include "oops/oop.inline.hpp"
#include "prims/methodHandles.hpp"
#include "runtime/frame.inline.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubCodeGenerator.hpp"
#include "runtime/stubRoutines.hpp"
#include "runtime/thread.inline.hpp"
#ifdef COMPILER2
#include "opto/runtime.hpp"
#endif

// Declaration and definition of StubGenerator (no .hpp file).
// For a more detailed description of the stub routine structure
// see the comment in stubRoutines.hpp

#define __ _masm->
#define TIMES_OOP (UseCompressedOops ? Address::times_4 : Address::times_8)
#define a__ ((Assembler*)_masm)->

#ifdef PRODUCT
#define BLOCK_COMMENT(str) /* nothing */
#else
#define BLOCK_COMMENT(str) __ block_comment(str)
#endif

#define BIND(label) bind(label); BLOCK_COMMENT(#label ":")
const int MXCSR_MASK = 0xFFC0;  // Mask out any pending exceptions

// Stub Code definitions

class StubGenerator: public StubCodeGenerator {
 private:

#ifdef PRODUCT
#define inc_counter_np(counter) ((void)0)
#else
  void inc_counter_np_(int& counter) {
    // This can destroy rscratch1 if counter is far from the code cache
    __ incrementl(ExternalAddress((address)&counter));
  }
#define inc_counter_np(counter) \
  BLOCK_COMMENT("inc_counter " #counter); \
  inc_counter_np_(counter);
#endif

  // Call stubs are used to call Java from C
  //
  // Linux Arguments:
  //    c_rarg0:   call wrapper address                   address
  //    c_rarg1:   result                                 address
  //    c_rarg2:   result type                            BasicType
  //    c_rarg3:   method                                 Method*
  //    c_rarg4:   (interpreter) entry point              address
  //    c_rarg5:   parameters                             intptr_t*
  //    16(rbp):   parameter size (in words)              int
  //    24(rbp):   thread                                 Thread*
  //
  //     [ return_from_Java     ] <--- rsp
  //     [ argument word n      ]
  //      ...
  // -12 [ argument word 1      ]
  // -11 [ saved r15            ] <--- rsp_after_call
  // -10 [ saved r14            ]
  //  -9 [ saved r13            ]
  //  -8 [ saved r12            ]
  //  -7 [ saved rbx            ]
  //  -6 [ call wrapper         ]
  //  -5 [ result               ]
  //  -4 [ result type          ]
  //  -3 [ method               ]
  //  -2 [ entry point          ]
  //  -1 [ parameters           ]
  //   0 [ saved rbp            ] <--- rbp
  //   1 [ return address       ]
  //   2 [ parameter size       ]
  //   3 [ thread               ]
  //
  // Windows Arguments:
  //    c_rarg0:   call wrapper address                   address
  //    c_rarg1:   result                                 address
  //    c_rarg2:   result type                            BasicType
  //    c_rarg3:   method                                 Method*
  //    48(rbp):   (interpreter) entry point              address
  //    56(rbp):   parameters                             intptr_t*
  //    64(rbp):   parameter size (in words)              int
  //    72(rbp):   thread                                 Thread*
  //
  //     [ return_from_Java     ] <--- rsp
  //     [ argument word n      ]
  //      ...
  // -60 [ argument word 1      ]
  // -59 [ saved xmm31          ] <--- rsp_after_call
  //     [ saved xmm16-xmm30    ] (EVEX enabled, else the space is blank)
  // -27 [ saved xmm15          ]
  //     [ saved xmm7-xmm14     ]
  //  -9 [ saved xmm6           ] (each xmm register takes 2 slots)
  //  -7 [ saved r15            ]
  //  -6 [ saved r14            ]
  //  -5 [ saved r13            ]
  //  -4 [ saved r12            ]
  //  -3 [ saved rdi            ]
  //  -2 [ saved rsi            ]
  //  -1 [ saved rbx            ]
  //   0 [ saved rbp            ] <--- rbp
  //   1 [ return address       ]
  //   2 [ call wrapper         ]
  //   3 [ result               ]
  //   4 [ result type          ]
  //   5 [ method               ]
  //   6 [ entry point          ]
  //   7 [ parameters           ]
  //   8 [ parameter size       ]
  //   9 [ thread               ]
  //
  //    Windows reserves the caller's stack space for arguments 1-4.
  //    We spill c_rarg0-c_rarg3 to this space.

  // Call stub stack layout word offsets from rbp
  enum call_stub_layout {
#ifdef _WIN64
    xmm_save_first     = 6,  // save from xmm6
    xmm_save_last      = 31, // to xmm31
    xmm_save_base      = -9,
    rsp_after_call_off = xmm_save_base - 2 * (xmm_save_last - xmm_save_first), // -59
    r15_off            = -7,
    r14_off            = -6,
    r13_off            = -5,
    r12_off            = -4,
    rdi_off            = -3,
    rsi_off            = -2,
    rbx_off            = -1,
    rbp_off            =  0,
    retaddr_off        =  1,
    call_wrapper_off   =  2,
    result_off         =  3,
    result_type_off    =  4,
    method_off         =  5,
    entry_point_off    =  6,
    parameters_off     =  7,
    parameter_size_off =  8,
    thread_off         =  9
#else
    rsp_after_call_off = -12,
    mxcsr_off          = rsp_after_call_off,
    r15_off            = -11,
    r14_off            = -10,
    r13_off            = -9,
    r12_off            = -8,
    rbx_off            = -7,
    call_wrapper_off   = -6,
    result_off         = -5,
    result_type_off    = -4,
    method_off         = -3,
    entry_point_off    = -2,
    parameters_off     = -1,
    rbp_off            =  0,
    retaddr_off        =  1,
    parameter_size_off =  2,
    thread_off         =  3
#endif
  };

#ifdef _WIN64
  Address xmm_save(int reg) {
    assert(reg >= xmm_save_first && reg <= xmm_save_last, "XMM register number out of range");
    return Address(rbp, (xmm_save_base - (reg - xmm_save_first) * 2) * wordSize);
  }
#endif

  address generate_call_stub(address& return_address) {
    assert((int)frame::entry_frame_after_call_words == -(int)rsp_after_call_off + 1 &&
           (int)frame::entry_frame_call_wrapper_offset == (int)call_wrapper_off,
           "adjust this code");
    StubCodeMark mark(this, "StubRoutines", "call_stub");
    address start = __ pc();

    // same as in generate_catch_exception()!
    const Address rsp_after_call(rbp, rsp_after_call_off * wordSize);

    const Address call_wrapper  (rbp, call_wrapper_off   * wordSize);
    const Address result        (rbp, result_off         * wordSize);
    const Address result_type   (rbp, result_type_off    * wordSize);
    const Address method        (rbp, method_off         * wordSize);
    const Address entry_point   (rbp, entry_point_off    * wordSize);
    const Address parameters    (rbp, parameters_off     * wordSize);
    const Address parameter_size(rbp, parameter_size_off * wordSize);

    // same as in generate_catch_exception()!
    const Address thread        (rbp, thread_off         * wordSize);

    const Address r15_save(rbp, r15_off * wordSize);
    const Address r14_save(rbp, r14_off * wordSize);
    const Address r13_save(rbp, r13_off * wordSize);
    const Address r12_save(rbp, r12_off * wordSize);
    const Address rbx_save(rbp, rbx_off * wordSize);

    // stub code
    __ enter();
    __ subptr(rsp, -rsp_after_call_off * wordSize);

    // save register parameters
#ifndef _WIN64
    __ movptr(parameters,   c_rarg5); // parameters
    __ movptr(entry_point,  c_rarg4); // entry_point
#endif

    __ movptr(method,       c_rarg3); // method
    __ movl(result_type,    c_rarg2); // result type
    __ movptr(result,       c_rarg1); // result
    __ movptr(call_wrapper, c_rarg0); // call wrapper

    // save regs belonging to calling function
    __ movptr(rbx_save, rbx);
    __ movptr(r12_save, r12);
    __ movptr(r13_save, r13);
    __ movptr(r14_save, r14);
    __ movptr(r15_save, r15);
    if (UseAVX > 2) {
      __ movl(rbx, 0xffff);
      __ kmovwl(k1, rbx);
    }
#ifdef _WIN64
    int last_reg = 15;
    if (UseAVX > 2) {
      last_reg = 31;
    }
    if (VM_Version::supports_evex()) {
      for (int i = xmm_save_first; i <= last_reg; i++) {
        __ vextractf32x4(xmm_save(i), as_XMMRegister(i), 0);
      }
    } else {
      for (int i = xmm_save_first; i <= last_reg; i++) {
        __ movdqu(xmm_save(i), as_XMMRegister(i));
      }
    }

    const Address rdi_save(rbp, rdi_off * wordSize);
    const Address rsi_save(rbp, rsi_off * wordSize);

    __ movptr(rsi_save, rsi);
    __ movptr(rdi_save, rdi);
#else
    const Address mxcsr_save(rbp, mxcsr_off * wordSize);
    {
      Label skip_ldmx;
      __ stmxcsr(mxcsr_save);
      __ movl(rax, mxcsr_save);
      __ andl(rax, MXCSR_MASK);    // Only check control and mask bits
      ExternalAddress mxcsr_std(StubRoutines::addr_mxcsr_std());
      __ cmp32(rax, mxcsr_std);
      __ jcc(Assembler::equal, skip_ldmx);
      __ ldmxcsr(mxcsr_std);
      __ bind(skip_ldmx);
    }
#endif

    // Load up thread register
    __ movptr(r15_thread, thread);
    __ reinit_heapbase();

#ifdef ASSERT
    // make sure we have no pending exceptions
    {
      Label L;
      __ cmpptr(Address(r15_thread, Thread::pending_exception_offset()), (int32_t)NULL_WORD);
      __ jcc(Assembler::equal, L);
      __ stop("StubRoutines::call_stub: entered with pending exception");
      __ bind(L);
    }
#endif

    // pass parameters if any
    BLOCK_COMMENT("pass parameters if any");
    Label parameters_done;
    __ movl(c_rarg3, parameter_size);
    __ testl(c_rarg3, c_rarg3);
    __ jcc(Assembler::zero, parameters_done);

    Label loop;
    __ movptr(c_rarg2, parameters);       // parameter pointer
    __ movl(c_rarg1, c_rarg3);            // parameter counter is in c_rarg1
    __ BIND(loop);
    __ movptr(rax, Address(c_rarg2, 0));  // get parameter
    __ addptr(c_rarg2, wordSize);         // advance to next parameter
    __ decrementl(c_rarg1);               // decrement counter
    __ push(rax);                         // pass parameter
    __ jcc(Assembler::notZero, loop);

    // call Java function
    __ BIND(parameters_done);
    __ movptr(rbx, method);               // get Method*
    __ movptr(c_rarg1, entry_point);      // get entry_point
    __ mov(r13, rsp);                     // set sender sp
    BLOCK_COMMENT("call Java function");
    __ call(c_rarg1);

    BLOCK_COMMENT("call_stub_return_address:");
    return_address = __ pc();

    // store result depending on type (everything that is not
    // T_OBJECT, T_LONG, T_FLOAT or T_DOUBLE is treated as T_INT)
    __ movptr(c_rarg0, result);
    Label is_long, is_float, is_double, exit;
    __ movl(c_rarg1, result_type);
    __ cmpl(c_rarg1, T_OBJECT);
    __ jcc(Assembler::equal, is_long);
    __ cmpl(c_rarg1, T_LONG);
    __ jcc(Assembler::equal, is_long);
    __ cmpl(c_rarg1, T_FLOAT);
    __ jcc(Assembler::equal, is_float);
    __ cmpl(c_rarg1, T_DOUBLE);
    __ jcc(Assembler::equal, is_double);

    // handle T_INT case
    __ movl(Address(c_rarg0, 0), rax);

    __ BIND(exit);

    // pop parameters
    __ lea(rsp, rsp_after_call);

#ifdef ASSERT
    // verify that threads correspond
    {
      Label L1, L2, L3;
      __ cmpptr(r15_thread, thread);
      __ jcc(Assembler::equal, L1);
      __ stop("StubRoutines::call_stub: r15_thread is corrupted");
      __ bind(L1);
      __ get_thread(rbx);
      __ cmpptr(r15_thread, thread);
      __ jcc(Assembler::equal, L2);
      __ stop("StubRoutines::call_stub: r15_thread is modified by call");
      __ bind(L2);
      __ cmpptr(r15_thread, rbx);
      __ jcc(Assembler::equal, L3);
      __ stop("StubRoutines::call_stub: threads must correspond");
      __ bind(L3);
    }
#endif

    // restore regs belonging to calling function
#ifdef _WIN64
    // emit the restores for xmm regs
    if (VM_Version::supports_evex()) {
      for (int i = xmm_save_first; i <= last_reg; i++) {
        __ vinsertf32x4(as_XMMRegister(i), as_XMMRegister(i), xmm_save(i), 0);
      }
    } else {
      for (int i = xmm_save_first; i <= last_reg; i++) {
        __ movdqu(as_XMMRegister(i), xmm_save(i));
      }
    }
#endif
    __ movptr(r15, r15_save);
    __ movptr(r14, r14_save);
    __ movptr(r13, r13_save);
    __ movptr(r12, r12_save);
    __ movptr(rbx, rbx_save);

#ifdef _WIN64
    __ movptr(rdi, rdi_save);
    __ movptr(rsi, rsi_save);
#else
    __ ldmxcsr(mxcsr_save);
#endif

    // restore rsp
    __ addptr(rsp, -rsp_after_call_off * wordSize);

    // return
    __ pop(rbp);
    __ ret(0);

    // handle return types different from T_INT
    __ BIND(is_long);
    __ movq(Address(c_rarg0, 0), rax);
    __ jmp(exit);

    __ BIND(is_float);
    __ movflt(Address(c_rarg0, 0), xmm0);
    __ jmp(exit);

    __ BIND(is_double);
    __ movdbl(Address(c_rarg0, 0), xmm0);
    __ jmp(exit);

    return start;
  }

  // Return point for a Java call if there's an exception thrown in
  // Java code.  The exception is caught and transformed into a
  // pending exception stored in JavaThread that can be tested from
  // within the VM.
  //
  // Note: Usually the parameters are removed by the callee. In case
  // of an exception crossing an activation frame boundary, that is
  // not the case if the callee is compiled code => need to setup the
  // rsp.
  //
  // rax: exception oop

  address generate_catch_exception() {
    StubCodeMark mark(this, "StubRoutines", "catch_exception");
    address start = __ pc();

    // same as in generate_call_stub():
    const Address rsp_after_call(rbp, rsp_after_call_off * wordSize);
    const Address thread        (rbp, thread_off         * wordSize);

#ifdef ASSERT
    // verify that threads correspond
    {
      Label L1, L2, L3;
      __ cmpptr(r15_thread, thread);
      __ jcc(Assembler::equal, L1);
      __ stop("StubRoutines::catch_exception: r15_thread is corrupted");
      __ bind(L1);
      __ get_thread(rbx);
      __ cmpptr(r15_thread, thread);
      __ jcc(Assembler::equal, L2);
      __ stop("StubRoutines::catch_exception: r15_thread is modified by call");
      __ bind(L2);
      __ cmpptr(r15_thread, rbx);
      __ jcc(Assembler::equal, L3);
      __ stop("StubRoutines::catch_exception: threads must correspond");
      __ bind(L3);
    }
#endif

    // set pending exception
    __ verify_oop(rax);

    __ movptr(Address(r15_thread, Thread::pending_exception_offset()), rax);
    __ lea(rscratch1, ExternalAddress((address)__FILE__));
    __ movptr(Address(r15_thread, Thread::exception_file_offset()), rscratch1);
    __ movl(Address(r15_thread, Thread::exception_line_offset()), (int) __LINE__);

    // complete return to VM
    assert(StubRoutines::_call_stub_return_address != NULL,
           "_call_stub_return_address must have been generated before");
    __ jump(RuntimeAddress(StubRoutines::_call_stub_return_address));

    return start;
  }

  // Continuation point for runtime calls returning with a pending
  // exception.  The pending exception check happened in the runtime
  // or native call stub.  The pending exception in Thread is
  // converted into a Java-level exception.
  //
  // Contract with Java-level exception handlers:
  // rax: exception
  // rdx: throwing pc
  //
  // NOTE: At entry of this stub, exception-pc must be on stack !!

  address generate_forward_exception() {
    StubCodeMark mark(this, "StubRoutines", "forward exception");
    address start = __ pc();

    // Upon entry, the sp points to the return address returning into
    // Java (interpreted or compiled) code; i.e., the return address
    // becomes the throwing pc.
    //
    // Arguments pushed before the runtime call are still on the stack
    // but the exception handler will reset the stack pointer ->
    // ignore them.  A potential result in registers can be ignored as
    // well.
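    //
    // Roughly, what follows amounts to this sketch (illustrative only, not the
    // actual implementation):
    //
    //   address return_pc = *(address*)rsp;        // becomes the throwing pc
    //   address handler =
    //       SharedRuntime::exception_handler_for_return_address(thread, return_pc);
    //   rax = thread->pending_exception();          // the exception oop
    //   thread->clear_pending_exception();
    //   rdx = return_pc;
    //   jump(handler);                              // return address already popped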

#ifdef ASSERT
    // make sure this code is only executed if there is a pending exception
    {
      Label L;
      __ cmpptr(Address(r15_thread, Thread::pending_exception_offset()), (int32_t) NULL);
      __ jcc(Assembler::notEqual, L);
      __ stop("StubRoutines::forward exception: no pending exception (1)");
      __ bind(L);
    }
#endif

    // compute exception handler into rbx
    __ movptr(c_rarg0, Address(rsp, 0));
    BLOCK_COMMENT("call exception_handler_for_return_address");
    __ call_VM_leaf(CAST_FROM_FN_PTR(address,
                         SharedRuntime::exception_handler_for_return_address),
                    r15_thread, c_rarg0);
    __ mov(rbx, rax);

    // setup rax & rdx, remove return address & clear pending exception
    __ pop(rdx);
    __ movptr(rax, Address(r15_thread, Thread::pending_exception_offset()));
    __ movptr(Address(r15_thread, Thread::pending_exception_offset()), (int32_t)NULL_WORD);

#ifdef ASSERT
    // make sure exception is set
    {
      Label L;
      __ testptr(rax, rax);
      __ jcc(Assembler::notEqual, L);
      __ stop("StubRoutines::forward exception: no pending exception (2)");
      __ bind(L);
    }
#endif

    // continue at exception handler (return address removed)
    // rax: exception
    // rbx: exception handler
    // rdx: throwing pc
    __ verify_oop(rax);
    __ jmp(rbx);

    return start;
  }

  // Support for jint atomic::xchg(jint exchange_value, volatile jint* dest)
  //
  // Arguments :
  //    c_rarg0: exchange_value
  //    c_rarg1: dest
  //
  // Result:
  //    *dest <- ex, return (orig *dest)
  address generate_atomic_xchg() {
    StubCodeMark mark(this, "StubRoutines", "atomic_xchg");
    address start = __ pc();

    __ movl(rax, c_rarg0);              // Copy to eax; we need a return value anyhow
    __ xchgl(rax, Address(c_rarg1, 0)); // automatic LOCK
    __ ret(0);

    return start;
  }

  // Support for intptr_t atomic::xchg_ptr(intptr_t exchange_value, volatile intptr_t* dest)
  //
  // Arguments :
  //    c_rarg0: exchange_value
  //    c_rarg1: dest
  //
  // Result:
  //    *dest <- ex, return (orig *dest)
  address generate_atomic_xchg_ptr() {
    StubCodeMark mark(this, "StubRoutines", "atomic_xchg_ptr");
    address start = __ pc();

    __ movptr(rax, c_rarg0);              // Copy to eax; we need a return value anyhow
    __ xchgptr(rax, Address(c_rarg1, 0)); // automatic LOCK
    __ ret(0);

    return start;
  }

  // Support for jint atomic::atomic_cmpxchg(jint exchange_value, volatile jint* dest,
  //                                         jint compare_value)
  //
  // Arguments :
  //    c_rarg0: exchange_value
  //    c_rarg1: dest
  //    c_rarg2: compare_value
  //
  // Result:
  //    if (compare_value == *dest) {
  //      *dest = exchange_value;
  //      return compare_value;
  //    } else {
  //      return *dest;
  //    }
  address generate_atomic_cmpxchg() {
    StubCodeMark mark(this, "StubRoutines", "atomic_cmpxchg");
    address start = __ pc();

    __ movl(rax, c_rarg2);
    if (os::is_MP()) __ lock();
    __ cmpxchgl(c_rarg0, Address(c_rarg1, 0));
    __ ret(0);

    return start;
  }

  // Support for jbyte atomic::atomic_cmpxchg(jbyte exchange_value, volatile jbyte* dest,
  //                                          jbyte compare_value)
  //
  // Arguments :
  //    c_rarg0: exchange_value
  //    c_rarg1: dest
  //    c_rarg2: compare_value
  //
  // Result:
  //    if (compare_value == *dest) {
  //      *dest = exchange_value;
  //      return compare_value;
  //    } else {
  //      return *dest;
  //    }
  address generate_atomic_cmpxchg_byte() {
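    // A minimal C-level sketch of the contract shared by the cmpxchg stubs
    // (illustrative only; the code below is the jbyte variant):
    //
    //   jbyte atomic_cmpxchg_byte(jbyte exchange_value, volatile jbyte* dest,
    //                             jbyte compare_value) {
    //     jbyte old = *dest;                   // done atomically by LOCK CMPXCHGB
    //     if (old == compare_value) *dest = exchange_value;
    //     return old;                          // == compare_value iff the store happened
    //   }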
    StubCodeMark mark(this, "StubRoutines", "atomic_cmpxchg_byte");
    address start = __ pc();

    __ movsbq(rax, c_rarg2);
    if (os::is_MP()) __ lock();
    __ cmpxchgb(c_rarg0, Address(c_rarg1, 0));
    __ ret(0);

    return start;
  }

  // Support for jlong atomic::atomic_cmpxchg(jlong exchange_value,
  //                                          volatile jlong* dest,
  //                                          jlong compare_value)
  // Arguments :
  //    c_rarg0: exchange_value
  //    c_rarg1: dest
  //    c_rarg2: compare_value
  //
  // Result:
  //    if (compare_value == *dest) {
  //      *dest = exchange_value;
  //      return compare_value;
  //    } else {
  //      return *dest;
  //    }
  address generate_atomic_cmpxchg_long() {
    StubCodeMark mark(this, "StubRoutines", "atomic_cmpxchg_long");
    address start = __ pc();

    __ movq(rax, c_rarg2);
    if (os::is_MP()) __ lock();
    __ cmpxchgq(c_rarg0, Address(c_rarg1, 0));
    __ ret(0);

    return start;
  }

  // Support for jint atomic::add(jint add_value, volatile jint* dest)
  //
  // Arguments :
  //    c_rarg0: add_value
  //    c_rarg1: dest
  //
  // Result:
  //    *dest += add_value
  //    return *dest;
  address generate_atomic_add() {
    StubCodeMark mark(this, "StubRoutines", "atomic_add");
    address start = __ pc();

    __ movl(rax, c_rarg0);
    if (os::is_MP()) __ lock();
    __ xaddl(Address(c_rarg1, 0), c_rarg0);
    __ addl(rax, c_rarg0);
    __ ret(0);

    return start;
  }

  // Support for intptr_t atomic::add_ptr(intptr_t add_value, volatile intptr_t* dest)
  //
  // Arguments :
  //    c_rarg0: add_value
  //    c_rarg1: dest
  //
  // Result:
  //    *dest += add_value
  //    return *dest;
  address generate_atomic_add_ptr() {
    StubCodeMark mark(this, "StubRoutines", "atomic_add_ptr");
    address start = __ pc();

    __ movptr(rax, c_rarg0); // Copy to eax; we need a return value anyhow
    if (os::is_MP()) __ lock();
    __ xaddptr(Address(c_rarg1, 0), c_rarg0);
    __ addptr(rax, c_rarg0);
    __ ret(0);

    return start;
  }

  // Support for intptr_t OrderAccess::fence()
  //
  // Arguments :
  //
  // Result:
  address generate_orderaccess_fence() {
    StubCodeMark mark(this, "StubRoutines", "orderaccess_fence");
    address start = __ pc();
    __ membar(Assembler::StoreLoad);
    __ ret(0);

    return start;
  }

  // Support for intptr_t get_previous_fp()
  //
  // This routine is used to find the previous frame pointer for the
  // caller (current_frame_guess). This is used as part of debugging
  // when ps() is seemingly lost trying to find frames.
  // This code assumes that the caller (current_frame_guess) has a frame.
  address generate_get_previous_fp() {
    StubCodeMark mark(this, "StubRoutines", "get_previous_fp");
    const Address old_fp(rbp, 0);
    const Address older_fp(rax, 0);
    address start = __ pc();

    __ enter();
    __ movptr(rax, old_fp);   // caller's fp
    __ movptr(rax, older_fp); // the frame for ps()
    __ pop(rbp);
    __ ret(0);

    return start;
  }

  // Support for intptr_t get_previous_sp()
  //
  // This routine is used to find the previous stack pointer for the
  // caller.
  address generate_get_previous_sp() {
    StubCodeMark mark(this, "StubRoutines", "get_previous_sp");
    address start = __ pc();

    __ movptr(rax, rsp);
    __ addptr(rax, 8); // return address is at the top of the stack.
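    // (The +8 skips the return address pushed by the call into this stub, so
    //  rax ends up holding the caller's rsp as it was at the call site.)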
    __ ret(0);

    return start;
  }

  //----------------------------------------------------------------------------------------------------
  // Support for void verify_mxcsr()
  //
  // This routine is used with -Xcheck:jni to verify that native
  // JNI code does not return to Java code without restoring the
  // MXCSR register to our expected state.

  address generate_verify_mxcsr() {
    StubCodeMark mark(this, "StubRoutines", "verify_mxcsr");
    address start = __ pc();

    const Address mxcsr_save(rsp, 0);

    if (CheckJNICalls) {
      Label ok_ret;
      ExternalAddress mxcsr_std(StubRoutines::addr_mxcsr_std());
      __ push(rax);
      __ subptr(rsp, wordSize);    // allocate a temp location
      __ stmxcsr(mxcsr_save);
      __ movl(rax, mxcsr_save);
      __ andl(rax, MXCSR_MASK);    // Only check control and mask bits
      __ cmp32(rax, mxcsr_std);
      __ jcc(Assembler::equal, ok_ret);

      __ warn("MXCSR changed by native JNI code, use -XX:+RestoreMXCSROnJNICall");

      __ ldmxcsr(mxcsr_std);

      __ bind(ok_ret);
      __ addptr(rsp, wordSize);
      __ pop(rax);
    }

    __ ret(0);

    return start;
  }

  address generate_shenandoah_wb() {
    StubCodeMark mark(this, "StubRoutines", "shenandoah_wb");
    address start = __ pc();

    Label not_done, done, slow_case, not_an_instance, is_array;

    // We use RDI, which also serves as argument register for slow call.
    // RAX always holds the src object ptr, except after the slow call and
    // the cmpxchg, then it holds the result.
    // RBX and RCX are used as temporary registers.
    __ push(rdi);
    __ push(rbx);

    // Check for object being in the collection set.
    // TODO: Can we use only 1 register here?
    // The source object arrives here in rax.
    // live: rax
    // live: rdi
    __ movptr(rdi, rax);
    __ shrptr(rdi, ShenandoahHeapRegion::RegionSizeShift);
    // live: rbx
    __ movptr(rbx, (intptr_t) ShenandoahHeap::in_cset_fast_test_addr());
    __ movbool(rbx, Address(rbx, rdi, Address::times_1));
    // unlive: rdi
    __ testbool(rbx);
    // unlive: rbx
    __ jccb(Assembler::notZero, not_done);

    __ pop(rbx);
    __ pop(rdi);
    __ ret(0);

    __ bind(not_done);

    __ push(rcx);
    Register new_obj = rbx;
    __ movptr(new_obj, Address(r15_thread, JavaThread::gclab_top_offset()));
    __ testptr(new_obj, new_obj);
    __ jcc(Assembler::zero, slow_case); // No TLAB.

    // Figure out object size.
    __ load_klass(rcx, rax);
    __ movl(rcx, Address(rcx, Klass::layout_helper_offset()));
    __ testl(rcx, Klass::_lh_instance_slow_path_bit);
    // test to see if it has a finalizer or is malformed in some way
    __ jcc(Assembler::notZero, slow_case);
    __ cmpl(rcx, Klass::_lh_neutral_value); // Make sure it's an instance (LH > 0)
    __ jcc(Assembler::lessEqual, not_an_instance); // Thrashes rcx, returns size in rcx. Uses rax.
    __ bind(is_array);

    // Size in rcx, new_obj in rbx, src obj in rax

    Register new_obj_end = rdi;
    __ lea(new_obj_end, Address(new_obj, rcx, Address::times_1));
    __ cmpptr(new_obj_end, Address(r15_thread, JavaThread::gclab_end_offset()));
    __ jcc(Assembler::above, slow_case);

    // Size in rcx, new_obj in rbx, src obj in rax

    // Copy object.
    Label loop;
    __ push(rdi); // Save new_obj_end
    __ push(rsi);
    __ shrl(rcx, 3);   // Make it num-64-bit-words
    __ mov(rdi, rbx);  // Mov dst into rdi
    __ mov(rsi, rax);  // Src into rsi.
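    // At this point rcx holds the copy length in 8-byte words, rsi the source
    // and rdi the destination; rep_mov() below performs the bulk copy.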
    __ rep_mov();
    __ pop(rsi); // Restore rsi.
    __ pop(rdi); // Restore new_obj_end

    // Store proper Brooks pointer after copy
    Universe::heap()->compile_prepare_oop(_masm, new_obj);

    // Src obj still in rax.
    if (os::is_MP()) {
      __ lock();
    }
    __ cmpxchgptr(new_obj, Address(rax, BrooksPointer::byte_offset(), Address::times_1));
    __ jccb(Assembler::notEqual, done); // Failed. Updated object in rax.
    // Otherwise, we succeeded.
    __ mov(rax, new_obj);
    __ movptr(Address(r15_thread, JavaThread::gclab_top_offset()), new_obj_end);
    __ bind(done);

    __ pop(rcx);
    __ pop(rbx);
    __ pop(rdi);

    __ ret(0);

    __ bind(not_an_instance);
    __ push(rdx);
    // Layout_helper bits are in rcx
    __ movl(rdx, rcx); // Move layout_helper bits to rdx
    __ movl(rdi, Address(rax, arrayOopDesc::length_offset_in_bytes()));
    __ shrl(rcx, Klass::_lh_log2_element_size_shift);
    __ andl(rcx, Klass::_lh_log2_element_size_mask);
    __ shll(rdi); // Shifts left by number of bits in rcx (CL)
    __ shrl(rdx, Klass::_lh_header_size_shift);
    __ andl(rdx, Klass::_lh_header_size_mask);
    __ addl(rdi, rdx);
    // Round up.
    __ addl(rdi, HeapWordSize-1);
    __ andl(rdi, -HeapWordSize);
    __ pop(rdx);
    // Move size (rdi) into rcx
    __ movl(rcx, rdi);
    __ jmp(is_array);

    __ bind(slow_case);
    __ push(rdx);
    __ push(rdi);
    __ push(rsi);
    __ push(r8);
    __ push(r9);
    __ push(r10);
    __ push(r11);
    __ push(r12);
    __ push(r13);
    __ push(r14);
    __ push(r15);
    __ save_vector_registers();
    __ movptr(rdi, rax);
    __ call_VM_leaf(CAST_FROM_FN_PTR(address, ShenandoahBarrierSet::write_barrier_c2), rdi);
    __ restore_vector_registers();
    __ pop(r15);
    __ pop(r14);
    __ pop(r13);
    __ pop(r12);
    __ pop(r11);
    __ pop(r10);
    __ pop(r9);
    __ pop(r8);
    __ pop(rsi);
    __ pop(rdi);
    __ pop(rdx);

    __ pop(rcx);
    __ pop(rbx);
    __ pop(rdi);

    __ ret(0);

    return start;
  }

  address generate_f2i_fixup() {
    StubCodeMark mark(this, "StubRoutines", "f2i_fixup");
    Address inout(rsp, 5 * wordSize); // return address + 4 saves

    address start = __ pc();

    Label L;

    __ push(rax);
    __ push(c_rarg3);
    __ push(c_rarg2);
    __ push(c_rarg1);

    __ movl(rax, 0x7f800000);
    __ xorl(c_rarg3, c_rarg3);
    __ movl(c_rarg2, inout);
    __ movl(c_rarg1, c_rarg2);
    __ andl(c_rarg1, 0x7fffffff);
    __ cmpl(rax, c_rarg1); // NaN? -> 0
    __ jcc(Assembler::negative, L);
    __ testl(c_rarg2, c_rarg2); // signed ? min_jint : max_jint
    __ movl(c_rarg3, 0x80000000);
    __ movl(rax, 0x7fffffff);
    __ cmovl(Assembler::positive, c_rarg3, rax);

    __ bind(L);
    __ movptr(inout, c_rarg3);

    __ pop(c_rarg1);
    __ pop(c_rarg2);
    __ pop(c_rarg3);
    __ pop(rax);

    __ ret(0);

    return start;
  }

  address generate_f2l_fixup() {
    StubCodeMark mark(this, "StubRoutines", "f2l_fixup");
    Address inout(rsp, 5 * wordSize); // return address + 4 saves
    address start = __ pc();

    Label L;

    __ push(rax);
    __ push(c_rarg3);
    __ push(c_rarg2);
    __ push(c_rarg1);

    __ movl(rax, 0x7f800000);
    __ xorl(c_rarg3, c_rarg3);
    __ movl(c_rarg2, inout);
    __ movl(c_rarg1, c_rarg2);
    __ andl(c_rarg1, 0x7fffffff);
    __ cmpl(rax, c_rarg1); // NaN? -> 0
    __ jcc(Assembler::negative, L);
    __ testl(c_rarg2, c_rarg2); // signed ? min_jlong : max_jlong
    __ mov64(c_rarg3, 0x8000000000000000);
    __ mov64(rax, 0x7fffffffffffffff);
    __ cmov(Assembler::positive, c_rarg3, rax);

    __ bind(L);
    __ movptr(inout, c_rarg3);

    __ pop(c_rarg1);
    __ pop(c_rarg2);
    __ pop(c_rarg3);
    __ pop(rax);

    __ ret(0);

    return start;
  }

  address generate_d2i_fixup() {
    StubCodeMark mark(this, "StubRoutines", "d2i_fixup");
    Address inout(rsp, 6 * wordSize); // return address + 5 saves

    address start = __ pc();

    Label L;

    __ push(rax);
    __ push(c_rarg3);
    __ push(c_rarg2);
    __ push(c_rarg1);
    __ push(c_rarg0);

    __ movl(rax, 0x7ff00000);
    __ movq(c_rarg2, inout);
    __ movl(c_rarg3, c_rarg2);
    __ mov(c_rarg1, c_rarg2);
    __ mov(c_rarg0, c_rarg2);
    __ negl(c_rarg3);
    __ shrptr(c_rarg1, 0x20);
    __ orl(c_rarg3, c_rarg2);
    __ andl(c_rarg1, 0x7fffffff);
    __ xorl(c_rarg2, c_rarg2);
    __ shrl(c_rarg3, 0x1f);
    __ orl(c_rarg1, c_rarg3);
    __ cmpl(rax, c_rarg1);
    __ jcc(Assembler::negative, L); // NaN -> 0
    __ testptr(c_rarg0, c_rarg0);   // signed ? min_jint : max_jint
    __ movl(c_rarg2, 0x80000000);
    __ movl(rax, 0x7fffffff);
    __ cmov(Assembler::positive, c_rarg2, rax);

    __ bind(L);
    __ movptr(inout, c_rarg2);

    __ pop(c_rarg0);
    __ pop(c_rarg1);
    __ pop(c_rarg2);
    __ pop(c_rarg3);
    __ pop(rax);

    __ ret(0);

    return start;
  }

  address generate_d2l_fixup() {
    StubCodeMark mark(this, "StubRoutines", "d2l_fixup");
    Address inout(rsp, 6 * wordSize); // return address + 5 saves

    address start = __ pc();

    Label L;

    __ push(rax);
    __ push(c_rarg3);
    __ push(c_rarg2);
    __ push(c_rarg1);
    __ push(c_rarg0);

    __ movl(rax, 0x7ff00000);
    __ movq(c_rarg2, inout);
    __ movl(c_rarg3, c_rarg2);
    __ mov(c_rarg1, c_rarg2);
    __ mov(c_rarg0, c_rarg2);
    __ negl(c_rarg3);
    __ shrptr(c_rarg1, 0x20);
    __ orl(c_rarg3, c_rarg2);
    __ andl(c_rarg1, 0x7fffffff);
    __ xorl(c_rarg2, c_rarg2);
    __ shrl(c_rarg3, 0x1f);
    __ orl(c_rarg1, c_rarg3);
    __ cmpl(rax, c_rarg1);
    __ jcc(Assembler::negative, L); // NaN -> 0
    __ testq(c_rarg0, c_rarg0);     // signed ? min_jlong : max_jlong
    __ mov64(c_rarg2, 0x8000000000000000);
    __ mov64(rax, 0x7fffffffffffffff);
    __ cmovq(Assembler::positive, c_rarg2, rax);

    __ bind(L);
    __ movq(inout, c_rarg2);

    __ pop(c_rarg0);
    __ pop(c_rarg1);
    __ pop(c_rarg2);
    __ pop(c_rarg3);
    __ pop(rax);

    __ ret(0);

    return start;
  }

  address generate_fp_mask(const char *stub_name, int64_t mask) {
    __ align(CodeEntryAlignment);
    StubCodeMark mark(this, "StubRoutines", stub_name);
    address start = __ pc();

    __ emit_data64( mask, relocInfo::none );
    __ emit_data64( mask, relocInfo::none );

    return start;
  }

  // Non-destructive plausibility checks for oops
  //
  // Arguments:
  //    all args on stack!
  //
  // Stack after saving c_rarg3:
  //    [tos + 0]: saved c_rarg3
  //    [tos + 1]: saved c_rarg2
  //    [tos + 2]: saved r12 (several TemplateTable methods use it)
  //    [tos + 3]: saved flags
  //    [tos + 4]: return address
  //  * [tos + 5]: error message (char*)
  //  * [tos + 6]: object to verify (oop)
  //  * [tos + 7]: saved rax - saved by caller and bashed
  //  * [tos + 8]: saved r10 (rscratch1) - saved by caller
  //  * = popped on exit
  address generate_verify_oop() {
    StubCodeMark mark(this, "StubRoutines", "verify_oop");
    address start = __ pc();

    Label exit, error;

    __ pushf();
    __ incrementl(ExternalAddress((address) StubRoutines::verify_oop_count_addr()));

    __ push(r12);

    // save c_rarg2 and c_rarg3
    __ push(c_rarg2);
    __ push(c_rarg3);

    enum {
      // After previous pushes.
      oop_to_verify = 6 * wordSize,
      saved_rax     = 7 * wordSize,
      saved_r10     = 8 * wordSize,

      // Before the call to MacroAssembler::debug(), see below.
      return_addr   = 16 * wordSize,
      error_msg     = 17 * wordSize
    };

    // get object
    __ movptr(rax, Address(rsp, oop_to_verify));

    // make sure object is 'reasonable'
    __ testptr(rax, rax);
    __ jcc(Assembler::zero, exit); // if obj is NULL it is OK
    // Check if the oop is in the right area of memory
    __ movptr(c_rarg2, rax);
    __ movptr(c_rarg3, (intptr_t) Universe::verify_oop_mask());
    __ andptr(c_rarg2, c_rarg3);
    __ movptr(c_rarg3, (intptr_t) Universe::verify_oop_bits());
    __ cmpptr(c_rarg2, c_rarg3);
    __ jcc(Assembler::notZero, error);

    // set r12 to heapbase for load_klass()
    __ reinit_heapbase();

    // make sure klass is 'reasonable', which is not zero.
    __ load_klass(rax, rax);  // get klass
    __ testptr(rax, rax);
    __ jcc(Assembler::zero, error); // if klass is NULL it is broken

    // return if everything seems ok
    __ bind(exit);
    __ movptr(rax, Address(rsp, saved_rax));       // get saved rax back
    __ movptr(rscratch1, Address(rsp, saved_r10)); // get saved r10 back
    __ pop(c_rarg3);                               // restore c_rarg3
    __ pop(c_rarg2);                               // restore c_rarg2
    __ pop(r12);                                   // restore r12
    __ popf();                                     // restore flags
    __ ret(4 * wordSize);                          // pop caller saved stuff

    // handle errors
    __ bind(error);
    __ movptr(rax, Address(rsp, saved_rax));       // get saved rax back
    __ movptr(rscratch1, Address(rsp, saved_r10)); // get saved r10 back
    __ pop(c_rarg3);                               // get saved c_rarg3 back
    __ pop(c_rarg2);                               // get saved c_rarg2 back
    __ pop(r12);                                   // get saved r12 back
    __ popf();                                     // get saved flags off stack --
                                                   // will be ignored

    __ pusha();                                    // push registers
                                                   // (rip is already pushed)
    // debug(char* msg, int64_t pc, int64_t regs[])
    // We've popped the registers we'd saved (c_rarg3, c_rarg2 and flags), and
    // pushed all the registers, so now the stack looks like:
    //     [tos +  0] 16 saved registers
    //     [tos + 16] return address
    //   * [tos + 17] error message (char*)
    //   * [tos + 18] object to verify (oop)
    //   * [tos + 19] saved rax - saved by caller and bashed
    //   * [tos + 20] saved r10 (rscratch1) - saved by caller
    //   * = popped on exit

    __ movptr(c_rarg0, Address(rsp, error_msg));    // pass address of error message
    __ movptr(c_rarg1, Address(rsp, return_addr));  // pass return address
    __ movq(c_rarg2, rsp);                          // pass address of regs on stack
    __ mov(r12, rsp);                               // remember rsp
    __ subptr(rsp, frame::arg_reg_save_area_bytes); // windows
    __ andptr(rsp, -16);                            // align stack as required by ABI
    BLOCK_COMMENT("call MacroAssembler::debug");
    __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, MacroAssembler::debug64)));
    __ mov(rsp, r12);                               // restore rsp
    __ popa();                                      // pop registers (includes r12)
    __ ret(4 * wordSize);                           // pop caller saved stuff

    return start;
  }

  //
  // Verify that a register contains a clean 32-bit positive value
  // (high 32 bits are 0) so it can be used in 64-bit shifts.
  //
  //  Input:
  //    Rint  -  32-bit value
  //    Rtmp  -  scratch
  //
  void assert_clean_int(Register Rint, Register Rtmp) {
#ifdef ASSERT
    Label L;
    assert_different_registers(Rtmp, Rint);
    __ movslq(Rtmp, Rint);
    __ cmpq(Rtmp, Rint);
    __ jcc(Assembler::equal, L);
    __ stop("high 32-bits of int value are not 0");
    __ bind(L);
#endif
  }

  //  Generate overlap test for array copy stubs
  //
  //  Input:
  //     c_rarg0 - from
  //     c_rarg1 - to
  //     c_rarg2 - element count
  //
  //  Output:
  //     rax   - &from[element count - 1]
  //
  void array_overlap_test(address no_overlap_target, Address::ScaleFactor sf) {
    assert(no_overlap_target != NULL, "must be generated");
    array_overlap_test(no_overlap_target, NULL, sf);
  }
  void array_overlap_test(Label& L_no_overlap, Address::ScaleFactor sf) {
    array_overlap_test(NULL, &L_no_overlap, sf);
  }
  void array_overlap_test(address no_overlap_target, Label* NOLp, Address::ScaleFactor sf) {
    const Register from     = c_rarg0;
    const Register to       = c_rarg1;
    const Register count    = c_rarg2;
    const Register end_from = rax;

    __ cmpptr(to, from);
    __ lea(end_from, Address(from, count, sf, 0));
    if (NOLp == NULL) {
      ExternalAddress no_overlap(no_overlap_target);
      __ jump_cc(Assembler::belowEqual, no_overlap);
      __ cmpptr(to, end_from);
      __ jump_cc(Assembler::aboveEqual, no_overlap);
    } else {
      __ jcc(Assembler::belowEqual, (*NOLp));
      __ cmpptr(to, end_from);
      __ jcc(Assembler::aboveEqual, (*NOLp));
    }
  }

  // Shuffle first three arg regs on Windows into Linux/Solaris locations.
  //
  // Outputs:
  //    rdi - rcx
  //    rsi - rdx
  //    rdx - r8
  //    rcx - r9
  //
  // Registers r9 and r10 are used to save rdi and rsi on Windows, where those
  // are non-volatile.  r9 and r10 should not be used by the caller.
  //
  void setup_arg_regs(int nargs = 3) {
    const Register saved_rdi = r9;
    const Register saved_rsi = r10;
    assert(nargs == 3 || nargs == 4, "else fix");
#ifdef _WIN64
    assert(c_rarg0 == rcx && c_rarg1 == rdx && c_rarg2 == r8 && c_rarg3 == r9,
           "unexpected argument registers");
    if (nargs >= 4)
      __ mov(rax, r9);  // r9 is also saved_rdi
    __ movptr(saved_rdi, rdi);
    __ movptr(saved_rsi, rsi);
    __ mov(rdi, rcx); // c_rarg0
    __ mov(rsi, rdx); // c_rarg1
    __ mov(rdx, r8);  // c_rarg2
    if (nargs >= 4)
      __ mov(rcx, rax); // c_rarg3 (via rax)
#else
    assert(c_rarg0 == rdi && c_rarg1 == rsi && c_rarg2 == rdx && c_rarg3 == rcx,
           "unexpected argument registers");
#endif
  }

  void restore_arg_regs() {
    const Register saved_rdi = r9;
    const Register saved_rsi = r10;
#ifdef _WIN64
    __ movptr(rdi, saved_rdi);
    __ movptr(rsi, saved_rsi);
#endif
  }

  // Generate code for an array write pre barrier
  //
  //     addr    - starting address
  //     count   - element count
  //     tmp     - scratch register
  //
  //     Destroys no registers!
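  //
  //     For reference, on the G1/Shenandoah path the generated code boils down
  //     to a runtime call (rough sketch only; the real code shuffles the
  //     argument registers to avoid clobbering addr/count):
  //
  //       if (!dest_uninitialized) {
  //         BarrierSet::static_write_ref_array_pre(addr /* c_rarg0 */, count /* c_rarg1 */);
  //       }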
  //
  void gen_write_ref_array_pre_barrier(Register addr, Register count, bool dest_uninitialized) {
    BarrierSet* bs = Universe::heap()->barrier_set();
    switch (bs->kind()) {
      case BarrierSet::G1SATBCTLogging:
      case BarrierSet::ShenandoahBarrierSet:
        // With G1, don't generate the call if we statically know that the target is uninitialized
        if (!dest_uninitialized) {
          __ pusha();                      // push registers
          if (count == c_rarg0) {
            if (addr == c_rarg1) {
              // exactly backwards!!
              __ xchgptr(c_rarg1, c_rarg0);
            } else {
              __ movptr(c_rarg1, count);
              __ movptr(c_rarg0, addr);
            }
          } else {
            __ movptr(c_rarg0, addr);
            __ movptr(c_rarg1, count);
          }
          __ call_VM_leaf(CAST_FROM_FN_PTR(address, BarrierSet::static_write_ref_array_pre), 2);
          __ popa();
        }
        break;
      case BarrierSet::CardTableForRS:
      case BarrierSet::CardTableExtension:
      case BarrierSet::ModRef:
        break;
      default:
        ShouldNotReachHere();

    }
  }

  //
  // Generate code for an array write post barrier
  //
  //  Input:
  //     start    - register containing starting address of destination array
  //     count    - elements count
  //     scratch  - scratch register
  //
  //  The input registers are overwritten.
  //
  void gen_write_ref_array_post_barrier(Register start, Register count, Register scratch) {
    assert_different_registers(start, count, scratch);
    BarrierSet* bs = Universe::heap()->barrier_set();
    switch (bs->kind()) {
      case BarrierSet::G1SATBCTLogging:
      case BarrierSet::ShenandoahBarrierSet:
        {
          __ pusha();             // push registers (overkill)
          if (c_rarg0 == count) { // On win64 c_rarg0 == rcx
            assert_different_registers(c_rarg1, start);
            __ mov(c_rarg1, count);
            __ mov(c_rarg0, start);
          } else {
            assert_different_registers(c_rarg0, count);
            __ mov(c_rarg0, start);
            __ mov(c_rarg1, count);
          }
          __ call_VM_leaf(CAST_FROM_FN_PTR(address, BarrierSet::static_write_ref_array_post), 2);
          __ popa();
        }
        break;
      case BarrierSet::CardTableForRS:
      case BarrierSet::CardTableExtension:
        {
          CardTableModRefBS* ct = barrier_set_cast<CardTableModRefBS>(bs);
          assert(sizeof(*ct->byte_map_base) == sizeof(jbyte), "adjust this code");

          Label L_loop;
          const Register end = count;

          __ leaq(end, Address(start, count, TIMES_OOP, 0)); // end == start+count*oop_size
          __ subptr(end, BytesPerHeapOop);                   // end - 1 to make inclusive
          __ shrptr(start, CardTableModRefBS::card_shift);
          __ shrptr(end,   CardTableModRefBS::card_shift);
          __ subptr(end, start);                             // end --> cards count

          int64_t disp = (int64_t) ct->byte_map_base;
          __ mov64(scratch, disp);
          __ addptr(start, scratch);
          __ BIND(L_loop);
          __ movb(Address(start, count, Address::times_1), 0);
          __ decrement(count);
          __ jcc(Assembler::greaterEqual, L_loop);
        }
        break;
      default:
        ShouldNotReachHere();

    }
  }


  // Copy big chunks forward
  //
  // Inputs:
  //   end_from     - source arrays end address
  //   end_to       - destination array end address
  //   qword_count  - 64-bit element count, negative
  //   to           - scratch
  //   L_copy_bytes - entry label
  //   L_copy_8_bytes  - exit label
  //
  void copy_bytes_forward(Register end_from, Register end_to,
                          Register qword_count, Register to,
                          Label& L_copy_bytes, Label& L_copy_8_bytes) {
    DEBUG_ONLY(__ stop("enter at entry label, not here"));
    Label L_loop;
    __ align(OptoLoopAlignment);
    if (UseUnalignedLoadStores) {
      Label L_end;
      if (UseAVX > 2) {
        __ movl(to, 0xffff);
        __ kmovwl(k1, to);
      }
      // Copy 64-bytes per iteration
      __ BIND(L_loop);
      if (UseAVX > 2) {
        __ evmovdqul(xmm0, Address(end_from, qword_count, Address::times_8, -56), Assembler::AVX_512bit);
        __ evmovdqul(Address(end_to, qword_count, Address::times_8, -56), xmm0, Assembler::AVX_512bit);
      } else if (UseAVX == 2) {
        __ vmovdqu(xmm0, Address(end_from, qword_count, Address::times_8, -56));
        __ vmovdqu(Address(end_to, qword_count, Address::times_8, -56), xmm0);
        __ vmovdqu(xmm1, Address(end_from, qword_count, Address::times_8, -24));
        __ vmovdqu(Address(end_to, qword_count, Address::times_8, -24), xmm1);
      } else {
        __ movdqu(xmm0, Address(end_from, qword_count, Address::times_8, -56));
        __ movdqu(Address(end_to, qword_count, Address::times_8, -56), xmm0);
        __ movdqu(xmm1, Address(end_from, qword_count, Address::times_8, -40));
        __ movdqu(Address(end_to, qword_count, Address::times_8, -40), xmm1);
        __ movdqu(xmm2, Address(end_from, qword_count, Address::times_8, -24));
        __ movdqu(Address(end_to, qword_count, Address::times_8, -24), xmm2);
        __ movdqu(xmm3, Address(end_from, qword_count, Address::times_8, - 8));
        __ movdqu(Address(end_to, qword_count, Address::times_8, - 8), xmm3);
      }
      __ BIND(L_copy_bytes);
      __ addptr(qword_count, 8);
      __ jcc(Assembler::lessEqual, L_loop);
      __ subptr(qword_count, 4);  // sub(8) and add(4)
      __ jccb(Assembler::greater, L_end);
      // Copy trailing 32 bytes
      if (UseAVX >= 2) {
        __ vmovdqu(xmm0, Address(end_from, qword_count, Address::times_8, -24));
        __ vmovdqu(Address(end_to, qword_count, Address::times_8, -24), xmm0);
      } else {
        __ movdqu(xmm0, Address(end_from, qword_count, Address::times_8, -24));
        __ movdqu(Address(end_to, qword_count, Address::times_8, -24), xmm0);
        __ movdqu(xmm1, Address(end_from, qword_count, Address::times_8, - 8));
        __ movdqu(Address(end_to, qword_count, Address::times_8, - 8), xmm1);
      }
      __ addptr(qword_count, 4);
      __ BIND(L_end);
      if (UseAVX >= 2) {
        // clean upper bits of YMM registers
        __ vpxor(xmm0, xmm0);
        __ vpxor(xmm1, xmm1);
      }
    } else {
      // Copy 32-bytes per iteration
      __ BIND(L_loop);
      __ movq(to, Address(end_from, qword_count, Address::times_8, -24));
      __ movq(Address(end_to, qword_count, Address::times_8, -24), to);
      __ movq(to, Address(end_from, qword_count, Address::times_8, -16));
      __ movq(Address(end_to, qword_count, Address::times_8, -16), to);
      __ movq(to, Address(end_from, qword_count, Address::times_8, - 8));
      __ movq(Address(end_to, qword_count, Address::times_8, - 8), to);
      __ movq(to, Address(end_from, qword_count, Address::times_8, - 0));
      __ movq(Address(end_to, qword_count, Address::times_8, - 0), to);

      __ BIND(L_copy_bytes);
      __ addptr(qword_count, 4);
      __ jcc(Assembler::lessEqual, L_loop);
    }
    __ subptr(qword_count, 4);
    __ jcc(Assembler::less, L_copy_8_bytes); // Copy trailing qwords
  }

  // Copy big chunks backward
  //
  // Inputs:
  //   from         - source arrays address
  //   dest         - destination array address
  //   qword_count  - 64-bit element count
  //   to           - scratch
  //   L_copy_bytes - entry label
  //   L_copy_8_bytes  - exit label
  //
  void copy_bytes_backward(Register from, Register dest,
                           Register qword_count, Register to,
                           Label& L_copy_bytes, Label& L_copy_8_bytes) {
    DEBUG_ONLY(__ stop("enter at entry label, not here"));
    Label L_loop;
    __ align(OptoLoopAlignment);
    if (UseUnalignedLoadStores) {
      Label L_end;
      if (UseAVX > 2) {
        __ movl(to, 0xffff);
        __ kmovwl(k1, to);
      }
      // Copy 64-bytes per iteration
      __ BIND(L_loop);
      if (UseAVX > 2) {
        __ evmovdqul(xmm0, Address(from, qword_count, Address::times_8, 0), Assembler::AVX_512bit);
        __ evmovdqul(Address(dest, qword_count, Address::times_8, 0), xmm0, Assembler::AVX_512bit);
      } else if (UseAVX == 2) {
        __ vmovdqu(xmm0, Address(from, qword_count, Address::times_8, 32));
        __ vmovdqu(Address(dest, qword_count, Address::times_8, 32), xmm0);
        __ vmovdqu(xmm1, Address(from, qword_count, Address::times_8,  0));
        __ vmovdqu(Address(dest, qword_count, Address::times_8,  0), xmm1);
      } else {
        __ movdqu(xmm0, Address(from, qword_count, Address::times_8, 48));
        __ movdqu(Address(dest, qword_count, Address::times_8, 48), xmm0);
        __ movdqu(xmm1, Address(from, qword_count, Address::times_8, 32));
        __ movdqu(Address(dest, qword_count, Address::times_8, 32), xmm1);
        __ movdqu(xmm2, Address(from, qword_count, Address::times_8, 16));
        __ movdqu(Address(dest, qword_count, Address::times_8, 16), xmm2);
        __ movdqu(xmm3, Address(from, qword_count, Address::times_8,  0));
        __ movdqu(Address(dest, qword_count, Address::times_8,  0), xmm3);
      }
      __ BIND(L_copy_bytes);
      __ subptr(qword_count, 8);
      __ jcc(Assembler::greaterEqual, L_loop);

      __ addptr(qword_count, 4);  // add(8) and sub(4)
      __ jccb(Assembler::less, L_end);
      // Copy trailing 32 bytes
      if (UseAVX >= 2) {
        __ vmovdqu(xmm0, Address(from, qword_count, Address::times_8, 0));
        __ vmovdqu(Address(dest, qword_count, Address::times_8, 0), xmm0);
      } else {
        __ movdqu(xmm0, Address(from, qword_count, Address::times_8, 16));
        __ movdqu(Address(dest, qword_count, Address::times_8, 16), xmm0);
        __ movdqu(xmm1, Address(from, qword_count, Address::times_8,  0));
        __ movdqu(Address(dest, qword_count, Address::times_8,  0), xmm1);
      }
      __ subptr(qword_count, 4);
      __ BIND(L_end);
      if (UseAVX >= 2) {
        // clean upper bits of YMM registers
        __ vpxor(xmm0, xmm0);
        __ vpxor(xmm1, xmm1);
      }
    } else {
      // Copy 32-bytes per iteration
      __ BIND(L_loop);
      __ movq(to, Address(from, qword_count, Address::times_8, 24));
      __ movq(Address(dest, qword_count, Address::times_8, 24), to);
      __ movq(to, Address(from, qword_count, Address::times_8, 16));
      __ movq(Address(dest, qword_count, Address::times_8, 16), to);
      __ movq(to, Address(from, qword_count, Address::times_8,  8));
      __ movq(Address(dest, qword_count, Address::times_8,  8), to);
      __ movq(to, Address(from, qword_count, Address::times_8,  0));
      __ movq(Address(dest, qword_count, Address::times_8,  0), to);

      __ BIND(L_copy_bytes);
      __ subptr(qword_count, 4);
      __ jcc(Assembler::greaterEqual, L_loop);
    }
    __ addptr(qword_count, 4);
    __ jcc(Assembler::greater, L_copy_8_bytes); // Copy trailing qwords
  }


  // Arguments:
  //   aligned - true => Input and output aligned on a HeapWord == 8-byte boundary
  //             ignored
  //   name    - stub name string
  //
  // Inputs:
  //   c_rarg0   - source array address
  //   c_rarg1   - destination array address
  //   c_rarg2   - element count, treated as ssize_t, can be zero
  //
  // If 'from' and/or 'to' are aligned on 4-, 2-, or 1-byte boundaries,
  // we let the hardware handle it.  The one to eight bytes within words,
  // dwords or qwords that span cache line boundaries will still be loaded
  // and stored atomically.
  //
  // Side Effects:
  //   disjoint_byte_copy_entry is set to the no-overlap entry point
  //   used by generate_conjoint_byte_copy().
  //
  address generate_disjoint_byte_copy(bool aligned, address* entry, const char *name) {
    __ align(CodeEntryAlignment);
    StubCodeMark mark(this, "StubRoutines", name);
    address start = __ pc();

    Label L_copy_bytes, L_copy_8_bytes, L_copy_4_bytes, L_copy_2_bytes;
    Label L_copy_byte, L_exit;
    const Register from        = rdi;  // source array address
    const Register to          = rsi;  // destination array address
    const Register count       = rdx;  // elements count
    const Register byte_count  = rcx;
    const Register qword_count = count;
    const Register end_from    = from; // source array end address
    const Register end_to      = to;   // destination array end address
    // End pointers are inclusive, and if count is not zero they point
    // to the last unit copied:  end_to[0] := end_from[0]

    __ enter(); // required for proper stackwalking of RuntimeStub frame
    assert_clean_int(c_rarg2, rax);    // Make sure 'count' is clean int.

    if (entry != NULL) {
      *entry = __ pc();
      // caller can pass a 64-bit byte count here (from Unsafe.copyMemory)
      BLOCK_COMMENT("Entry:");
    }

    setup_arg_regs(); // from => rdi, to => rsi, count => rdx
                      // r9 and r10 may be used to save non-volatile registers

    // 'from', 'to' and 'count' are now valid
    __ movptr(byte_count, count);
    __ shrptr(count, 3); // count => qword_count

    // Copy from low to high addresses.  Use 'to' as scratch.
    __ lea(end_from, Address(from, qword_count, Address::times_8, -8));
    __ lea(end_to,   Address(to,   qword_count, Address::times_8, -8));
    __ negptr(qword_count); // make the count negative
    __ jmp(L_copy_bytes);

    // Copy trailing qwords
    __ BIND(L_copy_8_bytes);
    __ movq(rax, Address(end_from, qword_count, Address::times_8, 8));
    __ movq(Address(end_to, qword_count, Address::times_8, 8), rax);
    __ increment(qword_count);
    __ jcc(Assembler::notZero, L_copy_8_bytes);

    // Check for and copy trailing dword
    __ BIND(L_copy_4_bytes);
    __ testl(byte_count, 4);
    __ jccb(Assembler::zero, L_copy_2_bytes);
    __ movl(rax, Address(end_from, 8));
    __ movl(Address(end_to, 8), rax);

    __ addptr(end_from, 4);
    __ addptr(end_to, 4);

    // Check for and copy trailing word
    __ BIND(L_copy_2_bytes);
    __ testl(byte_count, 2);
    __ jccb(Assembler::zero, L_copy_byte);
    __ movw(rax, Address(end_from, 8));
    __ movw(Address(end_to, 8), rax);

    __ addptr(end_from, 2);
    __ addptr(end_to, 2);

    // Check for and copy trailing byte
    __ BIND(L_copy_byte);
    __ testl(byte_count, 1);
    __ jccb(Assembler::zero, L_exit);
    __ movb(rax, Address(end_from, 8));
    __ movb(Address(end_to, 8), rax);

    __ BIND(L_exit);
    restore_arg_regs();
    inc_counter_np(SharedRuntime::_jbyte_array_copy_ctr); // Update counter after rscratch1 is free
    __ xorptr(rax, rax); // return 0
    __ leave(); // required for proper stackwalking of RuntimeStub frame
    __ ret(0);

    // Copy in multi-bytes chunks
    copy_bytes_forward(end_from, end_to, qword_count, rax, L_copy_bytes, L_copy_8_bytes);
    __ jmp(L_copy_4_bytes);

    return start;
  }

  // Arguments:
  //   aligned - true => Input and output aligned on a HeapWord == 8-byte boundary
  //             ignored
  //   name    - stub name string
  //
  // Inputs:
  //   c_rarg0   - source array address
  //   c_rarg1   - destination array address
  //   c_rarg2   - element count, treated as ssize_t, can be zero
  //
  // If 'from' and/or 'to' are aligned on 4-, 2-, or 1-byte boundaries,
  // we let the hardware handle it.  The one to eight bytes within words,
  // dwords or qwords that span cache line boundaries will still be loaded
  // and stored atomically.
  //
  address generate_conjoint_byte_copy(bool aligned, address nooverlap_target,
                                      address* entry, const char *name) {
    __ align(CodeEntryAlignment);
    StubCodeMark mark(this, "StubRoutines", name);
    address start = __ pc();

    Label L_copy_bytes, L_copy_8_bytes, L_copy_4_bytes, L_copy_2_bytes;
    const Register from        = rdi;  // source array address
    const Register to          = rsi;  // destination array address
    const Register count       = rdx;  // elements count
    const Register byte_count  = rcx;
    const Register qword_count = count;

    __ enter(); // required for proper stackwalking of RuntimeStub frame
    assert_clean_int(c_rarg2, rax);    // Make sure 'count' is clean int.
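
    // Semantically this stub is a memmove for jbyte elements: when the regions
    // overlap it copies from high to low addresses.  Rough scalar equivalent
    // (illustrative only; the real code copies in 8-byte and larger chunks):
    //
    //   void conjoint_jbyte_copy(const jbyte* from, jbyte* to, size_t count) {
    //     for (size_t i = count; i > 0; i--) {
    //       to[i - 1] = from[i - 1];
    //     }
    //   }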

    if (entry != NULL) {
      *entry = __ pc();
      // caller can pass a 64-bit byte count here (from Unsafe.copyMemory)
      BLOCK_COMMENT("Entry:");
    }

    array_overlap_test(nooverlap_target, Address::times_1);
    setup_arg_regs(); // from => rdi, to => rsi, count => rdx
                      // r9 and r10 may be used to save non-volatile registers

    // 'from', 'to' and 'count' are now valid
    __ movptr(byte_count, count);
    __ shrptr(count, 3); // count => qword_count

    // Copy from high to low addresses.

    // Check for and copy trailing byte
    __ testl(byte_count, 1);
    __ jcc(Assembler::zero, L_copy_2_bytes);
    __ movb(rax, Address(from, byte_count, Address::times_1, -1));
    __ movb(Address(to, byte_count, Address::times_1, -1), rax);
    __ decrement(byte_count); // Adjust for possible trailing word

    // Check for and copy trailing word
    __ BIND(L_copy_2_bytes);
    __ testl(byte_count, 2);
    __ jcc(Assembler::zero, L_copy_4_bytes);
    __ movw(rax, Address(from, byte_count, Address::times_1, -2));
    __ movw(Address(to, byte_count, Address::times_1, -2), rax);

    // Check for and copy trailing dword
    __ BIND(L_copy_4_bytes);
    __ testl(byte_count, 4);
    __ jcc(Assembler::zero, L_copy_bytes);
    __ movl(rax, Address(from, qword_count, Address::times_8));
    __ movl(Address(to, qword_count, Address::times_8), rax);
    __ jmp(L_copy_bytes);

    // Copy trailing qwords
    __ BIND(L_copy_8_bytes);
    __ movq(rax, Address(from, qword_count, Address::times_8, -8));
    __ movq(Address(to, qword_count, Address::times_8, -8), rax);
    __ decrement(qword_count);
    __ jcc(Assembler::notZero, L_copy_8_bytes);

    restore_arg_regs();
    inc_counter_np(SharedRuntime::_jbyte_array_copy_ctr); // Update counter after rscratch1 is free
    __ xorptr(rax, rax); // return 0
    __ leave(); // required for proper stackwalking of RuntimeStub frame
    __ ret(0);

    // Copy in multi-bytes chunks
    copy_bytes_backward(from, to, qword_count, rax, L_copy_bytes, L_copy_8_bytes);

    restore_arg_regs();
    inc_counter_np(SharedRuntime::_jbyte_array_copy_ctr); // Update counter after rscratch1 is free
    __ xorptr(rax, rax); // return 0
    __ leave(); // required for proper stackwalking of RuntimeStub frame
    __ ret(0);

    return start;
  }

  // Arguments:
  //   aligned - true => Input and output aligned on a HeapWord == 8-byte boundary
  //             ignored
  //   name    - stub name string
  //
  // Inputs:
  //   c_rarg0   - source array address
  //   c_rarg1   - destination array address
  //   c_rarg2   - element count, treated as ssize_t, can be zero
  //
  // If 'from' and/or 'to' are aligned on 4- or 2-byte boundaries, we
  // let the hardware handle it.  The two or four words within dwords
  // or qwords that span cache line boundaries will still be loaded
  // and stored atomically.
  //
  // Side Effects:
  //   disjoint_short_copy_entry is set to the no-overlap entry point
  //   used by generate_conjoint_short_copy().
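  //
  // For orientation, a rough scalar equivalent of this stub's effect
  // (illustrative only; the generated code copies in 8-byte and larger chunks):
  //
  //   void disjoint_jshort_copy(const jshort* from, jshort* to, size_t count) {
  //     for (size_t i = 0; i < count; i++) {
  //       to[i] = from[i];
  //     }
  //   }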
1831 // 1832 address generate_disjoint_short_copy(bool aligned, address *entry, const char *name) { 1833 __ align(CodeEntryAlignment); 1834 StubCodeMark mark(this, "StubRoutines", name); 1835 address start = __ pc(); 1836 1837 Label L_copy_bytes, L_copy_8_bytes, L_copy_4_bytes, L_copy_2_bytes, L_exit; 1838 const Register from = rdi; // source array address 1839 const Register to = rsi; // destination array address 1840 const Register count = rdx; // elements count 1841 const Register word_count = rcx; 1842 const Register qword_count = count; 1843 const Register end_from = from; // source array end address 1844 const Register end_to = to; // destination array end address 1845 // End pointers are inclusive, and if count is not zero they point 1846 // to the last unit copied: end_to[0] := end_from[0] 1847 1848 __ enter(); // required for proper stackwalking of RuntimeStub frame 1849 assert_clean_int(c_rarg2, rax); // Make sure 'count' is clean int. 1850 1851 if (entry != NULL) { 1852 *entry = __ pc(); 1853 // caller can pass a 64-bit byte count here (from Unsafe.copyMemory) 1854 BLOCK_COMMENT("Entry:"); 1855 } 1856 1857 setup_arg_regs(); // from => rdi, to => rsi, count => rdx 1858 // r9 and r10 may be used to save non-volatile registers 1859 1860 // 'from', 'to' and 'count' are now valid 1861 __ movptr(word_count, count); 1862 __ shrptr(count, 2); // count => qword_count 1863 1864 // Copy from low to high addresses. Use 'to' as scratch. 1865 __ lea(end_from, Address(from, qword_count, Address::times_8, -8)); 1866 __ lea(end_to, Address(to, qword_count, Address::times_8, -8)); 1867 __ negptr(qword_count); 1868 __ jmp(L_copy_bytes); 1869 1870 // Copy trailing qwords 1871 __ BIND(L_copy_8_bytes); 1872 __ movq(rax, Address(end_from, qword_count, Address::times_8, 8)); 1873 __ movq(Address(end_to, qword_count, Address::times_8, 8), rax); 1874 __ increment(qword_count); 1875 __ jcc(Assembler::notZero, L_copy_8_bytes); 1876 1877 // Original 'dest' is trashed, so we can't use it as a 1878 // base register for a possible trailing word copy 1879 1880 // Check for and copy trailing dword 1881 __ BIND(L_copy_4_bytes); 1882 __ testl(word_count, 2); 1883 __ jccb(Assembler::zero, L_copy_2_bytes); 1884 __ movl(rax, Address(end_from, 8)); 1885 __ movl(Address(end_to, 8), rax); 1886 1887 __ addptr(end_from, 4); 1888 __ addptr(end_to, 4); 1889 1890 // Check for and copy trailing word 1891 __ BIND(L_copy_2_bytes); 1892 __ testl(word_count, 1); 1893 __ jccb(Assembler::zero, L_exit); 1894 __ movw(rax, Address(end_from, 8)); 1895 __ movw(Address(end_to, 8), rax); 1896 1897 __ BIND(L_exit); 1898 restore_arg_regs(); 1899 inc_counter_np(SharedRuntime::_jshort_array_copy_ctr); // Update counter after rscratch1 is free 1900 __ xorptr(rax, rax); // return 0 1901 __ leave(); // required for proper stackwalking of RuntimeStub frame 1902 __ ret(0); 1903 1904 // Copy in multi-byte chunks 1905 copy_bytes_forward(end_from, end_to, qword_count, rax, L_copy_bytes, L_copy_8_bytes); 1906 __ jmp(L_copy_4_bytes); 1907 1908 return start; 1909 } 1910 1911 address generate_fill(BasicType t, bool aligned, const char *name) { 1912 __ align(CodeEntryAlignment); 1913 StubCodeMark mark(this, "StubRoutines", name); 1914 address start = __ pc(); 1915 1916 BLOCK_COMMENT("Entry:"); 1917 1918 const Register to = c_rarg0; // destination array address 1919 const Register value = c_rarg1; // value 1920 const Register count = c_rarg2; // elements count 1921 1922 __ enter(); // required for proper stackwalking of RuntimeStub frame 1923 1924 __
generate_fill(t, aligned, to, value, count, rax, xmm0); 1925 1926 __ leave(); // required for proper stackwalking of RuntimeStub frame 1927 __ ret(0); 1928 return start; 1929 } 1930 1931 // Arguments: 1932 // aligned - true => Input and output aligned on a HeapWord == 8-byte boundary 1933 // ignored 1934 // name - stub name string 1935 // 1936 // Inputs: 1937 // c_rarg0 - source array address 1938 // c_rarg1 - destination array address 1939 // c_rarg2 - element count, treated as ssize_t, can be zero 1940 // 1941 // If 'from' and/or 'to' are aligned on 4- or 2-byte boundaries, we 1942 // let the hardware handle it. The two or four words within dwords 1943 // or qwords that span cache line boundaries will still be loaded 1944 // and stored atomically. 1945 // 1946 address generate_conjoint_short_copy(bool aligned, address nooverlap_target, 1947 address *entry, const char *name) { 1948 __ align(CodeEntryAlignment); 1949 StubCodeMark mark(this, "StubRoutines", name); 1950 address start = __ pc(); 1951 1952 Label L_copy_bytes, L_copy_8_bytes, L_copy_4_bytes; 1953 const Register from = rdi; // source array address 1954 const Register to = rsi; // destination array address 1955 const Register count = rdx; // elements count 1956 const Register word_count = rcx; 1957 const Register qword_count = count; 1958 1959 __ enter(); // required for proper stackwalking of RuntimeStub frame 1960 assert_clean_int(c_rarg2, rax); // Make sure 'count' is clean int. 1961 1962 if (entry != NULL) { 1963 *entry = __ pc(); 1964 // caller can pass a 64-bit byte count here (from Unsafe.copyMemory) 1965 BLOCK_COMMENT("Entry:"); 1966 } 1967 1968 array_overlap_test(nooverlap_target, Address::times_2); 1969 setup_arg_regs(); // from => rdi, to => rsi, count => rdx 1970 // r9 and r10 may be used to save non-volatile registers 1971 1972 // 'from', 'to' and 'count' are now valid 1973 __ movptr(word_count, count); 1974 __ shrptr(count, 2); // count => qword_count 1975 1976 // Copy from high to low addresses. Use 'to' as scratch. 
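// Hedged pseudo-C sketch of the backward order used below, assuming 'n' is the
// jshort element count and qwords = n >> 2 (names illustrative only):
//
//   if (n & 1) *(jshort*)(to+2*n-2) = *(jshort*)(from+2*n-2);    // trailing word
//   if (n & 2) *(jint*)(to+qwords*8) = *(jint*)(from+qwords*8);  // trailing dword
//   while (qwords != 0) {                                        // whole qwords,
//     qwords--;                                                  // highest address first
//     *(jlong*)(to+qwords*8) = *(jlong*)(from+qwords*8);
//   }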
1977 1978 // Check for and copy trailing word 1979 __ testl(word_count, 1); 1980 __ jccb(Assembler::zero, L_copy_4_bytes); 1981 __ movw(rax, Address(from, word_count, Address::times_2, -2)); 1982 __ movw(Address(to, word_count, Address::times_2, -2), rax); 1983 1984 // Check for and copy trailing dword 1985 __ BIND(L_copy_4_bytes); 1986 __ testl(word_count, 2); 1987 __ jcc(Assembler::zero, L_copy_bytes); 1988 __ movl(rax, Address(from, qword_count, Address::times_8)); 1989 __ movl(Address(to, qword_count, Address::times_8), rax); 1990 __ jmp(L_copy_bytes); 1991 1992 // Copy trailing qwords 1993 __ BIND(L_copy_8_bytes); 1994 __ movq(rax, Address(from, qword_count, Address::times_8, -8)); 1995 __ movq(Address(to, qword_count, Address::times_8, -8), rax); 1996 __ decrement(qword_count); 1997 __ jcc(Assembler::notZero, L_copy_8_bytes); 1998 1999 restore_arg_regs(); 2000 inc_counter_np(SharedRuntime::_jshort_array_copy_ctr); // Update counter after rscratch1 is free 2001 __ xorptr(rax, rax); // return 0 2002 __ leave(); // required for proper stackwalking of RuntimeStub frame 2003 __ ret(0); 2004 2005 // Copy in multi-bytes chunks 2006 copy_bytes_backward(from, to, qword_count, rax, L_copy_bytes, L_copy_8_bytes); 2007 2008 restore_arg_regs(); 2009 inc_counter_np(SharedRuntime::_jshort_array_copy_ctr); // Update counter after rscratch1 is free 2010 __ xorptr(rax, rax); // return 0 2011 __ leave(); // required for proper stackwalking of RuntimeStub frame 2012 __ ret(0); 2013 2014 return start; 2015 } 2016 2017 // Arguments: 2018 // aligned - true => Input and output aligned on a HeapWord == 8-byte boundary 2019 // ignored 2020 // is_oop - true => oop array, so generate store check code 2021 // name - stub name string 2022 // 2023 // Inputs: 2024 // c_rarg0 - source array address 2025 // c_rarg1 - destination array address 2026 // c_rarg2 - element count, treated as ssize_t, can be zero 2027 // 2028 // If 'from' and/or 'to' are aligned on 4-byte boundaries, we let 2029 // the hardware handle it. The two dwords within qwords that span 2030 // cache line boundaries will still be loaded and stored atomicly. 2031 // 2032 // Side Effects: 2033 // disjoint_int_copy_entry is set to the no-overlap entry point 2034 // used by generate_conjoint_int_oop_copy(). 2035 // 2036 address generate_disjoint_int_oop_copy(bool aligned, bool is_oop, address* entry, 2037 const char *name, bool dest_uninitialized = false) { 2038 __ align(CodeEntryAlignment); 2039 StubCodeMark mark(this, "StubRoutines", name); 2040 address start = __ pc(); 2041 2042 Label L_copy_bytes, L_copy_8_bytes, L_copy_4_bytes, L_exit; 2043 const Register from = rdi; // source array address 2044 const Register to = rsi; // destination array address 2045 const Register count = rdx; // elements count 2046 const Register dword_count = rcx; 2047 const Register qword_count = count; 2048 const Register end_from = from; // source array end address 2049 const Register end_to = to; // destination array end address 2050 const Register saved_to = r11; // saved destination array address 2051 // End pointers are inclusive, and if count is not zero they point 2052 // to the last unit copied: end_to[0] := end_from[0] 2053 2054 __ enter(); // required for proper stackwalking of RuntimeStub frame 2055 assert_clean_int(c_rarg2, rax); // Make sure 'count' is clean int. 
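// The forward loop below uses inclusive end pointers and a negated qword index
// that counts up toward zero; a hedged sketch with illustrative names:
//
//   jlong* ef = (jlong*)from + qwords - 1;      // last qword of the source
//   jlong* et = (jlong*)to   + qwords - 1;
//   for (ptrdiff_t qc = -qwords; qc != 0; qc++)
//     et[qc + 1] = ef[qc + 1];
//   if (dwords & 1) ((jint*)(et + 1))[0] = ((jint*)(ef + 1))[0]; // odd trailing dword
//
// For the is_oop variant the copy is bracketed by gen_write_ref_array_pre_barrier
// and gen_write_ref_array_post_barrier on the destination range.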
2056 2057 if (entry != NULL) { 2058 *entry = __ pc(); 2059 // caller can pass a 64-bit byte count here (from Unsafe.copyMemory) 2060 BLOCK_COMMENT("Entry:"); 2061 } 2062 2063 setup_arg_regs(); // from => rdi, to => rsi, count => rdx 2064 // r9 and r10 may be used to save non-volatile registers 2065 if (is_oop) { 2066 __ movq(saved_to, to); 2067 gen_write_ref_array_pre_barrier(to, count, dest_uninitialized); 2068 } 2069 2070 // 'from', 'to' and 'count' are now valid 2071 __ movptr(dword_count, count); 2072 __ shrptr(count, 1); // count => qword_count 2073 2074 // Copy from low to high addresses. Use 'to' as scratch. 2075 __ lea(end_from, Address(from, qword_count, Address::times_8, -8)); 2076 __ lea(end_to, Address(to, qword_count, Address::times_8, -8)); 2077 __ negptr(qword_count); 2078 __ jmp(L_copy_bytes); 2079 2080 // Copy trailing qwords 2081 __ BIND(L_copy_8_bytes); 2082 __ movq(rax, Address(end_from, qword_count, Address::times_8, 8)); 2083 __ movq(Address(end_to, qword_count, Address::times_8, 8), rax); 2084 __ increment(qword_count); 2085 __ jcc(Assembler::notZero, L_copy_8_bytes); 2086 2087 // Check for and copy trailing dword 2088 __ BIND(L_copy_4_bytes); 2089 __ testl(dword_count, 1); // Only byte test since the value is 0 or 1 2090 __ jccb(Assembler::zero, L_exit); 2091 __ movl(rax, Address(end_from, 8)); 2092 __ movl(Address(end_to, 8), rax); 2093 2094 __ BIND(L_exit); 2095 if (is_oop) { 2096 gen_write_ref_array_post_barrier(saved_to, dword_count, rax); 2097 } 2098 restore_arg_regs(); 2099 inc_counter_np(SharedRuntime::_jint_array_copy_ctr); // Update counter after rscratch1 is free 2100 __ xorptr(rax, rax); // return 0 2101 __ leave(); // required for proper stackwalking of RuntimeStub frame 2102 __ ret(0); 2103 2104 // Copy in multi-bytes chunks 2105 copy_bytes_forward(end_from, end_to, qword_count, rax, L_copy_bytes, L_copy_8_bytes); 2106 __ jmp(L_copy_4_bytes); 2107 2108 return start; 2109 } 2110 2111 // Arguments: 2112 // aligned - true => Input and output aligned on a HeapWord == 8-byte boundary 2113 // ignored 2114 // is_oop - true => oop array, so generate store check code 2115 // name - stub name string 2116 // 2117 // Inputs: 2118 // c_rarg0 - source array address 2119 // c_rarg1 - destination array address 2120 // c_rarg2 - element count, treated as ssize_t, can be zero 2121 // 2122 // If 'from' and/or 'to' are aligned on 4-byte boundaries, we let 2123 // the hardware handle it. The two dwords within qwords that span 2124 // cache line boundaries will still be loaded and stored atomicly. 2125 // 2126 address generate_conjoint_int_oop_copy(bool aligned, bool is_oop, address nooverlap_target, 2127 address *entry, const char *name, 2128 bool dest_uninitialized = false) { 2129 __ align(CodeEntryAlignment); 2130 StubCodeMark mark(this, "StubRoutines", name); 2131 address start = __ pc(); 2132 2133 Label L_copy_bytes, L_copy_8_bytes, L_copy_2_bytes, L_exit; 2134 const Register from = rdi; // source array address 2135 const Register to = rsi; // destination array address 2136 const Register count = rdx; // elements count 2137 const Register dword_count = rcx; 2138 const Register qword_count = count; 2139 2140 __ enter(); // required for proper stackwalking of RuntimeStub frame 2141 assert_clean_int(c_rarg2, rax); // Make sure 'count' is clean int. 
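// When is_oop is true this int variant is only registered under UseCompressedOops
// (see generate_arraycopy_stubs below), so each heap oop is a 32-bit dword.
// Hedged outline of how the barriers bracket the backward copy:
//
//   if (is_oop) gen_write_ref_array_pre_barrier(to, count, dest_uninitialized);
//   ... copy dwords/qwords from high to low addresses, as in the stubs above ...
//   if (is_oop) gen_write_ref_array_post_barrier(to, dword_count, tmp);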
2142 2143 if (entry != NULL) { 2144 *entry = __ pc(); 2145 // caller can pass a 64-bit byte count here (from Unsafe.copyMemory) 2146 BLOCK_COMMENT("Entry:"); 2147 } 2148 2149 array_overlap_test(nooverlap_target, Address::times_4); 2150 setup_arg_regs(); // from => rdi, to => rsi, count => rdx 2151 // r9 and r10 may be used to save non-volatile registers 2152 2153 if (is_oop) { 2154 // no registers are destroyed by this call 2155 gen_write_ref_array_pre_barrier(to, count, dest_uninitialized); 2156 } 2157 2158 assert_clean_int(count, rax); // Make sure 'count' is clean int. 2159 // 'from', 'to' and 'count' are now valid 2160 __ movptr(dword_count, count); 2161 __ shrptr(count, 1); // count => qword_count 2162 2163 // Copy from high to low addresses. Use 'to' as scratch. 2164 2165 // Check for and copy trailing dword 2166 __ testl(dword_count, 1); 2167 __ jcc(Assembler::zero, L_copy_bytes); 2168 __ movl(rax, Address(from, dword_count, Address::times_4, -4)); 2169 __ movl(Address(to, dword_count, Address::times_4, -4), rax); 2170 __ jmp(L_copy_bytes); 2171 2172 // Copy trailing qwords 2173 __ BIND(L_copy_8_bytes); 2174 __ movq(rax, Address(from, qword_count, Address::times_8, -8)); 2175 __ movq(Address(to, qword_count, Address::times_8, -8), rax); 2176 __ decrement(qword_count); 2177 __ jcc(Assembler::notZero, L_copy_8_bytes); 2178 2179 if (is_oop) { 2180 __ jmp(L_exit); 2181 } 2182 restore_arg_regs(); 2183 inc_counter_np(SharedRuntime::_jint_array_copy_ctr); // Update counter after rscratch1 is free 2184 __ xorptr(rax, rax); // return 0 2185 __ leave(); // required for proper stackwalking of RuntimeStub frame 2186 __ ret(0); 2187 2188 // Copy in multi-bytes chunks 2189 copy_bytes_backward(from, to, qword_count, rax, L_copy_bytes, L_copy_8_bytes); 2190 2191 __ BIND(L_exit); 2192 if (is_oop) { 2193 gen_write_ref_array_post_barrier(to, dword_count, rax); 2194 } 2195 restore_arg_regs(); 2196 inc_counter_np(SharedRuntime::_jint_array_copy_ctr); // Update counter after rscratch1 is free 2197 __ xorptr(rax, rax); // return 0 2198 __ leave(); // required for proper stackwalking of RuntimeStub frame 2199 __ ret(0); 2200 2201 return start; 2202 } 2203 2204 // Arguments: 2205 // aligned - true => Input and output aligned on a HeapWord boundary == 8 bytes 2206 // ignored 2207 // is_oop - true => oop array, so generate store check code 2208 // name - stub name string 2209 // 2210 // Inputs: 2211 // c_rarg0 - source array address 2212 // c_rarg1 - destination array address 2213 // c_rarg2 - element count, treated as ssize_t, can be zero 2214 // 2215 // Side Effects: 2216 // disjoint_oop_copy_entry or disjoint_long_copy_entry is set to the 2217 // no-overlap entry point used by generate_conjoint_long_oop_copy(). 
2218 // 2219 address generate_disjoint_long_oop_copy(bool aligned, bool is_oop, address *entry, 2220 const char *name, bool dest_uninitialized = false) { 2221 __ align(CodeEntryAlignment); 2222 StubCodeMark mark(this, "StubRoutines", name); 2223 address start = __ pc(); 2224 2225 Label L_copy_bytes, L_copy_8_bytes, L_exit; 2226 const Register from = rdi; // source array address 2227 const Register to = rsi; // destination array address 2228 const Register qword_count = rdx; // elements count 2229 const Register end_from = from; // source array end address 2230 const Register end_to = rcx; // destination array end address 2231 const Register saved_to = to; 2232 const Register saved_count = r11; 2233 // End pointers are inclusive, and if count is not zero they point 2234 // to the last unit copied: end_to[0] := end_from[0] 2235 2236 __ enter(); // required for proper stackwalking of RuntimeStub frame 2237 // Save no-overlap entry point for generate_conjoint_long_oop_copy() 2238 assert_clean_int(c_rarg2, rax); // Make sure 'count' is clean int. 2239 2240 if (entry != NULL) { 2241 *entry = __ pc(); 2242 // caller can pass a 64-bit byte count here (from Unsafe.copyMemory) 2243 BLOCK_COMMENT("Entry:"); 2244 } 2245 2246 setup_arg_regs(); // from => rdi, to => rsi, count => rdx 2247 // r9 and r10 may be used to save non-volatile registers 2248 // 'from', 'to' and 'qword_count' are now valid 2249 if (is_oop) { 2250 // Save to and count for store barrier 2251 __ movptr(saved_count, qword_count); 2252 // no registers are destroyed by this call 2253 gen_write_ref_array_pre_barrier(to, qword_count, dest_uninitialized); 2254 } 2255 2256 // Copy from low to high addresses. Use 'to' as scratch. 2257 __ lea(end_from, Address(from, qword_count, Address::times_8, -8)); 2258 __ lea(end_to, Address(to, qword_count, Address::times_8, -8)); 2259 __ negptr(qword_count); 2260 __ jmp(L_copy_bytes); 2261 2262 // Copy trailing qwords 2263 __ BIND(L_copy_8_bytes); 2264 __ movq(rax, Address(end_from, qword_count, Address::times_8, 8)); 2265 __ movq(Address(end_to, qword_count, Address::times_8, 8), rax); 2266 __ increment(qword_count); 2267 __ jcc(Assembler::notZero, L_copy_8_bytes); 2268 2269 if (is_oop) { 2270 __ jmp(L_exit); 2271 } else { 2272 restore_arg_regs(); 2273 inc_counter_np(SharedRuntime::_jlong_array_copy_ctr); // Update counter after rscratch1 is free 2274 __ xorptr(rax, rax); // return 0 2275 __ leave(); // required for proper stackwalking of RuntimeStub frame 2276 __ ret(0); 2277 } 2278 2279 // Copy in multi-bytes chunks 2280 copy_bytes_forward(end_from, end_to, qword_count, rax, L_copy_bytes, L_copy_8_bytes); 2281 2282 if (is_oop) { 2283 __ BIND(L_exit); 2284 gen_write_ref_array_post_barrier(saved_to, saved_count, rax); 2285 } 2286 restore_arg_regs(); 2287 if (is_oop) { 2288 inc_counter_np(SharedRuntime::_oop_array_copy_ctr); // Update counter after rscratch1 is free 2289 } else { 2290 inc_counter_np(SharedRuntime::_jlong_array_copy_ctr); // Update counter after rscratch1 is free 2291 } 2292 __ xorptr(rax, rax); // return 0 2293 __ leave(); // required for proper stackwalking of RuntimeStub frame 2294 __ ret(0); 2295 2296 return start; 2297 } 2298 2299 // Arguments: 2300 // aligned - true => Input and output aligned on a HeapWord boundary == 8 bytes 2301 // ignored 2302 // is_oop - true => oop array, so generate store check code 2303 // name - stub name string 2304 // 2305 // Inputs: 2306 // c_rarg0 - source array address 2307 // c_rarg1 - destination array address 2308 // c_rarg2 - element count, 
treated as ssize_t, can be zero 2309 // 2310 address generate_conjoint_long_oop_copy(bool aligned, bool is_oop, 2311 address nooverlap_target, address *entry, 2312 const char *name, bool dest_uninitialized = false) { 2313 __ align(CodeEntryAlignment); 2314 StubCodeMark mark(this, "StubRoutines", name); 2315 address start = __ pc(); 2316 2317 Label L_copy_bytes, L_copy_8_bytes, L_exit; 2318 const Register from = rdi; // source array address 2319 const Register to = rsi; // destination array address 2320 const Register qword_count = rdx; // elements count 2321 const Register saved_count = rcx; 2322 2323 __ enter(); // required for proper stackwalking of RuntimeStub frame 2324 assert_clean_int(c_rarg2, rax); // Make sure 'count' is clean int. 2325 2326 if (entry != NULL) { 2327 *entry = __ pc(); 2328 // caller can pass a 64-bit byte count here (from Unsafe.copyMemory) 2329 BLOCK_COMMENT("Entry:"); 2330 } 2331 2332 array_overlap_test(nooverlap_target, Address::times_8); 2333 setup_arg_regs(); // from => rdi, to => rsi, count => rdx 2334 // r9 and r10 may be used to save non-volatile registers 2335 // 'from', 'to' and 'qword_count' are now valid 2336 if (is_oop) { 2337 // Save to and count for store barrier 2338 __ movptr(saved_count, qword_count); 2339 // No registers are destroyed by this call 2340 gen_write_ref_array_pre_barrier(to, saved_count, dest_uninitialized); 2341 } 2342 2343 __ jmp(L_copy_bytes); 2344 2345 // Copy trailing qwords 2346 __ BIND(L_copy_8_bytes); 2347 __ movq(rax, Address(from, qword_count, Address::times_8, -8)); 2348 __ movq(Address(to, qword_count, Address::times_8, -8), rax); 2349 __ decrement(qword_count); 2350 __ jcc(Assembler::notZero, L_copy_8_bytes); 2351 2352 if (is_oop) { 2353 __ jmp(L_exit); 2354 } else { 2355 restore_arg_regs(); 2356 inc_counter_np(SharedRuntime::_jlong_array_copy_ctr); // Update counter after rscratch1 is free 2357 __ xorptr(rax, rax); // return 0 2358 __ leave(); // required for proper stackwalking of RuntimeStub frame 2359 __ ret(0); 2360 } 2361 2362 // Copy in multi-bytes chunks 2363 copy_bytes_backward(from, to, qword_count, rax, L_copy_bytes, L_copy_8_bytes); 2364 2365 if (is_oop) { 2366 __ BIND(L_exit); 2367 gen_write_ref_array_post_barrier(to, saved_count, rax); 2368 } 2369 restore_arg_regs(); 2370 if (is_oop) { 2371 inc_counter_np(SharedRuntime::_oop_array_copy_ctr); // Update counter after rscratch1 is free 2372 } else { 2373 inc_counter_np(SharedRuntime::_jlong_array_copy_ctr); // Update counter after rscratch1 is free 2374 } 2375 __ xorptr(rax, rax); // return 0 2376 __ leave(); // required for proper stackwalking of RuntimeStub frame 2377 __ ret(0); 2378 2379 return start; 2380 } 2381 2382 2383 // Helper for generating a dynamic type check. 2384 // Smashes no registers. 2385 void generate_type_check(Register sub_klass, 2386 Register super_check_offset, 2387 Register super_klass, 2388 Label& L_success) { 2389 assert_different_registers(sub_klass, super_check_offset, super_klass); 2390 2391 BLOCK_COMMENT("type_check:"); 2392 2393 Label L_miss; 2394 2395 __ check_klass_subtype_fast_path(sub_klass, super_klass, noreg, &L_success, &L_miss, NULL, 2396 super_check_offset); 2397 __ check_klass_subtype_slow_path(sub_klass, super_klass, noreg, noreg, &L_success, NULL); 2398 2399 // Fall through on failure! 
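// In pseudo-C the two checks emitted above are roughly (illustrative reading only):
//   if (*(Klass**)((address)sub_klass + super_check_offset) == super_klass) goto L_success;
//   if (super_klass is found among sub_klass' secondary supers)             goto L_success;
// Anything else falls through to L_miss below.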
2400 __ BIND(L_miss); 2401 } 2402 2403 // 2404 // Generate checkcasting array copy stub 2405 // 2406 // Input: 2407 // c_rarg0 - source array address 2408 // c_rarg1 - destination array address 2409 // c_rarg2 - element count, treated as ssize_t, can be zero 2410 // c_rarg3 - size_t ckoff (super_check_offset) 2411 // not Win64 2412 // c_rarg4 - oop ckval (super_klass) 2413 // Win64 2414 // rsp+40 - oop ckval (super_klass) 2415 // 2416 // Output: 2417 // rax == 0 - success 2418 // rax == -1^K - failure, where K is partial transfer count 2419 // 2420 address generate_checkcast_copy(const char *name, address *entry, 2421 bool dest_uninitialized = false) { 2422 2423 Label L_load_element, L_store_element, L_do_card_marks, L_done; 2424 2425 // Input registers (after setup_arg_regs) 2426 const Register from = rdi; // source array address 2427 const Register to = rsi; // destination array address 2428 const Register length = rdx; // elements count 2429 const Register ckoff = rcx; // super_check_offset 2430 const Register ckval = r8; // super_klass 2431 2432 // Registers used as temps (r13, r14 are save-on-entry) 2433 const Register end_from = from; // source array end address 2434 const Register end_to = r13; // destination array end address 2435 const Register count = rdx; // -(count_remaining) 2436 const Register r14_length = r14; // saved copy of length 2437 // End pointers are inclusive, and if length is not zero they point 2438 // to the last unit copied: end_to[0] := end_from[0] 2439 2440 const Register rax_oop = rax; // actual oop copied 2441 const Register r11_klass = r11; // oop._klass 2442 2443 //--------------------------------------------------------------- 2444 // Assembler stub will be used for this call to arraycopy 2445 // if the two arrays are subtypes of Object[] but the 2446 // destination array type is not equal to or a supertype 2447 // of the source type. Each element must be separately 2448 // checked. 2449 2450 __ align(CodeEntryAlignment); 2451 StubCodeMark mark(this, "StubRoutines", name); 2452 address start = __ pc(); 2453 2454 __ enter(); // required for proper stackwalking of RuntimeStub frame 2455 2456 #ifdef ASSERT 2457 // caller guarantees that the arrays really are different 2458 // otherwise, we would have to make conjoint checks 2459 { Label L; 2460 array_overlap_test(L, TIMES_OOP); 2461 __ stop("checkcast_copy within a single array"); 2462 __ bind(L); 2463 } 2464 #endif //ASSERT 2465 2466 setup_arg_regs(4); // from => rdi, to => rsi, length => rdx 2467 // ckoff => rcx, ckval => r8 2468 // r9 and r10 may be used to save non-volatile registers 2469 #ifdef _WIN64 2470 // last argument (#4) is on stack on Win64 2471 __ movptr(ckval, Address(rsp, 6 * wordSize)); 2472 #endif 2473 2474 // Caller of this entry point must set up the argument registers. 2475 if (entry != NULL) { 2476 *entry = __ pc(); 2477 BLOCK_COMMENT("Entry:"); 2478 } 2479 2480 // allocate spill slots for r13, r14 2481 enum { 2482 saved_r13_offset, 2483 saved_r14_offset, 2484 saved_rbp_offset 2485 }; 2486 __ subptr(rsp, saved_rbp_offset * wordSize); 2487 __ movptr(Address(rsp, saved_r13_offset * wordSize), r13); 2488 __ movptr(Address(rsp, saved_r14_offset * wordSize), r14); 2489 2490 // check that int operands are properly extended to size_t 2491 assert_clean_int(length, rax); 2492 assert_clean_int(ckoff, rax); 2493 2494 #ifdef ASSERT 2495 BLOCK_COMMENT("assert consistent ckoff/ckval"); 2496 // The ckoff and ckval must be mutually consistent, 2497 // even though caller generates both. 
2498 { Label L; 2499 int sco_offset = in_bytes(Klass::super_check_offset_offset()); 2500 __ cmpl(ckoff, Address(ckval, sco_offset)); 2501 __ jcc(Assembler::equal, L); 2502 __ stop("super_check_offset inconsistent"); 2503 __ bind(L); 2504 } 2505 #endif //ASSERT 2506 2507 // Loop-invariant addresses. They are exclusive end pointers. 2508 Address end_from_addr(from, length, TIMES_OOP, 0); 2509 Address end_to_addr(to, length, TIMES_OOP, 0); 2510 // Loop-variant addresses. They assume post-incremented count < 0. 2511 Address from_element_addr(end_from, count, TIMES_OOP, 0); 2512 Address to_element_addr(end_to, count, TIMES_OOP, 0); 2513 2514 gen_write_ref_array_pre_barrier(to, count, dest_uninitialized); 2515 2516 // Copy from low to high addresses, indexed from the end of each array. 2517 __ lea(end_from, end_from_addr); 2518 __ lea(end_to, end_to_addr); 2519 __ movptr(r14_length, length); // save a copy of the length 2520 assert(length == count, ""); // else fix next line: 2521 __ negptr(count); // negate and test the length 2522 __ jcc(Assembler::notZero, L_load_element); 2523 2524 // Empty array: Nothing to do. 2525 __ xorptr(rax, rax); // return 0 on (trivial) success 2526 __ jmp(L_done); 2527 2528 // ======== begin loop ======== 2529 // (Loop is rotated; its entry is L_load_element.) 2530 // Loop control: 2531 // for (count = -count; count != 0; count++) 2532 // Base pointers src, dst are biased by 8*(count-1),to last element. 2533 __ align(OptoLoopAlignment); 2534 2535 __ BIND(L_store_element); 2536 __ store_heap_oop(to_element_addr, rax_oop); // store the oop 2537 __ increment(count); // increment the count toward zero 2538 __ jcc(Assembler::zero, L_do_card_marks); 2539 2540 // ======== loop entry is here ======== 2541 __ BIND(L_load_element); 2542 __ load_heap_oop(rax_oop, from_element_addr); // load the oop 2543 __ testptr(rax_oop, rax_oop); 2544 __ jcc(Assembler::zero, L_store_element); 2545 2546 __ load_klass(r11_klass, rax_oop);// query the object klass 2547 generate_type_check(r11_klass, ckoff, ckval, L_store_element); 2548 // ======== end loop ======== 2549 2550 // It was a real error; we must depend on the caller to finish the job. 2551 // Register rdx = -1 * number of *remaining* oops, r14 = *total* oops. 2552 // Emit GC store barriers for the oops we have copied (r14 + rdx), 2553 // and report their number to the caller. 2554 assert_different_registers(rax, r14_length, count, to, end_to, rcx, rscratch1); 2555 Label L_post_barrier; 2556 __ addptr(r14_length, count); // K = (original - remaining) oops 2557 __ movptr(rax, r14_length); // save the value 2558 __ notptr(rax); // report (-1^K) to caller (does not affect flags) 2559 __ jccb(Assembler::notZero, L_post_barrier); 2560 __ jmp(L_done); // K == 0, nothing was copied, skip post barrier 2561 2562 // Come here on success only. 2563 __ BIND(L_do_card_marks); 2564 __ xorptr(rax, rax); // return 0 on success 2565 2566 __ BIND(L_post_barrier); 2567 gen_write_ref_array_post_barrier(to, r14_length, rscratch1); 2568 2569 // Common exit point (success or failure). 
2570 __ BIND(L_done); 2571 __ movptr(r13, Address(rsp, saved_r13_offset * wordSize)); 2572 __ movptr(r14, Address(rsp, saved_r14_offset * wordSize)); 2573 restore_arg_regs(); 2574 inc_counter_np(SharedRuntime::_checkcast_array_copy_ctr); // Update counter after rscratch1 is free 2575 __ leave(); // required for proper stackwalking of RuntimeStub frame 2576 __ ret(0); 2577 2578 return start; 2579 } 2580 2581 // 2582 // Generate 'unsafe' array copy stub 2583 // Though just as safe as the other stubs, it takes an unscaled 2584 // size_t argument instead of an element count. 2585 // 2586 // Input: 2587 // c_rarg0 - source array address 2588 // c_rarg1 - destination array address 2589 // c_rarg2 - byte count, treated as ssize_t, can be zero 2590 // 2591 // Examines the alignment of the operands and dispatches 2592 // to a long, int, short, or byte copy loop. 2593 // 2594 address generate_unsafe_copy(const char *name, 2595 address byte_copy_entry, address short_copy_entry, 2596 address int_copy_entry, address long_copy_entry) { 2597 2598 Label L_long_aligned, L_int_aligned, L_short_aligned; 2599 2600 // Input registers (before setup_arg_regs) 2601 const Register from = c_rarg0; // source array address 2602 const Register to = c_rarg1; // destination array address 2603 const Register size = c_rarg2; // byte count (size_t) 2604 2605 // Register used as a temp 2606 const Register bits = rax; // test copy of low bits 2607 2608 __ align(CodeEntryAlignment); 2609 StubCodeMark mark(this, "StubRoutines", name); 2610 address start = __ pc(); 2611 2612 __ enter(); // required for proper stackwalking of RuntimeStub frame 2613 2614 // bump this on entry, not on exit: 2615 inc_counter_np(SharedRuntime::_unsafe_array_copy_ctr); 2616 2617 __ mov(bits, from); 2618 __ orptr(bits, to); 2619 __ orptr(bits, size); 2620 2621 __ testb(bits, BytesPerLong-1); 2622 __ jccb(Assembler::zero, L_long_aligned); 2623 2624 __ testb(bits, BytesPerInt-1); 2625 __ jccb(Assembler::zero, L_int_aligned); 2626 2627 __ testb(bits, BytesPerShort-1); 2628 __ jump_cc(Assembler::notZero, RuntimeAddress(byte_copy_entry)); 2629 2630 __ BIND(L_short_aligned); 2631 __ shrptr(size, LogBytesPerShort); // size => short_count 2632 __ jump(RuntimeAddress(short_copy_entry)); 2633 2634 __ BIND(L_int_aligned); 2635 __ shrptr(size, LogBytesPerInt); // size => int_count 2636 __ jump(RuntimeAddress(int_copy_entry)); 2637 2638 __ BIND(L_long_aligned); 2639 __ shrptr(size, LogBytesPerLong); // size => qword_count 2640 __ jump(RuntimeAddress(long_copy_entry)); 2641 2642 return start; 2643 } 2644 2645 // Perform range checks on the proposed arraycopy. 2646 // Kills temp, but nothing else. 2647 // Also, clean the sign bits of src_pos and dst_pos. 
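// In pseudo-C the emitted checks are roughly (the compares are unsigned, so a
// 32-bit overflow of the sum also fails; names illustrative):
//
//   if ((juint)(src_pos + length) > (juint)arrayOop(src)->length()) goto L_failed;
//   if ((juint)(dst_pos + length) > (juint)arrayOop(dst)->length()) goto L_failed;
//   src_pos = (int64_t)(jint)src_pos;   // sign-extend; values are known positive,
//   dst_pos = (int64_t)(jint)dst_pos;   // so this just clears the high 32 bits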
2648 void arraycopy_range_checks(Register src, // source array oop (c_rarg0) 2649 Register src_pos, // source position (c_rarg1) 2650 Register dst, // destination array oo (c_rarg2) 2651 Register dst_pos, // destination position (c_rarg3) 2652 Register length, 2653 Register temp, 2654 Label& L_failed) { 2655 BLOCK_COMMENT("arraycopy_range_checks:"); 2656 2657 // if (src_pos + length > arrayOop(src)->length()) FAIL; 2658 __ movl(temp, length); 2659 __ addl(temp, src_pos); // src_pos + length 2660 __ cmpl(temp, Address(src, arrayOopDesc::length_offset_in_bytes())); 2661 __ jcc(Assembler::above, L_failed); 2662 2663 // if (dst_pos + length > arrayOop(dst)->length()) FAIL; 2664 __ movl(temp, length); 2665 __ addl(temp, dst_pos); // dst_pos + length 2666 __ cmpl(temp, Address(dst, arrayOopDesc::length_offset_in_bytes())); 2667 __ jcc(Assembler::above, L_failed); 2668 2669 // Have to clean up high 32-bits of 'src_pos' and 'dst_pos'. 2670 // Move with sign extension can be used since they are positive. 2671 __ movslq(src_pos, src_pos); 2672 __ movslq(dst_pos, dst_pos); 2673 2674 BLOCK_COMMENT("arraycopy_range_checks done"); 2675 } 2676 2677 // 2678 // Generate generic array copy stubs 2679 // 2680 // Input: 2681 // c_rarg0 - src oop 2682 // c_rarg1 - src_pos (32-bits) 2683 // c_rarg2 - dst oop 2684 // c_rarg3 - dst_pos (32-bits) 2685 // not Win64 2686 // c_rarg4 - element count (32-bits) 2687 // Win64 2688 // rsp+40 - element count (32-bits) 2689 // 2690 // Output: 2691 // rax == 0 - success 2692 // rax == -1^K - failure, where K is partial transfer count 2693 // 2694 address generate_generic_copy(const char *name, 2695 address byte_copy_entry, address short_copy_entry, 2696 address int_copy_entry, address oop_copy_entry, 2697 address long_copy_entry, address checkcast_copy_entry) { 2698 2699 Label L_failed, L_failed_0, L_objArray; 2700 Label L_copy_bytes, L_copy_shorts, L_copy_ints, L_copy_longs; 2701 2702 // Input registers 2703 const Register src = c_rarg0; // source array oop 2704 const Register src_pos = c_rarg1; // source position 2705 const Register dst = c_rarg2; // destination array oop 2706 const Register dst_pos = c_rarg3; // destination position 2707 #ifndef _WIN64 2708 const Register length = c_rarg4; 2709 #else 2710 const Address length(rsp, 6 * wordSize); // elements count is on stack on Win64 2711 #endif 2712 2713 { int modulus = CodeEntryAlignment; 2714 int target = modulus - 5; // 5 = sizeof jmp(L_failed) 2715 int advance = target - (__ offset() % modulus); 2716 if (advance < 0) advance += modulus; 2717 if (advance > 0) __ nop(advance); 2718 } 2719 StubCodeMark mark(this, "StubRoutines", name); 2720 2721 // Short-hop target to L_failed. Makes for denser prologue code. 2722 __ BIND(L_failed_0); 2723 __ jmp(L_failed); 2724 assert(__ offset() % CodeEntryAlignment == 0, "no further alignment needed"); 2725 2726 __ align(CodeEntryAlignment); 2727 address start = __ pc(); 2728 2729 __ enter(); // required for proper stackwalking of RuntimeStub frame 2730 2731 // bump this on entry, not on exit: 2732 inc_counter_np(SharedRuntime::_generic_array_copy_ctr); 2733 2734 //----------------------------------------------------------------------- 2735 // Assembler stub will be used for this call to arraycopy 2736 // if the following conditions are met: 2737 // 2738 // (1) src and dst must not be null. 2739 // (2) src_pos must not be negative. 2740 // (3) dst_pos must not be negative. 2741 // (4) length must not be negative. 2742 // (5) src klass and dst klass should be the same and not NULL. 
2743 // (6) src and dst should be arrays. 2744 // (7) src_pos + length must not exceed length of src. 2745 // (8) dst_pos + length must not exceed length of dst. 2746 // 2747 2748 // if (src == NULL) return -1; 2749 __ testptr(src, src); // src oop 2750 size_t j1off = __ offset(); 2751 __ jccb(Assembler::zero, L_failed_0); 2752 2753 // if (src_pos < 0) return -1; 2754 __ testl(src_pos, src_pos); // src_pos (32-bits) 2755 __ jccb(Assembler::negative, L_failed_0); 2756 2757 // if (dst == NULL) return -1; 2758 __ testptr(dst, dst); // dst oop 2759 __ jccb(Assembler::zero, L_failed_0); 2760 2761 // if (dst_pos < 0) return -1; 2762 __ testl(dst_pos, dst_pos); // dst_pos (32-bits) 2763 size_t j4off = __ offset(); 2764 __ jccb(Assembler::negative, L_failed_0); 2765 2766 // The first four tests are very dense code, 2767 // but not quite dense enough to put four 2768 // jumps in a 16-byte instruction fetch buffer. 2769 // That's good, because some branch predicters 2770 // do not like jumps so close together. 2771 // Make sure of this. 2772 guarantee(((j1off ^ j4off) & ~15) != 0, "I$ line of 1st & 4th jumps"); 2773 2774 // registers used as temp 2775 const Register r11_length = r11; // elements count to copy 2776 const Register r10_src_klass = r10; // array klass 2777 2778 // if (length < 0) return -1; 2779 __ movl(r11_length, length); // length (elements count, 32-bits value) 2780 __ testl(r11_length, r11_length); 2781 __ jccb(Assembler::negative, L_failed_0); 2782 2783 __ load_klass(r10_src_klass, src); 2784 #ifdef ASSERT 2785 // assert(src->klass() != NULL); 2786 { 2787 BLOCK_COMMENT("assert klasses not null {"); 2788 Label L1, L2; 2789 __ testptr(r10_src_klass, r10_src_klass); 2790 __ jcc(Assembler::notZero, L2); // it is broken if klass is NULL 2791 __ bind(L1); 2792 __ stop("broken null klass"); 2793 __ bind(L2); 2794 __ load_klass(rax, dst); 2795 __ cmpq(rax, 0); 2796 __ jcc(Assembler::equal, L1); // this would be broken also 2797 BLOCK_COMMENT("} assert klasses not null done"); 2798 } 2799 #endif 2800 2801 // Load layout helper (32-bits) 2802 // 2803 // |array_tag| | header_size | element_type | |log2_element_size| 2804 // 32 30 24 16 8 2 0 2805 // 2806 // array_tag: typeArray = 0x3, objArray = 0x2, non-array = 0x0 2807 // 2808 2809 const int lh_offset = in_bytes(Klass::layout_helper_offset()); 2810 2811 // Handle objArrays completely differently... 2812 const jint objArray_lh = Klass::array_layout_helper(T_OBJECT); 2813 __ cmpl(Address(r10_src_klass, lh_offset), objArray_lh); 2814 __ jcc(Assembler::equal, L_objArray); 2815 2816 // if (src->klass() != dst->klass()) return -1; 2817 __ load_klass(rax, dst); 2818 __ cmpq(r10_src_klass, rax); 2819 __ jcc(Assembler::notEqual, L_failed); 2820 2821 const Register rax_lh = rax; // layout helper 2822 __ movl(rax_lh, Address(r10_src_klass, lh_offset)); 2823 2824 // if (!src->is_Array()) return -1; 2825 __ cmpl(rax_lh, Klass::_lh_neutral_value); 2826 __ jcc(Assembler::greaterEqual, L_failed); 2827 2828 // At this point, it is known to be a typeArray (array_tag 0x3). 
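// Hedged sketch of the address computation performed below for primitive arrays,
// using the layout-helper fields described above (pseudo-C, names illustrative):
//
//   int lh    = src_klass->layout_helper();
//   int hdr   = (lh >> _lh_header_size_shift) & _lh_header_size_mask;  // bytes
//   int l2esz = lh & _lh_log2_element_size_mask;
//   from  = (address)src + hdr + ((size_t)src_pos << l2esz);
//   to    = (address)dst + hdr + ((size_t)dst_pos << l2esz);
//   count = length;   // element count handed to the byte/short/int/long stub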
2829 #ifdef ASSERT 2830 { 2831 BLOCK_COMMENT("assert primitive array {"); 2832 Label L; 2833 __ cmpl(rax_lh, (Klass::_lh_array_tag_type_value << Klass::_lh_array_tag_shift)); 2834 __ jcc(Assembler::greaterEqual, L); 2835 __ stop("must be a primitive array"); 2836 __ bind(L); 2837 BLOCK_COMMENT("} assert primitive array done"); 2838 } 2839 #endif 2840 2841 arraycopy_range_checks(src, src_pos, dst, dst_pos, r11_length, 2842 r10, L_failed); 2843 2844 // TypeArrayKlass 2845 // 2846 // src_addr = (src + array_header_in_bytes()) + (src_pos << log2elemsize); 2847 // dst_addr = (dst + array_header_in_bytes()) + (dst_pos << log2elemsize); 2848 // 2849 2850 const Register r10_offset = r10; // array offset 2851 const Register rax_elsize = rax_lh; // element size 2852 2853 __ movl(r10_offset, rax_lh); 2854 __ shrl(r10_offset, Klass::_lh_header_size_shift); 2855 __ andptr(r10_offset, Klass::_lh_header_size_mask); // array_offset 2856 __ addptr(src, r10_offset); // src array offset 2857 __ addptr(dst, r10_offset); // dst array offset 2858 BLOCK_COMMENT("choose copy loop based on element size"); 2859 __ andl(rax_lh, Klass::_lh_log2_element_size_mask); // rax_lh -> rax_elsize 2860 2861 // next registers should be set before the jump to corresponding stub 2862 const Register from = c_rarg0; // source array address 2863 const Register to = c_rarg1; // destination array address 2864 const Register count = c_rarg2; // elements count 2865 2866 // 'from', 'to', 'count' registers should be set in such order 2867 // since they are the same as 'src', 'src_pos', 'dst'. 2868 2869 __ BIND(L_copy_bytes); 2870 __ cmpl(rax_elsize, 0); 2871 __ jccb(Assembler::notEqual, L_copy_shorts); 2872 __ lea(from, Address(src, src_pos, Address::times_1, 0));// src_addr 2873 __ lea(to, Address(dst, dst_pos, Address::times_1, 0));// dst_addr 2874 __ movl2ptr(count, r11_length); // length 2875 __ jump(RuntimeAddress(byte_copy_entry)); 2876 2877 __ BIND(L_copy_shorts); 2878 __ cmpl(rax_elsize, LogBytesPerShort); 2879 __ jccb(Assembler::notEqual, L_copy_ints); 2880 __ lea(from, Address(src, src_pos, Address::times_2, 0));// src_addr 2881 __ lea(to, Address(dst, dst_pos, Address::times_2, 0));// dst_addr 2882 __ movl2ptr(count, r11_length); // length 2883 __ jump(RuntimeAddress(short_copy_entry)); 2884 2885 __ BIND(L_copy_ints); 2886 __ cmpl(rax_elsize, LogBytesPerInt); 2887 __ jccb(Assembler::notEqual, L_copy_longs); 2888 __ lea(from, Address(src, src_pos, Address::times_4, 0));// src_addr 2889 __ lea(to, Address(dst, dst_pos, Address::times_4, 0));// dst_addr 2890 __ movl2ptr(count, r11_length); // length 2891 __ jump(RuntimeAddress(int_copy_entry)); 2892 2893 __ BIND(L_copy_longs); 2894 #ifdef ASSERT 2895 { 2896 BLOCK_COMMENT("assert long copy {"); 2897 Label L; 2898 __ cmpl(rax_elsize, LogBytesPerLong); 2899 __ jcc(Assembler::equal, L); 2900 __ stop("must be long copy, but elsize is wrong"); 2901 __ bind(L); 2902 BLOCK_COMMENT("} assert long copy done"); 2903 } 2904 #endif 2905 __ lea(from, Address(src, src_pos, Address::times_8, 0));// src_addr 2906 __ lea(to, Address(dst, dst_pos, Address::times_8, 0));// dst_addr 2907 __ movl2ptr(count, r11_length); // length 2908 __ jump(RuntimeAddress(long_copy_entry)); 2909 2910 // ObjArrayKlass 2911 __ BIND(L_objArray); 2912 // live at this point: r10_src_klass, r11_length, src[_pos], dst[_pos] 2913 2914 Label L_plain_copy, L_checkcast_copy; 2915 // test array classes for subtyping 2916 __ load_klass(rax, dst); 2917 __ cmpq(r10_src_klass, rax); // usual case is exact equality 2918 __ 
jcc(Assembler::notEqual, L_checkcast_copy); 2919 2920 // Identically typed arrays can be copied without element-wise checks. 2921 arraycopy_range_checks(src, src_pos, dst, dst_pos, r11_length, 2922 r10, L_failed); 2923 2924 __ lea(from, Address(src, src_pos, TIMES_OOP, 2925 arrayOopDesc::base_offset_in_bytes(T_OBJECT))); // src_addr 2926 __ lea(to, Address(dst, dst_pos, TIMES_OOP, 2927 arrayOopDesc::base_offset_in_bytes(T_OBJECT))); // dst_addr 2928 __ movl2ptr(count, r11_length); // length 2929 __ BIND(L_plain_copy); 2930 __ jump(RuntimeAddress(oop_copy_entry)); 2931 2932 __ BIND(L_checkcast_copy); 2933 // live at this point: r10_src_klass, r11_length, rax (dst_klass) 2934 { 2935 // Before looking at dst.length, make sure dst is also an objArray. 2936 __ cmpl(Address(rax, lh_offset), objArray_lh); 2937 __ jcc(Assembler::notEqual, L_failed); 2938 2939 // It is safe to examine both src.length and dst.length. 2940 arraycopy_range_checks(src, src_pos, dst, dst_pos, r11_length, 2941 rax, L_failed); 2942 2943 const Register r11_dst_klass = r11; 2944 __ load_klass(r11_dst_klass, dst); // reload 2945 2946 // Marshal the base address arguments now, freeing registers. 2947 __ lea(from, Address(src, src_pos, TIMES_OOP, 2948 arrayOopDesc::base_offset_in_bytes(T_OBJECT))); 2949 __ lea(to, Address(dst, dst_pos, TIMES_OOP, 2950 arrayOopDesc::base_offset_in_bytes(T_OBJECT))); 2951 __ movl(count, length); // length (reloaded) 2952 Register sco_temp = c_rarg3; // this register is free now 2953 assert_different_registers(from, to, count, sco_temp, 2954 r11_dst_klass, r10_src_klass); 2955 assert_clean_int(count, sco_temp); 2956 2957 // Generate the type check. 2958 const int sco_offset = in_bytes(Klass::super_check_offset_offset()); 2959 __ movl(sco_temp, Address(r11_dst_klass, sco_offset)); 2960 assert_clean_int(sco_temp, rax); 2961 generate_type_check(r10_src_klass, sco_temp, r11_dst_klass, L_plain_copy); 2962 2963 // Fetch destination element klass from the ObjArrayKlass header. 2964 int ek_offset = in_bytes(ObjArrayKlass::element_klass_offset()); 2965 __ movptr(r11_dst_klass, Address(r11_dst_klass, ek_offset)); 2966 __ movl( sco_temp, Address(r11_dst_klass, sco_offset)); 2967 assert_clean_int(sco_temp, rax); 2968 2969 // the checkcast_copy loop needs two extra arguments: 2970 assert(c_rarg3 == sco_temp, "#3 already in place"); 2971 // Set up arguments for checkcast_copy_entry. 
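// Hedged note on the contract assumed here: the entry point recorded by
// generate_checkcast_copy() is taken after that stub's own argument marshalling,
// so the caller must already have
//   rdi = from, rsi = to, rdx = element count,
//   rcx = super_check_offset (ckoff), r8 = destination element klass (ckval).
// setup_arg_regs(4) establishes the first four from c_rarg0..c_rarg3; r8 is
// loaded explicitly below.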
2972 setup_arg_regs(4); 2973 __ movptr(r8, r11_dst_klass); // dst.klass.element_klass, r8 is c_rarg4 on Linux/Solaris 2974 __ jump(RuntimeAddress(checkcast_copy_entry)); 2975 } 2976 2977 __ BIND(L_failed); 2978 __ xorptr(rax, rax); 2979 __ notptr(rax); // return -1 2980 __ leave(); // required for proper stackwalking of RuntimeStub frame 2981 __ ret(0); 2982 2983 return start; 2984 } 2985 2986 void generate_arraycopy_stubs() { 2987 address entry; 2988 address entry_jbyte_arraycopy; 2989 address entry_jshort_arraycopy; 2990 address entry_jint_arraycopy; 2991 address entry_oop_arraycopy; 2992 address entry_jlong_arraycopy; 2993 address entry_checkcast_arraycopy; 2994 2995 StubRoutines::_jbyte_disjoint_arraycopy = generate_disjoint_byte_copy(false, &entry, 2996 "jbyte_disjoint_arraycopy"); 2997 StubRoutines::_jbyte_arraycopy = generate_conjoint_byte_copy(false, entry, &entry_jbyte_arraycopy, 2998 "jbyte_arraycopy"); 2999 3000 StubRoutines::_jshort_disjoint_arraycopy = generate_disjoint_short_copy(false, &entry, 3001 "jshort_disjoint_arraycopy"); 3002 StubRoutines::_jshort_arraycopy = generate_conjoint_short_copy(false, entry, &entry_jshort_arraycopy, 3003 "jshort_arraycopy"); 3004 3005 StubRoutines::_jint_disjoint_arraycopy = generate_disjoint_int_oop_copy(false, false, &entry, 3006 "jint_disjoint_arraycopy"); 3007 StubRoutines::_jint_arraycopy = generate_conjoint_int_oop_copy(false, false, entry, 3008 &entry_jint_arraycopy, "jint_arraycopy"); 3009 3010 StubRoutines::_jlong_disjoint_arraycopy = generate_disjoint_long_oop_copy(false, false, &entry, 3011 "jlong_disjoint_arraycopy"); 3012 StubRoutines::_jlong_arraycopy = generate_conjoint_long_oop_copy(false, false, entry, 3013 &entry_jlong_arraycopy, "jlong_arraycopy"); 3014 3015 3016 if (UseCompressedOops) { 3017 StubRoutines::_oop_disjoint_arraycopy = generate_disjoint_int_oop_copy(false, true, &entry, 3018 "oop_disjoint_arraycopy"); 3019 StubRoutines::_oop_arraycopy = generate_conjoint_int_oop_copy(false, true, entry, 3020 &entry_oop_arraycopy, "oop_arraycopy"); 3021 StubRoutines::_oop_disjoint_arraycopy_uninit = generate_disjoint_int_oop_copy(false, true, &entry, 3022 "oop_disjoint_arraycopy_uninit", 3023 /*dest_uninitialized*/true); 3024 StubRoutines::_oop_arraycopy_uninit = generate_conjoint_int_oop_copy(false, true, entry, 3025 NULL, "oop_arraycopy_uninit", 3026 /*dest_uninitialized*/true); 3027 } else { 3028 StubRoutines::_oop_disjoint_arraycopy = generate_disjoint_long_oop_copy(false, true, &entry, 3029 "oop_disjoint_arraycopy"); 3030 StubRoutines::_oop_arraycopy = generate_conjoint_long_oop_copy(false, true, entry, 3031 &entry_oop_arraycopy, "oop_arraycopy"); 3032 StubRoutines::_oop_disjoint_arraycopy_uninit = generate_disjoint_long_oop_copy(false, true, &entry, 3033 "oop_disjoint_arraycopy_uninit", 3034 /*dest_uninitialized*/true); 3035 StubRoutines::_oop_arraycopy_uninit = generate_conjoint_long_oop_copy(false, true, entry, 3036 NULL, "oop_arraycopy_uninit", 3037 /*dest_uninitialized*/true); 3038 } 3039 3040 StubRoutines::_checkcast_arraycopy = generate_checkcast_copy("checkcast_arraycopy", &entry_checkcast_arraycopy); 3041 StubRoutines::_checkcast_arraycopy_uninit = generate_checkcast_copy("checkcast_arraycopy_uninit", NULL, 3042 /*dest_uninitialized*/true); 3043 3044 StubRoutines::_unsafe_arraycopy = generate_unsafe_copy("unsafe_arraycopy", 3045 entry_jbyte_arraycopy, 3046 entry_jshort_arraycopy, 3047 entry_jint_arraycopy, 3048 entry_jlong_arraycopy); 3049 StubRoutines::_generic_arraycopy = 
generate_generic_copy("generic_arraycopy", 3050 entry_jbyte_arraycopy, 3051 entry_jshort_arraycopy, 3052 entry_jint_arraycopy, 3053 entry_oop_arraycopy, 3054 entry_jlong_arraycopy, 3055 entry_checkcast_arraycopy); 3056 3057 StubRoutines::_jbyte_fill = generate_fill(T_BYTE, false, "jbyte_fill"); 3058 StubRoutines::_jshort_fill = generate_fill(T_SHORT, false, "jshort_fill"); 3059 StubRoutines::_jint_fill = generate_fill(T_INT, false, "jint_fill"); 3060 StubRoutines::_arrayof_jbyte_fill = generate_fill(T_BYTE, true, "arrayof_jbyte_fill"); 3061 StubRoutines::_arrayof_jshort_fill = generate_fill(T_SHORT, true, "arrayof_jshort_fill"); 3062 StubRoutines::_arrayof_jint_fill = generate_fill(T_INT, true, "arrayof_jint_fill"); 3063 3064 // We don't generate specialized code for HeapWord-aligned source 3065 // arrays, so just use the code we've already generated 3066 StubRoutines::_arrayof_jbyte_disjoint_arraycopy = StubRoutines::_jbyte_disjoint_arraycopy; 3067 StubRoutines::_arrayof_jbyte_arraycopy = StubRoutines::_jbyte_arraycopy; 3068 3069 StubRoutines::_arrayof_jshort_disjoint_arraycopy = StubRoutines::_jshort_disjoint_arraycopy; 3070 StubRoutines::_arrayof_jshort_arraycopy = StubRoutines::_jshort_arraycopy; 3071 3072 StubRoutines::_arrayof_jint_disjoint_arraycopy = StubRoutines::_jint_disjoint_arraycopy; 3073 StubRoutines::_arrayof_jint_arraycopy = StubRoutines::_jint_arraycopy; 3074 3075 StubRoutines::_arrayof_jlong_disjoint_arraycopy = StubRoutines::_jlong_disjoint_arraycopy; 3076 StubRoutines::_arrayof_jlong_arraycopy = StubRoutines::_jlong_arraycopy; 3077 3078 StubRoutines::_arrayof_oop_disjoint_arraycopy = StubRoutines::_oop_disjoint_arraycopy; 3079 StubRoutines::_arrayof_oop_arraycopy = StubRoutines::_oop_arraycopy; 3080 3081 StubRoutines::_arrayof_oop_disjoint_arraycopy_uninit = StubRoutines::_oop_disjoint_arraycopy_uninit; 3082 StubRoutines::_arrayof_oop_arraycopy_uninit = StubRoutines::_oop_arraycopy_uninit; 3083 } 3084 3085 // AES intrinsic stubs 3086 enum {AESBlockSize = 16}; 3087 3088 address generate_key_shuffle_mask() { 3089 __ align(16); 3090 StubCodeMark mark(this, "StubRoutines", "key_shuffle_mask"); 3091 address start = __ pc(); 3092 __ emit_data64( 0x0405060700010203, relocInfo::none ); 3093 __ emit_data64( 0x0c0d0e0f08090a0b, relocInfo::none ); 3094 return start; 3095 } 3096 3097 address generate_counter_shuffle_mask() { 3098 __ align(16); 3099 StubCodeMark mark(this, "StubRoutines", "counter_shuffle_mask"); 3100 address start = __ pc(); 3101 __ emit_data64(0x08090a0b0c0d0e0f, relocInfo::none); 3102 __ emit_data64(0x0001020304050607, relocInfo::none); 3103 return start; 3104 } 3105 3106 // Utility routine for loading a 128-bit key word in little endian format 3107 // can optionally specify that the shuffle mask is already in an xmmregister 3108 void load_key(XMMRegister xmmdst, Register key, int offset, XMMRegister xmm_shuf_mask=NULL) { 3109 __ movdqu(xmmdst, Address(key, offset)); 3110 if (xmm_shuf_mask != NULL) { 3111 __ pshufb(xmmdst, xmm_shuf_mask); 3112 } else { 3113 __ pshufb(xmmdst, ExternalAddress(StubRoutines::x86::key_shuffle_mask_addr())); 3114 } 3115 } 3116 3117 // Utility routine for increase 128bit counter (iv in CTR mode) 3118 void inc_counter(Register reg, XMMRegister xmmdst, int inc_delta, Label& next_block) { 3119 __ pextrq(reg, xmmdst, 0x0); 3120 __ addq(reg, inc_delta); 3121 __ pinsrq(xmmdst, reg, 0x0); 3122 __ jcc(Assembler::carryClear, next_block); // jump if no carry 3123 __ pextrq(reg, xmmdst, 0x01); // Carry 3124 __ addq(reg, 0x01); 3125 __ 
pinsrq(xmmdst, reg, 0x01); //Carry end 3126 __ BIND(next_block); // next instruction 3127 } 3128 3129 // Arguments: 3130 // 3131 // Inputs: 3132 // c_rarg0 - source byte array address 3133 // c_rarg1 - destination byte array address 3134 // c_rarg2 - K (key) in little endian int array 3135 // 3136 address generate_aescrypt_encryptBlock() { 3137 assert(UseAES, "need AES instructions and misaligned SSE support"); 3138 __ align(CodeEntryAlignment); 3139 StubCodeMark mark(this, "StubRoutines", "aescrypt_encryptBlock"); 3140 Label L_doLast; 3141 address start = __ pc(); 3142 3143 const Register from = c_rarg0; // source array address 3144 const Register to = c_rarg1; // destination array address 3145 const Register key = c_rarg2; // key array address 3146 const Register keylen = rax; 3147 3148 const XMMRegister xmm_result = xmm0; 3149 const XMMRegister xmm_key_shuf_mask = xmm1; 3150 // On win64 xmm6-xmm15 must be preserved so don't use them. 3151 const XMMRegister xmm_temp1 = xmm2; 3152 const XMMRegister xmm_temp2 = xmm3; 3153 const XMMRegister xmm_temp3 = xmm4; 3154 const XMMRegister xmm_temp4 = xmm5; 3155 3156 __ enter(); // required for proper stackwalking of RuntimeStub frame 3157 3158 // For EVEX with VL and BW, provide a standard mask, VL = 128 will guide the merge 3159 // context for the registers used, where all instructions below are using 128-bit mode 3160 // On EVEX without VL and BW, these instructions will all be AVX. 3161 if (VM_Version::supports_avx512vlbw()) { 3162 __ movl(rax, 0xffff); 3163 __ kmovql(k1, rax); 3164 } 3165 3166 // keylen could be only {11, 13, 15} * 4 = {44, 52, 60} 3167 __ movl(keylen, Address(key, arrayOopDesc::length_offset_in_bytes() - arrayOopDesc::base_offset_in_bytes(T_INT))); 3168 3169 __ movdqu(xmm_key_shuf_mask, ExternalAddress(StubRoutines::x86::key_shuffle_mask_addr())); 3170 __ movdqu(xmm_result, Address(from, 0)); // get 16 bytes of input 3171 3172 // For encryption, the java expanded key ordering is just what we need 3173 // we don't know if the key is aligned, hence not using load-execute form 3174 3175 load_key(xmm_temp1, key, 0x00, xmm_key_shuf_mask); 3176 __ pxor(xmm_result, xmm_temp1); 3177 3178 load_key(xmm_temp1, key, 0x10, xmm_key_shuf_mask); 3179 load_key(xmm_temp2, key, 0x20, xmm_key_shuf_mask); 3180 load_key(xmm_temp3, key, 0x30, xmm_key_shuf_mask); 3181 load_key(xmm_temp4, key, 0x40, xmm_key_shuf_mask); 3182 3183 __ aesenc(xmm_result, xmm_temp1); 3184 __ aesenc(xmm_result, xmm_temp2); 3185 __ aesenc(xmm_result, xmm_temp3); 3186 __ aesenc(xmm_result, xmm_temp4); 3187 3188 load_key(xmm_temp1, key, 0x50, xmm_key_shuf_mask); 3189 load_key(xmm_temp2, key, 0x60, xmm_key_shuf_mask); 3190 load_key(xmm_temp3, key, 0x70, xmm_key_shuf_mask); 3191 load_key(xmm_temp4, key, 0x80, xmm_key_shuf_mask); 3192 3193 __ aesenc(xmm_result, xmm_temp1); 3194 __ aesenc(xmm_result, xmm_temp2); 3195 __ aesenc(xmm_result, xmm_temp3); 3196 __ aesenc(xmm_result, xmm_temp4); 3197 3198 load_key(xmm_temp1, key, 0x90, xmm_key_shuf_mask); 3199 load_key(xmm_temp2, key, 0xa0, xmm_key_shuf_mask); 3200 3201 __ cmpl(keylen, 44); 3202 __ jccb(Assembler::equal, L_doLast); 3203 3204 __ aesenc(xmm_result, xmm_temp1); 3205 __ aesenc(xmm_result, xmm_temp2); 3206 3207 load_key(xmm_temp1, key, 0xb0, xmm_key_shuf_mask); 3208 load_key(xmm_temp2, key, 0xc0, xmm_key_shuf_mask); 3209 3210 __ cmpl(keylen, 52); 3211 __ jccb(Assembler::equal, L_doLast); 3212 3213 __ aesenc(xmm_result, xmm_temp1); 3214 __ aesenc(xmm_result, xmm_temp2); 3215 3216 load_key(xmm_temp1, key, 0xd0, 
xmm_key_shuf_mask); 3217 load_key(xmm_temp2, key, 0xe0, xmm_key_shuf_mask); 3218 3219 __ BIND(L_doLast); 3220 __ aesenc(xmm_result, xmm_temp1); 3221 __ aesenclast(xmm_result, xmm_temp2); 3222 __ movdqu(Address(to, 0), xmm_result); // store the result 3223 __ xorptr(rax, rax); // return 0 3224 __ leave(); // required for proper stackwalking of RuntimeStub frame 3225 __ ret(0); 3226 3227 return start; 3228 } 3229 3230 3231 // Arguments: 3232 // 3233 // Inputs: 3234 // c_rarg0 - source byte array address 3235 // c_rarg1 - destination byte array address 3236 // c_rarg2 - K (key) in little endian int array 3237 // 3238 address generate_aescrypt_decryptBlock() { 3239 assert(UseAES, "need AES instructions and misaligned SSE support"); 3240 __ align(CodeEntryAlignment); 3241 StubCodeMark mark(this, "StubRoutines", "aescrypt_decryptBlock"); 3242 Label L_doLast; 3243 address start = __ pc(); 3244 3245 const Register from = c_rarg0; // source array address 3246 const Register to = c_rarg1; // destination array address 3247 const Register key = c_rarg2; // key array address 3248 const Register keylen = rax; 3249 3250 const XMMRegister xmm_result = xmm0; 3251 const XMMRegister xmm_key_shuf_mask = xmm1; 3252 // On win64 xmm6-xmm15 must be preserved so don't use them. 3253 const XMMRegister xmm_temp1 = xmm2; 3254 const XMMRegister xmm_temp2 = xmm3; 3255 const XMMRegister xmm_temp3 = xmm4; 3256 const XMMRegister xmm_temp4 = xmm5; 3257 3258 __ enter(); // required for proper stackwalking of RuntimeStub frame 3259 3260 // For EVEX with VL and BW, provide a standard mask, VL = 128 will guide the merge 3261 // context for the registers used, where all instructions below are using 128-bit mode 3262 // On EVEX without VL and BW, these instructions will all be AVX. 3263 if (VM_Version::supports_avx512vlbw()) { 3264 __ movl(rax, 0xffff); 3265 __ kmovql(k1, rax); 3266 } 3267 3268 // keylen could be only {11, 13, 15} * 4 = {44, 52, 60} 3269 __ movl(keylen, Address(key, arrayOopDesc::length_offset_in_bytes() - arrayOopDesc::base_offset_in_bytes(T_INT))); 3270 3271 __ movdqu(xmm_key_shuf_mask, ExternalAddress(StubRoutines::x86::key_shuffle_mask_addr())); 3272 __ movdqu(xmm_result, Address(from, 0)); 3273 3274 // for decryption java expanded key ordering is rotated one position from what we want 3275 // so we start from 0x10 here and hit 0x00 last 3276 // we don't know if the key is aligned, hence not using load-execute form 3277 load_key(xmm_temp1, key, 0x10, xmm_key_shuf_mask); 3278 load_key(xmm_temp2, key, 0x20, xmm_key_shuf_mask); 3279 load_key(xmm_temp3, key, 0x30, xmm_key_shuf_mask); 3280 load_key(xmm_temp4, key, 0x40, xmm_key_shuf_mask); 3281 3282 __ pxor (xmm_result, xmm_temp1); 3283 __ aesdec(xmm_result, xmm_temp2); 3284 __ aesdec(xmm_result, xmm_temp3); 3285 __ aesdec(xmm_result, xmm_temp4); 3286 3287 load_key(xmm_temp1, key, 0x50, xmm_key_shuf_mask); 3288 load_key(xmm_temp2, key, 0x60, xmm_key_shuf_mask); 3289 load_key(xmm_temp3, key, 0x70, xmm_key_shuf_mask); 3290 load_key(xmm_temp4, key, 0x80, xmm_key_shuf_mask); 3291 3292 __ aesdec(xmm_result, xmm_temp1); 3293 __ aesdec(xmm_result, xmm_temp2); 3294 __ aesdec(xmm_result, xmm_temp3); 3295 __ aesdec(xmm_result, xmm_temp4); 3296 3297 load_key(xmm_temp1, key, 0x90, xmm_key_shuf_mask); 3298 load_key(xmm_temp2, key, 0xa0, xmm_key_shuf_mask); 3299 load_key(xmm_temp3, key, 0x00, xmm_key_shuf_mask); 3300 3301 __ cmpl(keylen, 44); 3302 __ jccb(Assembler::equal, L_doLast); 3303 3304 __ aesdec(xmm_result, xmm_temp1); 3305 __ aesdec(xmm_result, xmm_temp2); 3306 3307 
load_key(xmm_temp1, key, 0xb0, xmm_key_shuf_mask); 3308 load_key(xmm_temp2, key, 0xc0, xmm_key_shuf_mask); 3309 3310 __ cmpl(keylen, 52); 3311 __ jccb(Assembler::equal, L_doLast); 3312 3313 __ aesdec(xmm_result, xmm_temp1); 3314 __ aesdec(xmm_result, xmm_temp2); 3315 3316 load_key(xmm_temp1, key, 0xd0, xmm_key_shuf_mask); 3317 load_key(xmm_temp2, key, 0xe0, xmm_key_shuf_mask); 3318 3319 __ BIND(L_doLast); 3320 __ aesdec(xmm_result, xmm_temp1); 3321 __ aesdec(xmm_result, xmm_temp2); 3322 3323 // for decryption the aesdeclast operation is always on key+0x00 3324 __ aesdeclast(xmm_result, xmm_temp3); 3325 __ movdqu(Address(to, 0), xmm_result); // store the result 3326 __ xorptr(rax, rax); // return 0 3327 __ leave(); // required for proper stackwalking of RuntimeStub frame 3328 __ ret(0); 3329 3330 return start; 3331 } 3332 3333 3334 // Arguments: 3335 // 3336 // Inputs: 3337 // c_rarg0 - source byte array address 3338 // c_rarg1 - destination byte array address 3339 // c_rarg2 - K (key) in little endian int array 3340 // c_rarg3 - r vector byte array address 3341 // c_rarg4 - input length 3342 // 3343 // Output: 3344 // rax - input length 3345 // 3346 address generate_cipherBlockChaining_encryptAESCrypt() { 3347 assert(UseAES, "need AES instructions and misaligned SSE support"); 3348 __ align(CodeEntryAlignment); 3349 StubCodeMark mark(this, "StubRoutines", "cipherBlockChaining_encryptAESCrypt"); 3350 address start = __ pc(); 3351 3352 Label L_exit, L_key_192_256, L_key_256, L_loopTop_128, L_loopTop_192, L_loopTop_256; 3353 const Register from = c_rarg0; // source array address 3354 const Register to = c_rarg1; // destination array address 3355 const Register key = c_rarg2; // key array address 3356 const Register rvec = c_rarg3; // r byte array initialized from initvector array address 3357 // and left with the results of the last encryption block 3358 #ifndef _WIN64 3359 const Register len_reg = c_rarg4; // src len (must be multiple of blocksize 16) 3360 #else 3361 const Address len_mem(rbp, 6 * wordSize); // length is on stack on Win64 3362 const Register len_reg = r10; // pick the first volatile windows register 3363 #endif 3364 const Register pos = rax; 3365 3366 // xmm register assignments for the loops below 3367 const XMMRegister xmm_result = xmm0; 3368 const XMMRegister xmm_temp = xmm1; 3369 // keys 0-10 preloaded into xmm2-xmm12 3370 const int XMM_REG_NUM_KEY_FIRST = 2; 3371 const int XMM_REG_NUM_KEY_LAST = 15; 3372 const XMMRegister xmm_key0 = as_XMMRegister(XMM_REG_NUM_KEY_FIRST); 3373 const XMMRegister xmm_key10 = as_XMMRegister(XMM_REG_NUM_KEY_FIRST+10); 3374 const XMMRegister xmm_key11 = as_XMMRegister(XMM_REG_NUM_KEY_FIRST+11); 3375 const XMMRegister xmm_key12 = as_XMMRegister(XMM_REG_NUM_KEY_FIRST+12); 3376 const XMMRegister xmm_key13 = as_XMMRegister(XMM_REG_NUM_KEY_FIRST+13); 3377 3378 __ enter(); // required for proper stackwalking of RuntimeStub frame 3379 3380 // For EVEX with VL and BW, provide a standard mask, VL = 128 will guide the merge 3381 // context for the registers used, where all instructions below are using 128-bit mode 3382 // On EVEX without VL and BW, these instructions will all be AVX. 
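  // Explanatory note: loading 0xffff into k1 below marks all 16 byte lanes of
  // a 128-bit vector as active, so if the assembler emits EVEX-encoded forms
  // of the SSE instructions used here they merge exactly like the unmasked
  // legacy encodings would.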
3383 if (VM_Version::supports_avx512vlbw()) { 3384 __ movl(rax, 0xffff); 3385 __ kmovql(k1, rax); 3386 } 3387 3388 #ifdef _WIN64 3389 // on win64, fill len_reg from stack position 3390 __ movl(len_reg, len_mem); 3391 // save the xmm registers which must be preserved 6-15 3392 __ subptr(rsp, -rsp_after_call_off * wordSize); 3393 for (int i = 6; i <= XMM_REG_NUM_KEY_LAST; i++) { 3394 __ movdqu(xmm_save(i), as_XMMRegister(i)); 3395 } 3396 #else 3397 __ push(len_reg); // Save 3398 #endif 3399 3400 const XMMRegister xmm_key_shuf_mask = xmm_temp; // used temporarily to swap key bytes up front 3401 __ movdqu(xmm_key_shuf_mask, ExternalAddress(StubRoutines::x86::key_shuffle_mask_addr())); 3402 // load up xmm regs xmm2 thru xmm12 with key 0x00 - 0xa0 3403 for (int rnum = XMM_REG_NUM_KEY_FIRST, offset = 0x00; rnum <= XMM_REG_NUM_KEY_FIRST+10; rnum++) { 3404 load_key(as_XMMRegister(rnum), key, offset, xmm_key_shuf_mask); 3405 offset += 0x10; 3406 } 3407 __ movdqu(xmm_result, Address(rvec, 0x00)); // initialize xmm_result with r vec 3408 3409 // now split to different paths depending on the keylen (len in ints of AESCrypt.KLE array (52=192, or 60=256)) 3410 __ movl(rax, Address(key, arrayOopDesc::length_offset_in_bytes() - arrayOopDesc::base_offset_in_bytes(T_INT))); 3411 __ cmpl(rax, 44); 3412 __ jcc(Assembler::notEqual, L_key_192_256); 3413 3414 // 128 bit code follows here 3415 __ movptr(pos, 0); 3416 __ align(OptoLoopAlignment); 3417 3418 __ BIND(L_loopTop_128); 3419 __ movdqu(xmm_temp, Address(from, pos, Address::times_1, 0)); // get next 16 bytes of input 3420 __ pxor (xmm_result, xmm_temp); // xor with the current r vector 3421 __ pxor (xmm_result, xmm_key0); // do the aes rounds 3422 for (int rnum = XMM_REG_NUM_KEY_FIRST + 1; rnum <= XMM_REG_NUM_KEY_FIRST + 9; rnum++) { 3423 __ aesenc(xmm_result, as_XMMRegister(rnum)); 3424 } 3425 __ aesenclast(xmm_result, xmm_key10); 3426 __ movdqu(Address(to, pos, Address::times_1, 0), xmm_result); // store into the next 16 bytes of output 3427 // no need to store r to memory until we exit 3428 __ addptr(pos, AESBlockSize); 3429 __ subptr(len_reg, AESBlockSize); 3430 __ jcc(Assembler::notEqual, L_loopTop_128); 3431 3432 __ BIND(L_exit); 3433 __ movdqu(Address(rvec, 0), xmm_result); // final value of r stored in rvec of CipherBlockChaining object 3434 3435 #ifdef _WIN64 3436 // restore xmm regs belonging to calling function 3437 for (int i = 6; i <= XMM_REG_NUM_KEY_LAST; i++) { 3438 __ movdqu(as_XMMRegister(i), xmm_save(i)); 3439 } 3440 __ movl(rax, len_mem); 3441 #else 3442 __ pop(rax); // return length 3443 #endif 3444 __ leave(); // required for proper stackwalking of RuntimeStub frame 3445 __ ret(0); 3446 3447 __ BIND(L_key_192_256); 3448 // here rax = len in ints of AESCrypt.KLE array (52=192, or 60=256) 3449 load_key(xmm_key11, key, 0xb0, xmm_key_shuf_mask); 3450 load_key(xmm_key12, key, 0xc0, xmm_key_shuf_mask); 3451 __ cmpl(rax, 52); 3452 __ jcc(Assembler::notEqual, L_key_256); 3453 3454 // 192-bit code follows here (could be changed to use more xmm registers) 3455 __ movptr(pos, 0); 3456 __ align(OptoLoopAlignment); 3457 3458 __ BIND(L_loopTop_192); 3459 __ movdqu(xmm_temp, Address(from, pos, Address::times_1, 0)); // get next 16 bytes of input 3460 __ pxor (xmm_result, xmm_temp); // xor with the current r vector 3461 __ pxor (xmm_result, xmm_key0); // do the aes rounds 3462 for (int rnum = XMM_REG_NUM_KEY_FIRST + 1; rnum <= XMM_REG_NUM_KEY_FIRST + 11; rnum++) { 3463 __ aesenc(xmm_result, as_XMMRegister(rnum)); 3464 } 3465 __ aesenclast(xmm_result, 
xmm_key12);
3466 __ movdqu(Address(to, pos, Address::times_1, 0), xmm_result); // store into the next 16 bytes of output
3467 // no need to store r to memory until we exit
3468 __ addptr(pos, AESBlockSize);
3469 __ subptr(len_reg, AESBlockSize);
3470 __ jcc(Assembler::notEqual, L_loopTop_192);
3471 __ jmp(L_exit);
3472
3473 __ BIND(L_key_256);
3474 // 256-bit code follows here (could be changed to use more xmm registers)
3475 load_key(xmm_key13, key, 0xd0, xmm_key_shuf_mask);
3476 __ movptr(pos, 0);
3477 __ align(OptoLoopAlignment);
3478
3479 __ BIND(L_loopTop_256);
3480 __ movdqu(xmm_temp, Address(from, pos, Address::times_1, 0)); // get next 16 bytes of input
3481 __ pxor (xmm_result, xmm_temp); // xor with the current r vector
3482 __ pxor (xmm_result, xmm_key0); // do the aes rounds
3483 for (int rnum = XMM_REG_NUM_KEY_FIRST + 1; rnum <= XMM_REG_NUM_KEY_FIRST + 13; rnum++) {
3484 __ aesenc(xmm_result, as_XMMRegister(rnum));
3485 }
3486 load_key(xmm_temp, key, 0xe0);
3487 __ aesenclast(xmm_result, xmm_temp);
3488 __ movdqu(Address(to, pos, Address::times_1, 0), xmm_result); // store into the next 16 bytes of output
3489 // no need to store r to memory until we exit
3490 __ addptr(pos, AESBlockSize);
3491 __ subptr(len_reg, AESBlockSize);
3492 __ jcc(Assembler::notEqual, L_loopTop_256);
3493 __ jmp(L_exit);
3494
3495 return start;
3496 }
3497
3498 // Safefetch stubs.
3499 void generate_safefetch(const char* name, int size, address* entry,
3500 address* fault_pc, address* continuation_pc) {
3501 // safefetch signatures:
3502 // int SafeFetch32(int* adr, int errValue);
3503 // intptr_t SafeFetchN (intptr_t* adr, intptr_t errValue);
3504 //
3505 // arguments:
3506 // c_rarg0 = adr
3507 // c_rarg1 = errValue
3508 //
3509 // result:
3510 // rax = *adr or errValue
3511
3512 StubCodeMark mark(this, "StubRoutines", name);
3513
3514 // Entry point, pc or function descriptor.
3515 *entry = __ pc();
3516
3517 // Load *adr into c_rarg1, may fault.
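  // The pcs recorded in *fault_pc and *continuation_pc let the VM signal
  // handler recognize a fault inside this stub and resume at the continuation,
  // so SafeFetch32/SafeFetchN return errValue instead of crashing when adr
  // points at unmapped memory.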
3518 *fault_pc = __ pc(); 3519 switch (size) { 3520 case 4: 3521 // int32_t 3522 __ movl(c_rarg1, Address(c_rarg0, 0)); 3523 break; 3524 case 8: 3525 // int64_t 3526 __ movq(c_rarg1, Address(c_rarg0, 0)); 3527 break; 3528 default: 3529 ShouldNotReachHere(); 3530 } 3531 3532 // return errValue or *adr 3533 *continuation_pc = __ pc(); 3534 __ movq(rax, c_rarg1); 3535 __ ret(0); 3536 } 3537 3538 // This is a version of CBC/AES Decrypt which does 4 blocks in a loop at a time 3539 // to hide instruction latency 3540 // 3541 // Arguments: 3542 // 3543 // Inputs: 3544 // c_rarg0 - source byte array address 3545 // c_rarg1 - destination byte array address 3546 // c_rarg2 - K (key) in little endian int array 3547 // c_rarg3 - r vector byte array address 3548 // c_rarg4 - input length 3549 // 3550 // Output: 3551 // rax - input length 3552 // 3553 address generate_cipherBlockChaining_decryptAESCrypt_Parallel() { 3554 assert(UseAES, "need AES instructions and misaligned SSE support"); 3555 __ align(CodeEntryAlignment); 3556 StubCodeMark mark(this, "StubRoutines", "cipherBlockChaining_decryptAESCrypt"); 3557 address start = __ pc(); 3558 3559 const Register from = c_rarg0; // source array address 3560 const Register to = c_rarg1; // destination array address 3561 const Register key = c_rarg2; // key array address 3562 const Register rvec = c_rarg3; // r byte array initialized from initvector array address 3563 // and left with the results of the last encryption block 3564 #ifndef _WIN64 3565 const Register len_reg = c_rarg4; // src len (must be multiple of blocksize 16) 3566 #else 3567 const Address len_mem(rbp, 6 * wordSize); // length is on stack on Win64 3568 const Register len_reg = r10; // pick the first volatile windows register 3569 #endif 3570 const Register pos = rax; 3571 3572 const int PARALLEL_FACTOR = 4; 3573 const int ROUNDS[3] = { 10, 12, 14 }; // aes rounds for key128, key192, key256 3574 3575 Label L_exit; 3576 Label L_singleBlock_loopTopHead[3]; // 128, 192, 256 3577 Label L_singleBlock_loopTopHead2[3]; // 128, 192, 256 3578 Label L_singleBlock_loopTop[3]; // 128, 192, 256 3579 Label L_multiBlock_loopTopHead[3]; // 128, 192, 256 3580 Label L_multiBlock_loopTop[3]; // 128, 192, 256 3581 3582 // keys 0-10 preloaded into xmm5-xmm15 3583 const int XMM_REG_NUM_KEY_FIRST = 5; 3584 const int XMM_REG_NUM_KEY_LAST = 15; 3585 const XMMRegister xmm_key_first = as_XMMRegister(XMM_REG_NUM_KEY_FIRST); 3586 const XMMRegister xmm_key_last = as_XMMRegister(XMM_REG_NUM_KEY_LAST); 3587 3588 __ enter(); // required for proper stackwalking of RuntimeStub frame 3589 3590 // For EVEX with VL and BW, provide a standard mask, VL = 128 will guide the merge 3591 // context for the registers used, where all instructions below are using 128-bit mode 3592 // On EVEX without VL and BW, these instructions will all be AVX. 
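  // CBC decryption parallelizes because each plaintext block depends only on
  // ciphertext that is already in memory:
  //   P[i] = D_K(C[i]) ^ C[i-1]        (C[-1] is the initial rvec)
  // so this stub keeps PARALLEL_FACTOR (4) aesdec pipelines busy, unlike the
  // encryption stub above, which must chain block to block.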
3593 if (VM_Version::supports_avx512vlbw()) { 3594 __ movl(rax, 0xffff); 3595 __ kmovql(k1, rax); 3596 } 3597 3598 #ifdef _WIN64 3599 // on win64, fill len_reg from stack position 3600 __ movl(len_reg, len_mem); 3601 // save the xmm registers which must be preserved 6-15 3602 __ subptr(rsp, -rsp_after_call_off * wordSize); 3603 for (int i = 6; i <= XMM_REG_NUM_KEY_LAST; i++) { 3604 __ movdqu(xmm_save(i), as_XMMRegister(i)); 3605 } 3606 #else 3607 __ push(len_reg); // Save 3608 #endif 3609 __ push(rbx); 3610 // the java expanded key ordering is rotated one position from what we want 3611 // so we start from 0x10 here and hit 0x00 last 3612 const XMMRegister xmm_key_shuf_mask = xmm1; // used temporarily to swap key bytes up front 3613 __ movdqu(xmm_key_shuf_mask, ExternalAddress(StubRoutines::x86::key_shuffle_mask_addr())); 3614 // load up xmm regs 5 thru 15 with key 0x10 - 0xa0 - 0x00 3615 for (int rnum = XMM_REG_NUM_KEY_FIRST, offset = 0x10; rnum < XMM_REG_NUM_KEY_LAST; rnum++) { 3616 load_key(as_XMMRegister(rnum), key, offset, xmm_key_shuf_mask); 3617 offset += 0x10; 3618 } 3619 load_key(xmm_key_last, key, 0x00, xmm_key_shuf_mask); 3620 3621 const XMMRegister xmm_prev_block_cipher = xmm1; // holds cipher of previous block 3622 3623 // registers holding the four results in the parallelized loop 3624 const XMMRegister xmm_result0 = xmm0; 3625 const XMMRegister xmm_result1 = xmm2; 3626 const XMMRegister xmm_result2 = xmm3; 3627 const XMMRegister xmm_result3 = xmm4; 3628 3629 __ movdqu(xmm_prev_block_cipher, Address(rvec, 0x00)); // initialize with initial rvec 3630 3631 __ xorptr(pos, pos); 3632 3633 // now split to different paths depending on the keylen (len in ints of AESCrypt.KLE array (52=192, or 60=256)) 3634 __ movl(rbx, Address(key, arrayOopDesc::length_offset_in_bytes() - arrayOopDesc::base_offset_in_bytes(T_INT))); 3635 __ cmpl(rbx, 52); 3636 __ jcc(Assembler::equal, L_multiBlock_loopTopHead[1]); 3637 __ cmpl(rbx, 60); 3638 __ jcc(Assembler::equal, L_multiBlock_loopTopHead[2]); 3639 3640 #define DoFour(opc, src_reg) \ 3641 __ opc(xmm_result0, src_reg); \ 3642 __ opc(xmm_result1, src_reg); \ 3643 __ opc(xmm_result2, src_reg); \ 3644 __ opc(xmm_result3, src_reg); \ 3645 3646 for (int k = 0; k < 3; ++k) { 3647 __ BIND(L_multiBlock_loopTopHead[k]); 3648 if (k != 0) { 3649 __ cmpptr(len_reg, PARALLEL_FACTOR * AESBlockSize); // see if at least 4 blocks left 3650 __ jcc(Assembler::less, L_singleBlock_loopTopHead2[k]); 3651 } 3652 if (k == 1) { 3653 __ subptr(rsp, 6 * wordSize); 3654 __ movdqu(Address(rsp, 0), xmm15); //save last_key from xmm15 3655 load_key(xmm15, key, 0xb0); // 0xb0; 192-bit key goes up to 0xc0 3656 __ movdqu(Address(rsp, 2 * wordSize), xmm15); 3657 load_key(xmm1, key, 0xc0); // 0xc0; 3658 __ movdqu(Address(rsp, 4 * wordSize), xmm1); 3659 } else if (k == 2) { 3660 __ subptr(rsp, 10 * wordSize); 3661 __ movdqu(Address(rsp, 0), xmm15); //save last_key from xmm15 3662 load_key(xmm15, key, 0xd0); // 0xd0; 256-bit key goes upto 0xe0 3663 __ movdqu(Address(rsp, 6 * wordSize), xmm15); 3664 load_key(xmm1, key, 0xe0); // 0xe0; 3665 __ movdqu(Address(rsp, 8 * wordSize), xmm1); 3666 load_key(xmm15, key, 0xb0); // 0xb0; 3667 __ movdqu(Address(rsp, 2 * wordSize), xmm15); 3668 load_key(xmm1, key, 0xc0); // 0xc0; 3669 __ movdqu(Address(rsp, 4 * wordSize), xmm1); 3670 } 3671 __ align(OptoLoopAlignment); 3672 __ BIND(L_multiBlock_loopTop[k]); 3673 __ cmpptr(len_reg, PARALLEL_FACTOR * AESBlockSize); // see if at least 4 blocks left 3674 __ jcc(Assembler::less, 
L_singleBlock_loopTopHead[k]); 3675 3676 if (k != 0) { 3677 __ movdqu(xmm15, Address(rsp, 2 * wordSize)); 3678 __ movdqu(xmm1, Address(rsp, 4 * wordSize)); 3679 } 3680 3681 __ movdqu(xmm_result0, Address(from, pos, Address::times_1, 0 * AESBlockSize)); // get next 4 blocks into xmmresult registers 3682 __ movdqu(xmm_result1, Address(from, pos, Address::times_1, 1 * AESBlockSize)); 3683 __ movdqu(xmm_result2, Address(from, pos, Address::times_1, 2 * AESBlockSize)); 3684 __ movdqu(xmm_result3, Address(from, pos, Address::times_1, 3 * AESBlockSize)); 3685 3686 DoFour(pxor, xmm_key_first); 3687 if (k == 0) { 3688 for (int rnum = 1; rnum < ROUNDS[k]; rnum++) { 3689 DoFour(aesdec, as_XMMRegister(rnum + XMM_REG_NUM_KEY_FIRST)); 3690 } 3691 DoFour(aesdeclast, xmm_key_last); 3692 } else if (k == 1) { 3693 for (int rnum = 1; rnum <= ROUNDS[k]-2; rnum++) { 3694 DoFour(aesdec, as_XMMRegister(rnum + XMM_REG_NUM_KEY_FIRST)); 3695 } 3696 __ movdqu(xmm_key_last, Address(rsp, 0)); // xmm15 needs to be loaded again. 3697 DoFour(aesdec, xmm1); // key : 0xc0 3698 __ movdqu(xmm_prev_block_cipher, Address(rvec, 0x00)); // xmm1 needs to be loaded again 3699 DoFour(aesdeclast, xmm_key_last); 3700 } else if (k == 2) { 3701 for (int rnum = 1; rnum <= ROUNDS[k] - 4; rnum++) { 3702 DoFour(aesdec, as_XMMRegister(rnum + XMM_REG_NUM_KEY_FIRST)); 3703 } 3704 DoFour(aesdec, xmm1); // key : 0xc0 3705 __ movdqu(xmm15, Address(rsp, 6 * wordSize)); 3706 __ movdqu(xmm1, Address(rsp, 8 * wordSize)); 3707 DoFour(aesdec, xmm15); // key : 0xd0 3708 __ movdqu(xmm_key_last, Address(rsp, 0)); // xmm15 needs to be loaded again. 3709 DoFour(aesdec, xmm1); // key : 0xe0 3710 __ movdqu(xmm_prev_block_cipher, Address(rvec, 0x00)); // xmm1 needs to be loaded again 3711 DoFour(aesdeclast, xmm_key_last); 3712 } 3713 3714 // for each result, xor with the r vector of previous cipher block 3715 __ pxor(xmm_result0, xmm_prev_block_cipher); 3716 __ movdqu(xmm_prev_block_cipher, Address(from, pos, Address::times_1, 0 * AESBlockSize)); 3717 __ pxor(xmm_result1, xmm_prev_block_cipher); 3718 __ movdqu(xmm_prev_block_cipher, Address(from, pos, Address::times_1, 1 * AESBlockSize)); 3719 __ pxor(xmm_result2, xmm_prev_block_cipher); 3720 __ movdqu(xmm_prev_block_cipher, Address(from, pos, Address::times_1, 2 * AESBlockSize)); 3721 __ pxor(xmm_result3, xmm_prev_block_cipher); 3722 __ movdqu(xmm_prev_block_cipher, Address(from, pos, Address::times_1, 3 * AESBlockSize)); // this will carry over to next set of blocks 3723 if (k != 0) { 3724 __ movdqu(Address(rvec, 0x00), xmm_prev_block_cipher); 3725 } 3726 3727 __ movdqu(Address(to, pos, Address::times_1, 0 * AESBlockSize), xmm_result0); // store 4 results into the next 64 bytes of output 3728 __ movdqu(Address(to, pos, Address::times_1, 1 * AESBlockSize), xmm_result1); 3729 __ movdqu(Address(to, pos, Address::times_1, 2 * AESBlockSize), xmm_result2); 3730 __ movdqu(Address(to, pos, Address::times_1, 3 * AESBlockSize), xmm_result3); 3731 3732 __ addptr(pos, PARALLEL_FACTOR * AESBlockSize); 3733 __ subptr(len_reg, PARALLEL_FACTOR * AESBlockSize); 3734 __ jmp(L_multiBlock_loopTop[k]); 3735 3736 // registers used in the non-parallelized loops 3737 // xmm register assignments for the loops below 3738 const XMMRegister xmm_result = xmm0; 3739 const XMMRegister xmm_prev_block_cipher_save = xmm2; 3740 const XMMRegister xmm_key11 = xmm3; 3741 const XMMRegister xmm_key12 = xmm4; 3742 const XMMRegister key_tmp = xmm4; 3743 3744 __ BIND(L_singleBlock_loopTopHead[k]); 3745 if (k == 1) { 3746 __ addptr(rsp, 6 * 
wordSize); 3747 } else if (k == 2) { 3748 __ addptr(rsp, 10 * wordSize); 3749 } 3750 __ cmpptr(len_reg, 0); // any blocks left?? 3751 __ jcc(Assembler::equal, L_exit); 3752 __ BIND(L_singleBlock_loopTopHead2[k]); 3753 if (k == 1) { 3754 load_key(xmm_key11, key, 0xb0); // 0xb0; 192-bit key goes upto 0xc0 3755 load_key(xmm_key12, key, 0xc0); // 0xc0; 192-bit key goes upto 0xc0 3756 } 3757 if (k == 2) { 3758 load_key(xmm_key11, key, 0xb0); // 0xb0; 256-bit key goes upto 0xe0 3759 } 3760 __ align(OptoLoopAlignment); 3761 __ BIND(L_singleBlock_loopTop[k]); 3762 __ movdqu(xmm_result, Address(from, pos, Address::times_1, 0)); // get next 16 bytes of cipher input 3763 __ movdqa(xmm_prev_block_cipher_save, xmm_result); // save for next r vector 3764 __ pxor(xmm_result, xmm_key_first); // do the aes dec rounds 3765 for (int rnum = 1; rnum <= 9 ; rnum++) { 3766 __ aesdec(xmm_result, as_XMMRegister(rnum + XMM_REG_NUM_KEY_FIRST)); 3767 } 3768 if (k == 1) { 3769 __ aesdec(xmm_result, xmm_key11); 3770 __ aesdec(xmm_result, xmm_key12); 3771 } 3772 if (k == 2) { 3773 __ aesdec(xmm_result, xmm_key11); 3774 load_key(key_tmp, key, 0xc0); 3775 __ aesdec(xmm_result, key_tmp); 3776 load_key(key_tmp, key, 0xd0); 3777 __ aesdec(xmm_result, key_tmp); 3778 load_key(key_tmp, key, 0xe0); 3779 __ aesdec(xmm_result, key_tmp); 3780 } 3781 3782 __ aesdeclast(xmm_result, xmm_key_last); // xmm15 always came from key+0 3783 __ pxor(xmm_result, xmm_prev_block_cipher); // xor with the current r vector 3784 __ movdqu(Address(to, pos, Address::times_1, 0), xmm_result); // store into the next 16 bytes of output 3785 // no need to store r to memory until we exit 3786 __ movdqa(xmm_prev_block_cipher, xmm_prev_block_cipher_save); // set up next r vector with cipher input from this block 3787 __ addptr(pos, AESBlockSize); 3788 __ subptr(len_reg, AESBlockSize); 3789 __ jcc(Assembler::notEqual, L_singleBlock_loopTop[k]); 3790 if (k != 2) { 3791 __ jmp(L_exit); 3792 } 3793 } //for 128/192/256 3794 3795 __ BIND(L_exit); 3796 __ movdqu(Address(rvec, 0), xmm_prev_block_cipher); // final value of r stored in rvec of CipherBlockChaining object 3797 __ pop(rbx); 3798 #ifdef _WIN64 3799 // restore regs belonging to calling function 3800 for (int i = 6; i <= XMM_REG_NUM_KEY_LAST; i++) { 3801 __ movdqu(as_XMMRegister(i), xmm_save(i)); 3802 } 3803 __ movl(rax, len_mem); 3804 #else 3805 __ pop(rax); // return length 3806 #endif 3807 __ leave(); // required for proper stackwalking of RuntimeStub frame 3808 __ ret(0); 3809 return start; 3810 } 3811 3812 address generate_upper_word_mask() { 3813 __ align(64); 3814 StubCodeMark mark(this, "StubRoutines", "upper_word_mask"); 3815 address start = __ pc(); 3816 __ emit_data64(0x0000000000000000, relocInfo::none); 3817 __ emit_data64(0xFFFFFFFF00000000, relocInfo::none); 3818 return start; 3819 } 3820 3821 address generate_shuffle_byte_flip_mask() { 3822 __ align(64); 3823 StubCodeMark mark(this, "StubRoutines", "shuffle_byte_flip_mask"); 3824 address start = __ pc(); 3825 __ emit_data64(0x08090a0b0c0d0e0f, relocInfo::none); 3826 __ emit_data64(0x0001020304050607, relocInfo::none); 3827 return start; 3828 } 3829 3830 // ofs and limit are use for multi-block byte array. 
3831 // int com.sun.security.provider.DigestBase.implCompressMultiBlock(byte[] b, int ofs, int limit) 3832 address generate_sha1_implCompress(bool multi_block, const char *name) { 3833 __ align(CodeEntryAlignment); 3834 StubCodeMark mark(this, "StubRoutines", name); 3835 address start = __ pc(); 3836 3837 Register buf = c_rarg0; 3838 Register state = c_rarg1; 3839 Register ofs = c_rarg2; 3840 Register limit = c_rarg3; 3841 3842 const XMMRegister abcd = xmm0; 3843 const XMMRegister e0 = xmm1; 3844 const XMMRegister e1 = xmm2; 3845 const XMMRegister msg0 = xmm3; 3846 3847 const XMMRegister msg1 = xmm4; 3848 const XMMRegister msg2 = xmm5; 3849 const XMMRegister msg3 = xmm6; 3850 const XMMRegister shuf_mask = xmm7; 3851 3852 __ enter(); 3853 3854 #ifdef _WIN64 3855 // save the xmm registers which must be preserved 6-7 3856 __ subptr(rsp, 4 * wordSize); 3857 __ movdqu(Address(rsp, 0), xmm6); 3858 __ movdqu(Address(rsp, 2 * wordSize), xmm7); 3859 #endif 3860 3861 __ subptr(rsp, 4 * wordSize); 3862 3863 __ fast_sha1(abcd, e0, e1, msg0, msg1, msg2, msg3, shuf_mask, 3864 buf, state, ofs, limit, rsp, multi_block); 3865 3866 __ addptr(rsp, 4 * wordSize); 3867 #ifdef _WIN64 3868 // restore xmm regs belonging to calling function 3869 __ movdqu(xmm6, Address(rsp, 0)); 3870 __ movdqu(xmm7, Address(rsp, 2 * wordSize)); 3871 __ addptr(rsp, 4 * wordSize); 3872 #endif 3873 3874 __ leave(); 3875 __ ret(0); 3876 return start; 3877 } 3878 3879 address generate_pshuffle_byte_flip_mask() { 3880 __ align(64); 3881 StubCodeMark mark(this, "StubRoutines", "pshuffle_byte_flip_mask"); 3882 address start = __ pc(); 3883 __ emit_data64(0x0405060700010203, relocInfo::none); 3884 __ emit_data64(0x0c0d0e0f08090a0b, relocInfo::none); 3885 3886 if (VM_Version::supports_avx2()) { 3887 __ emit_data64(0x0405060700010203, relocInfo::none); // second copy 3888 __ emit_data64(0x0c0d0e0f08090a0b, relocInfo::none); 3889 // _SHUF_00BA 3890 __ emit_data64(0x0b0a090803020100, relocInfo::none); 3891 __ emit_data64(0xFFFFFFFFFFFFFFFF, relocInfo::none); 3892 __ emit_data64(0x0b0a090803020100, relocInfo::none); 3893 __ emit_data64(0xFFFFFFFFFFFFFFFF, relocInfo::none); 3894 // _SHUF_DC00 3895 __ emit_data64(0xFFFFFFFFFFFFFFFF, relocInfo::none); 3896 __ emit_data64(0x0b0a090803020100, relocInfo::none); 3897 __ emit_data64(0xFFFFFFFFFFFFFFFF, relocInfo::none); 3898 __ emit_data64(0x0b0a090803020100, relocInfo::none); 3899 } 3900 3901 return start; 3902 } 3903 3904 // ofs and limit are use for multi-block byte array. 
3905 // int com.sun.security.provider.DigestBase.implCompressMultiBlock(byte[] b, int ofs, int limit) 3906 address generate_sha256_implCompress(bool multi_block, const char *name) { 3907 assert(VM_Version::supports_sha() || VM_Version::supports_avx2(), ""); 3908 __ align(CodeEntryAlignment); 3909 StubCodeMark mark(this, "StubRoutines", name); 3910 address start = __ pc(); 3911 3912 Register buf = c_rarg0; 3913 Register state = c_rarg1; 3914 Register ofs = c_rarg2; 3915 Register limit = c_rarg3; 3916 3917 const XMMRegister msg = xmm0; 3918 const XMMRegister state0 = xmm1; 3919 const XMMRegister state1 = xmm2; 3920 const XMMRegister msgtmp0 = xmm3; 3921 3922 const XMMRegister msgtmp1 = xmm4; 3923 const XMMRegister msgtmp2 = xmm5; 3924 const XMMRegister msgtmp3 = xmm6; 3925 const XMMRegister msgtmp4 = xmm7; 3926 3927 const XMMRegister shuf_mask = xmm8; 3928 3929 __ enter(); 3930 #ifdef _WIN64 3931 // save the xmm registers which must be preserved 6-7 3932 __ subptr(rsp, 6 * wordSize); 3933 __ movdqu(Address(rsp, 0), xmm6); 3934 __ movdqu(Address(rsp, 2 * wordSize), xmm7); 3935 __ movdqu(Address(rsp, 4 * wordSize), xmm8); 3936 3937 if (!VM_Version::supports_sha() && VM_Version::supports_avx2()) { 3938 __ subptr(rsp, 10 * wordSize); 3939 __ movdqu(Address(rsp, 0), xmm9); 3940 __ movdqu(Address(rsp, 2 * wordSize), xmm10); 3941 __ movdqu(Address(rsp, 4 * wordSize), xmm11); 3942 __ movdqu(Address(rsp, 6 * wordSize), xmm12); 3943 __ movdqu(Address(rsp, 8 * wordSize), xmm13); 3944 } 3945 #endif 3946 3947 __ subptr(rsp, 4 * wordSize); 3948 3949 if (VM_Version::supports_sha()) { 3950 __ fast_sha256(msg, state0, state1, msgtmp0, msgtmp1, msgtmp2, msgtmp3, msgtmp4, 3951 buf, state, ofs, limit, rsp, multi_block, shuf_mask); 3952 } else if (VM_Version::supports_avx2()) { 3953 __ sha256_AVX2(msg, state0, state1, msgtmp0, msgtmp1, msgtmp2, msgtmp3, msgtmp4, 3954 buf, state, ofs, limit, rsp, multi_block, shuf_mask); 3955 } 3956 __ addptr(rsp, 4 * wordSize); 3957 #ifdef _WIN64 3958 // restore xmm regs belonging to calling function 3959 if (!VM_Version::supports_sha() && VM_Version::supports_avx2()) { 3960 __ movdqu(xmm9, Address(rsp, 0)); 3961 __ movdqu(xmm10, Address(rsp, 2 * wordSize)); 3962 __ movdqu(xmm11, Address(rsp, 4 * wordSize)); 3963 __ movdqu(xmm12, Address(rsp, 6 * wordSize)); 3964 __ movdqu(xmm13, Address(rsp, 8 * wordSize)); 3965 __ addptr(rsp, 10 * wordSize); 3966 } 3967 __ movdqu(xmm6, Address(rsp, 0)); 3968 __ movdqu(xmm7, Address(rsp, 2 * wordSize)); 3969 __ movdqu(xmm8, Address(rsp, 4 * wordSize)); 3970 __ addptr(rsp, 6 * wordSize); 3971 #endif 3972 __ leave(); 3973 __ ret(0); 3974 return start; 3975 } 3976 3977 // This is a version of CTR/AES crypt which does 6 blocks in a loop at a time 3978 // to hide instruction latency 3979 // 3980 // Arguments: 3981 // 3982 // Inputs: 3983 // c_rarg0 - source byte array address 3984 // c_rarg1 - destination byte array address 3985 // c_rarg2 - K (key) in little endian int array 3986 // c_rarg3 - counter vector byte array address 3987 // Linux 3988 // c_rarg4 - input length 3989 // c_rarg5 - saved encryptedCounter start 3990 // rbp + 6 * wordSize - saved used length 3991 // Windows 3992 // rbp + 6 * wordSize - input length 3993 // rbp + 7 * wordSize - saved encryptedCounter start 3994 // rbp + 8 * wordSize - saved used length 3995 // 3996 // Output: 3997 // rax - input length 3998 // 3999 address generate_counterMode_AESCrypt_Parallel() { 4000 assert(UseAES, "need AES instructions and misaligned SSE support"); 4001 __ align(CodeEntryAlignment); 4002 
StubCodeMark mark(this, "StubRoutines", "counterMode_AESCrypt"); 4003 address start = __ pc(); 4004 const Register from = c_rarg0; // source array address 4005 const Register to = c_rarg1; // destination array address 4006 const Register key = c_rarg2; // key array address 4007 const Register counter = c_rarg3; // counter byte array initialized from counter array address 4008 // and updated with the incremented counter in the end 4009 #ifndef _WIN64 4010 const Register len_reg = c_rarg4; 4011 const Register saved_encCounter_start = c_rarg5; 4012 const Register used_addr = r10; 4013 const Address used_mem(rbp, 2 * wordSize); 4014 const Register used = r11; 4015 #else 4016 const Address len_mem(rbp, 6 * wordSize); // length is on stack on Win64 4017 const Address saved_encCounter_mem(rbp, 7 * wordSize); // length is on stack on Win64 4018 const Address used_mem(rbp, 8 * wordSize); // length is on stack on Win64 4019 const Register len_reg = r10; // pick the first volatile windows register 4020 const Register saved_encCounter_start = r11; 4021 const Register used_addr = r13; 4022 const Register used = r14; 4023 #endif 4024 const Register pos = rax; 4025 4026 const int PARALLEL_FACTOR = 6; 4027 const XMMRegister xmm_counter_shuf_mask = xmm0; 4028 const XMMRegister xmm_key_shuf_mask = xmm1; // used temporarily to swap key bytes up front 4029 const XMMRegister xmm_curr_counter = xmm2; 4030 4031 const XMMRegister xmm_key_tmp0 = xmm3; 4032 const XMMRegister xmm_key_tmp1 = xmm4; 4033 4034 // registers holding the four results in the parallelized loop 4035 const XMMRegister xmm_result0 = xmm5; 4036 const XMMRegister xmm_result1 = xmm6; 4037 const XMMRegister xmm_result2 = xmm7; 4038 const XMMRegister xmm_result3 = xmm8; 4039 const XMMRegister xmm_result4 = xmm9; 4040 const XMMRegister xmm_result5 = xmm10; 4041 4042 const XMMRegister xmm_from0 = xmm11; 4043 const XMMRegister xmm_from1 = xmm12; 4044 const XMMRegister xmm_from2 = xmm13; 4045 const XMMRegister xmm_from3 = xmm14; //the last one is xmm14. we have to preserve it on WIN64. 4046 const XMMRegister xmm_from4 = xmm3; //reuse xmm3~4. Because xmm_key_tmp0~1 are useless when loading input text 4047 const XMMRegister xmm_from5 = xmm4; 4048 4049 //for key_128, key_192, key_256 4050 const int rounds[3] = {10, 12, 14}; 4051 Label L_exit_preLoop, L_preLoop_start; 4052 Label L_multiBlock_loopTop[3]; 4053 Label L_singleBlockLoopTop[3]; 4054 Label L__incCounter[3][6]; //for 6 blocks 4055 Label L__incCounter_single[3]; //for single block, key128, key192, key256 4056 Label L_processTail_insr[3], L_processTail_4_insr[3], L_processTail_2_insr[3], L_processTail_1_insr[3], L_processTail_exit_insr[3]; 4057 Label L_processTail_extr[3], L_processTail_4_extr[3], L_processTail_2_extr[3], L_processTail_1_extr[3], L_processTail_exit_extr[3]; 4058 4059 Label L_exit; 4060 4061 __ enter(); // required for proper stackwalking of RuntimeStub frame 4062 4063 // For EVEX with VL and BW, provide a standard mask, VL = 128 will guide the merge 4064 // context for the registers used, where all instructions below are using 128-bit mode 4065 // On EVEX without VL and BW, these instructions will all be AVX. 
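  // CTR mode turns AES into a stream cipher: out[i] = in[i] ^ E_K(counter + i),
  // so blocks are independent and PARALLEL_FACTOR (6) of them are encrypted at
  // once to hide aesenc latency. The counter handed in from Java is big-endian;
  // xmm_curr_counter is byte-swapped once with xmm_counter_shuf_mask so that
  // inc_counter can bump it with simple adds (the L__incCounter labels handle
  // the carry case), and each per-block copy is shuffled back before encryption.
  // The saved_encCounter/used arguments carry a partially consumed keystream
  // block across calls (see the pre-loop below).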
4066 if (VM_Version::supports_avx512vlbw()) { 4067 __ movl(rax, 0xffff); 4068 __ kmovql(k1, rax); 4069 } 4070 4071 #ifdef _WIN64 4072 // save the xmm registers which must be preserved 6-14 4073 const int XMM_REG_NUM_KEY_LAST = 14; 4074 __ subptr(rsp, -rsp_after_call_off * wordSize); 4075 for (int i = 6; i <= XMM_REG_NUM_KEY_LAST; i++) { 4076 __ movdqu(xmm_save(i), as_XMMRegister(i)); 4077 } 4078 4079 const Address r13_save(rbp, rdi_off * wordSize); 4080 const Address r14_save(rbp, rsi_off * wordSize); 4081 4082 __ movptr(r13_save, r13); 4083 __ movptr(r14_save, r14); 4084 4085 // on win64, fill len_reg from stack position 4086 __ movl(len_reg, len_mem); 4087 __ movptr(saved_encCounter_start, saved_encCounter_mem); 4088 __ movptr(used_addr, used_mem); 4089 __ movl(used, Address(used_addr, 0)); 4090 #else 4091 __ push(len_reg); // Save 4092 __ movptr(used_addr, used_mem); 4093 __ movl(used, Address(used_addr, 0)); 4094 #endif 4095 4096 __ push(rbx); // Save RBX 4097 __ movdqu(xmm_curr_counter, Address(counter, 0x00)); // initialize counter with initial counter 4098 __ movdqu(xmm_counter_shuf_mask, ExternalAddress(StubRoutines::x86::counter_shuffle_mask_addr())); 4099 __ pshufb(xmm_curr_counter, xmm_counter_shuf_mask); //counter is shuffled 4100 __ movptr(pos, 0); 4101 4102 // Use the partially used encrpyted counter from last invocation 4103 __ BIND(L_preLoop_start); 4104 __ cmpptr(used, 16); 4105 __ jcc(Assembler::aboveEqual, L_exit_preLoop); 4106 __ cmpptr(len_reg, 0); 4107 __ jcc(Assembler::lessEqual, L_exit_preLoop); 4108 __ movb(rbx, Address(saved_encCounter_start, used)); 4109 __ xorb(rbx, Address(from, pos)); 4110 __ movb(Address(to, pos), rbx); 4111 __ addptr(pos, 1); 4112 __ addptr(used, 1); 4113 __ subptr(len_reg, 1); 4114 4115 __ jmp(L_preLoop_start); 4116 4117 __ BIND(L_exit_preLoop); 4118 __ movl(Address(used_addr, 0), used); 4119 4120 // key length could be only {11, 13, 15} * 4 = {44, 52, 60} 4121 __ movdqu(xmm_key_shuf_mask, ExternalAddress(StubRoutines::x86::key_shuffle_mask_addr())); 4122 __ movl(rbx, Address(key, arrayOopDesc::length_offset_in_bytes() - arrayOopDesc::base_offset_in_bytes(T_INT))); 4123 __ cmpl(rbx, 52); 4124 __ jcc(Assembler::equal, L_multiBlock_loopTop[1]); 4125 __ cmpl(rbx, 60); 4126 __ jcc(Assembler::equal, L_multiBlock_loopTop[2]); 4127 4128 #define CTR_DoSix(opc, src_reg) \ 4129 __ opc(xmm_result0, src_reg); \ 4130 __ opc(xmm_result1, src_reg); \ 4131 __ opc(xmm_result2, src_reg); \ 4132 __ opc(xmm_result3, src_reg); \ 4133 __ opc(xmm_result4, src_reg); \ 4134 __ opc(xmm_result5, src_reg); 4135 4136 // k == 0 : generate code for key_128 4137 // k == 1 : generate code for key_192 4138 // k == 2 : generate code for key_256 4139 for (int k = 0; k < 3; ++k) { 4140 //multi blocks starts here 4141 __ align(OptoLoopAlignment); 4142 __ BIND(L_multiBlock_loopTop[k]); 4143 __ cmpptr(len_reg, PARALLEL_FACTOR * AESBlockSize); // see if at least PARALLEL_FACTOR blocks left 4144 __ jcc(Assembler::less, L_singleBlockLoopTop[k]); 4145 load_key(xmm_key_tmp0, key, 0x00, xmm_key_shuf_mask); 4146 4147 //load, then increase counters 4148 CTR_DoSix(movdqa, xmm_curr_counter); 4149 inc_counter(rbx, xmm_result1, 0x01, L__incCounter[k][0]); 4150 inc_counter(rbx, xmm_result2, 0x02, L__incCounter[k][1]); 4151 inc_counter(rbx, xmm_result3, 0x03, L__incCounter[k][2]); 4152 inc_counter(rbx, xmm_result4, 0x04, L__incCounter[k][3]); 4153 inc_counter(rbx, xmm_result5, 0x05, L__incCounter[k][4]); 4154 inc_counter(rbx, xmm_curr_counter, 0x06, L__incCounter[k][5]); 4155 CTR_DoSix(pshufb, 
xmm_counter_shuf_mask); // after increased, shuffled counters back for PXOR 4156 CTR_DoSix(pxor, xmm_key_tmp0); //PXOR with Round 0 key 4157 4158 //load two ROUND_KEYs at a time 4159 for (int i = 1; i < rounds[k]; ) { 4160 load_key(xmm_key_tmp1, key, (0x10 * i), xmm_key_shuf_mask); 4161 load_key(xmm_key_tmp0, key, (0x10 * (i+1)), xmm_key_shuf_mask); 4162 CTR_DoSix(aesenc, xmm_key_tmp1); 4163 i++; 4164 if (i != rounds[k]) { 4165 CTR_DoSix(aesenc, xmm_key_tmp0); 4166 } else { 4167 CTR_DoSix(aesenclast, xmm_key_tmp0); 4168 } 4169 i++; 4170 } 4171 4172 // get next PARALLEL_FACTOR blocks into xmm_result registers 4173 __ movdqu(xmm_from0, Address(from, pos, Address::times_1, 0 * AESBlockSize)); 4174 __ movdqu(xmm_from1, Address(from, pos, Address::times_1, 1 * AESBlockSize)); 4175 __ movdqu(xmm_from2, Address(from, pos, Address::times_1, 2 * AESBlockSize)); 4176 __ movdqu(xmm_from3, Address(from, pos, Address::times_1, 3 * AESBlockSize)); 4177 __ movdqu(xmm_from4, Address(from, pos, Address::times_1, 4 * AESBlockSize)); 4178 __ movdqu(xmm_from5, Address(from, pos, Address::times_1, 5 * AESBlockSize)); 4179 4180 __ pxor(xmm_result0, xmm_from0); 4181 __ pxor(xmm_result1, xmm_from1); 4182 __ pxor(xmm_result2, xmm_from2); 4183 __ pxor(xmm_result3, xmm_from3); 4184 __ pxor(xmm_result4, xmm_from4); 4185 __ pxor(xmm_result5, xmm_from5); 4186 4187 // store 6 results into the next 64 bytes of output 4188 __ movdqu(Address(to, pos, Address::times_1, 0 * AESBlockSize), xmm_result0); 4189 __ movdqu(Address(to, pos, Address::times_1, 1 * AESBlockSize), xmm_result1); 4190 __ movdqu(Address(to, pos, Address::times_1, 2 * AESBlockSize), xmm_result2); 4191 __ movdqu(Address(to, pos, Address::times_1, 3 * AESBlockSize), xmm_result3); 4192 __ movdqu(Address(to, pos, Address::times_1, 4 * AESBlockSize), xmm_result4); 4193 __ movdqu(Address(to, pos, Address::times_1, 5 * AESBlockSize), xmm_result5); 4194 4195 __ addptr(pos, PARALLEL_FACTOR * AESBlockSize); // increase the length of crypt text 4196 __ subptr(len_reg, PARALLEL_FACTOR * AESBlockSize); // decrease the remaining length 4197 __ jmp(L_multiBlock_loopTop[k]); 4198 4199 // singleBlock starts here 4200 __ align(OptoLoopAlignment); 4201 __ BIND(L_singleBlockLoopTop[k]); 4202 __ cmpptr(len_reg, 0); 4203 __ jcc(Assembler::lessEqual, L_exit); 4204 load_key(xmm_key_tmp0, key, 0x00, xmm_key_shuf_mask); 4205 __ movdqa(xmm_result0, xmm_curr_counter); 4206 inc_counter(rbx, xmm_curr_counter, 0x01, L__incCounter_single[k]); 4207 __ pshufb(xmm_result0, xmm_counter_shuf_mask); 4208 __ pxor(xmm_result0, xmm_key_tmp0); 4209 for (int i = 1; i < rounds[k]; i++) { 4210 load_key(xmm_key_tmp0, key, (0x10 * i), xmm_key_shuf_mask); 4211 __ aesenc(xmm_result0, xmm_key_tmp0); 4212 } 4213 load_key(xmm_key_tmp0, key, (rounds[k] * 0x10), xmm_key_shuf_mask); 4214 __ aesenclast(xmm_result0, xmm_key_tmp0); 4215 __ cmpptr(len_reg, AESBlockSize); 4216 __ jcc(Assembler::less, L_processTail_insr[k]); 4217 __ movdqu(xmm_from0, Address(from, pos, Address::times_1, 0 * AESBlockSize)); 4218 __ pxor(xmm_result0, xmm_from0); 4219 __ movdqu(Address(to, pos, Address::times_1, 0 * AESBlockSize), xmm_result0); 4220 __ addptr(pos, AESBlockSize); 4221 __ subptr(len_reg, AESBlockSize); 4222 __ jmp(L_singleBlockLoopTop[k]); 4223 __ BIND(L_processTail_insr[k]); // Process the tail part of the input array 4224 __ addptr(pos, len_reg); // 1. 
Insert bytes from src array into xmm_from0 register 4225 __ testptr(len_reg, 8); 4226 __ jcc(Assembler::zero, L_processTail_4_insr[k]); 4227 __ subptr(pos,8); 4228 __ pinsrq(xmm_from0, Address(from, pos), 0); 4229 __ BIND(L_processTail_4_insr[k]); 4230 __ testptr(len_reg, 4); 4231 __ jcc(Assembler::zero, L_processTail_2_insr[k]); 4232 __ subptr(pos,4); 4233 __ pslldq(xmm_from0, 4); 4234 __ pinsrd(xmm_from0, Address(from, pos), 0); 4235 __ BIND(L_processTail_2_insr[k]); 4236 __ testptr(len_reg, 2); 4237 __ jcc(Assembler::zero, L_processTail_1_insr[k]); 4238 __ subptr(pos, 2); 4239 __ pslldq(xmm_from0, 2); 4240 __ pinsrw(xmm_from0, Address(from, pos), 0); 4241 __ BIND(L_processTail_1_insr[k]); 4242 __ testptr(len_reg, 1); 4243 __ jcc(Assembler::zero, L_processTail_exit_insr[k]); 4244 __ subptr(pos, 1); 4245 __ pslldq(xmm_from0, 1); 4246 __ pinsrb(xmm_from0, Address(from, pos), 0); 4247 __ BIND(L_processTail_exit_insr[k]); 4248 4249 __ movdqu(Address(saved_encCounter_start, 0), xmm_result0); // 2. Perform pxor of the encrypted counter and plaintext Bytes. 4250 __ pxor(xmm_result0, xmm_from0); // Also the encrypted counter is saved for next invocation. 4251 4252 __ testptr(len_reg, 8); 4253 __ jcc(Assembler::zero, L_processTail_4_extr[k]); // 3. Extract bytes from xmm_result0 into the dest. array 4254 __ pextrq(Address(to, pos), xmm_result0, 0); 4255 __ psrldq(xmm_result0, 8); 4256 __ addptr(pos, 8); 4257 __ BIND(L_processTail_4_extr[k]); 4258 __ testptr(len_reg, 4); 4259 __ jcc(Assembler::zero, L_processTail_2_extr[k]); 4260 __ pextrd(Address(to, pos), xmm_result0, 0); 4261 __ psrldq(xmm_result0, 4); 4262 __ addptr(pos, 4); 4263 __ BIND(L_processTail_2_extr[k]); 4264 __ testptr(len_reg, 2); 4265 __ jcc(Assembler::zero, L_processTail_1_extr[k]); 4266 __ pextrw(Address(to, pos), xmm_result0, 0); 4267 __ psrldq(xmm_result0, 2); 4268 __ addptr(pos, 2); 4269 __ BIND(L_processTail_1_extr[k]); 4270 __ testptr(len_reg, 1); 4271 __ jcc(Assembler::zero, L_processTail_exit_extr[k]); 4272 __ pextrb(Address(to, pos), xmm_result0, 0); 4273 4274 __ BIND(L_processTail_exit_extr[k]); 4275 __ movl(Address(used_addr, 0), len_reg); 4276 __ jmp(L_exit); 4277 4278 } 4279 4280 __ BIND(L_exit); 4281 __ pshufb(xmm_curr_counter, xmm_counter_shuf_mask); //counter is shuffled back. 4282 __ movdqu(Address(counter, 0), xmm_curr_counter); //save counter back 4283 __ pop(rbx); // pop the saved RBX. 
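  // (Tail handling above: a final partial block is gathered into xmm_from0
  // with pinsrq/pinsrd/pinsrw/pinsrb, XORed against the encrypted counter,
  // written out with the matching pextr* stores, and the whole encrypted
  // counter block is kept in saved_encCounter with 'used' recording how many
  // bytes were consumed, so the next call can resume in the pre-loop.)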
4284 #ifdef _WIN64 4285 // restore regs belonging to calling function 4286 for (int i = 6; i <= XMM_REG_NUM_KEY_LAST; i++) { 4287 __ movdqu(as_XMMRegister(i), xmm_save(i)); 4288 } 4289 __ movl(rax, len_mem); 4290 __ movptr(r13, r13_save); 4291 __ movptr(r14, r14_save); 4292 #else 4293 __ pop(rax); // return 'len' 4294 #endif 4295 __ leave(); // required for proper stackwalking of RuntimeStub frame 4296 __ ret(0); 4297 return start; 4298 } 4299 4300 // byte swap x86 long 4301 address generate_ghash_long_swap_mask() { 4302 __ align(CodeEntryAlignment); 4303 StubCodeMark mark(this, "StubRoutines", "ghash_long_swap_mask"); 4304 address start = __ pc(); 4305 __ emit_data64(0x0f0e0d0c0b0a0908, relocInfo::none ); 4306 __ emit_data64(0x0706050403020100, relocInfo::none ); 4307 return start; 4308 } 4309 4310 // byte swap x86 byte array 4311 address generate_ghash_byte_swap_mask() { 4312 __ align(CodeEntryAlignment); 4313 StubCodeMark mark(this, "StubRoutines", "ghash_byte_swap_mask"); 4314 address start = __ pc(); 4315 __ emit_data64(0x08090a0b0c0d0e0f, relocInfo::none ); 4316 __ emit_data64(0x0001020304050607, relocInfo::none ); 4317 return start; 4318 } 4319 4320 /* Single and multi-block ghash operations */ 4321 address generate_ghash_processBlocks() { 4322 __ align(CodeEntryAlignment); 4323 Label L_ghash_loop, L_exit; 4324 StubCodeMark mark(this, "StubRoutines", "ghash_processBlocks"); 4325 address start = __ pc(); 4326 4327 const Register state = c_rarg0; 4328 const Register subkeyH = c_rarg1; 4329 const Register data = c_rarg2; 4330 const Register blocks = c_rarg3; 4331 4332 #ifdef _WIN64 4333 const int XMM_REG_LAST = 10; 4334 #endif 4335 4336 const XMMRegister xmm_temp0 = xmm0; 4337 const XMMRegister xmm_temp1 = xmm1; 4338 const XMMRegister xmm_temp2 = xmm2; 4339 const XMMRegister xmm_temp3 = xmm3; 4340 const XMMRegister xmm_temp4 = xmm4; 4341 const XMMRegister xmm_temp5 = xmm5; 4342 const XMMRegister xmm_temp6 = xmm6; 4343 const XMMRegister xmm_temp7 = xmm7; 4344 const XMMRegister xmm_temp8 = xmm8; 4345 const XMMRegister xmm_temp9 = xmm9; 4346 const XMMRegister xmm_temp10 = xmm10; 4347 4348 __ enter(); 4349 4350 // For EVEX with VL and BW, provide a standard mask, VL = 128 will guide the merge 4351 // context for the registers used, where all instructions below are using 128-bit mode 4352 // On EVEX without VL and BW, these instructions will all be AVX. 
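  // GHASH folds each 16-byte block into the running state with a carry-less
  // multiply in GF(2^128):
  //   state = (state ^ block) * H   mod (x^128 + x^7 + x^2 + x + 1)
  // The pclmulqdq sequence below forms the 256-bit product in <xmm6:xmm3>, and
  // the two "reduction phases" fold it back to 128 bits modulo that polynomial;
  // the pshufb byte swaps account for GCM's bit/byte ordering.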
4353 if (VM_Version::supports_avx512vlbw()) { 4354 __ movl(rax, 0xffff); 4355 __ kmovql(k1, rax); 4356 } 4357 4358 #ifdef _WIN64 4359 // save the xmm registers which must be preserved 6-10 4360 __ subptr(rsp, -rsp_after_call_off * wordSize); 4361 for (int i = 6; i <= XMM_REG_LAST; i++) { 4362 __ movdqu(xmm_save(i), as_XMMRegister(i)); 4363 } 4364 #endif 4365 4366 __ movdqu(xmm_temp10, ExternalAddress(StubRoutines::x86::ghash_long_swap_mask_addr())); 4367 4368 __ movdqu(xmm_temp0, Address(state, 0)); 4369 __ pshufb(xmm_temp0, xmm_temp10); 4370 4371 4372 __ BIND(L_ghash_loop); 4373 __ movdqu(xmm_temp2, Address(data, 0)); 4374 __ pshufb(xmm_temp2, ExternalAddress(StubRoutines::x86::ghash_byte_swap_mask_addr())); 4375 4376 __ movdqu(xmm_temp1, Address(subkeyH, 0)); 4377 __ pshufb(xmm_temp1, xmm_temp10); 4378 4379 __ pxor(xmm_temp0, xmm_temp2); 4380 4381 // 4382 // Multiply with the hash key 4383 // 4384 __ movdqu(xmm_temp3, xmm_temp0); 4385 __ pclmulqdq(xmm_temp3, xmm_temp1, 0); // xmm3 holds a0*b0 4386 __ movdqu(xmm_temp4, xmm_temp0); 4387 __ pclmulqdq(xmm_temp4, xmm_temp1, 16); // xmm4 holds a0*b1 4388 4389 __ movdqu(xmm_temp5, xmm_temp0); 4390 __ pclmulqdq(xmm_temp5, xmm_temp1, 1); // xmm5 holds a1*b0 4391 __ movdqu(xmm_temp6, xmm_temp0); 4392 __ pclmulqdq(xmm_temp6, xmm_temp1, 17); // xmm6 holds a1*b1 4393 4394 __ pxor(xmm_temp4, xmm_temp5); // xmm4 holds a0*b1 + a1*b0 4395 4396 __ movdqu(xmm_temp5, xmm_temp4); // move the contents of xmm4 to xmm5 4397 __ psrldq(xmm_temp4, 8); // shift by xmm4 64 bits to the right 4398 __ pslldq(xmm_temp5, 8); // shift by xmm5 64 bits to the left 4399 __ pxor(xmm_temp3, xmm_temp5); 4400 __ pxor(xmm_temp6, xmm_temp4); // Register pair <xmm6:xmm3> holds the result 4401 // of the carry-less multiplication of 4402 // xmm0 by xmm1. 4403 4404 // We shift the result of the multiplication by one bit position 4405 // to the left to cope for the fact that the bits are reversed. 4406 __ movdqu(xmm_temp7, xmm_temp3); 4407 __ movdqu(xmm_temp8, xmm_temp6); 4408 __ pslld(xmm_temp3, 1); 4409 __ pslld(xmm_temp6, 1); 4410 __ psrld(xmm_temp7, 31); 4411 __ psrld(xmm_temp8, 31); 4412 __ movdqu(xmm_temp9, xmm_temp7); 4413 __ pslldq(xmm_temp8, 4); 4414 __ pslldq(xmm_temp7, 4); 4415 __ psrldq(xmm_temp9, 12); 4416 __ por(xmm_temp3, xmm_temp7); 4417 __ por(xmm_temp6, xmm_temp8); 4418 __ por(xmm_temp6, xmm_temp9); 4419 4420 // 4421 // First phase of the reduction 4422 // 4423 // Move xmm3 into xmm7, xmm8, xmm9 in order to perform the shifts 4424 // independently. 4425 __ movdqu(xmm_temp7, xmm_temp3); 4426 __ movdqu(xmm_temp8, xmm_temp3); 4427 __ movdqu(xmm_temp9, xmm_temp3); 4428 __ pslld(xmm_temp7, 31); // packed right shift shifting << 31 4429 __ pslld(xmm_temp8, 30); // packed right shift shifting << 30 4430 __ pslld(xmm_temp9, 25); // packed right shift shifting << 25 4431 __ pxor(xmm_temp7, xmm_temp8); // xor the shifted versions 4432 __ pxor(xmm_temp7, xmm_temp9); 4433 __ movdqu(xmm_temp8, xmm_temp7); 4434 __ pslldq(xmm_temp7, 12); 4435 __ psrldq(xmm_temp8, 4); 4436 __ pxor(xmm_temp3, xmm_temp7); // first phase of the reduction complete 4437 4438 // 4439 // Second phase of the reduction 4440 // 4441 // Make 3 copies of xmm3 in xmm2, xmm4, xmm5 for doing these 4442 // shift operations. 
4443 __ movdqu(xmm_temp2, xmm_temp3); 4444 __ movdqu(xmm_temp4, xmm_temp3); 4445 __ movdqu(xmm_temp5, xmm_temp3); 4446 __ psrld(xmm_temp2, 1); // packed left shifting >> 1 4447 __ psrld(xmm_temp4, 2); // packed left shifting >> 2 4448 __ psrld(xmm_temp5, 7); // packed left shifting >> 7 4449 __ pxor(xmm_temp2, xmm_temp4); // xor the shifted versions 4450 __ pxor(xmm_temp2, xmm_temp5); 4451 __ pxor(xmm_temp2, xmm_temp8); 4452 __ pxor(xmm_temp3, xmm_temp2); 4453 __ pxor(xmm_temp6, xmm_temp3); // the result is in xmm6 4454 4455 __ decrement(blocks); 4456 __ jcc(Assembler::zero, L_exit); 4457 __ movdqu(xmm_temp0, xmm_temp6); 4458 __ addptr(data, 16); 4459 __ jmp(L_ghash_loop); 4460 4461 __ BIND(L_exit); 4462 __ pshufb(xmm_temp6, xmm_temp10); // Byte swap 16-byte result 4463 __ movdqu(Address(state, 0), xmm_temp6); // store the result 4464 4465 #ifdef _WIN64 4466 // restore xmm regs belonging to calling function 4467 for (int i = 6; i <= XMM_REG_LAST; i++) { 4468 __ movdqu(as_XMMRegister(i), xmm_save(i)); 4469 } 4470 #endif 4471 __ leave(); 4472 __ ret(0); 4473 return start; 4474 } 4475 4476 /** 4477 * Arguments: 4478 * 4479 * Inputs: 4480 * c_rarg0 - int crc 4481 * c_rarg1 - byte* buf 4482 * c_rarg2 - int length 4483 * 4484 * Ouput: 4485 * rax - int crc result 4486 */ 4487 address generate_updateBytesCRC32() { 4488 assert(UseCRC32Intrinsics, "need AVX and CLMUL instructions"); 4489 4490 __ align(CodeEntryAlignment); 4491 StubCodeMark mark(this, "StubRoutines", "updateBytesCRC32"); 4492 4493 address start = __ pc(); 4494 // Win64: rcx, rdx, r8, r9 (c_rarg0, c_rarg1, ...) 4495 // Unix: rdi, rsi, rdx, rcx, r8, r9 (c_rarg0, c_rarg1, ...) 4496 // rscratch1: r10 4497 const Register crc = c_rarg0; // crc 4498 const Register buf = c_rarg1; // source java byte array address 4499 const Register len = c_rarg2; // length 4500 const Register table = c_rarg3; // crc_table address (reuse register) 4501 const Register tmp = r11; 4502 assert_different_registers(crc, buf, len, table, tmp, rax); 4503 4504 BLOCK_COMMENT("Entry:"); 4505 __ enter(); // required for proper stackwalking of RuntimeStub frame 4506 4507 __ kernel_crc32(crc, buf, len, table, tmp); 4508 4509 __ movl(rax, crc); 4510 __ leave(); // required for proper stackwalking of RuntimeStub frame 4511 __ ret(0); 4512 4513 return start; 4514 } 4515 4516 /** 4517 * Arguments: 4518 * 4519 * Inputs: 4520 * c_rarg0 - int crc 4521 * c_rarg1 - byte* buf 4522 * c_rarg2 - long length 4523 * c_rarg3 - table_start - optional (present only when doing a library_call, 4524 * not used by x86 algorithm) 4525 * 4526 * Ouput: 4527 * rax - int crc result 4528 */ 4529 address generate_updateBytesCRC32C(bool is_pclmulqdq_supported) { 4530 assert(UseCRC32CIntrinsics, "need SSE4_2"); 4531 __ align(CodeEntryAlignment); 4532 StubCodeMark mark(this, "StubRoutines", "updateBytesCRC32C"); 4533 address start = __ pc(); 4534 //reg.arg int#0 int#1 int#2 int#3 int#4 int#5 float regs 4535 //Windows RCX RDX R8 R9 none none XMM0..XMM3 4536 //Lin / Sol RDI RSI RDX RCX R8 R9 XMM0..XMM7 4537 const Register crc = c_rarg0; // crc 4538 const Register buf = c_rarg1; // source java byte array address 4539 const Register len = c_rarg2; // length 4540 const Register a = rax; 4541 const Register j = r9; 4542 const Register k = r10; 4543 const Register l = r11; 4544 #ifdef _WIN64 4545 const Register y = rdi; 4546 const Register z = rsi; 4547 #else 4548 const Register y = rcx; 4549 const Register z = r8; 4550 #endif 4551 assert_different_registers(crc, buf, len, a, j, k, l, y, z); 4552 4553 
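  // Note: kernel_crc32 computes the same CRC-32 as java.util.zip.CRC32
  // (reflected polynomial 0xEDB88320) using the CLMUL-based folding that the
  // UseCRC32Intrinsics assert above demands; the result is returned in rax.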
BLOCK_COMMENT("Entry:"); 4554 __ enter(); // required for proper stackwalking of RuntimeStub frame 4555 #ifdef _WIN64 4556 __ push(y); 4557 __ push(z); 4558 #endif 4559 __ crc32c_ipl_alg2_alt2(crc, buf, len, 4560 a, j, k, 4561 l, y, z, 4562 c_farg0, c_farg1, c_farg2, 4563 is_pclmulqdq_supported); 4564 __ movl(rax, crc); 4565 #ifdef _WIN64 4566 __ pop(z); 4567 __ pop(y); 4568 #endif 4569 __ leave(); // required for proper stackwalking of RuntimeStub frame 4570 __ ret(0); 4571 4572 return start; 4573 } 4574 4575 /** 4576 * Arguments: 4577 * 4578 * Input: 4579 * c_rarg0 - x address 4580 * c_rarg1 - x length 4581 * c_rarg2 - y address 4582 * c_rarg3 - y lenth 4583 * not Win64 4584 * c_rarg4 - z address 4585 * c_rarg5 - z length 4586 * Win64 4587 * rsp+40 - z address 4588 * rsp+48 - z length 4589 */ 4590 address generate_multiplyToLen() { 4591 __ align(CodeEntryAlignment); 4592 StubCodeMark mark(this, "StubRoutines", "multiplyToLen"); 4593 4594 address start = __ pc(); 4595 // Win64: rcx, rdx, r8, r9 (c_rarg0, c_rarg1, ...) 4596 // Unix: rdi, rsi, rdx, rcx, r8, r9 (c_rarg0, c_rarg1, ...) 4597 const Register x = rdi; 4598 const Register xlen = rax; 4599 const Register y = rsi; 4600 const Register ylen = rcx; 4601 const Register z = r8; 4602 const Register zlen = r11; 4603 4604 // Next registers will be saved on stack in multiply_to_len(). 4605 const Register tmp1 = r12; 4606 const Register tmp2 = r13; 4607 const Register tmp3 = r14; 4608 const Register tmp4 = r15; 4609 const Register tmp5 = rbx; 4610 4611 BLOCK_COMMENT("Entry:"); 4612 __ enter(); // required for proper stackwalking of RuntimeStub frame 4613 4614 #ifndef _WIN64 4615 __ movptr(zlen, r9); // Save r9 in r11 - zlen 4616 #endif 4617 setup_arg_regs(4); // x => rdi, xlen => rsi, y => rdx 4618 // ylen => rcx, z => r8, zlen => r11 4619 // r9 and r10 may be used to save non-volatile registers 4620 #ifdef _WIN64 4621 // last 2 arguments (#4, #5) are on stack on Win64 4622 __ movptr(z, Address(rsp, 6 * wordSize)); 4623 __ movptr(zlen, Address(rsp, 7 * wordSize)); 4624 #endif 4625 4626 __ movptr(xlen, rsi); 4627 __ movptr(y, rdx); 4628 __ multiply_to_len(x, xlen, y, ylen, z, zlen, tmp1, tmp2, tmp3, tmp4, tmp5); 4629 4630 restore_arg_regs(); 4631 4632 __ leave(); // required for proper stackwalking of RuntimeStub frame 4633 __ ret(0); 4634 4635 return start; 4636 } 4637 4638 /** 4639 * Arguments: 4640 * 4641 * Input: 4642 * c_rarg0 - obja address 4643 * c_rarg1 - objb address 4644 * c_rarg3 - length length 4645 * c_rarg4 - scale log2_array_indxscale 4646 * 4647 * Output: 4648 * rax - int >= mismatched index, < 0 bitwise complement of tail 4649 */ 4650 address generate_vectorizedMismatch() { 4651 __ align(CodeEntryAlignment); 4652 StubCodeMark mark(this, "StubRoutines", "vectorizedMismatch"); 4653 address start = __ pc(); 4654 4655 BLOCK_COMMENT("Entry:"); 4656 __ enter(); 4657 4658 #ifdef _WIN64 // Win64: rcx, rdx, r8, r9 (c_rarg0, c_rarg1, ...) 4659 const Register scale = c_rarg0; //rcx, will exchange with r9 4660 const Register objb = c_rarg1; //rdx 4661 const Register length = c_rarg2; //r8 4662 const Register obja = c_rarg3; //r9 4663 __ xchgq(obja, scale); //now obja and scale contains the correct contents 4664 4665 const Register tmp1 = r10; 4666 const Register tmp2 = r11; 4667 #endif 4668 #ifndef _WIN64 // Unix: rdi, rsi, rdx, rcx, r8, r9 (c_rarg0, c_rarg1, ...) 
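  // Return convention (both ABIs), clarifying the header comment above: a
  // non-negative rax is the index of the first mismatching element, while a
  // negative rax is the bitwise complement of the number of tail elements the
  // vector loop did not compare, which the Java caller then checks one by one.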
4669 const Register obja = c_rarg0; //U:rdi 4670 const Register objb = c_rarg1; //U:rsi 4671 const Register length = c_rarg2; //U:rdx 4672 const Register scale = c_rarg3; //U:rcx 4673 const Register tmp1 = r8; 4674 const Register tmp2 = r9; 4675 #endif 4676 const Register result = rax; //return value 4677 const XMMRegister vec0 = xmm0; 4678 const XMMRegister vec1 = xmm1; 4679 const XMMRegister vec2 = xmm2; 4680 4681 __ vectorized_mismatch(obja, objb, length, scale, result, tmp1, tmp2, vec0, vec1, vec2); 4682 4683 __ leave(); 4684 __ ret(0); 4685 4686 return start; 4687 } 4688 4689 /** 4690 * Arguments: 4691 * 4692 // Input: 4693 // c_rarg0 - x address 4694 // c_rarg1 - x length 4695 // c_rarg2 - z address 4696 // c_rarg3 - z lenth 4697 * 4698 */ 4699 address generate_squareToLen() { 4700 4701 __ align(CodeEntryAlignment); 4702 StubCodeMark mark(this, "StubRoutines", "squareToLen"); 4703 4704 address start = __ pc(); 4705 // Win64: rcx, rdx, r8, r9 (c_rarg0, c_rarg1, ...) 4706 // Unix: rdi, rsi, rdx, rcx (c_rarg0, c_rarg1, ...) 4707 const Register x = rdi; 4708 const Register len = rsi; 4709 const Register z = r8; 4710 const Register zlen = rcx; 4711 4712 const Register tmp1 = r12; 4713 const Register tmp2 = r13; 4714 const Register tmp3 = r14; 4715 const Register tmp4 = r15; 4716 const Register tmp5 = rbx; 4717 4718 BLOCK_COMMENT("Entry:"); 4719 __ enter(); // required for proper stackwalking of RuntimeStub frame 4720 4721 setup_arg_regs(4); // x => rdi, len => rsi, z => rdx 4722 // zlen => rcx 4723 // r9 and r10 may be used to save non-volatile registers 4724 __ movptr(r8, rdx); 4725 __ square_to_len(x, len, z, zlen, tmp1, tmp2, tmp3, tmp4, tmp5, rdx, rax); 4726 4727 restore_arg_regs(); 4728 4729 __ leave(); // required for proper stackwalking of RuntimeStub frame 4730 __ ret(0); 4731 4732 return start; 4733 } 4734 4735 /** 4736 * Arguments: 4737 * 4738 * Input: 4739 * c_rarg0 - out address 4740 * c_rarg1 - in address 4741 * c_rarg2 - offset 4742 * c_rarg3 - len 4743 * not Win64 4744 * c_rarg4 - k 4745 * Win64 4746 * rsp+40 - k 4747 */ 4748 address generate_mulAdd() { 4749 __ align(CodeEntryAlignment); 4750 StubCodeMark mark(this, "StubRoutines", "mulAdd"); 4751 4752 address start = __ pc(); 4753 // Win64: rcx, rdx, r8, r9 (c_rarg0, c_rarg1, ...) 4754 // Unix: rdi, rsi, rdx, rcx, r8, r9 (c_rarg0, c_rarg1, ...) 4755 const Register out = rdi; 4756 const Register in = rsi; 4757 const Register offset = r11; 4758 const Register len = rcx; 4759 const Register k = r8; 4760 4761 // Next registers will be saved on stack in mul_add(). 
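  // As in multiplyToLen and squareToLen above, setup_arg_regs(4) below moves
  // the incoming Win64 arguments (rcx, rdx, r8, r9) into the Unix positions
  // (rdi, rsi, rdx, rcx), saving rdi/rsi as required, so one register
  // assignment works for both calling conventions; restore_arg_regs undoes it.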
4762 const Register tmp1 = r12; 4763 const Register tmp2 = r13; 4764 const Register tmp3 = r14; 4765 const Register tmp4 = r15; 4766 const Register tmp5 = rbx; 4767 4768 BLOCK_COMMENT("Entry:"); 4769 __ enter(); // required for proper stackwalking of RuntimeStub frame 4770 4771 setup_arg_regs(4); // out => rdi, in => rsi, offset => rdx 4772 // len => rcx, k => r8 4773 // r9 and r10 may be used to save non-volatile registers 4774 #ifdef _WIN64 4775 // last argument is on stack on Win64 4776 __ movl(k, Address(rsp, 6 * wordSize)); 4777 #endif 4778 __ movptr(r11, rdx); // move offset in rdx to offset(r11) 4779 __ mul_add(out, in, offset, len, k, tmp1, tmp2, tmp3, tmp4, tmp5, rdx, rax); 4780 4781 restore_arg_regs(); 4782 4783 __ leave(); // required for proper stackwalking of RuntimeStub frame 4784 __ ret(0); 4785 4786 return start; 4787 } 4788 4789 address generate_libmExp() { 4790 address start = __ pc(); 4791 4792 const XMMRegister x0 = xmm0; 4793 const XMMRegister x1 = xmm1; 4794 const XMMRegister x2 = xmm2; 4795 const XMMRegister x3 = xmm3; 4796 4797 const XMMRegister x4 = xmm4; 4798 const XMMRegister x5 = xmm5; 4799 const XMMRegister x6 = xmm6; 4800 const XMMRegister x7 = xmm7; 4801 4802 const Register tmp = r11; 4803 4804 BLOCK_COMMENT("Entry:"); 4805 __ enter(); // required for proper stackwalking of RuntimeStub frame 4806 4807 #ifdef _WIN64 4808 // save the xmm registers which must be preserved 6-7 4809 __ subptr(rsp, 4 * wordSize); 4810 __ movdqu(Address(rsp, 0), xmm6); 4811 __ movdqu(Address(rsp, 2 * wordSize), xmm7); 4812 #endif 4813 __ fast_exp(x0, x1, x2, x3, x4, x5, x6, x7, rax, rcx, rdx, tmp); 4814 4815 #ifdef _WIN64 4816 // restore xmm regs belonging to calling function 4817 __ movdqu(xmm6, Address(rsp, 0)); 4818 __ movdqu(xmm7, Address(rsp, 2 * wordSize)); 4819 __ addptr(rsp, 4 * wordSize); 4820 #endif 4821 4822 __ leave(); // required for proper stackwalking of RuntimeStub frame 4823 __ ret(0); 4824 4825 return start; 4826 4827 } 4828 4829 address generate_libmLog() { 4830 address start = __ pc(); 4831 4832 const XMMRegister x0 = xmm0; 4833 const XMMRegister x1 = xmm1; 4834 const XMMRegister x2 = xmm2; 4835 const XMMRegister x3 = xmm3; 4836 4837 const XMMRegister x4 = xmm4; 4838 const XMMRegister x5 = xmm5; 4839 const XMMRegister x6 = xmm6; 4840 const XMMRegister x7 = xmm7; 4841 4842 const Register tmp1 = r11; 4843 const Register tmp2 = r8; 4844 4845 BLOCK_COMMENT("Entry:"); 4846 __ enter(); // required for proper stackwalking of RuntimeStub frame 4847 4848 #ifdef _WIN64 4849 // save the xmm registers which must be preserved 6-7 4850 __ subptr(rsp, 4 * wordSize); 4851 __ movdqu(Address(rsp, 0), xmm6); 4852 __ movdqu(Address(rsp, 2 * wordSize), xmm7); 4853 #endif 4854 __ fast_log(x0, x1, x2, x3, x4, x5, x6, x7, rax, rcx, rdx, tmp1, tmp2); 4855 4856 #ifdef _WIN64 4857 // restore xmm regs belonging to calling function 4858 __ movdqu(xmm6, Address(rsp, 0)); 4859 __ movdqu(xmm7, Address(rsp, 2 * wordSize)); 4860 __ addptr(rsp, 4 * wordSize); 4861 #endif 4862 4863 __ leave(); // required for proper stackwalking of RuntimeStub frame 4864 __ ret(0); 4865 4866 return start; 4867 4868 } 4869 4870 address generate_libmLog10() { 4871 address start = __ pc(); 4872 4873 const XMMRegister x0 = xmm0; 4874 const XMMRegister x1 = xmm1; 4875 const XMMRegister x2 = xmm2; 4876 const XMMRegister x3 = xmm3; 4877 4878 const XMMRegister x4 = xmm4; 4879 const XMMRegister x5 = xmm5; 4880 const XMMRegister x6 = xmm6; 4881 const XMMRegister x7 = xmm7; 4882 4883 const Register tmp = r11; 4884 4885 
BLOCK_COMMENT("Entry:"); 4886 __ enter(); // required for proper stackwalking of RuntimeStub frame 4887 4888 #ifdef _WIN64 4889 // save the xmm registers which must be preserved 6-7 4890 __ subptr(rsp, 4 * wordSize); 4891 __ movdqu(Address(rsp, 0), xmm6); 4892 __ movdqu(Address(rsp, 2 * wordSize), xmm7); 4893 #endif 4894 __ fast_log10(x0, x1, x2, x3, x4, x5, x6, x7, rax, rcx, rdx, tmp); 4895 4896 #ifdef _WIN64 4897 // restore xmm regs belonging to calling function 4898 __ movdqu(xmm6, Address(rsp, 0)); 4899 __ movdqu(xmm7, Address(rsp, 2 * wordSize)); 4900 __ addptr(rsp, 4 * wordSize); 4901 #endif 4902 4903 __ leave(); // required for proper stackwalking of RuntimeStub frame 4904 __ ret(0); 4905 4906 return start; 4907 4908 } 4909 4910 address generate_libmPow() { 4911 address start = __ pc(); 4912 4913 const XMMRegister x0 = xmm0; 4914 const XMMRegister x1 = xmm1; 4915 const XMMRegister x2 = xmm2; 4916 const XMMRegister x3 = xmm3; 4917 4918 const XMMRegister x4 = xmm4; 4919 const XMMRegister x5 = xmm5; 4920 const XMMRegister x6 = xmm6; 4921 const XMMRegister x7 = xmm7; 4922 4923 const Register tmp1 = r8; 4924 const Register tmp2 = r9; 4925 const Register tmp3 = r10; 4926 const Register tmp4 = r11; 4927 4928 BLOCK_COMMENT("Entry:"); 4929 __ enter(); // required for proper stackwalking of RuntimeStub frame 4930 4931 #ifdef _WIN64 4932 // save the xmm registers which must be preserved 6-7 4933 __ subptr(rsp, 4 * wordSize); 4934 __ movdqu(Address(rsp, 0), xmm6); 4935 __ movdqu(Address(rsp, 2 * wordSize), xmm7); 4936 #endif 4937 __ fast_pow(x0, x1, x2, x3, x4, x5, x6, x7, rax, rcx, rdx, tmp1, tmp2, tmp3, tmp4); 4938 4939 #ifdef _WIN64 4940 // restore xmm regs belonging to calling function 4941 __ movdqu(xmm6, Address(rsp, 0)); 4942 __ movdqu(xmm7, Address(rsp, 2 * wordSize)); 4943 __ addptr(rsp, 4 * wordSize); 4944 #endif 4945 4946 __ leave(); // required for proper stackwalking of RuntimeStub frame 4947 __ ret(0); 4948 4949 return start; 4950 4951 } 4952 4953 address generate_libmSin() { 4954 address start = __ pc(); 4955 4956 const XMMRegister x0 = xmm0; 4957 const XMMRegister x1 = xmm1; 4958 const XMMRegister x2 = xmm2; 4959 const XMMRegister x3 = xmm3; 4960 4961 const XMMRegister x4 = xmm4; 4962 const XMMRegister x5 = xmm5; 4963 const XMMRegister x6 = xmm6; 4964 const XMMRegister x7 = xmm7; 4965 4966 const Register tmp1 = r8; 4967 const Register tmp2 = r9; 4968 const Register tmp3 = r10; 4969 const Register tmp4 = r11; 4970 4971 BLOCK_COMMENT("Entry:"); 4972 __ enter(); // required for proper stackwalking of RuntimeStub frame 4973 4974 #ifdef _WIN64 4975 __ push(rsi); 4976 __ push(rdi); 4977 // save the xmm registers which must be preserved 6-7 4978 __ subptr(rsp, 4 * wordSize); 4979 __ movdqu(Address(rsp, 0), xmm6); 4980 __ movdqu(Address(rsp, 2 * wordSize), xmm7); 4981 #endif 4982 __ fast_sin(x0, x1, x2, x3, x4, x5, x6, x7, rax, rbx, rcx, rdx, tmp1, tmp2, tmp3, tmp4); 4983 4984 #ifdef _WIN64 4985 // restore xmm regs belonging to calling function 4986 __ movdqu(xmm6, Address(rsp, 0)); 4987 __ movdqu(xmm7, Address(rsp, 2 * wordSize)); 4988 __ addptr(rsp, 4 * wordSize); 4989 __ pop(rdi); 4990 __ pop(rsi); 4991 #endif 4992 4993 __ leave(); // required for proper stackwalking of RuntimeStub frame 4994 __ ret(0); 4995 4996 return start; 4997 4998 } 4999 5000 address generate_libmCos() { 5001 address start = __ pc(); 5002 5003 const XMMRegister x0 = xmm0; 5004 const XMMRegister x1 = xmm1; 5005 const XMMRegister x2 = xmm2; 5006 const XMMRegister x3 = xmm3; 5007 5008 const XMMRegister x4 = xmm4; 
    const XMMRegister x5 = xmm5;
    const XMMRegister x6 = xmm6;
    const XMMRegister x7 = xmm7;

    const Register tmp1 = r8;
    const Register tmp2 = r9;
    const Register tmp3 = r10;
    const Register tmp4 = r11;

    BLOCK_COMMENT("Entry:");
    __ enter(); // required for proper stackwalking of RuntimeStub frame

#ifdef _WIN64
    __ push(rsi);
    __ push(rdi);
    // save the xmm registers which must be preserved 6-7
    __ subptr(rsp, 4 * wordSize);
    __ movdqu(Address(rsp, 0), xmm6);
    __ movdqu(Address(rsp, 2 * wordSize), xmm7);
#endif
    __ fast_cos(x0, x1, x2, x3, x4, x5, x6, x7, rax, rcx, rdx, tmp1, tmp2, tmp3, tmp4);

#ifdef _WIN64
    // restore xmm regs belonging to calling function
    __ movdqu(xmm6, Address(rsp, 0));
    __ movdqu(xmm7, Address(rsp, 2 * wordSize));
    __ addptr(rsp, 4 * wordSize);
    __ pop(rdi);
    __ pop(rsi);
#endif

    __ leave(); // required for proper stackwalking of RuntimeStub frame
    __ ret(0);

    return start;
  }

  address generate_libmTan() {
    address start = __ pc();

    const XMMRegister x0 = xmm0;
    const XMMRegister x1 = xmm1;
    const XMMRegister x2 = xmm2;
    const XMMRegister x3 = xmm3;

    const XMMRegister x4 = xmm4;
    const XMMRegister x5 = xmm5;
    const XMMRegister x6 = xmm6;
    const XMMRegister x7 = xmm7;

    const Register tmp1 = r8;
    const Register tmp2 = r9;
    const Register tmp3 = r10;
    const Register tmp4 = r11;

    BLOCK_COMMENT("Entry:");
    __ enter(); // required for proper stackwalking of RuntimeStub frame

#ifdef _WIN64
    __ push(rsi);
    __ push(rdi);
    // save the xmm registers which must be preserved 6-7
    __ subptr(rsp, 4 * wordSize);
    __ movdqu(Address(rsp, 0), xmm6);
    __ movdqu(Address(rsp, 2 * wordSize), xmm7);
#endif
    __ fast_tan(x0, x1, x2, x3, x4, x5, x6, x7, rax, rcx, rdx, tmp1, tmp2, tmp3, tmp4);

#ifdef _WIN64
    // restore xmm regs belonging to calling function
    __ movdqu(xmm6, Address(rsp, 0));
    __ movdqu(xmm7, Address(rsp, 2 * wordSize));
    __ addptr(rsp, 4 * wordSize);
    __ pop(rdi);
    __ pop(rsi);
#endif

    __ leave(); // required for proper stackwalking of RuntimeStub frame
    __ ret(0);

    return start;
  }

#undef __
#define __ masm->

  // Continuation point for throwing of implicit exceptions that are
  // not handled in the current activation. Fabricates an exception
  // oop and initiates normal exception dispatching in this
  // frame. Since we need to preserve callee-saved values (currently
  // only for C2, but done for C1 as well) we need a callee-saved oop
  // map and therefore have to make these stubs into RuntimeStubs
  // rather than BufferBlobs.  If the compiler needs all registers to
  // be preserved between the fault point and the exception handler
  // then it must assume responsibility for that in
  // AbstractCompiler::continuation_for_implicit_null_exception or
  // continuation_for_implicit_division_by_zero_exception.  All other
  // implicit exceptions (e.g., NullPointerException or
  // AbstractMethodError on entry) are either at call sites or
  // otherwise assume that stack unwinding will be initiated, so
  // caller saved registers were assumed volatile in the compiler.
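  //
  // In outline, each stub produced by generate_throw_exception() below
  // performs (this is only a summary of the code emitted by the generator,
  // not an additional stub):
  //
  //   enter                                  // build the RuntimeStub frame
  //   sub   rsp, (framesize-4)*BytesPerInt   // prolog
  //   <set last_Java_sp/fp/pc>; align rsp    // so the runtime can walk the frame
  //   mov   c_rarg0, r15_thread              // plus optional arg1/arg2
  //   call  runtime_entry                    // fabricates and posts the exception
  //   <reset last_Java_frame>; leave
  //   jmp   StubRoutines::forward_exception_entry()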
  address generate_throw_exception(const char* name,
                                   address runtime_entry,
                                   Register arg1 = noreg,
                                   Register arg2 = noreg) {
    // Information about frame layout at time of blocking runtime call.
    // Note that we only have to preserve callee-saved registers since
    // the compilers are responsible for supplying a continuation point
    // if they expect all registers to be preserved.
    enum layout {
      rbp_off = frame::arg_reg_save_area_bytes/BytesPerInt,
      rbp_off2,
      return_off,
      return_off2,
      framesize // inclusive of return address
    };

    int insts_size = 512;
    int locs_size  = 64;

    CodeBuffer code(name, insts_size, locs_size);
    OopMapSet* oop_maps  = new OopMapSet();
    MacroAssembler* masm = new MacroAssembler(&code);

    address start = __ pc();

    // This is an inlined and slightly modified version of call_VM
    // which has the ability to fetch the return PC out of
    // thread-local storage and also sets up last_Java_sp slightly
    // differently than the real call_VM

    __ enter(); // required for proper stackwalking of RuntimeStub frame

    assert(is_even(framesize/2), "sp not 16-byte aligned");

    // return address and rbp are already in place
    __ subptr(rsp, (framesize-4) << LogBytesPerInt); // prolog

    int frame_complete = __ pc() - start;

    // Set up last_Java_sp and last_Java_fp
    address the_pc = __ pc();
    __ set_last_Java_frame(rsp, rbp, the_pc);
    __ andptr(rsp, -(StackAlignmentInBytes)); // Align stack

    // Call runtime
    if (arg1 != noreg) {
      assert(arg2 != c_rarg1, "clobbered");
      __ movptr(c_rarg1, arg1);
    }
    if (arg2 != noreg) {
      __ movptr(c_rarg2, arg2);
    }
    __ movptr(c_rarg0, r15_thread);
    BLOCK_COMMENT("call runtime_entry");
    __ call(RuntimeAddress(runtime_entry));

    // Generate oop map
    OopMap* map = new OopMap(framesize, 0);

    oop_maps->add_gc_map(the_pc - start, map);

    __ reset_last_Java_frame(true);

    __ leave(); // required for proper stackwalking of RuntimeStub frame

    // check for pending exceptions
#ifdef ASSERT
    Label L;
    __ cmpptr(Address(r15_thread, Thread::pending_exception_offset()),
              (int32_t) NULL_WORD);
    __ jcc(Assembler::notEqual, L);
    __ should_not_reach_here();
    __ bind(L);
#endif // ASSERT
    __ jump(RuntimeAddress(StubRoutines::forward_exception_entry()));

    // codeBlob framesize is in words (not VMRegImpl::slot_size)
    RuntimeStub* stub =
      RuntimeStub::new_runtime_stub(name,
                                    &code,
                                    frame_complete,
                                    (framesize >> (LogBytesPerWord - LogBytesPerInt)),
                                    oop_maps, false);
    return stub->entry_point();
  }

  void create_control_words() {
    // Round to nearest, 53-bit mode, exceptions masked
    StubRoutines::_fpu_cntrl_wrd_std   = 0x027F;
    // Round to zero, 53-bit mode, exceptions masked
    StubRoutines::_fpu_cntrl_wrd_trunc = 0x0D7F;
    // Round to nearest, 24-bit mode, exceptions masked
    StubRoutines::_fpu_cntrl_wrd_24    = 0x007F;
    // Round to nearest, 64-bit mode, exceptions masked
    StubRoutines::_fpu_cntrl_wrd_64    = 0x037F;
    // Round to nearest, all exceptions masked
    StubRoutines::_mxcsr_std           = 0x1F80;
    // Note: the following two constants are 80-bit values
    //       layout is critical for correct loading by FPU.
    // Bias for strict fp multiply/divide
    StubRoutines::_fpu_subnormal_bias1[0] = 0x00000000; // 2^(-15360) == 0x03ff 8000 0000 0000 0000
    StubRoutines::_fpu_subnormal_bias1[1] = 0x80000000;
    StubRoutines::_fpu_subnormal_bias1[2] = 0x03ff;
    // Un-Bias for strict fp multiply/divide
    StubRoutines::_fpu_subnormal_bias2[0] = 0x00000000; // 2^(+15360) == 0x7bff 8000 0000 0000 0000
    StubRoutines::_fpu_subnormal_bias2[1] = 0x80000000;
    StubRoutines::_fpu_subnormal_bias2[2] = 0x7bff;
  }

  // Initialization
  void generate_initial() {
    // Generates all stubs and initializes the entry points

    // These platform-specific settings are needed by generate_call_stub()
    create_control_words();

    // entry points that exist on all platforms. Note: this is code
    // that could be shared among different platforms - however the
    // benefit seems to be smaller than the disadvantage of having a
    // much more complicated generator structure. See also comment in
    // stubRoutines.hpp.

    StubRoutines::_forward_exception_entry = generate_forward_exception();

    StubRoutines::_call_stub_entry =
      generate_call_stub(StubRoutines::_call_stub_return_address);

    // is referenced by megamorphic call
    StubRoutines::_catch_exception_entry = generate_catch_exception();

    // atomic calls
    StubRoutines::_atomic_xchg_entry         = generate_atomic_xchg();
    StubRoutines::_atomic_xchg_ptr_entry     = generate_atomic_xchg_ptr();
    StubRoutines::_atomic_cmpxchg_entry      = generate_atomic_cmpxchg();
    StubRoutines::_atomic_cmpxchg_byte_entry = generate_atomic_cmpxchg_byte();
    StubRoutines::_atomic_cmpxchg_long_entry = generate_atomic_cmpxchg_long();
    StubRoutines::_atomic_add_entry          = generate_atomic_add();
    StubRoutines::_atomic_add_ptr_entry      = generate_atomic_add_ptr();
    StubRoutines::_fence_entry               = generate_orderaccess_fence();

    // platform dependent
    StubRoutines::x86::_get_previous_fp_entry = generate_get_previous_fp();
    StubRoutines::x86::_get_previous_sp_entry = generate_get_previous_sp();

    StubRoutines::x86::_verify_mxcsr_entry = generate_verify_mxcsr();

    // Build this early so it's available for the interpreter.
    StubRoutines::_throw_StackOverflowError_entry =
      generate_throw_exception("StackOverflowError throw_exception",
                               CAST_FROM_FN_PTR(address,
                                                SharedRuntime::
                                                throw_StackOverflowError));
    StubRoutines::_throw_delayed_StackOverflowError_entry =
      generate_throw_exception("delayed StackOverflowError throw_exception",
                               CAST_FROM_FN_PTR(address,
                                                SharedRuntime::
                                                throw_delayed_StackOverflowError));
    if (UseCRC32Intrinsics) {
      // set table address before generating the stub which uses it
      StubRoutines::_crc_table_adr = (address)StubRoutines::x86::_crc_table;
      StubRoutines::_updateBytesCRC32 = generate_updateBytesCRC32();
    }

    if (UseCRC32CIntrinsics) {
      bool supports_clmul = VM_Version::supports_clmul();
      StubRoutines::x86::generate_CRC32C_table(supports_clmul);
      StubRoutines::_crc32c_table_addr = (address)StubRoutines::x86::_crc32c_table;
      StubRoutines::_updateBytesCRC32C = generate_updateBytesCRC32C(supports_clmul);
    }
    if (VM_Version::supports_sse2() && UseLibmIntrinsic) {
      if (vmIntrinsics::is_intrinsic_available(vmIntrinsics::_dsin) ||
          vmIntrinsics::is_intrinsic_available(vmIntrinsics::_dcos) ||
          vmIntrinsics::is_intrinsic_available(vmIntrinsics::_dtan)) {
        StubRoutines::x86::_ONEHALF_adr = (address)StubRoutines::x86::_ONEHALF;
        StubRoutines::x86::_P_2_adr = (address)StubRoutines::x86::_P_2;
        StubRoutines::x86::_SC_4_adr = (address)StubRoutines::x86::_SC_4;
        StubRoutines::x86::_Ctable_adr = (address)StubRoutines::x86::_Ctable;
        StubRoutines::x86::_SC_2_adr = (address)StubRoutines::x86::_SC_2;
        StubRoutines::x86::_SC_3_adr = (address)StubRoutines::x86::_SC_3;
        StubRoutines::x86::_SC_1_adr = (address)StubRoutines::x86::_SC_1;
        StubRoutines::x86::_PI_INV_TABLE_adr = (address)StubRoutines::x86::_PI_INV_TABLE;
        StubRoutines::x86::_PI_4_adr = (address)StubRoutines::x86::_PI_4;
        StubRoutines::x86::_PI32INV_adr = (address)StubRoutines::x86::_PI32INV;
        StubRoutines::x86::_SIGN_MASK_adr = (address)StubRoutines::x86::_SIGN_MASK;
        StubRoutines::x86::_P_1_adr = (address)StubRoutines::x86::_P_1;
        StubRoutines::x86::_P_3_adr = (address)StubRoutines::x86::_P_3;
        StubRoutines::x86::_NEG_ZERO_adr = (address)StubRoutines::x86::_NEG_ZERO;
      }
      if (vmIntrinsics::is_intrinsic_available(vmIntrinsics::_dexp)) {
        StubRoutines::_dexp = generate_libmExp();
      }
      if (vmIntrinsics::is_intrinsic_available(vmIntrinsics::_dlog)) {
        StubRoutines::_dlog = generate_libmLog();
      }
      if (vmIntrinsics::is_intrinsic_available(vmIntrinsics::_dlog10)) {
        StubRoutines::_dlog10 = generate_libmLog10();
      }
      if (vmIntrinsics::is_intrinsic_available(vmIntrinsics::_dpow)) {
        StubRoutines::_dpow = generate_libmPow();
      }
      if (vmIntrinsics::is_intrinsic_available(vmIntrinsics::_dsin)) {
        StubRoutines::_dsin = generate_libmSin();
      }
      if (vmIntrinsics::is_intrinsic_available(vmIntrinsics::_dcos)) {
        StubRoutines::_dcos = generate_libmCos();
      }
      if (vmIntrinsics::is_intrinsic_available(vmIntrinsics::_dtan)) {
        StubRoutines::_dtan = generate_libmTan();
      }
    }
  }

  void generate_all() {
    // Generates all stubs and initializes the entry points

    // These entry points require SharedInfo::stack0 to be set up in
    // non-core builds and need to be relocatable, so they each
    // fabricate a RuntimeStub internally.
    StubRoutines::_throw_AbstractMethodError_entry =
      generate_throw_exception("AbstractMethodError throw_exception",
                               CAST_FROM_FN_PTR(address,
                                                SharedRuntime::
                                                throw_AbstractMethodError));

    StubRoutines::_throw_IncompatibleClassChangeError_entry =
      generate_throw_exception("IncompatibleClassChangeError throw_exception",
                               CAST_FROM_FN_PTR(address,
                                                SharedRuntime::
                                                throw_IncompatibleClassChangeError));

    StubRoutines::_throw_NullPointerException_at_call_entry =
      generate_throw_exception("NullPointerException at call throw_exception",
                               CAST_FROM_FN_PTR(address,
                                                SharedRuntime::
                                                throw_NullPointerException_at_call));

    // entry points that are platform specific
    if (UseShenandoahGC) {
      StubRoutines::x86::_shenandoah_wb = generate_shenandoah_wb();
    }
    StubRoutines::x86::_f2i_fixup = generate_f2i_fixup();
    StubRoutines::x86::_f2l_fixup = generate_f2l_fixup();
    StubRoutines::x86::_d2i_fixup = generate_d2i_fixup();
    StubRoutines::x86::_d2l_fixup = generate_d2l_fixup();

    StubRoutines::x86::_float_sign_mask  = generate_fp_mask("float_sign_mask",  0x7FFFFFFF7FFFFFFF);
    StubRoutines::x86::_float_sign_flip  = generate_fp_mask("float_sign_flip",  0x8000000080000000);
    StubRoutines::x86::_double_sign_mask = generate_fp_mask("double_sign_mask", 0x7FFFFFFFFFFFFFFF);
    StubRoutines::x86::_double_sign_flip = generate_fp_mask("double_sign_flip", 0x8000000000000000);

    // support for verify_oop (must happen after universe_init)
    StubRoutines::_verify_oop_subroutine_entry = generate_verify_oop();

    // arraycopy stubs used by compilers
    generate_arraycopy_stubs();

    // don't bother generating these AES intrinsic stubs unless global flag is set
    if (UseAESIntrinsics) {
      StubRoutines::x86::_key_shuffle_mask_addr = generate_key_shuffle_mask();  // needed by the others
      StubRoutines::_aescrypt_encryptBlock = generate_aescrypt_encryptBlock();
      StubRoutines::_aescrypt_decryptBlock = generate_aescrypt_decryptBlock();
      StubRoutines::_cipherBlockChaining_encryptAESCrypt = generate_cipherBlockChaining_encryptAESCrypt();
      StubRoutines::_cipherBlockChaining_decryptAESCrypt = generate_cipherBlockChaining_decryptAESCrypt_Parallel();
    }
    if (UseAESCTRIntrinsics) {
      StubRoutines::x86::_counter_shuffle_mask_addr = generate_counter_shuffle_mask();
      StubRoutines::_counterMode_AESCrypt = generate_counterMode_AESCrypt_Parallel();
    }

    if (UseSHA1Intrinsics) {
      StubRoutines::x86::_upper_word_mask_addr = generate_upper_word_mask();
      StubRoutines::x86::_shuffle_byte_flip_mask_addr = generate_shuffle_byte_flip_mask();
      StubRoutines::_sha1_implCompress = generate_sha1_implCompress(false, "sha1_implCompress");
      StubRoutines::_sha1_implCompressMB = generate_sha1_implCompress(true, "sha1_implCompressMB");
    }
    if (UseSHA256Intrinsics) {
      StubRoutines::x86::_k256_adr = (address)StubRoutines::x86::_k256;
      char* dst = (char*)StubRoutines::x86::_k256_W;
      char* src = (char*)StubRoutines::x86::_k256;
      for (int ii = 0; ii < 16; ++ii) {
        memcpy(dst + 32 * ii,      src + 16 * ii, 16);
        memcpy(dst + 32 * ii + 16, src + 16 * ii, 16);
      }
      StubRoutines::x86::_k256_W_adr = (address)StubRoutines::x86::_k256_W;
      StubRoutines::x86::_pshuffle_byte_flip_mask_addr = generate_pshuffle_byte_flip_mask();
      StubRoutines::_sha256_implCompress = generate_sha256_implCompress(false, "sha256_implCompress");
      StubRoutines::_sha256_implCompressMB = generate_sha256_implCompress(true, "sha256_implCompressMB");
    }

    // Generate GHASH intrinsics code
    if (UseGHASHIntrinsics) {
      StubRoutines::x86::_ghash_long_swap_mask_addr = generate_ghash_long_swap_mask();
      StubRoutines::x86::_ghash_byte_swap_mask_addr = generate_ghash_byte_swap_mask();
      StubRoutines::_ghash_processBlocks = generate_ghash_processBlocks();
    }

    // Safefetch stubs.
    generate_safefetch("SafeFetch32", sizeof(int),     &StubRoutines::_safefetch32_entry,
                                                       &StubRoutines::_safefetch32_fault_pc,
                                                       &StubRoutines::_safefetch32_continuation_pc);
    generate_safefetch("SafeFetchN", sizeof(intptr_t), &StubRoutines::_safefetchN_entry,
                                                       &StubRoutines::_safefetchN_fault_pc,
                                                       &StubRoutines::_safefetchN_continuation_pc);
#ifdef COMPILER2
    if (UseMultiplyToLenIntrinsic) {
      StubRoutines::_multiplyToLen = generate_multiplyToLen();
    }
    if (UseSquareToLenIntrinsic) {
      StubRoutines::_squareToLen = generate_squareToLen();
    }
    if (UseMulAddIntrinsic) {
      StubRoutines::_mulAdd = generate_mulAdd();
    }
#ifndef _WINDOWS
    if (UseMontgomeryMultiplyIntrinsic) {
      StubRoutines::_montgomeryMultiply
        = CAST_FROM_FN_PTR(address, SharedRuntime::montgomery_multiply);
    }
    if (UseMontgomerySquareIntrinsic) {
      StubRoutines::_montgomerySquare
        = CAST_FROM_FN_PTR(address, SharedRuntime::montgomery_square);
    }
#endif // !_WINDOWS
#endif // COMPILER2

    if (UseVectorizedMismatchIntrinsic) {
      StubRoutines::_vectorizedMismatch = generate_vectorizedMismatch();
    }
  }

 public:
  StubGenerator(CodeBuffer* code, bool all) : StubCodeGenerator(code) {
    if (all) {
      generate_all();
    } else {
      generate_initial();
    }
  }
}; // end class declaration

void StubGenerator_generate(CodeBuffer* code, bool all) {
  StubGenerator g(code, all);
}
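
// Note: StubGenerator_generate() is expected to be invoked twice during VM
// startup (see stubRoutines.cpp): first with all == false, so that
// generate_initial() can provide the call stub, throw stubs, and other entry
// points needed early by the interpreter, and later with all == true so that
// generate_all() can emit the remaining compiler and intrinsic stubs.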