/*
 * Copyright (c) 1997, 2019, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2012, 2019, SAP SE. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "asm/macroAssembler.inline.hpp"
#include "gc/shared/barrierSet.hpp"
#include "gc/shared/barrierSetAssembler.hpp"
#include "interpreter/interpreter.hpp"
#include "nativeInst_ppc.hpp"
#include "oops/instanceOop.hpp"
#include "oops/method.hpp"
#include "oops/objArrayKlass.hpp"
#include "oops/oop.inline.hpp"
#include "prims/methodHandles.hpp"
#include "runtime/frame.inline.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubCodeGenerator.hpp"
#include "runtime/stubRoutines.hpp"
#include "runtime/thread.inline.hpp"
#include "utilities/align.hpp"

// Declaration and definition of StubGenerator (no .hpp file).
// For a more detailed description of the stub routine structure
// see the comment in stubRoutines.hpp.

#define __ _masm->

#ifdef PRODUCT
#define BLOCK_COMMENT(str) // nothing
#else
#define BLOCK_COMMENT(str) __ block_comment(str)
#endif

#if defined(ABI_ELFv2)
#define STUB_ENTRY(name) StubRoutines::name()
#else
#define STUB_ENTRY(name) ((FunctionDescriptor*)StubRoutines::name())->entry()
#endif

class StubGenerator: public StubCodeGenerator {
 private:

  // Call stubs are used to call Java from C.
  //
  // Arguments:
  //
  //   R3  - call wrapper address     : address
  //   R4  - result                   : intptr_t*
  //   R5  - result type              : BasicType
  //   R6  - method                   : Method
  //   R7  - frame mgr entry point    : address
  //   R8  - parameter block          : intptr_t*
  //   R9  - parameter count in words : int
  //   R10 - thread                   : Thread*
  //
  address generate_call_stub(address& return_address) {
    // Setup a new c frame, copy java arguments, call frame manager or
    // native_entry, and process result.
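    //
    // Rough sketch of how the C++ side reaches this stub (illustrative only;
    // the authoritative caller is JavaCalls::call_helper and the CallStub
    // typedef in stubRoutines.hpp):
    //
    //   StubRoutines::CallStub entry = StubRoutines::call_stub();
    //   entry(link, result, result_type, method, entry_point,
    //         parameters, parameter_words, thread);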

    StubCodeMark mark(this, "StubRoutines", "call_stub");

    address start = __ function_entry();

    // some sanity checks
    assert((sizeof(frame::abi_minframe) % 16) == 0,           "unaligned");
    assert((sizeof(frame::abi_reg_args) % 16) == 0,           "unaligned");
    assert((sizeof(frame::spill_nonvolatiles) % 16) == 0,     "unaligned");
    assert((sizeof(frame::parent_ijava_frame_abi) % 16) == 0, "unaligned");
    assert((sizeof(frame::entry_frame_locals) % 16) == 0,     "unaligned");

    Register r_arg_call_wrapper_addr = R3;
    Register r_arg_result_addr       = R4;
    Register r_arg_result_type       = R5;
    Register r_arg_method            = R6;
    Register r_arg_entry             = R7;
    Register r_arg_thread            = R10;

    Register r_temp                  = R24;
    Register r_top_of_arguments_addr = R25;
    Register r_entryframe_fp         = R26;

    {
      // Stack on entry to call_stub:
      //
      //      F1      [C_FRAME]
      //              ...

      Register r_arg_argument_addr        = R8;
      Register r_arg_argument_count       = R9;
      Register r_frame_alignment_in_bytes = R27;
      Register r_argument_addr            = R28;
      Register r_argumentcopy_addr        = R29;
      Register r_argument_size_in_bytes   = R30;
      Register r_frame_size               = R23;

      Label arguments_copied;

      // Save LR/CR to caller's C_FRAME.
      __ save_LR_CR(R0);

      // Zero extend arg_argument_count.
      __ clrldi(r_arg_argument_count, r_arg_argument_count, 32);

      // Save non-volatile GPRs to ENTRY_FRAME (not yet pushed, but it's safe).
      __ save_nonvolatile_gprs(R1_SP, _spill_nonvolatiles_neg(r14));

      // Keep copy of our frame pointer (caller's SP).
      __ mr(r_entryframe_fp, R1_SP);

      BLOCK_COMMENT("Push ENTRY_FRAME including arguments");
      // Push ENTRY_FRAME including arguments:
      //
      //      F0      [TOP_IJAVA_FRAME_ABI]
      //              alignment (optional)
      //              [outgoing Java arguments]
      //              [ENTRY_FRAME_LOCALS]
      //      F1      [C_FRAME]
      //              ...

      // calculate frame size

      // unaligned size of arguments
      __ sldi(r_argument_size_in_bytes,
              r_arg_argument_count, Interpreter::logStackElementSize);
      // arguments alignment (max 1 slot)
      // FIXME: use round_to() here
      __ andi_(r_frame_alignment_in_bytes, r_arg_argument_count, 1);
      __ sldi(r_frame_alignment_in_bytes,
              r_frame_alignment_in_bytes, Interpreter::logStackElementSize);

      // size = unaligned size of arguments + top abi's size
      __ addi(r_frame_size, r_argument_size_in_bytes,
              frame::top_ijava_frame_abi_size);
      // size += arguments alignment
      __ add(r_frame_size,
             r_frame_size, r_frame_alignment_in_bytes);
      // size += size of call_stub locals
      __ addi(r_frame_size,
              r_frame_size, frame::entry_frame_locals_size);

      // push ENTRY_FRAME
      __ push_frame(r_frame_size, r_temp);

      // initialize call_stub locals (step 1)
      __ std(r_arg_call_wrapper_addr,
             _entry_frame_locals_neg(call_wrapper_address), r_entryframe_fp);
      __ std(r_arg_result_addr,
             _entry_frame_locals_neg(result_address), r_entryframe_fp);
      __ std(r_arg_result_type,
             _entry_frame_locals_neg(result_type), r_entryframe_fp);
      // we will save arguments_tos_address later

      BLOCK_COMMENT("Copy Java arguments");
      // copy Java arguments

      // Calculate top_of_arguments_addr which will be R17_tos (not prepushed) later.
      // FIXME: why not simply use SP+frame::top_ijava_frame_size?
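      // r_top_of_arguments_addr = SP + top_ijava_frame_abi_size (+ one alignment
      // slot if the argument count was odd); this is where the outgoing Java
      // arguments start within the just-pushed ENTRY_FRAME.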
      __ addi(r_top_of_arguments_addr,
              R1_SP, frame::top_ijava_frame_abi_size);
      __ add(r_top_of_arguments_addr,
             r_top_of_arguments_addr, r_frame_alignment_in_bytes);

      // any arguments to copy?
      __ cmpdi(CCR0, r_arg_argument_count, 0);
      __ beq(CCR0, arguments_copied);

      // prepare loop and copy arguments in reverse order
      {
        // init CTR with arg_argument_count
        __ mtctr(r_arg_argument_count);

        // let r_argumentcopy_addr point to last outgoing Java argument
        __ mr(r_argumentcopy_addr, r_top_of_arguments_addr);

        // let r_argument_addr point to last incoming java argument
        __ add(r_argument_addr,
               r_arg_argument_addr, r_argument_size_in_bytes);
        __ addi(r_argument_addr, r_argument_addr, -BytesPerWord);

        // now loop while CTR > 0 and copy arguments
        {
          Label next_argument;
          __ bind(next_argument);

          __ ld(r_temp, 0, r_argument_addr);
          // argument_addr--;
          __ addi(r_argument_addr, r_argument_addr, -BytesPerWord);
          __ std(r_temp, 0, r_argumentcopy_addr);
          // argumentcopy_addr++;
          __ addi(r_argumentcopy_addr, r_argumentcopy_addr, BytesPerWord);

          __ bdnz(next_argument);
        }
      }

      // Arguments copied, continue.
      __ bind(arguments_copied);
    }

    {
      BLOCK_COMMENT("Call frame manager or native entry.");
      // Call frame manager or native entry.
      Register r_new_arg_entry = R14;
      assert_different_registers(r_new_arg_entry, r_top_of_arguments_addr,
                                 r_arg_method, r_arg_thread);

      __ mr(r_new_arg_entry, r_arg_entry);

      // Register state on entry to frame manager / native entry:
      //
      //   tos         - intptr_t*    sender tos (prepushed) Lesp = (SP) + copied_arguments_offset - 8
      //   R19_method  - Method
      //   R16_thread  - JavaThread*

      // Tos must point to last argument - element_size.
      const Register tos = R15_esp;

      __ addi(tos, r_top_of_arguments_addr, -Interpreter::stackElementSize);

      // initialize call_stub locals (step 2)
      // now save tos as arguments_tos_address
      __ std(tos, _entry_frame_locals_neg(arguments_tos_address), r_entryframe_fp);

      // load argument registers for call
      __ mr(R19_method, r_arg_method);
      __ mr(R16_thread, r_arg_thread);
      assert(tos != r_arg_method, "trashed r_arg_method");
      assert(tos != r_arg_thread && R19_method != r_arg_thread, "trashed r_arg_thread");

      // Set R15_prev_state to 0 for simplifying checks in callee.
      __ load_const_optimized(R25_templateTableBase, (address)Interpreter::dispatch_table((TosState)0), R11_scratch1);
      // Stack on entry to frame manager / native entry:
      //
      //      F0      [TOP_IJAVA_FRAME_ABI]
      //              alignment (optional)
      //              [outgoing Java arguments]
      //              [ENTRY_FRAME_LOCALS]
      //      F1      [C_FRAME]
      //              ...
      //

      // global toc register
      __ load_const_optimized(R29_TOC, MacroAssembler::global_toc(), R11_scratch1);
      // Remember the senderSP so the interpreter can pop c2i arguments off of the stack
      // when called via a c2i.

      // Pass initial_caller_sp to framemanager.
      __ mr(R21_sender_SP, R1_SP);

      // Do a light-weight C-call here, r_new_arg_entry holds the address
      // of the interpreter entry point (frame manager or native entry)
      // and save runtime-value of LR in return_address.
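      // call_stub() below emits a register-indirect call through r_new_arg_entry
      // and (as far as this stub is concerned) hands back the pc right after the
      // branch; that pc is recorded by the VM as StubRoutines::_call_stub_return_address
      // and is what generate_catch_exception() returns to.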
      assert(r_new_arg_entry != tos && r_new_arg_entry != R19_method && r_new_arg_entry != R16_thread,
             "trashed r_new_arg_entry");
      return_address = __ call_stub(r_new_arg_entry);
    }

    {
      BLOCK_COMMENT("Returned from frame manager or native entry.");
      // Returned from frame manager or native entry.
      // Now pop frame, process result, and return to caller.

      // Stack on exit from frame manager / native entry:
      //
      //      F0      [ABI]
      //              ...
      //              [ENTRY_FRAME_LOCALS]
      //      F1      [C_FRAME]
      //              ...
      //
      // Just pop the topmost frame ...
      //

      Label ret_is_object;
      Label ret_is_long;
      Label ret_is_float;
      Label ret_is_double;

      Register r_entryframe_fp = R30;
      Register r_lr            = R7_ARG5;
      Register r_cr            = R8_ARG6;

      // Reload some volatile registers which we've spilled before the call
      // to frame manager / native entry.
      // Access all locals via frame pointer, because we know nothing about
      // the topmost frame's size.
      __ ld(r_entryframe_fp, _abi(callers_sp), R1_SP);
      assert_different_registers(r_entryframe_fp, R3_RET, r_arg_result_addr, r_arg_result_type, r_cr, r_lr);
      __ ld(r_arg_result_addr,
            _entry_frame_locals_neg(result_address), r_entryframe_fp);
      __ ld(r_arg_result_type,
            _entry_frame_locals_neg(result_type), r_entryframe_fp);
      __ ld(r_cr, _abi(cr), r_entryframe_fp);
      __ ld(r_lr, _abi(lr), r_entryframe_fp);

      // pop frame and restore non-volatiles, LR and CR
      __ mr(R1_SP, r_entryframe_fp);
      __ mtcr(r_cr);
      __ mtlr(r_lr);

      // Store result depending on type. Everything that is not
      // T_OBJECT, T_LONG, T_FLOAT, or T_DOUBLE is treated as T_INT.
      __ cmpwi(CCR0, r_arg_result_type, T_OBJECT);
      __ cmpwi(CCR1, r_arg_result_type, T_LONG);
      __ cmpwi(CCR5, r_arg_result_type, T_FLOAT);
      __ cmpwi(CCR6, r_arg_result_type, T_DOUBLE);

      // restore non-volatile registers
      __ restore_nonvolatile_gprs(R1_SP, _spill_nonvolatiles_neg(r14));

      // Stack on exit from call_stub:
      //
      //      0       [C_FRAME]
      //              ...
      //
      // no call_stub frames left.

      // All non-volatiles have been restored at this point!!
      assert(R3_RET == R3, "R3_RET should be R3");

      __ beq(CCR0, ret_is_object);
      __ beq(CCR1, ret_is_long);
      __ beq(CCR5, ret_is_float);
      __ beq(CCR6, ret_is_double);

      // default:
      __ stw(R3_RET, 0, r_arg_result_addr);
      __ blr(); // return to caller

      // case T_OBJECT:
      __ bind(ret_is_object);
      __ std(R3_RET, 0, r_arg_result_addr);
      __ blr(); // return to caller

      // case T_LONG:
      __ bind(ret_is_long);
      __ std(R3_RET, 0, r_arg_result_addr);
      __ blr(); // return to caller

      // case T_FLOAT:
      __ bind(ret_is_float);
      __ stfs(F1_RET, 0, r_arg_result_addr);
      __ blr(); // return to caller

      // case T_DOUBLE:
      __ bind(ret_is_double);
      __ stfd(F1_RET, 0, r_arg_result_addr);
      __ blr(); // return to caller
    }

    return start;
  }

  // Return point for a Java call if there's an exception thrown in
  // Java code. The exception is caught and transformed into a
  // pending exception stored in JavaThread that can be tested from
  // within the VM.
  //
  address generate_catch_exception() {
    StubCodeMark mark(this, "StubRoutines", "catch_exception");

    address start = __ pc();

    // Registers alive
    //
    //   R16_thread
    //   R3_ARG1 - address of pending exception
    //   R4_ARG2 - return address in call stub

    const Register exception_file = R21_tmp1;
    const Register exception_line = R22_tmp2;

    __ load_const(exception_file, (void*)__FILE__);
    __ load_const(exception_line, (void*)__LINE__);

    __ std(R3_ARG1, in_bytes(JavaThread::pending_exception_offset()), R16_thread);
    // store into `char *'
    __ std(exception_file, in_bytes(JavaThread::exception_file_offset()), R16_thread);
    // store into `int'
    __ stw(exception_line, in_bytes(JavaThread::exception_line_offset()), R16_thread);

    // complete return to VM
    assert(StubRoutines::_call_stub_return_address != NULL, "must have been generated before");

    __ mtlr(R4_ARG2);
    // continue in call stub
    __ blr();

    return start;
  }

  // Continuation point for runtime calls returning with a pending
  // exception. The pending exception check happened in the runtime
  // or native call stub. The pending exception in Thread is
  // converted into a Java-level exception.
  //
  // Read:
  //
  //   LR:     The pc the runtime library callee wants to return to.
  //           Since the exception occurred in the callee, the return pc
  //           from the point of view of Java is the exception pc.
  //   thread: Needed for method handles.
  //
  // Invalidate:
  //
  //   volatile registers (except below).
  //
  // Update:
  //
  //   R4_ARG2: exception
  //
  // (LR is unchanged and is live out).
  //
  address generate_forward_exception() {
    StubCodeMark mark(this, "StubRoutines", "forward_exception");
    address start = __ pc();

#if !defined(PRODUCT)
    if (VerifyOops) {
      // Get pending exception oop.
      __ ld(R3_ARG1,
            in_bytes(Thread::pending_exception_offset()),
            R16_thread);
      // Make sure that this code is only executed if there is a pending exception.
      {
        Label L;
        __ cmpdi(CCR0, R3_ARG1, 0);
        __ bne(CCR0, L);
        __ stop("StubRoutines::forward exception: no pending exception (1)");
        __ bind(L);
      }
      __ verify_oop(R3_ARG1, "StubRoutines::forward exception: not an oop");
    }
#endif

    // Save LR/CR and copy exception pc (LR) into R4_ARG2.
    __ save_LR_CR(R4_ARG2);
    __ push_frame_reg_args(0, R0);
    // Find exception handler.
    __ call_VM_leaf(CAST_FROM_FN_PTR(address,
                    SharedRuntime::exception_handler_for_return_address),
                    R16_thread,
                    R4_ARG2);
    // Copy handler's address.
    __ mtctr(R3_RET);
    __ pop_frame();
    __ restore_LR_CR(R0);

    // Set up the arguments for the exception handler:
    //  - R3_ARG1: exception oop
    //  - R4_ARG2: exception pc.

    // Load pending exception oop.
    __ ld(R3_ARG1,
          in_bytes(Thread::pending_exception_offset()),
          R16_thread);

    // The exception pc is the return address in the caller.
    // Must load it into R4_ARG2.
    __ mflr(R4_ARG2);

#ifdef ASSERT
    // Make sure exception is set.
    {
      Label L;
      __ cmpdi(CCR0, R3_ARG1, 0);
      __ bne(CCR0, L);
      __ stop("StubRoutines::forward exception: no pending exception (2)");
      __ bind(L);
    }
#endif

    // Clear the pending exception.
    __ li(R0, 0);
    __ std(R0,
           in_bytes(Thread::pending_exception_offset()),
           R16_thread);
    // Jump to exception handler.
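    // CTR was loaded with the handler address (mtctr(R3_RET) above);
    // R3_ARG1 holds the exception oop and R4_ARG2 the exception pc.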
    __ bctr();

    return start;
  }

#undef __
#define __ masm->
  // Continuation point for throwing of implicit exceptions that are
  // not handled in the current activation. Fabricates an exception
  // oop and initiates normal exception dispatching in this
  // frame. Only callee-saved registers are preserved (through the
  // normal register window / RegisterMap handling). If the compiler
  // needs all registers to be preserved between the fault point and
  // the exception handler then it must assume responsibility for that
  // in AbstractCompiler::continuation_for_implicit_null_exception or
  // continuation_for_implicit_division_by_zero_exception. All other
  // implicit exceptions (e.g., NullPointerException or
  // AbstractMethodError on entry) are either at call sites or
  // otherwise assume that stack unwinding will be initiated, so
  // caller saved registers were assumed volatile in the compiler.
  //
  // Note that we generate only this stub into a RuntimeStub, because
  // it needs to be properly traversed and ignored during GC, so we
  // change the meaning of the "__" macro within this method.
  //
  // Note: the routine set_pc_not_at_call_for_caller in
  // SharedRuntime.cpp requires that this code be generated into a
  // RuntimeStub.
  address generate_throw_exception(const char* name, address runtime_entry, bool restore_saved_exception_pc,
                                   Register arg1 = noreg, Register arg2 = noreg) {
    CodeBuffer code(name, 1024 DEBUG_ONLY(+ 512), 0);
    MacroAssembler* masm = new MacroAssembler(&code);

    OopMapSet* oop_maps = new OopMapSet();
    int frame_size_in_bytes = frame::abi_reg_args_size;
    OopMap* map = new OopMap(frame_size_in_bytes / sizeof(jint), 0);

    address start = __ pc();

    __ save_LR_CR(R11_scratch1);

    // Push a frame.
    __ push_frame_reg_args(0, R11_scratch1);

    address frame_complete_pc = __ pc();

    if (restore_saved_exception_pc) {
      __ unimplemented("StubGenerator::throw_exception with restore_saved_exception_pc", 74);
    }

    // Note that we always have a runtime stub frame on the top of
    // stack by this point. Remember the offset of the instruction
    // whose address will be moved to R11_scratch1.
    address gc_map_pc = __ get_PC_trash_LR(R11_scratch1);

    __ set_last_Java_frame(/*sp*/R1_SP, /*pc*/R11_scratch1);

    __ mr(R3_ARG1, R16_thread);
    if (arg1 != noreg) {
      __ mr(R4_ARG2, arg1);
    }
    if (arg2 != noreg) {
      __ mr(R5_ARG3, arg2);
    }
#if defined(ABI_ELFv2)
    __ call_c(runtime_entry, relocInfo::none);
#else
    __ call_c(CAST_FROM_FN_PTR(FunctionDescriptor*, runtime_entry), relocInfo::none);
#endif

    // Set an oopmap for the call site.
    oop_maps->add_gc_map((int)(gc_map_pc - start), map);

    __ reset_last_Java_frame();

#ifdef ASSERT
    // Make sure that this code is only executed if there is a pending
    // exception.
    {
      Label L;
      __ ld(R0,
            in_bytes(Thread::pending_exception_offset()),
            R16_thread);
      __ cmpdi(CCR0, R0, 0);
      __ bne(CCR0, L);
      __ stop("StubRoutines::throw_exception: no pending exception");
      __ bind(L);
    }
#endif

    // Pop frame.
    __ pop_frame();

    __ restore_LR_CR(R11_scratch1);

    __ load_const(R11_scratch1, StubRoutines::forward_exception_entry());
    __ mtctr(R11_scratch1);
    __ bctr();

    // Create runtime stub with OopMap.
    RuntimeStub* stub =
      RuntimeStub::new_runtime_stub(name, &code,
                                    /*frame_complete=*/ (int)(frame_complete_pc - start),
                                    frame_size_in_bytes/wordSize,
                                    oop_maps,
                                    false);
    return stub->entry_point();
  }
#undef __
#define __ _masm->


  // Support for void zero_words_aligned8(HeapWord* to, size_t count)
  //
  // Arguments:
  //   to:
  //   count:
  //
  // Destroys:
  //
  address generate_zero_words_aligned8() {
    StubCodeMark mark(this, "StubRoutines", "zero_words_aligned8");

    // Implemented as in ClearArray.
    address start = __ function_entry();

    Register base_ptr_reg   = R3_ARG1; // tohw (needs to be 8b aligned)
    Register cnt_dwords_reg = R4_ARG2; // count (in dwords)
    Register tmp1_reg       = R5_ARG3;
    Register tmp2_reg       = R6_ARG4;
    Register zero_reg       = R7_ARG5;

    // Procedure for large arrays (uses data cache block zero instruction).
    Label dwloop, fast, fastloop, restloop, lastdword, done;
    int cl_size = VM_Version::L1_data_cache_line_size();
    int cl_dwords = cl_size >> 3;
    int cl_dwordaddr_bits = exact_log2(cl_dwords);
    int min_dcbz = 2; // Needs to be positive, apply dcbz only to at least min_dcbz cache lines.

    // Clear up to 128byte boundary if long enough, dword_cnt=(16-(base>>3))%16.
    __ dcbtst(base_ptr_reg);               // Indicate write access to first cache line ...
    __ andi(tmp2_reg, cnt_dwords_reg, 1);  // to check if number of dwords is even.
    __ srdi_(tmp1_reg, cnt_dwords_reg, 1); // number of double dwords
    __ load_const_optimized(zero_reg, 0L); // Use as zero register.

    __ cmpdi(CCR1, tmp2_reg, 0);           // cnt_dwords even?
    __ beq(CCR0, lastdword);               // size <= 1
    __ mtctr(tmp1_reg);                    // Speculatively preload counter for rest loop (>0).
    __ cmpdi(CCR0, cnt_dwords_reg, (min_dcbz+1)*cl_dwords-1); // Big enough to ensure >=min_dcbz cache lines are included?
    __ neg(tmp1_reg, base_ptr_reg);        // bit 0..58: bogus, bit 57..60: (16-(base>>3))%16, bit 61..63: 000

    __ blt(CCR0, restloop);                // Too small. (<31=(2*cl_dwords)-1 is sufficient, but bigger performs better.)
    __ rldicl_(tmp1_reg, tmp1_reg, 64-3, 64-cl_dwordaddr_bits); // Extract number of dwords to 128byte boundary=(16-(base>>3))%16.

    __ beq(CCR0, fast);                    // already 128byte aligned
    __ mtctr(tmp1_reg);                    // Set ctr to hit 128byte boundary (0<ctr<cnt).
    __ subf(cnt_dwords_reg, tmp1_reg, cnt_dwords_reg); // rest (>0 since size>=256-8)

    // Clear in first cache line dword-by-dword if not already 128byte aligned.
    __ bind(dwloop);
    __ std(zero_reg, 0, base_ptr_reg);     // Clear 8byte aligned block.
    __ addi(base_ptr_reg, base_ptr_reg, 8);
    __ bdnz(dwloop);

    // clear 128byte blocks
    __ bind(fast);
    __ srdi(tmp1_reg, cnt_dwords_reg, cl_dwordaddr_bits); // loop count for 128byte loop (>0 since size>=256-8)
    __ andi(tmp2_reg, cnt_dwords_reg, 1);  // to check if rest even

    __ mtctr(tmp1_reg);                    // load counter
    __ cmpdi(CCR1, tmp2_reg, 0);           // rest even?
    __ rldicl_(tmp1_reg, cnt_dwords_reg, 63, 65-cl_dwordaddr_bits); // rest in double dwords

    __ bind(fastloop);
    __ dcbz(base_ptr_reg);                 // Clear 128byte aligned block.
    __ addi(base_ptr_reg, base_ptr_reg, cl_size);
    __ bdnz(fastloop);

    //__ dcbtst(base_ptr_reg);             // Indicate write access to last cache line.
    __ beq(CCR0, lastdword);               // rest<=1
    __ mtctr(tmp1_reg);                    // load counter

    // Clear rest.
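    // restloop clears two dwords (16 bytes) per iteration; CTR holds the
    // remaining double-dword count computed above, and any odd trailing
    // dword is handled at lastdword via CCR1.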
    __ bind(restloop);
    __ std(zero_reg, 0, base_ptr_reg); // Clear 8byte aligned block.
    __ std(zero_reg, 8, base_ptr_reg); // Clear 8byte aligned block.
    __ addi(base_ptr_reg, base_ptr_reg, 16);
    __ bdnz(restloop);

    __ bind(lastdword);
    __ beq(CCR1, done);
    __ std(zero_reg, 0, base_ptr_reg);
    __ bind(done);
    __ blr(); // return

    return start;
  }

#if !defined(PRODUCT)
  // Wrapper which calls oopDesc::is_oop_or_null()
  // Only called by MacroAssembler::verify_oop
  static void verify_oop_helper(const char* message, oop o) {
    if (!oopDesc::is_oop_or_null(o)) {
      fatal("%s", message);
    }
    ++ StubRoutines::_verify_oop_count;
  }
#endif

  // Return address of code to be called from code generated by
  // MacroAssembler::verify_oop.
  //
  // Don't generate, rather use C++ code.
  address generate_verify_oop() {
    // this is actually a `FunctionDescriptor*'.
    address start = 0;

#if !defined(PRODUCT)
    start = CAST_FROM_FN_PTR(address, verify_oop_helper);
#endif

    return start;
  }


  // -XX:+OptimizeFill : convert fill/copy loops into intrinsic
  //
  // The code is implemented (ported from SPARC) as we believe it benefits JVM98; however,
  // tracing (-XX:+TraceOptimizeFill) shows the intrinsic replacement doesn't happen at all!
  //
  // Source code in function is_range_check_if() shows that OptimizeFill relaxed the condition
  // for turning on loop predication optimization, and hence the behavior of "array range check"
  // and "loop invariant check" could be influenced, which potentially boosted JVM98.
  //
  // Generate stub for disjoint short fill. If "aligned" is true, the
  // "to" address is assumed to be heapword aligned.
  //
  // Arguments for generated stub:
  //   to:    R3_ARG1
  //   value: R4_ARG2
  //   count: R5_ARG3 treated as signed
  //
  address generate_fill(BasicType t, bool aligned, const char* name) {
    StubCodeMark mark(this, "StubRoutines", name);
    address start = __ function_entry();

    const Register to    = R3_ARG1;   // source array address
    const Register value = R4_ARG2;   // fill value
    const Register count = R5_ARG3;   // elements count
    const Register temp  = R6_ARG4;   // temp register

    //assert_clean_int(count, O3);    // Make sure 'count' is clean int.

    Label L_exit, L_skip_align1, L_skip_align2, L_fill_byte;
    Label L_fill_2_bytes, L_fill_4_bytes, L_fill_elements, L_fill_32_bytes;

    int shift = -1;
    switch (t) {
      case T_BYTE:
        shift = 2;
        // Clone bytes (zero extend not needed because store instructions below ignore high order bytes).
        __ rldimi(value, value, 8, 48);  // 8 bit -> 16 bit
        __ cmpdi(CCR0, count, 2<<shift); // Short arrays (< 8 bytes) fill by element.
        __ blt(CCR0, L_fill_elements);
        __ rldimi(value, value, 16, 32); // 16 bit -> 32 bit
        break;
      case T_SHORT:
        shift = 1;
        // Clone bytes (zero extend not needed because store instructions below ignore high order bytes).
        __ rldimi(value, value, 16, 32); // 16 bit -> 32 bit
        __ cmpdi(CCR0, count, 2<<shift); // Short arrays (< 8 bytes) fill by element.
        __ blt(CCR0, L_fill_elements);
        break;
      case T_INT:
        shift = 0;
        __ cmpdi(CCR0, count, 2<<shift); // Short arrays (< 8 bytes) fill by element.
        __ blt(CCR0, L_fill_4_bytes);
        break;
      default: ShouldNotReachHere();
    }

    if (!aligned && (t == T_BYTE || t == T_SHORT)) {
      // Align source address at 4 bytes address boundary.
      if (t == T_BYTE) {
        // One byte misalignment happens only for byte arrays.
        __ andi_(temp, to, 1);
        __ beq(CCR0, L_skip_align1);
        __ stb(value, 0, to);
        __ addi(to, to, 1);
        __ addi(count, count, -1);
        __ bind(L_skip_align1);
      }
      // Two bytes misalignment happens only for byte and short (char) arrays.
      __ andi_(temp, to, 2);
      __ beq(CCR0, L_skip_align2);
      __ sth(value, 0, to);
      __ addi(to, to, 2);
      __ addi(count, count, -(1 << (shift - 1)));
      __ bind(L_skip_align2);
    }

    if (!aligned) {
      // Align to 8 bytes, we know we are 4 byte aligned to start.
      __ andi_(temp, to, 7);
      __ beq(CCR0, L_fill_32_bytes);
      __ stw(value, 0, to);
      __ addi(to, to, 4);
      __ addi(count, count, -(1 << shift));
      __ bind(L_fill_32_bytes);
    }

    __ li(temp, 8<<shift);          // Prepare for 32 byte loop.
    // Clone bytes int->long as above.
    __ rldimi(value, value, 32, 0); // 32 bit -> 64 bit

    Label L_check_fill_8_bytes;
    // Fill 32-byte chunks.
    __ subf_(count, temp, count);
    __ blt(CCR0, L_check_fill_8_bytes);

    Label L_fill_32_bytes_loop;
    __ align(32);
    __ bind(L_fill_32_bytes_loop);

    __ std(value, 0, to);
    __ std(value, 8, to);
    __ subf_(count, temp, count); // Update count.
    __ std(value, 16, to);
    __ std(value, 24, to);

    __ addi(to, to, 32);
    __ bge(CCR0, L_fill_32_bytes_loop);

    __ bind(L_check_fill_8_bytes);
    __ add_(count, temp, count);
    __ beq(CCR0, L_exit);
    __ addic_(count, count, -(2 << shift));
    __ blt(CCR0, L_fill_4_bytes);

    //
    // Length is too short, just fill 8 bytes at a time.
    //
    Label L_fill_8_bytes_loop;
    __ bind(L_fill_8_bytes_loop);
    __ std(value, 0, to);
    __ addic_(count, count, -(2 << shift));
    __ addi(to, to, 8);
    __ bge(CCR0, L_fill_8_bytes_loop);

    // Fill trailing 4 bytes.
    __ bind(L_fill_4_bytes);
    __ andi_(temp, count, 1<<shift);
    __ beq(CCR0, L_fill_2_bytes);

    __ stw(value, 0, to);
    if (t == T_BYTE || t == T_SHORT) {
      __ addi(to, to, 4);
      // Fill trailing 2 bytes.
      __ bind(L_fill_2_bytes);
      __ andi_(temp, count, 1<<(shift-1));
      __ beq(CCR0, L_fill_byte);
      __ sth(value, 0, to);
      if (t == T_BYTE) {
        __ addi(to, to, 2);
        // Fill trailing byte.
        __ bind(L_fill_byte);
        __ andi_(count, count, 1);
        __ beq(CCR0, L_exit);
        __ stb(value, 0, to);
      } else {
        __ bind(L_fill_byte);
      }
    } else {
      __ bind(L_fill_2_bytes);
    }
    __ bind(L_exit);
    __ blr();

    // Handle copies less than 8 bytes. Int is handled elsewhere.
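    // Element-wise fill for short lengths: the low bits of count select how
    // many trailing elements (1, 2 and/or 4) still have to be stored.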
    if (t == T_BYTE) {
      __ bind(L_fill_elements);
      Label L_fill_2, L_fill_4;
      __ andi_(temp, count, 1);
      __ beq(CCR0, L_fill_2);
      __ stb(value, 0, to);
      __ addi(to, to, 1);
      __ bind(L_fill_2);
      __ andi_(temp, count, 2);
      __ beq(CCR0, L_fill_4);
      __ stb(value, 0, to);
      __ stb(value, 1, to);
      __ addi(to, to, 2);
      __ bind(L_fill_4);
      __ andi_(temp, count, 4);
      __ beq(CCR0, L_exit);
      __ stb(value, 0, to);
      __ stb(value, 1, to);
      __ stb(value, 2, to);
      __ stb(value, 3, to);
      __ blr();
    }

    if (t == T_SHORT) {
      Label L_fill_2;
      __ bind(L_fill_elements);
      __ andi_(temp, count, 1);
      __ beq(CCR0, L_fill_2);
      __ sth(value, 0, to);
      __ addi(to, to, 2);
      __ bind(L_fill_2);
      __ andi_(temp, count, 2);
      __ beq(CCR0, L_exit);
      __ sth(value, 0, to);
      __ sth(value, 2, to);
      __ blr();
    }
    return start;
  }

  inline void assert_positive_int(Register count) {
#ifdef ASSERT
    __ srdi_(R0, count, 31);
    __ asm_assert_eq("missing zero extend", 0xAFFE);
#endif
  }

  // Generate overlap test for array copy stubs.
  //
  // Input:
  //   R3_ARG1 - from
  //   R4_ARG2 - to
  //   R5_ARG3 - element count
  //
  void array_overlap_test(address no_overlap_target, int log2_elem_size) {
    Register tmp1 = R6_ARG4;
    Register tmp2 = R7_ARG5;

    assert_positive_int(R5_ARG3);

    __ subf(tmp1, R3_ARG1, R4_ARG2);        // distance in bytes
    __ sldi(tmp2, R5_ARG3, log2_elem_size); // size in bytes
    __ cmpld(CCR0, R3_ARG1, R4_ARG2);       // Use unsigned comparison!
    __ cmpld(CCR1, tmp1, tmp2);
    __ crnand(CCR0, Assembler::less, CCR1, Assembler::less);
    // Overlaps if src is before dst and the distance is smaller than the size.
    // Branch to forward copy routine otherwise (within range of 32kB).
    __ bc(Assembler::bcondCRbiIs1, Assembler::bi0(CCR0, Assembler::less), no_overlap_target);

    // need to copy backwards
  }

  // This is the common error exit stub for UnsafeCopyMemory.
  address generate_unsafecopy_common_error_exit() {
    address start_pc = __ pc();
    Register tmp1 = R6_ARG4;
    // The copy stub may have changed the DSCR value; reset it.
    if (VM_Version::has_mfdscr()) {
      __ load_const_optimized(tmp1, VM_Version::_dscr_val);
      __ mtdscr(tmp1);
    }
    __ li(R3_RET, 0); // return 0
    __ blr();
    return start_pc;
  }

  // The guideline in the implementations of generate_disjoint_xxx_copy
  // (xxx=byte,short,int,long,oop) is to copy as many elements as possible with
  // single instructions, but to avoid alignment interrupts (see subsequent
  // comment). Furthermore, we try to minimize misaligned accesses, even
  // though they cause no alignment interrupt.
  //
  // In Big-Endian mode, the PowerPC architecture requires implementations to
  // handle automatically misaligned integer halfword and word accesses,
  // word-aligned integer doubleword accesses, and word-aligned floating-point
  // accesses. Other accesses may or may not generate an Alignment interrupt
  // depending on the implementation.
  // Alignment interrupt handling may require on the order of hundreds of cycles,
  // so every effort should be made to avoid misaligned memory values.
  //
  //
  // Generate stub for disjoint byte copy. If "aligned" is true, the
  // "from" and "to" addresses are assumed to be heapword aligned.
  //
  // Arguments for generated stub:
  //   from:  R3_ARG1
  //   to:    R4_ARG2
  //   count: R5_ARG3 treated as signed
  //
  address generate_disjoint_byte_copy(bool aligned, const char * name) {
    StubCodeMark mark(this, "StubRoutines", name);
    address start = __ function_entry();
    assert_positive_int(R5_ARG3);

    Register tmp1 = R6_ARG4;
    Register tmp2 = R7_ARG5;
    Register tmp3 = R8_ARG6;
    Register tmp4 = R9_ARG7;

    VectorSRegister tmp_vsr1 = VSR1;
    VectorSRegister tmp_vsr2 = VSR2;

    Label l_1, l_2, l_3, l_4, l_5, l_6, l_7, l_8, l_9, l_10;
    {
      // UnsafeCopyMemory page error: continue at UnsafeCopyMemory common_error_exit
      UnsafeCopyMemoryMark ucmm(this, !aligned, false);

      // Don't try anything fancy if arrays don't have many elements.
      __ li(tmp3, 0);
      __ cmpwi(CCR0, R5_ARG3, 17);
      __ ble(CCR0, l_6); // copy 4 at a time

      if (!aligned) {
        __ xorr(tmp1, R3_ARG1, R4_ARG2);
        __ andi_(tmp1, tmp1, 3);
        __ bne(CCR0, l_6); // If arrays don't have the same alignment mod 4, do 4 element copy.

        // Copy elements if necessary to align to 4 bytes.
        __ neg(tmp1, R3_ARG1); // Compute distance to alignment boundary.
        __ andi_(tmp1, tmp1, 3);
        __ beq(CCR0, l_2);

        __ subf(R5_ARG3, tmp1, R5_ARG3);
        __ bind(l_9);
        __ lbz(tmp2, 0, R3_ARG1);
        __ addic_(tmp1, tmp1, -1);
        __ stb(tmp2, 0, R4_ARG2);
        __ addi(R3_ARG1, R3_ARG1, 1);
        __ addi(R4_ARG2, R4_ARG2, 1);
        __ bne(CCR0, l_9);

        __ bind(l_2);
      }

      // copy 8 elements at a time
      __ xorr(tmp2, R3_ARG1, R4_ARG2); // skip if src & dest have differing alignment mod 8
      __ andi_(tmp1, tmp2, 7);
      __ bne(CCR0, l_7); // not same alignment -> to or from is aligned -> copy 8

      // copy a 2-element word if necessary to align to 8 bytes
      __ andi_(R0, R3_ARG1, 7);
      __ beq(CCR0, l_7);

      __ lwzx(tmp2, R3_ARG1, tmp3);
      __ addi(R5_ARG3, R5_ARG3, -4);
      __ stwx(tmp2, R4_ARG2, tmp3);
      { // FasterArrayCopy
        __ addi(R3_ARG1, R3_ARG1, 4);
        __ addi(R4_ARG2, R4_ARG2, 4);
      }
      __ bind(l_7);

      { // FasterArrayCopy
        __ cmpwi(CCR0, R5_ARG3, 31);
        __ ble(CCR0, l_6); // copy 2 at a time if less than 32 elements remain

        __ srdi(tmp1, R5_ARG3, 5);
        __ andi_(R5_ARG3, R5_ARG3, 31);
        __ mtctr(tmp1);

        if (!VM_Version::has_vsx()) {

          __ bind(l_8);
          // Use unrolled version for mass copying (copy 32 elements a time)
          // Load feeding store gets zero latency on Power6, however not on Power5.
          // Therefore, the following sequence is made for the good of both.
          __ ld(tmp1, 0, R3_ARG1);
          __ ld(tmp2, 8, R3_ARG1);
          __ ld(tmp3, 16, R3_ARG1);
          __ ld(tmp4, 24, R3_ARG1);
          __ std(tmp1, 0, R4_ARG2);
          __ std(tmp2, 8, R4_ARG2);
          __ std(tmp3, 16, R4_ARG2);
          __ std(tmp4, 24, R4_ARG2);
          __ addi(R3_ARG1, R3_ARG1, 32);
          __ addi(R4_ARG2, R4_ARG2, 32);
          __ bdnz(l_8);

        } else { // Processor supports VSX, so use it to mass copy.

          // Prefetch the data into the L2 cache.
          __ dcbt(R3_ARG1, 0);

          // If supported set DSCR pre-fetch to deepest.
          if (VM_Version::has_mfdscr()) {
            __ load_const_optimized(tmp2, VM_Version::_dscr_val | 7);
            __ mtdscr(tmp2);
          }

          __ li(tmp1, 16);

          // Backbranch target aligned to 32-byte. Not 16-byte align as
          // loop contains < 8 instructions that fit inside a single
          // i-cache sector.
          __ align(32);

          __ bind(l_10);
          // Use loop with VSX load/store instructions to
          // copy 32 elements a time.
          __ lxvd2x(tmp_vsr1, R3_ARG1);        // Load src
          __ stxvd2x(tmp_vsr1, R4_ARG2);       // Store to dst
          __ lxvd2x(tmp_vsr2, tmp1, R3_ARG1);  // Load src + 16
          __ stxvd2x(tmp_vsr2, tmp1, R4_ARG2); // Store to dst + 16
          __ addi(R3_ARG1, R3_ARG1, 32);       // Update src+=32
          __ addi(R4_ARG2, R4_ARG2, 32);       // Update dst+=32
          __ bdnz(l_10);                       // Dec CTR and loop if not zero.

          // Restore DSCR pre-fetch value.
          if (VM_Version::has_mfdscr()) {
            __ load_const_optimized(tmp2, VM_Version::_dscr_val);
            __ mtdscr(tmp2);
          }

        } // VSX
      } // FasterArrayCopy

      __ bind(l_6);

      // copy 4 elements at a time
      __ cmpwi(CCR0, R5_ARG3, 4);
      __ blt(CCR0, l_1);
      __ srdi(tmp1, R5_ARG3, 2);
      __ mtctr(tmp1); // is > 0
      __ andi_(R5_ARG3, R5_ARG3, 3);

      { // FasterArrayCopy
        __ addi(R3_ARG1, R3_ARG1, -4);
        __ addi(R4_ARG2, R4_ARG2, -4);
        __ bind(l_3);
        __ lwzu(tmp2, 4, R3_ARG1);
        __ stwu(tmp2, 4, R4_ARG2);
        __ bdnz(l_3);
        __ addi(R3_ARG1, R3_ARG1, 4);
        __ addi(R4_ARG2, R4_ARG2, 4);
      }

      // do single element copy
      __ bind(l_1);
      __ cmpwi(CCR0, R5_ARG3, 0);
      __ beq(CCR0, l_4);

      { // FasterArrayCopy
        __ mtctr(R5_ARG3);
        __ addi(R3_ARG1, R3_ARG1, -1);
        __ addi(R4_ARG2, R4_ARG2, -1);

        __ bind(l_5);
        __ lbzu(tmp2, 1, R3_ARG1);
        __ stbu(tmp2, 1, R4_ARG2);
        __ bdnz(l_5);
      }
    }

    __ bind(l_4);
    __ li(R3_RET, 0); // return 0
    __ blr();

    return start;
  }

  // Generate stub for conjoint byte copy. If "aligned" is true, the
  // "from" and "to" addresses are assumed to be heapword aligned.
  //
  // Arguments for generated stub:
  //   from:  R3_ARG1
  //   to:    R4_ARG2
  //   count: R5_ARG3 treated as signed
  //
  address generate_conjoint_byte_copy(bool aligned, const char * name) {
    StubCodeMark mark(this, "StubRoutines", name);
    address start = __ function_entry();
    assert_positive_int(R5_ARG3);

    Register tmp1 = R6_ARG4;
    Register tmp2 = R7_ARG5;
    Register tmp3 = R8_ARG6;

    address nooverlap_target = aligned ?
      STUB_ENTRY(arrayof_jbyte_disjoint_arraycopy) :
      STUB_ENTRY(jbyte_disjoint_arraycopy);

    array_overlap_test(nooverlap_target, 0);
    // Do reverse copy. We assume the case of actual overlap is rare enough
    // that we don't have to optimize it.
    Label l_1, l_2;
    {
      // UnsafeCopyMemory page error: continue at UnsafeCopyMemory common_error_exit
      UnsafeCopyMemoryMark ucmm(this, !aligned, false);
      __ b(l_2);
      __ bind(l_1);
      __ stbx(tmp1, R4_ARG2, R5_ARG3);
      __ bind(l_2);
      __ addic_(R5_ARG3, R5_ARG3, -1);
      __ lbzx(tmp1, R3_ARG1, R5_ARG3);
      __ bge(CCR0, l_1);
    }
    __ li(R3_RET, 0); // return 0
    __ blr();

    return start;
  }

  // Generate stub for disjoint short copy. If "aligned" is true, the
  // "from" and "to" addresses are assumed to be heapword aligned.
  //
  // Arguments for generated stub:
  //   from:      R3_ARG1
  //   to:        R4_ARG2
  //   elm.count: R5_ARG3 treated as signed
  //
  // Strategy for aligned==true:
  //
  //  If length <= 9:
  //     1. copy 2 elements at a time (l_6)
  //     2. copy last element if original element count was odd (l_1)
  //
  //  If length > 9:
  //     1. copy 4 elements at a time until less than 4 elements are left (l_7)
  //     2. copy 2 elements at a time until less than 2 elements are left (l_6)
  //     3. copy last element if one was left in step 2. (l_1)
  //
  //
  // Strategy for aligned==false:
  //
  //  If length <= 9: same as aligned==true case, but NOTE: load/stores
  //  can be unaligned (see comment below)
  //
  //  If length > 9:
  //     1. continue with step 6. if the alignment of from and to mod 4
  //        is different.
  //     2. align from and to to 4 bytes by copying 1 element if necessary
  //     3. at l_2 from and to are 4 byte aligned; continue with
  //        5. if they cannot be aligned to 8 bytes because they have
  //        got different alignment mod 8.
  //     4. at this point we know that both, from and to, have the same
  //        alignment mod 8, now copy one element if necessary to get
  //        8 byte alignment of from and to.
  //     5. copy 4 elements at a time until less than 4 elements are
  //        left; depending on step 3. all load/stores are aligned or
  //        either all loads or all stores are unaligned.
  //     6. copy 2 elements at a time until less than 2 elements are
  //        left (l_6); arriving here from step 1., there is a chance
  //        that all accesses are unaligned.
  //     7. copy last element if one was left in step 6. (l_1)
  //
  //  There are unaligned data accesses using integer load/store
  //  instructions in this stub. POWER allows such accesses.
  //
  //  According to the manuals (PowerISA_V2.06_PUBLIC, Book II,
  //  Chapter 2: Effect of Operand Placement on Performance) unaligned
  //  integer load/stores have good performance. Only unaligned
  //  floating point load/stores can have poor performance.
  //
  //  TODO:
  //
  //  1. check if aligning the backbranch target of loops is beneficial
  //
  address generate_disjoint_short_copy(bool aligned, const char * name) {
    StubCodeMark mark(this, "StubRoutines", name);

    Register tmp1 = R6_ARG4;
    Register tmp2 = R7_ARG5;
    Register tmp3 = R8_ARG6;
    Register tmp4 = R9_ARG7;

    VectorSRegister tmp_vsr1 = VSR1;
    VectorSRegister tmp_vsr2 = VSR2;

    address start = __ function_entry();
    assert_positive_int(R5_ARG3);

    Label l_1, l_2, l_3, l_4, l_5, l_6, l_7, l_8, l_9;
    {
      // UnsafeCopyMemory page error: continue at UnsafeCopyMemory common_error_exit
      UnsafeCopyMemoryMark ucmm(this, !aligned, false);
      // don't try anything fancy if arrays don't have many elements
      __ li(tmp3, 0);
      __ cmpwi(CCR0, R5_ARG3, 9);
      __ ble(CCR0, l_6); // copy 2 at a time

      if (!aligned) {
        __ xorr(tmp1, R3_ARG1, R4_ARG2);
        __ andi_(tmp1, tmp1, 3);
        __ bne(CCR0, l_6); // if arrays don't have the same alignment mod 4, do 2 element copy

        // At this point it is guaranteed that both, from and to have the same alignment mod 4.

        // Copy 1 element if necessary to align to 4 bytes.
        __ andi_(tmp1, R3_ARG1, 3);
        __ beq(CCR0, l_2);

        __ lhz(tmp2, 0, R3_ARG1);
        __ addi(R3_ARG1, R3_ARG1, 2);
        __ sth(tmp2, 0, R4_ARG2);
        __ addi(R4_ARG2, R4_ARG2, 2);
        __ addi(R5_ARG3, R5_ARG3, -1);
        __ bind(l_2);

        // At this point the positions of both, from and to, are at least 4 byte aligned.

        // Copy 4 elements at a time.
        // Align to 8 bytes, but only if both, from and to, have same alignment mod 8.
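        // tmp2 = from ^ to; its low three bits are zero iff both pointers have
        // the same alignment mod 8 and can therefore be 8-byte aligned together.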
        __ xorr(tmp2, R3_ARG1, R4_ARG2);
        __ andi_(tmp1, tmp2, 7);
        __ bne(CCR0, l_7); // not same alignment mod 8 -> copy 4, either from or to will be unaligned

        // Copy a 2-element word if necessary to align to 8 bytes.
        __ andi_(R0, R3_ARG1, 7);
        __ beq(CCR0, l_7);

        __ lwzx(tmp2, R3_ARG1, tmp3);
        __ addi(R5_ARG3, R5_ARG3, -2);
        __ stwx(tmp2, R4_ARG2, tmp3);
        { // FasterArrayCopy
          __ addi(R3_ARG1, R3_ARG1, 4);
          __ addi(R4_ARG2, R4_ARG2, 4);
        }
      }

      __ bind(l_7);

      // Copy 4 elements at a time; either the loads or the stores can
      // be unaligned if aligned == false.

      { // FasterArrayCopy
        __ cmpwi(CCR0, R5_ARG3, 15);
        __ ble(CCR0, l_6); // copy 2 at a time if less than 16 elements remain

        __ srdi(tmp1, R5_ARG3, 4);
        __ andi_(R5_ARG3, R5_ARG3, 15);
        __ mtctr(tmp1);

        if (!VM_Version::has_vsx()) {

          __ bind(l_8);
          // Use unrolled version for mass copying (copy 16 elements a time).
          // Load feeding store gets zero latency on Power6, however not on Power5.
          // Therefore, the following sequence is made for the good of both.
          __ ld(tmp1, 0, R3_ARG1);
          __ ld(tmp2, 8, R3_ARG1);
          __ ld(tmp3, 16, R3_ARG1);
          __ ld(tmp4, 24, R3_ARG1);
          __ std(tmp1, 0, R4_ARG2);
          __ std(tmp2, 8, R4_ARG2);
          __ std(tmp3, 16, R4_ARG2);
          __ std(tmp4, 24, R4_ARG2);
          __ addi(R3_ARG1, R3_ARG1, 32);
          __ addi(R4_ARG2, R4_ARG2, 32);
          __ bdnz(l_8);

        } else { // Processor supports VSX, so use it to mass copy.

          // Prefetch src data into L2 cache.
          __ dcbt(R3_ARG1, 0);

          // If supported set DSCR pre-fetch to deepest.
          if (VM_Version::has_mfdscr()) {
            __ load_const_optimized(tmp2, VM_Version::_dscr_val | 7);
            __ mtdscr(tmp2);
          }
          __ li(tmp1, 16);

          // Backbranch target aligned to 32-byte. It's not aligned 16-byte
          // as loop contains < 8 instructions that fit inside a single
          // i-cache sector.
          __ align(32);

          __ bind(l_9);
          // Use loop with VSX load/store instructions to
          // copy 16 elements a time.
          __ lxvd2x(tmp_vsr1, R3_ARG1);        // Load from src.
          __ stxvd2x(tmp_vsr1, R4_ARG2);       // Store to dst.
          __ lxvd2x(tmp_vsr2, R3_ARG1, tmp1);  // Load from src + 16.
          __ stxvd2x(tmp_vsr2, R4_ARG2, tmp1); // Store to dst + 16.
          __ addi(R3_ARG1, R3_ARG1, 32);       // Update src+=32.
          __ addi(R4_ARG2, R4_ARG2, 32);       // Update dst+=32.
          __ bdnz(l_9);                        // Dec CTR and loop if not zero.

          // Restore DSCR pre-fetch value.
          if (VM_Version::has_mfdscr()) {
            __ load_const_optimized(tmp2, VM_Version::_dscr_val);
            __ mtdscr(tmp2);
          }

        }
      } // FasterArrayCopy
      __ bind(l_6);

      // copy 2 elements at a time
      { // FasterArrayCopy
        __ cmpwi(CCR0, R5_ARG3, 2);
        __ blt(CCR0, l_1);
        __ srdi(tmp1, R5_ARG3, 1);
        __ andi_(R5_ARG3, R5_ARG3, 1);

        __ addi(R3_ARG1, R3_ARG1, -4);
        __ addi(R4_ARG2, R4_ARG2, -4);
        __ mtctr(tmp1);

        __ bind(l_3);
        __ lwzu(tmp2, 4, R3_ARG1);
        __ stwu(tmp2, 4, R4_ARG2);
        __ bdnz(l_3);

        __ addi(R3_ARG1, R3_ARG1, 4);
        __ addi(R4_ARG2, R4_ARG2, 4);
      }

      // do single element copy
      __ bind(l_1);
      __ cmpwi(CCR0, R5_ARG3, 0);
      __ beq(CCR0, l_4);

      { // FasterArrayCopy
        __ mtctr(R5_ARG3);
        __ addi(R3_ARG1, R3_ARG1, -2);
        __ addi(R4_ARG2, R4_ARG2, -2);

        __ bind(l_5);
        __ lhzu(tmp2, 2, R3_ARG1);
        __ sthu(tmp2, 2, R4_ARG2);
        __ bdnz(l_5);
      }
    }

    __ bind(l_4);
    __ li(R3_RET, 0); // return 0
    __ blr();

    return start;
  }

  // Generate stub for conjoint short copy. If "aligned" is true, the
  // "from" and "to" addresses are assumed to be heapword aligned.
  //
  // Arguments for generated stub:
  //   from:  R3_ARG1
  //   to:    R4_ARG2
  //   count: R5_ARG3 treated as signed
  //
  address generate_conjoint_short_copy(bool aligned, const char * name) {
    StubCodeMark mark(this, "StubRoutines", name);
    address start = __ function_entry();
    assert_positive_int(R5_ARG3);

    Register tmp1 = R6_ARG4;
    Register tmp2 = R7_ARG5;
    Register tmp3 = R8_ARG6;

    address nooverlap_target = aligned ?
      STUB_ENTRY(arrayof_jshort_disjoint_arraycopy) :
      STUB_ENTRY(jshort_disjoint_arraycopy);

    array_overlap_test(nooverlap_target, 1);

    Label l_1, l_2;
    {
      // UnsafeCopyMemory page error: continue at UnsafeCopyMemory common_error_exit
      UnsafeCopyMemoryMark ucmm(this, !aligned, false);
      __ sldi(tmp1, R5_ARG3, 1);
      __ b(l_2);
      __ bind(l_1);
      __ sthx(tmp2, R4_ARG2, tmp1);
      __ bind(l_2);
      __ addic_(tmp1, tmp1, -2);
      __ lhzx(tmp2, R3_ARG1, tmp1);
      __ bge(CCR0, l_1);
    }
    __ li(R3_RET, 0); // return 0
    __ blr();

    return start;
  }

  // Generate core code for disjoint int copy (and oop copy on 32-bit). If "aligned"
  // is true, the "from" and "to" addresses are assumed to be heapword aligned.
  //
  // Arguments:
  //   from:  R3_ARG1
  //   to:    R4_ARG2
  //   count: R5_ARG3 treated as signed
  //
  void generate_disjoint_int_copy_core(bool aligned) {
    Register tmp1 = R6_ARG4;
    Register tmp2 = R7_ARG5;
    Register tmp3 = R8_ARG6;
    Register tmp4 = R0;

    VectorSRegister tmp_vsr1 = VSR1;
    VectorSRegister tmp_vsr2 = VSR2;

    Label l_1, l_2, l_3, l_4, l_5, l_6, l_7;

    // for short arrays, just do single element copy
    __ li(tmp3, 0);
    __ cmpwi(CCR0, R5_ARG3, 5);
    __ ble(CCR0, l_2);

    if (!aligned) {
      // check if arrays have same alignment mod 8.
      __ xorr(tmp1, R3_ARG1, R4_ARG2);
      __ andi_(R0, tmp1, 7);
      // Not the same alignment, but ld and std just need to be 4 byte aligned.
      __ bne(CCR0, l_4); // to OR from is 8 byte aligned -> copy 2 at a time

      // copy 1 element to align to and from on an 8 byte boundary
      __ andi_(R0, R3_ARG1, 7);
      __ beq(CCR0, l_4);

      __ lwzx(tmp2, R3_ARG1, tmp3);
      __ addi(R5_ARG3, R5_ARG3, -1);
      __ stwx(tmp2, R4_ARG2, tmp3);
      { // FasterArrayCopy
        __ addi(R3_ARG1, R3_ARG1, 4);
        __ addi(R4_ARG2, R4_ARG2, 4);
      }
      __ bind(l_4);
    }

    { // FasterArrayCopy
      __ cmpwi(CCR0, R5_ARG3, 7);
      __ ble(CCR0, l_2); // copy 1 at a time if less than 8 elements remain

      __ srdi(tmp1, R5_ARG3, 3);
      __ andi_(R5_ARG3, R5_ARG3, 7);
      __ mtctr(tmp1);

      if (!VM_Version::has_vsx()) {

        __ bind(l_6);
        // Use unrolled version for mass copying (copy 8 elements a time).
        // Load feeding store gets zero latency on Power6, however not on Power5.
        // Therefore, the following sequence is made for the good of both.
        __ ld(tmp1, 0, R3_ARG1);
        __ ld(tmp2, 8, R3_ARG1);
        __ ld(tmp3, 16, R3_ARG1);
        __ ld(tmp4, 24, R3_ARG1);
        __ std(tmp1, 0, R4_ARG2);
        __ std(tmp2, 8, R4_ARG2);
        __ std(tmp3, 16, R4_ARG2);
        __ std(tmp4, 24, R4_ARG2);
        __ addi(R3_ARG1, R3_ARG1, 32);
        __ addi(R4_ARG2, R4_ARG2, 32);
        __ bdnz(l_6);

      } else { // Processor supports VSX, so use it to mass copy.

        // Prefetch the data into the L2 cache.
        __ dcbt(R3_ARG1, 0);

        // If supported set DSCR pre-fetch to deepest.
        if (VM_Version::has_mfdscr()) {
          __ load_const_optimized(tmp2, VM_Version::_dscr_val | 7);
          __ mtdscr(tmp2);
        }

        __ li(tmp1, 16);

        // Backbranch target aligned to 32-byte. Not 16-byte align as
        // loop contains < 8 instructions that fit inside a single
        // i-cache sector.
        __ align(32);

        __ bind(l_7);
        // Use loop with VSX load/store instructions to
        // copy 8 elements a time.
        __ lxvd2x(tmp_vsr1, R3_ARG1);        // Load src
        __ stxvd2x(tmp_vsr1, R4_ARG2);       // Store to dst
        __ lxvd2x(tmp_vsr2, tmp1, R3_ARG1);  // Load src + 16
        __ stxvd2x(tmp_vsr2, tmp1, R4_ARG2); // Store to dst + 16
        __ addi(R3_ARG1, R3_ARG1, 32);       // Update src+=32
        __ addi(R4_ARG2, R4_ARG2, 32);       // Update dst+=32
        __ bdnz(l_7);                        // Dec CTR and loop if not zero.

        // Restore DSCR pre-fetch value.
        if (VM_Version::has_mfdscr()) {
          __ load_const_optimized(tmp2, VM_Version::_dscr_val);
          __ mtdscr(tmp2);
        }

      } // VSX
    } // FasterArrayCopy

    // copy 1 element at a time
    __ bind(l_2);
    __ cmpwi(CCR0, R5_ARG3, 0);
    __ beq(CCR0, l_1);

    { // FasterArrayCopy
      __ mtctr(R5_ARG3);
      __ addi(R3_ARG1, R3_ARG1, -4);
      __ addi(R4_ARG2, R4_ARG2, -4);

      __ bind(l_3);
      __ lwzu(tmp2, 4, R3_ARG1);
      __ stwu(tmp2, 4, R4_ARG2);
      __ bdnz(l_3);
    }

    __ bind(l_1);
    return;
  }

  // Generate stub for disjoint int copy. If "aligned" is true, the
  // "from" and "to" addresses are assumed to be heapword aligned.
  //
  // Arguments for generated stub:
  //   from:  R3_ARG1
  //   to:    R4_ARG2
  //   count: R5_ARG3 treated as signed
  //
  address generate_disjoint_int_copy(bool aligned, const char * name) {
    StubCodeMark mark(this, "StubRoutines", name);
    address start = __ function_entry();
    assert_positive_int(R5_ARG3);
    {
      // UnsafeCopyMemory page error: continue at UnsafeCopyMemory common_error_exit
      UnsafeCopyMemoryMark ucmm(this, !aligned, false);
      generate_disjoint_int_copy_core(aligned);
    }
    __ li(R3_RET, 0); // return 0
    __ blr();
    return start;
  }

  // Generate core code for conjoint int copy (and oop copy on
  // 32-bit). If "aligned" is true, the "from" and "to" addresses
  // are assumed to be heapword aligned.
  //
  // Arguments:
  //   from:  R3_ARG1
  //   to:    R4_ARG2
  //   count: R5_ARG3 treated as signed
  //
  void generate_conjoint_int_copy_core(bool aligned) {
    // Do reverse copy. We assume the case of actual overlap is rare enough
    // that we don't have to optimize it.

    Label l_1, l_2, l_3, l_4, l_5, l_6, l_7;

    Register tmp1 = R6_ARG4;
    Register tmp2 = R7_ARG5;
    Register tmp3 = R8_ARG6;
    Register tmp4 = R0;

    VectorSRegister tmp_vsr1 = VSR1;
    VectorSRegister tmp_vsr2 = VSR2;

    { // FasterArrayCopy
      __ cmpwi(CCR0, R5_ARG3, 0);
      __ beq(CCR0, l_6);

      __ sldi(R5_ARG3, R5_ARG3, 2);
      __ add(R3_ARG1, R3_ARG1, R5_ARG3);
      __ add(R4_ARG2, R4_ARG2, R5_ARG3);
      __ srdi(R5_ARG3, R5_ARG3, 2);

      if (!aligned) {
        // check if arrays have same alignment mod 8.
        __ xorr(tmp1, R3_ARG1, R4_ARG2);
        __ andi_(R0, tmp1, 7);
        // Not the same alignment, but ld and std just need to be 4 byte aligned.
        __ bne(CCR0, l_7); // to OR from is 8 byte aligned -> copy 2 at a time

        // copy 1 element to align to and from on an 8 byte boundary
        __ andi_(R0, R3_ARG1, 7);
        __ beq(CCR0, l_7);

        __ addi(R3_ARG1, R3_ARG1, -4);
        __ addi(R4_ARG2, R4_ARG2, -4);
        __ addi(R5_ARG3, R5_ARG3, -1);
        __ lwzx(tmp2, R3_ARG1);
        __ stwx(tmp2, R4_ARG2);
        __ bind(l_7);
      }

      __ cmpwi(CCR0, R5_ARG3, 7);
      __ ble(CCR0, l_5); // copy 1 at a time if less than 8 elements remain

      __ srdi(tmp1, R5_ARG3, 3);
      __ andi(R5_ARG3, R5_ARG3, 7);
      __ mtctr(tmp1);

      if (!VM_Version::has_vsx()) {
        __ bind(l_4);
        // Use unrolled version for mass copying (copy 8 elements a time).
        // Load feeding store gets zero latency on Power6, however not on Power5.
        // Therefore, the following sequence is made for the good of both.
        __ addi(R3_ARG1, R3_ARG1, -32);
        __ addi(R4_ARG2, R4_ARG2, -32);
        __ ld(tmp4, 24, R3_ARG1);
        __ ld(tmp3, 16, R3_ARG1);
        __ ld(tmp2, 8, R3_ARG1);
        __ ld(tmp1, 0, R3_ARG1);
        __ std(tmp4, 24, R4_ARG2);
        __ std(tmp3, 16, R4_ARG2);
        __ std(tmp2, 8, R4_ARG2);
        __ std(tmp1, 0, R4_ARG2);
        __ bdnz(l_4);
      } else { // Processor supports VSX, so use it to mass copy.
        // Prefetch the data into the L2 cache.
        __ dcbt(R3_ARG1, 0);

        // If supported set DSCR pre-fetch to deepest.
        if (VM_Version::has_mfdscr()) {
          __ load_const_optimized(tmp2, VM_Version::_dscr_val | 7);
          __ mtdscr(tmp2);
        }

        __ li(tmp1, 16);

        // Backbranch target aligned to 32-byte. Not 16-byte align as
        // loop contains < 8 instructions that fit inside a single
        // i-cache sector.
        __ align(32);

        __ bind(l_4);
        // Use loop with VSX load/store instructions to
        // copy 8 elements a time.
        __ addi(R3_ARG1, R3_ARG1, -32);      // Update src-=32
        __ addi(R4_ARG2, R4_ARG2, -32);      // Update dst-=32
        __ lxvd2x(tmp_vsr2, tmp1, R3_ARG1);  // Load src+16
        __ lxvd2x(tmp_vsr1, R3_ARG1);        // Load src
        __ stxvd2x(tmp_vsr2, tmp1, R4_ARG2); // Store to dst+16
        __ stxvd2x(tmp_vsr1, R4_ARG2);       // Store to dst
        __ bdnz(l_4);

        // Restore DSCR pre-fetch value.
        if (VM_Version::has_mfdscr()) {
          __ load_const_optimized(tmp2, VM_Version::_dscr_val);
          __ mtdscr(tmp2);
        }
      }

      __ cmpwi(CCR0, R5_ARG3, 0);
      __ beq(CCR0, l_6);

      __ bind(l_5);
      __ mtctr(R5_ARG3);
      __ bind(l_3);
      __ lwz(R0, -4, R3_ARG1);
      __ stw(R0, -4, R4_ARG2);
      __ addi(R3_ARG1, R3_ARG1, -4);
      __ addi(R4_ARG2, R4_ARG2, -4);
      __ bdnz(l_3);

      __ bind(l_6);
    }
  }

  // Generate stub for conjoint int copy. If "aligned" is true, the
  // "from" and "to" addresses are assumed to be heapword aligned.
  //
  // Arguments for generated stub:
  //   from:  R3_ARG1
  //   to:    R4_ARG2
  //   count: R5_ARG3 treated as signed
  //
  address generate_conjoint_int_copy(bool aligned, const char * name) {
    StubCodeMark mark(this, "StubRoutines", name);
    address start = __ function_entry();
    assert_positive_int(R5_ARG3);
    address nooverlap_target = aligned ?
      STUB_ENTRY(arrayof_jint_disjoint_arraycopy) :
      STUB_ENTRY(jint_disjoint_arraycopy);

    array_overlap_test(nooverlap_target, 2);
    {
      // UnsafeCopyMemory page error: continue at UnsafeCopyMemory common_error_exit
      UnsafeCopyMemoryMark ucmm(this, !aligned, false);
      generate_conjoint_int_copy_core(aligned);
    }

    __ li(R3_RET, 0); // return 0
    __ blr();

    return start;
  }

  // Generate core code for disjoint long copy (and oop copy on
  // 64-bit). If "aligned" is true, the "from" and "to" addresses
  // are assumed to be heapword aligned.
  //
  // Arguments:
  //   from:  R3_ARG1
  //   to:    R4_ARG2
  //   count: R5_ARG3 treated as signed
  //
  void generate_disjoint_long_copy_core(bool aligned) {
    Register tmp1 = R6_ARG4;
    Register tmp2 = R7_ARG5;
    Register tmp3 = R8_ARG6;
    Register tmp4 = R0;

    Label l_1, l_2, l_3, l_4, l_5;

    VectorSRegister tmp_vsr1 = VSR1;
    VectorSRegister tmp_vsr2 = VSR2;

    { // FasterArrayCopy
      __ cmpwi(CCR0, R5_ARG3, 3);
      __ ble(CCR0, l_3); // copy 1 at a time if less than 4 elements remain

      __ srdi(tmp1, R5_ARG3, 2);
      __ andi_(R5_ARG3, R5_ARG3, 3);
      __ mtctr(tmp1);

      if (!VM_Version::has_vsx()) {
        __ bind(l_4);
        // Use unrolled version for mass copying (copy 4 elements a time).
        // Load feeding store gets zero latency on Power6, however not on Power5.
        // Therefore, the following sequence is made for the good of both.
        __ ld(tmp1, 0, R3_ARG1);
        __ ld(tmp2, 8, R3_ARG1);
        __ ld(tmp3, 16, R3_ARG1);
        __ ld(tmp4, 24, R3_ARG1);
        __ std(tmp1, 0, R4_ARG2);
        __ std(tmp2, 8, R4_ARG2);
        __ std(tmp3, 16, R4_ARG2);
        __ std(tmp4, 24, R4_ARG2);
        __ addi(R3_ARG1, R3_ARG1, 32);
        __ addi(R4_ARG2, R4_ARG2, 32);
        __ bdnz(l_4);

      } else { // Processor supports VSX, so use it to mass copy.

        // Prefetch the data into the L2 cache.
1830 __ dcbt(R3_ARG1, 0); 1831 1832 // If supported set DSCR pre-fetch to deepest. 1833 if (VM_Version::has_mfdscr()) { 1834 __ load_const_optimized(tmp2, VM_Version::_dscr_val | 7); 1835 __ mtdscr(tmp2); 1836 } 1837 1838 __ li(tmp1, 16); 1839 1840 // Backbranch target aligned to 32-byte. Not 16-byte align as 1841 // loop contains < 8 instructions that fit inside a single 1842 // i-cache sector. 1843 __ align(32); 1844 1845 __ bind(l_5); 1846 // Use loop with VSX load/store instructions to 1847 // copy 4 elements a time. 1848 __ lxvd2x(tmp_vsr1, R3_ARG1); // Load src 1849 __ stxvd2x(tmp_vsr1, R4_ARG2); // Store to dst 1850 __ lxvd2x(tmp_vsr2, tmp1, R3_ARG1); // Load src + 16 1851 __ stxvd2x(tmp_vsr2, tmp1, R4_ARG2); // Store to dst + 16 1852 __ addi(R3_ARG1, R3_ARG1, 32); // Update src+=32 1853 __ addi(R4_ARG2, R4_ARG2, 32); // Update dsc+=32 1854 __ bdnz(l_5); // Dec CTR and loop if not zero. 1855 1856 // Restore DSCR pre-fetch value. 1857 if (VM_Version::has_mfdscr()) { 1858 __ load_const_optimized(tmp2, VM_Version::_dscr_val); 1859 __ mtdscr(tmp2); 1860 } 1861 1862 } // VSX 1863 } // FasterArrayCopy 1864 1865 // copy 1 element at a time 1866 __ bind(l_3); 1867 __ cmpwi(CCR0, R5_ARG3, 0); 1868 __ beq(CCR0, l_1); 1869 1870 { // FasterArrayCopy 1871 __ mtctr(R5_ARG3); 1872 __ addi(R3_ARG1, R3_ARG1, -8); 1873 __ addi(R4_ARG2, R4_ARG2, -8); 1874 1875 __ bind(l_2); 1876 __ ldu(R0, 8, R3_ARG1); 1877 __ stdu(R0, 8, R4_ARG2); 1878 __ bdnz(l_2); 1879 1880 } 1881 __ bind(l_1); 1882 } 1883 1884 // Generate stub for disjoint long copy. If "aligned" is true, the 1885 // "from" and "to" addresses are assumed to be heapword aligned. 1886 // 1887 // Arguments for generated stub: 1888 // from: R3_ARG1 1889 // to: R4_ARG2 1890 // count: R5_ARG3 treated as signed 1891 // 1892 address generate_disjoint_long_copy(bool aligned, const char * name) { 1893 StubCodeMark mark(this, "StubRoutines", name); 1894 address start = __ function_entry(); 1895 assert_positive_int(R5_ARG3); 1896 { 1897 // UnsafeCopyMemory page error: continue at UnsafeCopyMemory common_error_exit 1898 UnsafeCopyMemoryMark ucmm(this, !aligned, false); 1899 generate_disjoint_long_copy_core(aligned); 1900 } 1901 __ li(R3_RET, 0); // return 0 1902 __ blr(); 1903 1904 return start; 1905 } 1906 1907 // Generate core code for conjoint long copy (and oop copy on 1908 // 64-bit). If "aligned" is true, the "from" and "to" addresses 1909 // are assumed to be heapword aligned. 1910 // 1911 // Arguments: 1912 // from: R3_ARG1 1913 // to: R4_ARG2 1914 // count: R5_ARG3 treated as signed 1915 // 1916 void generate_conjoint_long_copy_core(bool aligned) { 1917 Register tmp1 = R6_ARG4; 1918 Register tmp2 = R7_ARG5; 1919 Register tmp3 = R8_ARG6; 1920 Register tmp4 = R0; 1921 1922 VectorSRegister tmp_vsr1 = VSR1; 1923 VectorSRegister tmp_vsr2 = VSR2; 1924 1925 Label l_1, l_2, l_3, l_4, l_5; 1926 1927 __ cmpwi(CCR0, R5_ARG3, 0); 1928 __ beq(CCR0, l_1); 1929 1930 { // FasterArrayCopy 1931 __ sldi(R5_ARG3, R5_ARG3, 3); 1932 __ add(R3_ARG1, R3_ARG1, R5_ARG3); 1933 __ add(R4_ARG2, R4_ARG2, R5_ARG3); 1934 __ srdi(R5_ARG3, R5_ARG3, 3); 1935 1936 __ cmpwi(CCR0, R5_ARG3, 3); 1937 __ ble(CCR0, l_5); // copy 1 at a time if less than 4 elements remain 1938 1939 __ srdi(tmp1, R5_ARG3, 2); 1940 __ andi(R5_ARG3, R5_ARG3, 3); 1941 __ mtctr(tmp1); 1942 1943 if (!VM_Version::has_vsx()) { 1944 __ bind(l_4); 1945 // Use unrolled version for mass copying (copy 4 elements a time). 1946 // Load feeding store gets zero latency on Power6, however not on Power5. 
1947 // Therefore, the following sequence is made for the good of both. 1948 __ addi(R3_ARG1, R3_ARG1, -32); 1949 __ addi(R4_ARG2, R4_ARG2, -32); 1950 __ ld(tmp4, 24, R3_ARG1); 1951 __ ld(tmp3, 16, R3_ARG1); 1952 __ ld(tmp2, 8, R3_ARG1); 1953 __ ld(tmp1, 0, R3_ARG1); 1954 __ std(tmp4, 24, R4_ARG2); 1955 __ std(tmp3, 16, R4_ARG2); 1956 __ std(tmp2, 8, R4_ARG2); 1957 __ std(tmp1, 0, R4_ARG2); 1958 __ bdnz(l_4); 1959 } else { // Processor supports VSX, so use it to mass copy. 1960 // Prefetch the data into the L2 cache. 1961 __ dcbt(R3_ARG1, 0); 1962 1963 // If supported set DSCR pre-fetch to deepest. 1964 if (VM_Version::has_mfdscr()) { 1965 __ load_const_optimized(tmp2, VM_Version::_dscr_val | 7); 1966 __ mtdscr(tmp2); 1967 } 1968 1969 __ li(tmp1, 16); 1970 1971 // Backbranch target aligned to 32-byte. Not 16-byte align as 1972 // loop contains < 8 instructions that fit inside a single 1973 // i-cache sector. 1974 __ align(32); 1975 1976 __ bind(l_4); 1977 // Use loop with VSX load/store instructions to 1978 // copy 4 elements a time. 1979 __ addi(R3_ARG1, R3_ARG1, -32); // Update src-=32 1980 __ addi(R4_ARG2, R4_ARG2, -32); // Update dsc-=32 1981 __ lxvd2x(tmp_vsr2, tmp1, R3_ARG1); // Load src+16 1982 __ lxvd2x(tmp_vsr1, R3_ARG1); // Load src 1983 __ stxvd2x(tmp_vsr2, tmp1, R4_ARG2); // Store to dst+16 1984 __ stxvd2x(tmp_vsr1, R4_ARG2); // Store to dst 1985 __ bdnz(l_4); 1986 1987 // Restore DSCR pre-fetch value. 1988 if (VM_Version::has_mfdscr()) { 1989 __ load_const_optimized(tmp2, VM_Version::_dscr_val); 1990 __ mtdscr(tmp2); 1991 } 1992 } 1993 1994 __ cmpwi(CCR0, R5_ARG3, 0); 1995 __ beq(CCR0, l_1); 1996 1997 __ bind(l_5); 1998 __ mtctr(R5_ARG3); 1999 __ bind(l_3); 2000 __ ld(R0, -8, R3_ARG1); 2001 __ std(R0, -8, R4_ARG2); 2002 __ addi(R3_ARG1, R3_ARG1, -8); 2003 __ addi(R4_ARG2, R4_ARG2, -8); 2004 __ bdnz(l_3); 2005 2006 } 2007 __ bind(l_1); 2008 } 2009 2010 // Generate stub for conjoint long copy. If "aligned" is true, the 2011 // "from" and "to" addresses are assumed to be heapword aligned. 2012 // 2013 // Arguments for generated stub: 2014 // from: R3_ARG1 2015 // to: R4_ARG2 2016 // count: R5_ARG3 treated as signed 2017 // 2018 address generate_conjoint_long_copy(bool aligned, const char * name) { 2019 StubCodeMark mark(this, "StubRoutines", name); 2020 address start = __ function_entry(); 2021 assert_positive_int(R5_ARG3); 2022 address nooverlap_target = aligned ? 2023 STUB_ENTRY(arrayof_jlong_disjoint_arraycopy) : 2024 STUB_ENTRY(jlong_disjoint_arraycopy); 2025 2026 array_overlap_test(nooverlap_target, 3); 2027 { 2028 // UnsafeCopyMemory page error: continue at UnsafeCopyMemory common_error_exit 2029 UnsafeCopyMemoryMark ucmm(this, !aligned, false); 2030 generate_conjoint_long_copy_core(aligned); 2031 } 2032 __ li(R3_RET, 0); // return 0 2033 __ blr(); 2034 2035 return start; 2036 } 2037 2038 // Generate stub for conjoint oop copy. If "aligned" is true, the 2039 // "from" and "to" addresses are assumed to be heapword aligned. 2040 // 2041 // Arguments for generated stub: 2042 // from: R3_ARG1 2043 // to: R4_ARG2 2044 // count: R5_ARG3 treated as signed 2045 // dest_uninitialized: G1 support 2046 // 2047 address generate_conjoint_oop_copy(bool aligned, const char * name, bool dest_uninitialized) { 2048 StubCodeMark mark(this, "StubRoutines", name); 2049 2050 address start = __ function_entry(); 2051 assert_positive_int(R5_ARG3); 2052 address nooverlap_target = aligned ? 
2053 STUB_ENTRY(arrayof_oop_disjoint_arraycopy) : 2054 STUB_ENTRY(oop_disjoint_arraycopy); 2055 2056 DecoratorSet decorators = IN_HEAP | IS_ARRAY; 2057 if (dest_uninitialized) { 2058 decorators |= IS_DEST_UNINITIALIZED; 2059 } 2060 if (aligned) { 2061 decorators |= ARRAYCOPY_ALIGNED; 2062 } 2063 2064 BarrierSetAssembler *bs = BarrierSet::barrier_set()->barrier_set_assembler(); 2065 bs->arraycopy_prologue(_masm, decorators, T_OBJECT, R3_ARG1, R4_ARG2, R5_ARG3, noreg, noreg); 2066 2067 if (UseCompressedOops) { 2068 array_overlap_test(nooverlap_target, 2); 2069 generate_conjoint_int_copy_core(aligned); 2070 } else { 2071 array_overlap_test(nooverlap_target, 3); 2072 generate_conjoint_long_copy_core(aligned); 2073 } 2074 2075 bs->arraycopy_epilogue(_masm, decorators, T_OBJECT, R4_ARG2, R5_ARG3, noreg); 2076 __ li(R3_RET, 0); // return 0 2077 __ blr(); 2078 return start; 2079 } 2080 2081 // Generate stub for disjoint oop copy. If "aligned" is true, the 2082 // "from" and "to" addresses are assumed to be heapword aligned. 2083 // 2084 // Arguments for generated stub: 2085 // from: R3_ARG1 2086 // to: R4_ARG2 2087 // count: R5_ARG3 treated as signed 2088 // dest_uninitialized: G1 support 2089 // 2090 address generate_disjoint_oop_copy(bool aligned, const char * name, bool dest_uninitialized) { 2091 StubCodeMark mark(this, "StubRoutines", name); 2092 address start = __ function_entry(); 2093 assert_positive_int(R5_ARG3); 2094 2095 DecoratorSet decorators = IN_HEAP | IS_ARRAY | ARRAYCOPY_DISJOINT; 2096 if (dest_uninitialized) { 2097 decorators |= IS_DEST_UNINITIALIZED; 2098 } 2099 if (aligned) { 2100 decorators |= ARRAYCOPY_ALIGNED; 2101 } 2102 2103 BarrierSetAssembler *bs = BarrierSet::barrier_set()->barrier_set_assembler(); 2104 bs->arraycopy_prologue(_masm, decorators, T_OBJECT, R3_ARG1, R4_ARG2, R5_ARG3, noreg, noreg); 2105 2106 if (UseCompressedOops) { 2107 generate_disjoint_int_copy_core(aligned); 2108 } else { 2109 generate_disjoint_long_copy_core(aligned); 2110 } 2111 2112 bs->arraycopy_epilogue(_masm, decorators, T_OBJECT, R4_ARG2, R5_ARG3, noreg); 2113 __ li(R3_RET, 0); // return 0 2114 __ blr(); 2115 2116 return start; 2117 } 2118 2119 2120 // Helper for generating a dynamic type check. 2121 // Smashes only the given temp registers. 2122 void generate_type_check(Register sub_klass, 2123 Register super_check_offset, 2124 Register super_klass, 2125 Register temp, 2126 Label& L_success) { 2127 assert_different_registers(sub_klass, super_check_offset, super_klass); 2128 2129 BLOCK_COMMENT("type_check:"); 2130 2131 Label L_miss; 2132 2133 __ check_klass_subtype_fast_path(sub_klass, super_klass, temp, R0, &L_success, &L_miss, NULL, 2134 super_check_offset); 2135 __ check_klass_subtype_slow_path(sub_klass, super_klass, temp, R0, &L_success, NULL); 2136 2137 // Fall through on failure! 2138 __ bind(L_miss); 2139 } 2140 2141 2142 // Generate stub for checked oop copy. 
2143 // 2144 // Arguments for generated stub: 2145 // from: R3 2146 // to: R4 2147 // count: R5 treated as signed 2148 // ckoff: R6 (super_check_offset) 2149 // ckval: R7 (super_klass) 2150 // ret: R3 zero for success; (-1^K) where K is partial transfer count 2151 // 2152 address generate_checkcast_copy(const char *name, bool dest_uninitialized) { 2153 2154 const Register R3_from = R3_ARG1; // source array address 2155 const Register R4_to = R4_ARG2; // destination array address 2156 const Register R5_count = R5_ARG3; // elements count 2157 const Register R6_ckoff = R6_ARG4; // super_check_offset 2158 const Register R7_ckval = R7_ARG5; // super_klass 2159 2160 const Register R8_offset = R8_ARG6; // loop var, with stride wordSize 2161 const Register R9_remain = R9_ARG7; // loop var, with stride -1 2162 const Register R10_oop = R10_ARG8; // actual oop copied 2163 const Register R11_klass = R11_scratch1; // oop._klass 2164 const Register R12_tmp = R12_scratch2; 2165 2166 const Register R2_minus1 = R2; 2167 2168 //__ align(CodeEntryAlignment); 2169 StubCodeMark mark(this, "StubRoutines", name); 2170 address start = __ function_entry(); 2171 2172 // Assert that int is 64 bit sign extended and arrays are not conjoint. 2173 #ifdef ASSERT 2174 { 2175 assert_positive_int(R5_ARG3); 2176 const Register tmp1 = R11_scratch1, tmp2 = R12_scratch2; 2177 Label no_overlap; 2178 __ subf(tmp1, R3_ARG1, R4_ARG2); // distance in bytes 2179 __ sldi(tmp2, R5_ARG3, LogBytesPerHeapOop); // size in bytes 2180 __ cmpld(CCR0, R3_ARG1, R4_ARG2); // Use unsigned comparison! 2181 __ cmpld(CCR1, tmp1, tmp2); 2182 __ crnand(CCR0, Assembler::less, CCR1, Assembler::less); 2183 // Overlaps if Src before dst and distance smaller than size. 2184 // Branch to forward copy routine otherwise. 2185 __ blt(CCR0, no_overlap); 2186 __ stop("overlap in checkcast_copy", 0x9543); 2187 __ bind(no_overlap); 2188 } 2189 #endif 2190 2191 DecoratorSet decorators = IN_HEAP | IS_ARRAY | ARRAYCOPY_CHECKCAST; 2192 if (dest_uninitialized) { 2193 decorators |= IS_DEST_UNINITIALIZED; 2194 } 2195 2196 BarrierSetAssembler *bs = BarrierSet::barrier_set()->barrier_set_assembler(); 2197 bs->arraycopy_prologue(_masm, decorators, T_OBJECT, R3_from, R4_to, R5_count, /* preserve: */ R6_ckoff, R7_ckval); 2198 2199 //inc_counter_np(SharedRuntime::_checkcast_array_copy_ctr, R12_tmp, R3_RET); 2200 2201 Label load_element, store_element, store_null, success, do_epilogue; 2202 __ or_(R9_remain, R5_count, R5_count); // Initialize loop index, and test it. 2203 __ li(R8_offset, 0); // Offset from start of arrays. 2204 __ li(R2_minus1, -1); 2205 __ bne(CCR0, load_element); 2206 2207 // Empty array: Nothing to do. 2208 __ li(R3_RET, 0); // Return 0 on (trivial) success. 2209 __ blr(); 2210 2211 // ======== begin loop ======== 2212 // (Entry is load_element.) 2213 __ align(OptoLoopAlignment); 2214 __ bind(store_element); 2215 if (UseCompressedOops) { 2216 __ encode_heap_oop_not_null(R10_oop); 2217 __ bind(store_null); 2218 __ stw(R10_oop, R8_offset, R4_to); 2219 } else { 2220 __ bind(store_null); 2221 __ std(R10_oop, R8_offset, R4_to); 2222 } 2223 2224 __ addi(R8_offset, R8_offset, heapOopSize); // Step to next offset. 2225 __ add_(R9_remain, R2_minus1, R9_remain); // Decrement the count. 2226 __ beq(CCR0, success); 2227 2228 // ======== loop entry is here ======== 2229 __ bind(load_element); 2230 __ load_heap_oop(R10_oop, R8_offset, R3_from, R12_tmp, noreg, false, AS_RAW, &store_null); 2231 2232 __ load_klass(R11_klass, R10_oop); // Query the object klass. 
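// The loaded klass is now checked against the destination element klass (R7_ckval):
// on success control branches back to store_element, otherwise we fall through and
// report a partial transfer count to the caller.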
2233 2234 generate_type_check(R11_klass, R6_ckoff, R7_ckval, R12_tmp, 2235 // Branch to this on success: 2236 store_element); 2237 // ======== end loop ======== 2238 2239 // It was a real error; we must depend on the caller to finish the job. 2240 // Register R9_remain has number of *remaining* oops, R5_count number of *total* oops. 2241 // Emit GC store barriers for the oops we have copied (R5_count minus R9_remain), 2242 // and report their number to the caller. 2243 __ subf_(R5_count, R9_remain, R5_count); 2244 __ nand(R3_RET, R5_count, R5_count); // report (-1^K) to caller 2245 __ bne(CCR0, do_epilogue); 2246 __ blr(); 2247 2248 __ bind(success); 2249 __ li(R3_RET, 0); 2250 2251 __ bind(do_epilogue); 2252 bs->arraycopy_epilogue(_masm, decorators, T_OBJECT, R4_to, R5_count, /* preserve */ R3_RET); 2253 2254 __ blr(); 2255 return start; 2256 } 2257 2258 2259 // Generate 'unsafe' array copy stub. 2260 // Though just as safe as the other stubs, it takes an unscaled 2261 // size_t argument instead of an element count. 2262 // 2263 // Arguments for generated stub: 2264 // from: R3 2265 // to: R4 2266 // count: R5 byte count, treated as ssize_t, can be zero 2267 // 2268 // Examines the alignment of the operands and dispatches 2269 // to a long, int, short, or byte copy loop. 2270 // 2271 address generate_unsafe_copy(const char* name, 2272 address byte_copy_entry, 2273 address short_copy_entry, 2274 address int_copy_entry, 2275 address long_copy_entry) { 2276 2277 const Register R3_from = R3_ARG1; // source array address 2278 const Register R4_to = R4_ARG2; // destination array address 2279 const Register R5_count = R5_ARG3; // elements count (as long on PPC64) 2280 2281 const Register R6_bits = R6_ARG4; // test copy of low bits 2282 const Register R7_tmp = R7_ARG5; 2283 2284 //__ align(CodeEntryAlignment); 2285 StubCodeMark mark(this, "StubRoutines", name); 2286 address start = __ function_entry(); 2287 2288 // Bump this on entry, not on exit: 2289 //inc_counter_np(SharedRuntime::_unsafe_array_copy_ctr, R6_bits, R7_tmp); 2290 2291 Label short_copy, int_copy, long_copy; 2292 2293 __ orr(R6_bits, R3_from, R4_to); 2294 __ orr(R6_bits, R6_bits, R5_count); 2295 __ andi_(R0, R6_bits, (BytesPerLong-1)); 2296 __ beq(CCR0, long_copy); 2297 2298 __ andi_(R0, R6_bits, (BytesPerInt-1)); 2299 __ beq(CCR0, int_copy); 2300 2301 __ andi_(R0, R6_bits, (BytesPerShort-1)); 2302 __ beq(CCR0, short_copy); 2303 2304 // byte_copy: 2305 __ b(byte_copy_entry); 2306 2307 __ bind(short_copy); 2308 __ srwi(R5_count, R5_count, LogBytesPerShort); 2309 __ b(short_copy_entry); 2310 2311 __ bind(int_copy); 2312 __ srwi(R5_count, R5_count, LogBytesPerInt); 2313 __ b(int_copy_entry); 2314 2315 __ bind(long_copy); 2316 __ srwi(R5_count, R5_count, LogBytesPerLong); 2317 __ b(long_copy_entry); 2318 2319 return start; 2320 } 2321 2322 2323 // Perform range checks on the proposed arraycopy. 2324 // Kills the two temps, but nothing else. 2325 // Also, clean the sign bits of src_pos and dst_pos. 
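// Note: both limit checks use 64-bit compares; lwa sign-extends the 32-bit array length,
// and the caller (see generate_generic_copy) has already sign-extended and rejected
// negative positions and length, so pos + length cannot wrap.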
2326 void arraycopy_range_checks(Register src, // source array oop 2327 Register src_pos, // source position 2328 Register dst, // destination array oop 2329 Register dst_pos, // destination position 2330 Register length, // length of copy 2331 Register temp1, Register temp2, 2332 Label& L_failed) { 2333 BLOCK_COMMENT("arraycopy_range_checks:"); 2334 2335 const Register array_length = temp1; // scratch 2336 const Register end_pos = temp2; // scratch 2337 2338 // if (src_pos + length > arrayOop(src)->length() ) FAIL; 2339 __ lwa(array_length, arrayOopDesc::length_offset_in_bytes(), src); 2340 __ add(end_pos, src_pos, length); // src_pos + length 2341 __ cmpd(CCR0, end_pos, array_length); 2342 __ bgt(CCR0, L_failed); 2343 2344 // if (dst_pos + length > arrayOop(dst)->length() ) FAIL; 2345 __ lwa(array_length, arrayOopDesc::length_offset_in_bytes(), dst); 2346 __ add(end_pos, dst_pos, length); // src_pos + length 2347 __ cmpd(CCR0, end_pos, array_length); 2348 __ bgt(CCR0, L_failed); 2349 2350 BLOCK_COMMENT("arraycopy_range_checks done"); 2351 } 2352 2353 2354 // 2355 // Generate generic array copy stubs 2356 // 2357 // Input: 2358 // R3 - src oop 2359 // R4 - src_pos 2360 // R5 - dst oop 2361 // R6 - dst_pos 2362 // R7 - element count 2363 // 2364 // Output: 2365 // R3 == 0 - success 2366 // R3 == -1 - need to call System.arraycopy 2367 // 2368 address generate_generic_copy(const char *name, 2369 address entry_jbyte_arraycopy, 2370 address entry_jshort_arraycopy, 2371 address entry_jint_arraycopy, 2372 address entry_oop_arraycopy, 2373 address entry_disjoint_oop_arraycopy, 2374 address entry_jlong_arraycopy, 2375 address entry_checkcast_arraycopy) { 2376 Label L_failed, L_objArray; 2377 2378 // Input registers 2379 const Register src = R3_ARG1; // source array oop 2380 const Register src_pos = R4_ARG2; // source position 2381 const Register dst = R5_ARG3; // destination array oop 2382 const Register dst_pos = R6_ARG4; // destination position 2383 const Register length = R7_ARG5; // elements count 2384 2385 // registers used as temp 2386 const Register src_klass = R8_ARG6; // source array klass 2387 const Register dst_klass = R9_ARG7; // destination array klass 2388 const Register lh = R10_ARG8; // layout handler 2389 const Register temp = R2; 2390 2391 //__ align(CodeEntryAlignment); 2392 StubCodeMark mark(this, "StubRoutines", name); 2393 address start = __ function_entry(); 2394 2395 // Bump this on entry, not on exit: 2396 //inc_counter_np(SharedRuntime::_generic_array_copy_ctr, lh, temp); 2397 2398 // In principle, the int arguments could be dirty. 2399 2400 //----------------------------------------------------------------------- 2401 // Assembler stubs will be used for this call to arraycopy 2402 // if the following conditions are met: 2403 // 2404 // (1) src and dst must not be null. 2405 // (2) src_pos must not be negative. 2406 // (3) dst_pos must not be negative. 2407 // (4) length must not be negative. 2408 // (5) src klass and dst klass should be the same and not NULL. 2409 // (6) src and dst should be arrays. 2410 // (7) src_pos + length must not exceed length of src. 2411 // (8) dst_pos + length must not exceed length of dst. 
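// Roughly, conditions (1)-(8) above amount to the following C-level sketch
// (illustrative only, not generated code):
//   if (src == NULL || dst == NULL) return -1;                        // (1)
//   if (src_pos < 0 || dst_pos < 0 || length < 0) return -1;          // (2)-(4)
//   if (src->klass() != dst->klass() || !src->is_array()) return -1;  // (5),(6); objArrays take a separate path
//   if (src_pos + length > src->length()) return -1;                  // (7)
//   if (dst_pos + length > dst->length()) return -1;                  // (8)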
2412 BLOCK_COMMENT("arraycopy initial argument checks"); 2413 2414 __ cmpdi(CCR1, src, 0); // if (src == NULL) return -1; 2415 __ extsw_(src_pos, src_pos); // if (src_pos < 0) return -1; 2416 __ cmpdi(CCR5, dst, 0); // if (dst == NULL) return -1; 2417 __ cror(CCR1, Assembler::equal, CCR0, Assembler::less); 2418 __ extsw_(dst_pos, dst_pos); // if (src_pos < 0) return -1; 2419 __ cror(CCR5, Assembler::equal, CCR0, Assembler::less); 2420 __ extsw_(length, length); // if (length < 0) return -1; 2421 __ cror(CCR1, Assembler::equal, CCR5, Assembler::equal); 2422 __ cror(CCR1, Assembler::equal, CCR0, Assembler::less); 2423 __ beq(CCR1, L_failed); 2424 2425 BLOCK_COMMENT("arraycopy argument klass checks"); 2426 __ load_klass(src_klass, src); 2427 __ load_klass(dst_klass, dst); 2428 2429 // Load layout helper 2430 // 2431 // |array_tag| | header_size | element_type | |log2_element_size| 2432 // 32 30 24 16 8 2 0 2433 // 2434 // array_tag: typeArray = 0x3, objArray = 0x2, non-array = 0x0 2435 // 2436 2437 int lh_offset = in_bytes(Klass::layout_helper_offset()); 2438 2439 // Load 32-bits signed value. Use br() instruction with it to check icc. 2440 __ lwz(lh, lh_offset, src_klass); 2441 2442 // Handle objArrays completely differently... 2443 jint objArray_lh = Klass::array_layout_helper(T_OBJECT); 2444 __ load_const_optimized(temp, objArray_lh, R0); 2445 __ cmpw(CCR0, lh, temp); 2446 __ beq(CCR0, L_objArray); 2447 2448 __ cmpd(CCR5, src_klass, dst_klass); // if (src->klass() != dst->klass()) return -1; 2449 __ cmpwi(CCR6, lh, Klass::_lh_neutral_value); // if (!src->is_Array()) return -1; 2450 2451 __ crnand(CCR5, Assembler::equal, CCR6, Assembler::less); 2452 __ beq(CCR5, L_failed); 2453 2454 // At this point, it is known to be a typeArray (array_tag 0x3). 2455 #ifdef ASSERT 2456 { Label L; 2457 jint lh_prim_tag_in_place = (Klass::_lh_array_tag_type_value << Klass::_lh_array_tag_shift); 2458 __ load_const_optimized(temp, lh_prim_tag_in_place, R0); 2459 __ cmpw(CCR0, lh, temp); 2460 __ bge(CCR0, L); 2461 __ stop("must be a primitive array"); 2462 __ bind(L); 2463 } 2464 #endif 2465 2466 arraycopy_range_checks(src, src_pos, dst, dst_pos, length, 2467 temp, dst_klass, L_failed); 2468 2469 // TypeArrayKlass 2470 // 2471 // src_addr = (src + array_header_in_bytes()) + (src_pos << log2elemsize); 2472 // dst_addr = (dst + array_header_in_bytes()) + (dst_pos << log2elemsize); 2473 // 2474 2475 const Register offset = dst_klass; // array offset 2476 const Register elsize = src_klass; // log2 element size 2477 2478 __ rldicl(offset, lh, 64 - Klass::_lh_header_size_shift, 64 - exact_log2(Klass::_lh_header_size_mask + 1)); 2479 __ andi(elsize, lh, Klass::_lh_log2_element_size_mask); 2480 __ add(src, offset, src); // src array offset 2481 __ add(dst, offset, dst); // dst array offset 2482 2483 // Next registers should be set before the jump to corresponding stub. 2484 const Register from = R3_ARG1; // source array address 2485 const Register to = R4_ARG2; // destination array address 2486 const Register count = R5_ARG3; // elements count 2487 2488 // 'from', 'to', 'count' registers should be set in this order 2489 // since they are the same as 'src', 'src_pos', 'dst'. 
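// The element addresses are formed as base + header_size + (pos << log2_elem_size);
// 'offset' (extracted from the layout helper above) already held the header size and has
// been added to src/dst, so only the scaled positions remain to be added below.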
2490 2491 BLOCK_COMMENT("scale indexes to element size"); 2492 __ sld(src_pos, src_pos, elsize); 2493 __ sld(dst_pos, dst_pos, elsize); 2494 __ add(from, src_pos, src); // src_addr 2495 __ add(to, dst_pos, dst); // dst_addr 2496 __ mr(count, length); // length 2497 2498 BLOCK_COMMENT("choose copy loop based on element size"); 2499 // Using conditional branches with range 32kB. 2500 const int bo = Assembler::bcondCRbiIs1, bi = Assembler::bi0(CCR0, Assembler::equal); 2501 __ cmpwi(CCR0, elsize, 0); 2502 __ bc(bo, bi, entry_jbyte_arraycopy); 2503 __ cmpwi(CCR0, elsize, LogBytesPerShort); 2504 __ bc(bo, bi, entry_jshort_arraycopy); 2505 __ cmpwi(CCR0, elsize, LogBytesPerInt); 2506 __ bc(bo, bi, entry_jint_arraycopy); 2507 #ifdef ASSERT 2508 { Label L; 2509 __ cmpwi(CCR0, elsize, LogBytesPerLong); 2510 __ beq(CCR0, L); 2511 __ stop("must be long copy, but elsize is wrong"); 2512 __ bind(L); 2513 } 2514 #endif 2515 __ b(entry_jlong_arraycopy); 2516 2517 // ObjArrayKlass 2518 __ bind(L_objArray); 2519 // live at this point: src_klass, dst_klass, src[_pos], dst[_pos], length 2520 2521 Label L_disjoint_plain_copy, L_checkcast_copy; 2522 // test array classes for subtyping 2523 __ cmpd(CCR0, src_klass, dst_klass); // usual case is exact equality 2524 __ bne(CCR0, L_checkcast_copy); 2525 2526 // Identically typed arrays can be copied without element-wise checks. 2527 arraycopy_range_checks(src, src_pos, dst, dst_pos, length, 2528 temp, lh, L_failed); 2529 2530 __ addi(src, src, arrayOopDesc::base_offset_in_bytes(T_OBJECT)); //src offset 2531 __ addi(dst, dst, arrayOopDesc::base_offset_in_bytes(T_OBJECT)); //dst offset 2532 __ sldi(src_pos, src_pos, LogBytesPerHeapOop); 2533 __ sldi(dst_pos, dst_pos, LogBytesPerHeapOop); 2534 __ add(from, src_pos, src); // src_addr 2535 __ add(to, dst_pos, dst); // dst_addr 2536 __ mr(count, length); // length 2537 __ b(entry_oop_arraycopy); 2538 2539 __ bind(L_checkcast_copy); 2540 // live at this point: src_klass, dst_klass 2541 { 2542 // Before looking at dst.length, make sure dst is also an objArray. 2543 __ lwz(temp, lh_offset, dst_klass); 2544 __ cmpw(CCR0, lh, temp); 2545 __ bne(CCR0, L_failed); 2546 2547 // It is safe to examine both src.length and dst.length. 2548 arraycopy_range_checks(src, src_pos, dst, dst_pos, length, 2549 temp, lh, L_failed); 2550 2551 // Marshal the base address arguments now, freeing registers. 2552 __ addi(src, src, arrayOopDesc::base_offset_in_bytes(T_OBJECT)); //src offset 2553 __ addi(dst, dst, arrayOopDesc::base_offset_in_bytes(T_OBJECT)); //dst offset 2554 __ sldi(src_pos, src_pos, LogBytesPerHeapOop); 2555 __ sldi(dst_pos, dst_pos, LogBytesPerHeapOop); 2556 __ add(from, src_pos, src); // src_addr 2557 __ add(to, dst_pos, dst); // dst_addr 2558 __ mr(count, length); // length 2559 2560 Register sco_temp = R6_ARG4; // This register is free now. 2561 assert_different_registers(from, to, count, sco_temp, 2562 dst_klass, src_klass); 2563 2564 // Generate the type check. 2565 int sco_offset = in_bytes(Klass::super_check_offset_offset()); 2566 __ lwz(sco_temp, sco_offset, dst_klass); 2567 generate_type_check(src_klass, sco_temp, dst_klass, 2568 temp, L_disjoint_plain_copy); 2569 2570 // Fetch destination element klass from the ObjArrayKlass header. 
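// (The element klass and its super_check_offset become the two extra arguments,
// ckval in R7_ARG5 and ckoff in R6_ARG4, expected by the checkcast_arraycopy stub
// entered below.)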
2571 int ek_offset = in_bytes(ObjArrayKlass::element_klass_offset()); 2572 2573 // The checkcast_copy loop needs two extra arguments: 2574 __ ld(R7_ARG5, ek_offset, dst_klass); // dest elem klass 2575 __ lwz(R6_ARG4, sco_offset, R7_ARG5); // sco of elem klass 2576 __ b(entry_checkcast_arraycopy); 2577 } 2578 2579 __ bind(L_disjoint_plain_copy); 2580 __ b(entry_disjoint_oop_arraycopy); 2581 2582 __ bind(L_failed); 2583 __ li(R3_RET, -1); // return -1 2584 __ blr(); 2585 return start; 2586 } 2587 2588 // Arguments for generated stub: 2589 // R3_ARG1 - source byte array address 2590 // R4_ARG2 - destination byte array address 2591 // R5_ARG3 - round key array 2592 address generate_aescrypt_encryptBlock() { 2593 assert(UseAES, "need AES instructions and misaligned SSE support"); 2594 StubCodeMark mark(this, "StubRoutines", "aescrypt_encryptBlock"); 2595 2596 address start = __ function_entry(); 2597 2598 Label L_doLast; 2599 2600 Register from = R3_ARG1; // source array address 2601 Register to = R4_ARG2; // destination array address 2602 Register key = R5_ARG3; // round key array 2603 2604 Register keylen = R8; 2605 Register temp = R9; 2606 Register keypos = R10; 2607 Register fifteen = R12; 2608 2609 VectorRegister vRet = VR0; 2610 2611 VectorRegister vKey1 = VR1; 2612 VectorRegister vKey2 = VR2; 2613 VectorRegister vKey3 = VR3; 2614 VectorRegister vKey4 = VR4; 2615 2616 VectorRegister fromPerm = VR5; 2617 VectorRegister keyPerm = VR6; 2618 VectorRegister toPerm = VR7; 2619 VectorRegister fSplt = VR8; 2620 2621 VectorRegister vTmp1 = VR9; 2622 VectorRegister vTmp2 = VR10; 2623 VectorRegister vTmp3 = VR11; 2624 VectorRegister vTmp4 = VR12; 2625 2626 __ li (fifteen, 15); 2627 2628 // load unaligned from[0-15] to vsRet 2629 __ lvx (vRet, from); 2630 __ lvx (vTmp1, fifteen, from); 2631 __ lvsl (fromPerm, from); 2632 #ifdef VM_LITTLE_ENDIAN 2633 __ vspltisb (fSplt, 0x0f); 2634 __ vxor (fromPerm, fromPerm, fSplt); 2635 #endif 2636 __ vperm (vRet, vRet, vTmp1, fromPerm); 2637 2638 // load keylen (44 or 52 or 60) 2639 __ lwz (keylen, arrayOopDesc::length_offset_in_bytes() - arrayOopDesc::base_offset_in_bytes(T_INT), key); 2640 2641 // to load keys 2642 __ load_perm (keyPerm, key); 2643 #ifdef VM_LITTLE_ENDIAN 2644 __ vspltisb (vTmp2, -16); 2645 __ vrld (keyPerm, keyPerm, vTmp2); 2646 __ vrld (keyPerm, keyPerm, vTmp2); 2647 __ vsldoi (keyPerm, keyPerm, keyPerm, 8); 2648 #endif 2649 2650 // load the 1st round key to vTmp1 2651 __ lvx (vTmp1, key); 2652 __ li (keypos, 16); 2653 __ lvx (vKey1, keypos, key); 2654 __ vec_perm (vTmp1, vKey1, keyPerm); 2655 2656 // 1st round 2657 __ vxor (vRet, vRet, vTmp1); 2658 2659 // load the 2nd round key to vKey1 2660 __ li (keypos, 32); 2661 __ lvx (vKey2, keypos, key); 2662 __ vec_perm (vKey1, vKey2, keyPerm); 2663 2664 // load the 3rd round key to vKey2 2665 __ li (keypos, 48); 2666 __ lvx (vKey3, keypos, key); 2667 __ vec_perm (vKey2, vKey3, keyPerm); 2668 2669 // load the 4th round key to vKey3 2670 __ li (keypos, 64); 2671 __ lvx (vKey4, keypos, key); 2672 __ vec_perm (vKey3, vKey4, keyPerm); 2673 2674 // load the 5th round key to vKey4 2675 __ li (keypos, 80); 2676 __ lvx (vTmp1, keypos, key); 2677 __ vec_perm (vKey4, vTmp1, keyPerm); 2678 2679 // 2nd - 5th rounds 2680 __ vcipher (vRet, vRet, vKey1); 2681 __ vcipher (vRet, vRet, vKey2); 2682 __ vcipher (vRet, vRet, vKey3); 2683 __ vcipher (vRet, vRet, vKey4); 2684 2685 // load the 6th round key to vKey1 2686 __ li (keypos, 96); 2687 __ lvx (vKey2, keypos, key); 2688 __ vec_perm (vKey1, vTmp1, vKey2, keyPerm); 
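// (Round keys are read with the usual unaligned-load idiom: each lvx returns the
// 16-byte-aligned quadword containing part of the key, and vec_perm with keyPerm
// merges two neighbouring quadwords to recover the unaligned 16-byte round key.)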
2689 2690 // load the 7th round key to vKey2 2691 __ li (keypos, 112); 2692 __ lvx (vKey3, keypos, key); 2693 __ vec_perm (vKey2, vKey3, keyPerm); 2694 2695 // load the 8th round key to vKey3 2696 __ li (keypos, 128); 2697 __ lvx (vKey4, keypos, key); 2698 __ vec_perm (vKey3, vKey4, keyPerm); 2699 2700 // load the 9th round key to vKey4 2701 __ li (keypos, 144); 2702 __ lvx (vTmp1, keypos, key); 2703 __ vec_perm (vKey4, vTmp1, keyPerm); 2704 2705 // 6th - 9th rounds 2706 __ vcipher (vRet, vRet, vKey1); 2707 __ vcipher (vRet, vRet, vKey2); 2708 __ vcipher (vRet, vRet, vKey3); 2709 __ vcipher (vRet, vRet, vKey4); 2710 2711 // load the 10th round key to vKey1 2712 __ li (keypos, 160); 2713 __ lvx (vKey2, keypos, key); 2714 __ vec_perm (vKey1, vTmp1, vKey2, keyPerm); 2715 2716 // load the 11th round key to vKey2 2717 __ li (keypos, 176); 2718 __ lvx (vTmp1, keypos, key); 2719 __ vec_perm (vKey2, vTmp1, keyPerm); 2720 2721 // if all round keys are loaded, skip next 4 rounds 2722 __ cmpwi (CCR0, keylen, 44); 2723 __ beq (CCR0, L_doLast); 2724 2725 // 10th - 11th rounds 2726 __ vcipher (vRet, vRet, vKey1); 2727 __ vcipher (vRet, vRet, vKey2); 2728 2729 // load the 12th round key to vKey1 2730 __ li (keypos, 192); 2731 __ lvx (vKey2, keypos, key); 2732 __ vec_perm (vKey1, vTmp1, vKey2, keyPerm); 2733 2734 // load the 13th round key to vKey2 2735 __ li (keypos, 208); 2736 __ lvx (vTmp1, keypos, key); 2737 __ vec_perm (vKey2, vTmp1, keyPerm); 2738 2739 // if all round keys are loaded, skip next 2 rounds 2740 __ cmpwi (CCR0, keylen, 52); 2741 __ beq (CCR0, L_doLast); 2742 2743 // 12th - 13th rounds 2744 __ vcipher (vRet, vRet, vKey1); 2745 __ vcipher (vRet, vRet, vKey2); 2746 2747 // load the 14th round key to vKey1 2748 __ li (keypos, 224); 2749 __ lvx (vKey2, keypos, key); 2750 __ vec_perm (vKey1, vTmp1, vKey2, keyPerm); 2751 2752 // load the 15th round key to vKey2 2753 __ li (keypos, 240); 2754 __ lvx (vTmp1, keypos, key); 2755 __ vec_perm (vKey2, vTmp1, keyPerm); 2756 2757 __ bind(L_doLast); 2758 2759 // last two rounds 2760 __ vcipher (vRet, vRet, vKey1); 2761 __ vcipherlast (vRet, vRet, vKey2); 2762 2763 // store result (unaligned) 2764 #ifdef VM_LITTLE_ENDIAN 2765 __ lvsl (toPerm, to); 2766 #else 2767 __ lvsr (toPerm, to); 2768 #endif 2769 __ vspltisb (vTmp3, -1); 2770 __ vspltisb (vTmp4, 0); 2771 __ lvx (vTmp1, to); 2772 __ lvx (vTmp2, fifteen, to); 2773 #ifdef VM_LITTLE_ENDIAN 2774 __ vperm (vTmp3, vTmp3, vTmp4, toPerm); // generate select mask 2775 __ vxor (toPerm, toPerm, fSplt); // swap bytes 2776 #else 2777 __ vperm (vTmp3, vTmp4, vTmp3, toPerm); // generate select mask 2778 #endif 2779 __ vperm (vTmp4, vRet, vRet, toPerm); // rotate data 2780 __ vsel (vTmp2, vTmp4, vTmp2, vTmp3); 2781 __ vsel (vTmp1, vTmp1, vTmp4, vTmp3); 2782 __ stvx (vTmp2, fifteen, to); // store this one first (may alias) 2783 __ stvx (vTmp1, to); 2784 2785 __ blr(); 2786 return start; 2787 } 2788 2789 // Arguments for generated stub: 2790 // R3_ARG1 - source byte array address 2791 // R4_ARG2 - destination byte array address 2792 // R5_ARG3 - K (key) in little endian int array 2793 address generate_aescrypt_decryptBlock() { 2794 assert(UseAES, "need AES instructions and misaligned SSE support"); 2795 StubCodeMark mark(this, "StubRoutines", "aescrypt_decryptBlock"); 2796 2797 address start = __ function_entry(); 2798 2799 Label L_doLast; 2800 Label L_do44; 2801 Label L_do52; 2802 2803 Register from = R3_ARG1; // source array address 2804 Register to = R4_ARG2; // destination array address 2805 Register key = 
R5_ARG3; // round key array 2806 2807 Register keylen = R8; 2808 Register temp = R9; 2809 Register keypos = R10; 2810 Register fifteen = R12; 2811 2812 VectorRegister vRet = VR0; 2813 2814 VectorRegister vKey1 = VR1; 2815 VectorRegister vKey2 = VR2; 2816 VectorRegister vKey3 = VR3; 2817 VectorRegister vKey4 = VR4; 2818 VectorRegister vKey5 = VR5; 2819 2820 VectorRegister fromPerm = VR6; 2821 VectorRegister keyPerm = VR7; 2822 VectorRegister toPerm = VR8; 2823 VectorRegister fSplt = VR9; 2824 2825 VectorRegister vTmp1 = VR10; 2826 VectorRegister vTmp2 = VR11; 2827 VectorRegister vTmp3 = VR12; 2828 VectorRegister vTmp4 = VR13; 2829 2830 __ li (fifteen, 15); 2831 2832 // load unaligned from[0-15] to vsRet 2833 __ lvx (vRet, from); 2834 __ lvx (vTmp1, fifteen, from); 2835 __ lvsl (fromPerm, from); 2836 #ifdef VM_LITTLE_ENDIAN 2837 __ vspltisb (fSplt, 0x0f); 2838 __ vxor (fromPerm, fromPerm, fSplt); 2839 #endif 2840 __ vperm (vRet, vRet, vTmp1, fromPerm); // align [and byte swap in LE] 2841 2842 // load keylen (44 or 52 or 60) 2843 __ lwz (keylen, arrayOopDesc::length_offset_in_bytes() - arrayOopDesc::base_offset_in_bytes(T_INT), key); 2844 2845 // to load keys 2846 __ load_perm (keyPerm, key); 2847 #ifdef VM_LITTLE_ENDIAN 2848 __ vxor (vTmp2, vTmp2, vTmp2); 2849 __ vspltisb (vTmp2, -16); 2850 __ vrld (keyPerm, keyPerm, vTmp2); 2851 __ vrld (keyPerm, keyPerm, vTmp2); 2852 __ vsldoi (keyPerm, keyPerm, keyPerm, 8); 2853 #endif 2854 2855 __ cmpwi (CCR0, keylen, 44); 2856 __ beq (CCR0, L_do44); 2857 2858 __ cmpwi (CCR0, keylen, 52); 2859 __ beq (CCR0, L_do52); 2860 2861 // load the 15th round key to vKey1 2862 __ li (keypos, 240); 2863 __ lvx (vKey1, keypos, key); 2864 __ li (keypos, 224); 2865 __ lvx (vKey2, keypos, key); 2866 __ vec_perm (vKey1, vKey2, vKey1, keyPerm); 2867 2868 // load the 14th round key to vKey2 2869 __ li (keypos, 208); 2870 __ lvx (vKey3, keypos, key); 2871 __ vec_perm (vKey2, vKey3, vKey2, keyPerm); 2872 2873 // load the 13th round key to vKey3 2874 __ li (keypos, 192); 2875 __ lvx (vKey4, keypos, key); 2876 __ vec_perm (vKey3, vKey4, vKey3, keyPerm); 2877 2878 // load the 12th round key to vKey4 2879 __ li (keypos, 176); 2880 __ lvx (vKey5, keypos, key); 2881 __ vec_perm (vKey4, vKey5, vKey4, keyPerm); 2882 2883 // load the 11th round key to vKey5 2884 __ li (keypos, 160); 2885 __ lvx (vTmp1, keypos, key); 2886 __ vec_perm (vKey5, vTmp1, vKey5, keyPerm); 2887 2888 // 1st - 5th rounds 2889 __ vxor (vRet, vRet, vKey1); 2890 __ vncipher (vRet, vRet, vKey2); 2891 __ vncipher (vRet, vRet, vKey3); 2892 __ vncipher (vRet, vRet, vKey4); 2893 __ vncipher (vRet, vRet, vKey5); 2894 2895 __ b (L_doLast); 2896 2897 __ bind (L_do52); 2898 2899 // load the 13th round key to vKey1 2900 __ li (keypos, 208); 2901 __ lvx (vKey1, keypos, key); 2902 __ li (keypos, 192); 2903 __ lvx (vKey2, keypos, key); 2904 __ vec_perm (vKey1, vKey2, vKey1, keyPerm); 2905 2906 // load the 12th round key to vKey2 2907 __ li (keypos, 176); 2908 __ lvx (vKey3, keypos, key); 2909 __ vec_perm (vKey2, vKey3, vKey2, keyPerm); 2910 2911 // load the 11th round key to vKey3 2912 __ li (keypos, 160); 2913 __ lvx (vTmp1, keypos, key); 2914 __ vec_perm (vKey3, vTmp1, vKey3, keyPerm); 2915 2916 // 1st - 3rd rounds 2917 __ vxor (vRet, vRet, vKey1); 2918 __ vncipher (vRet, vRet, vKey2); 2919 __ vncipher (vRet, vRet, vKey3); 2920 2921 __ b (L_doLast); 2922 2923 __ bind (L_do44); 2924 2925 // load the 11th round key to vKey1 2926 __ li (keypos, 176); 2927 __ lvx (vKey1, keypos, key); 2928 __ li (keypos, 160); 2929 __ lvx 
(vTmp1, keypos, key); 2930 __ vec_perm (vKey1, vTmp1, vKey1, keyPerm); 2931 2932 // 1st round 2933 __ vxor (vRet, vRet, vKey1); 2934 2935 __ bind (L_doLast); 2936 2937 // load the 10th round key to vKey1 2938 __ li (keypos, 144); 2939 __ lvx (vKey2, keypos, key); 2940 __ vec_perm (vKey1, vKey2, vTmp1, keyPerm); 2941 2942 // load the 9th round key to vKey2 2943 __ li (keypos, 128); 2944 __ lvx (vKey3, keypos, key); 2945 __ vec_perm (vKey2, vKey3, vKey2, keyPerm); 2946 2947 // load the 8th round key to vKey3 2948 __ li (keypos, 112); 2949 __ lvx (vKey4, keypos, key); 2950 __ vec_perm (vKey3, vKey4, vKey3, keyPerm); 2951 2952 // load the 7th round key to vKey4 2953 __ li (keypos, 96); 2954 __ lvx (vKey5, keypos, key); 2955 __ vec_perm (vKey4, vKey5, vKey4, keyPerm); 2956 2957 // load the 6th round key to vKey5 2958 __ li (keypos, 80); 2959 __ lvx (vTmp1, keypos, key); 2960 __ vec_perm (vKey5, vTmp1, vKey5, keyPerm); 2961 2962 // last 10th - 6th rounds 2963 __ vncipher (vRet, vRet, vKey1); 2964 __ vncipher (vRet, vRet, vKey2); 2965 __ vncipher (vRet, vRet, vKey3); 2966 __ vncipher (vRet, vRet, vKey4); 2967 __ vncipher (vRet, vRet, vKey5); 2968 2969 // load the 5th round key to vKey1 2970 __ li (keypos, 64); 2971 __ lvx (vKey2, keypos, key); 2972 __ vec_perm (vKey1, vKey2, vTmp1, keyPerm); 2973 2974 // load the 4th round key to vKey2 2975 __ li (keypos, 48); 2976 __ lvx (vKey3, keypos, key); 2977 __ vec_perm (vKey2, vKey3, vKey2, keyPerm); 2978 2979 // load the 3rd round key to vKey3 2980 __ li (keypos, 32); 2981 __ lvx (vKey4, keypos, key); 2982 __ vec_perm (vKey3, vKey4, vKey3, keyPerm); 2983 2984 // load the 2nd round key to vKey4 2985 __ li (keypos, 16); 2986 __ lvx (vKey5, keypos, key); 2987 __ vec_perm (vKey4, vKey5, vKey4, keyPerm); 2988 2989 // load the 1st round key to vKey5 2990 __ lvx (vTmp1, key); 2991 __ vec_perm (vKey5, vTmp1, vKey5, keyPerm); 2992 2993 // last 5th - 1th rounds 2994 __ vncipher (vRet, vRet, vKey1); 2995 __ vncipher (vRet, vRet, vKey2); 2996 __ vncipher (vRet, vRet, vKey3); 2997 __ vncipher (vRet, vRet, vKey4); 2998 __ vncipherlast (vRet, vRet, vKey5); 2999 3000 // store result (unaligned) 3001 #ifdef VM_LITTLE_ENDIAN 3002 __ lvsl (toPerm, to); 3003 #else 3004 __ lvsr (toPerm, to); 3005 #endif 3006 __ vspltisb (vTmp3, -1); 3007 __ vspltisb (vTmp4, 0); 3008 __ lvx (vTmp1, to); 3009 __ lvx (vTmp2, fifteen, to); 3010 #ifdef VM_LITTLE_ENDIAN 3011 __ vperm (vTmp3, vTmp3, vTmp4, toPerm); // generate select mask 3012 __ vxor (toPerm, toPerm, fSplt); // swap bytes 3013 #else 3014 __ vperm (vTmp3, vTmp4, vTmp3, toPerm); // generate select mask 3015 #endif 3016 __ vperm (vTmp4, vRet, vRet, toPerm); // rotate data 3017 __ vsel (vTmp2, vTmp4, vTmp2, vTmp3); 3018 __ vsel (vTmp1, vTmp1, vTmp4, vTmp3); 3019 __ stvx (vTmp2, fifteen, to); // store this one first (may alias) 3020 __ stvx (vTmp1, to); 3021 3022 __ blr(); 3023 return start; 3024 } 3025 3026 address generate_sha256_implCompress(bool multi_block, const char *name) { 3027 assert(UseSHA, "need SHA instructions"); 3028 StubCodeMark mark(this, "StubRoutines", name); 3029 address start = __ function_entry(); 3030 3031 __ sha256 (multi_block); 3032 3033 __ blr(); 3034 return start; 3035 } 3036 3037 address generate_sha512_implCompress(bool multi_block, const char *name) { 3038 assert(UseSHA, "need SHA instructions"); 3039 StubCodeMark mark(this, "StubRoutines", name); 3040 address start = __ function_entry(); 3041 3042 __ sha512 (multi_block); 3043 3044 __ blr(); 3045 return start; 3046 } 3047 3048 void 
generate_arraycopy_stubs() { 3049 // Note: the disjoint stubs must be generated first, some of 3050 // the conjoint stubs use them. 3051 3052 address ucm_common_error_exit = generate_unsafecopy_common_error_exit(); 3053 UnsafeCopyMemory::set_common_exit_stub_pc(ucm_common_error_exit); 3054 3055 // non-aligned disjoint versions 3056 StubRoutines::_jbyte_disjoint_arraycopy = generate_disjoint_byte_copy(false, "jbyte_disjoint_arraycopy"); 3057 StubRoutines::_jshort_disjoint_arraycopy = generate_disjoint_short_copy(false, "jshort_disjoint_arraycopy"); 3058 StubRoutines::_jint_disjoint_arraycopy = generate_disjoint_int_copy(false, "jint_disjoint_arraycopy"); 3059 StubRoutines::_jlong_disjoint_arraycopy = generate_disjoint_long_copy(false, "jlong_disjoint_arraycopy"); 3060 StubRoutines::_oop_disjoint_arraycopy = generate_disjoint_oop_copy(false, "oop_disjoint_arraycopy", false); 3061 StubRoutines::_oop_disjoint_arraycopy_uninit = generate_disjoint_oop_copy(false, "oop_disjoint_arraycopy_uninit", true); 3062 3063 // aligned disjoint versions 3064 StubRoutines::_arrayof_jbyte_disjoint_arraycopy = generate_disjoint_byte_copy(true, "arrayof_jbyte_disjoint_arraycopy"); 3065 StubRoutines::_arrayof_jshort_disjoint_arraycopy = generate_disjoint_short_copy(true, "arrayof_jshort_disjoint_arraycopy"); 3066 StubRoutines::_arrayof_jint_disjoint_arraycopy = generate_disjoint_int_copy(true, "arrayof_jint_disjoint_arraycopy"); 3067 StubRoutines::_arrayof_jlong_disjoint_arraycopy = generate_disjoint_long_copy(true, "arrayof_jlong_disjoint_arraycopy"); 3068 StubRoutines::_arrayof_oop_disjoint_arraycopy = generate_disjoint_oop_copy(true, "arrayof_oop_disjoint_arraycopy", false); 3069 StubRoutines::_arrayof_oop_disjoint_arraycopy_uninit = generate_disjoint_oop_copy(true, "oop_disjoint_arraycopy_uninit", true); 3070 3071 // non-aligned conjoint versions 3072 StubRoutines::_jbyte_arraycopy = generate_conjoint_byte_copy(false, "jbyte_arraycopy"); 3073 StubRoutines::_jshort_arraycopy = generate_conjoint_short_copy(false, "jshort_arraycopy"); 3074 StubRoutines::_jint_arraycopy = generate_conjoint_int_copy(false, "jint_arraycopy"); 3075 StubRoutines::_jlong_arraycopy = generate_conjoint_long_copy(false, "jlong_arraycopy"); 3076 StubRoutines::_oop_arraycopy = generate_conjoint_oop_copy(false, "oop_arraycopy", false); 3077 StubRoutines::_oop_arraycopy_uninit = generate_conjoint_oop_copy(false, "oop_arraycopy_uninit", true); 3078 3079 // aligned conjoint versions 3080 StubRoutines::_arrayof_jbyte_arraycopy = generate_conjoint_byte_copy(true, "arrayof_jbyte_arraycopy"); 3081 StubRoutines::_arrayof_jshort_arraycopy = generate_conjoint_short_copy(true, "arrayof_jshort_arraycopy"); 3082 StubRoutines::_arrayof_jint_arraycopy = generate_conjoint_int_copy(true, "arrayof_jint_arraycopy"); 3083 StubRoutines::_arrayof_jlong_arraycopy = generate_conjoint_long_copy(true, "arrayof_jlong_arraycopy"); 3084 StubRoutines::_arrayof_oop_arraycopy = generate_conjoint_oop_copy(true, "arrayof_oop_arraycopy", false); 3085 StubRoutines::_arrayof_oop_arraycopy_uninit = generate_conjoint_oop_copy(true, "arrayof_oop_arraycopy", true); 3086 3087 // special/generic versions 3088 StubRoutines::_checkcast_arraycopy = generate_checkcast_copy("checkcast_arraycopy", false); 3089 StubRoutines::_checkcast_arraycopy_uninit = generate_checkcast_copy("checkcast_arraycopy_uninit", true); 3090 3091 StubRoutines::_unsafe_arraycopy = generate_unsafe_copy("unsafe_arraycopy", 3092 STUB_ENTRY(jbyte_arraycopy), 3093 STUB_ENTRY(jshort_arraycopy), 3094 
STUB_ENTRY(jint_arraycopy), 3095 STUB_ENTRY(jlong_arraycopy)); 3096 StubRoutines::_generic_arraycopy = generate_generic_copy("generic_arraycopy", 3097 STUB_ENTRY(jbyte_arraycopy), 3098 STUB_ENTRY(jshort_arraycopy), 3099 STUB_ENTRY(jint_arraycopy), 3100 STUB_ENTRY(oop_arraycopy), 3101 STUB_ENTRY(oop_disjoint_arraycopy), 3102 STUB_ENTRY(jlong_arraycopy), 3103 STUB_ENTRY(checkcast_arraycopy)); 3104 3105 // fill routines 3106 if (OptimizeFill) { 3107 StubRoutines::_jbyte_fill = generate_fill(T_BYTE, false, "jbyte_fill"); 3108 StubRoutines::_jshort_fill = generate_fill(T_SHORT, false, "jshort_fill"); 3109 StubRoutines::_jint_fill = generate_fill(T_INT, false, "jint_fill"); 3110 StubRoutines::_arrayof_jbyte_fill = generate_fill(T_BYTE, true, "arrayof_jbyte_fill"); 3111 StubRoutines::_arrayof_jshort_fill = generate_fill(T_SHORT, true, "arrayof_jshort_fill"); 3112 StubRoutines::_arrayof_jint_fill = generate_fill(T_INT, true, "arrayof_jint_fill"); 3113 } 3114 } 3115 3116 // Safefetch stubs. 3117 void generate_safefetch(const char* name, int size, address* entry, address* fault_pc, address* continuation_pc) { 3118 // safefetch signatures: 3119 // int SafeFetch32(int* adr, int errValue); 3120 // intptr_t SafeFetchN (intptr_t* adr, intptr_t errValue); 3121 // 3122 // arguments: 3123 // R3_ARG1 = adr 3124 // R4_ARG2 = errValue 3125 // 3126 // result: 3127 // R3_RET = *adr or errValue 3128 3129 StubCodeMark mark(this, "StubRoutines", name); 3130 3131 // Entry point, pc or function descriptor. 3132 *entry = __ function_entry(); 3133 3134 // Load *adr into R4_ARG2, may fault. 3135 *fault_pc = __ pc(); 3136 switch (size) { 3137 case 4: 3138 // int32_t, signed extended 3139 __ lwa(R4_ARG2, 0, R3_ARG1); 3140 break; 3141 case 8: 3142 // int64_t 3143 __ ld(R4_ARG2, 0, R3_ARG1); 3144 break; 3145 default: 3146 ShouldNotReachHere(); 3147 } 3148 3149 // return errValue or *adr 3150 *continuation_pc = __ pc(); 3151 __ mr(R3_RET, R4_ARG2); 3152 __ blr(); 3153 } 3154 3155 // Stub for BigInteger::multiplyToLen() 3156 // 3157 // Arguments: 3158 // 3159 // Input: 3160 // R3 - x address 3161 // R4 - x length 3162 // R5 - y address 3163 // R6 - y length 3164 // R7 - z address 3165 // R8 - z length 3166 // 3167 address generate_multiplyToLen() { 3168 3169 StubCodeMark mark(this, "StubRoutines", "multiplyToLen"); 3170 3171 address start = __ function_entry(); 3172 3173 const Register x = R3; 3174 const Register xlen = R4; 3175 const Register y = R5; 3176 const Register ylen = R6; 3177 const Register z = R7; 3178 const Register zlen = R8; 3179 3180 const Register tmp1 = R2; // TOC not used. 3181 const Register tmp2 = R9; 3182 const Register tmp3 = R10; 3183 const Register tmp4 = R11; 3184 const Register tmp5 = R12; 3185 3186 // non-volatile regs 3187 const Register tmp6 = R31; 3188 const Register tmp7 = R30; 3189 const Register tmp8 = R29; 3190 const Register tmp9 = R28; 3191 const Register tmp10 = R27; 3192 const Register tmp11 = R26; 3193 const Register tmp12 = R25; 3194 const Register tmp13 = R24; 3195 3196 BLOCK_COMMENT("Entry:"); 3197 3198 // C2 does not respect int to long conversion for stub calls. 3199 __ clrldi(xlen, xlen, 32); 3200 __ clrldi(ylen, ylen, 32); 3201 __ clrldi(zlen, zlen, 32); 3202 3203 // Save non-volatile regs (frameless). 
3204 int current_offs = 8; 3205 __ std(R24, -current_offs, R1_SP); current_offs += 8; 3206 __ std(R25, -current_offs, R1_SP); current_offs += 8; 3207 __ std(R26, -current_offs, R1_SP); current_offs += 8; 3208 __ std(R27, -current_offs, R1_SP); current_offs += 8; 3209 __ std(R28, -current_offs, R1_SP); current_offs += 8; 3210 __ std(R29, -current_offs, R1_SP); current_offs += 8; 3211 __ std(R30, -current_offs, R1_SP); current_offs += 8; 3212 __ std(R31, -current_offs, R1_SP); 3213 3214 __ multiply_to_len(x, xlen, y, ylen, z, zlen, tmp1, tmp2, tmp3, tmp4, tmp5, 3215 tmp6, tmp7, tmp8, tmp9, tmp10, tmp11, tmp12, tmp13); 3216 3217 // Restore non-volatile regs. 3218 current_offs = 8; 3219 __ ld(R24, -current_offs, R1_SP); current_offs += 8; 3220 __ ld(R25, -current_offs, R1_SP); current_offs += 8; 3221 __ ld(R26, -current_offs, R1_SP); current_offs += 8; 3222 __ ld(R27, -current_offs, R1_SP); current_offs += 8; 3223 __ ld(R28, -current_offs, R1_SP); current_offs += 8; 3224 __ ld(R29, -current_offs, R1_SP); current_offs += 8; 3225 __ ld(R30, -current_offs, R1_SP); current_offs += 8; 3226 __ ld(R31, -current_offs, R1_SP); 3227 3228 __ blr(); // Return to caller. 3229 3230 return start; 3231 } 3232 3233 /** 3234 * Arguments: 3235 * 3236 * Input: 3237 * R3_ARG1 - out address 3238 * R4_ARG2 - in address 3239 * R5_ARG3 - offset 3240 * R6_ARG4 - len 3241 * R7_ARG5 - k 3242 * Output: 3243 * R3_RET - carry 3244 */ 3245 address generate_mulAdd() { 3246 __ align(CodeEntryAlignment); 3247 StubCodeMark mark(this, "StubRoutines", "mulAdd"); 3248 3249 address start = __ function_entry(); 3250 3251 // C2 does not sign extend signed parameters to full 64 bits registers: 3252 __ rldic (R5_ARG3, R5_ARG3, 2, 32); // always positive 3253 __ clrldi(R6_ARG4, R6_ARG4, 32); // force zero bits on higher word 3254 __ clrldi(R7_ARG5, R7_ARG5, 32); // force zero bits on higher word 3255 3256 __ muladd(R3_ARG1, R4_ARG2, R5_ARG3, R6_ARG4, R7_ARG5, R8, R9, R10); 3257 3258 // Moves output carry to return register 3259 __ mr (R3_RET, R10); 3260 3261 __ blr(); 3262 3263 return start; 3264 } 3265 3266 /** 3267 * Arguments: 3268 * 3269 * Input: 3270 * R3_ARG1 - in address 3271 * R4_ARG2 - in length 3272 * R5_ARG3 - out address 3273 * R6_ARG4 - out length 3274 */ 3275 address generate_squareToLen() { 3276 __ align(CodeEntryAlignment); 3277 StubCodeMark mark(this, "StubRoutines", "squareToLen"); 3278 3279 address start = __ function_entry(); 3280 3281 // args - higher word is cleaned (unsignedly) due to int to long casting 3282 const Register in = R3_ARG1; 3283 const Register in_len = R4_ARG2; 3284 __ clrldi(in_len, in_len, 32); 3285 const Register out = R5_ARG3; 3286 const Register out_len = R6_ARG4; 3287 __ clrldi(out_len, out_len, 32); 3288 3289 // output 3290 const Register ret = R3_RET; 3291 3292 // temporaries 3293 const Register lplw_s = R7; 3294 const Register in_aux = R8; 3295 const Register out_aux = R9; 3296 const Register piece = R10; 3297 const Register product = R14; 3298 const Register lplw = R15; 3299 const Register i_minus1 = R16; 3300 const Register carry = R17; 3301 const Register offset = R18; 3302 const Register off_aux = R19; 3303 const Register t = R20; 3304 const Register mlen = R21; 3305 const Register len = R22; 3306 const Register a = R23; 3307 const Register b = R24; 3308 const Register i = R25; 3309 const Register c = R26; 3310 const Register cs = R27; 3311 3312 // Labels 3313 Label SKIP_LSHIFT, SKIP_DIAGONAL_SUM, SKIP_ADDONE, SKIP_LOOP_SQUARE; 3314 Label LOOP_LSHIFT, LOOP_DIAGONAL_SUM, LOOP_ADDONE, 
LOOP_SQUARE; 3315 3316 // Save non-volatile regs (frameless). 3317 int current_offs = -8; 3318 __ std(R28, current_offs, R1_SP); current_offs -= 8; 3319 __ std(R27, current_offs, R1_SP); current_offs -= 8; 3320 __ std(R26, current_offs, R1_SP); current_offs -= 8; 3321 __ std(R25, current_offs, R1_SP); current_offs -= 8; 3322 __ std(R24, current_offs, R1_SP); current_offs -= 8; 3323 __ std(R23, current_offs, R1_SP); current_offs -= 8; 3324 __ std(R22, current_offs, R1_SP); current_offs -= 8; 3325 __ std(R21, current_offs, R1_SP); current_offs -= 8; 3326 __ std(R20, current_offs, R1_SP); current_offs -= 8; 3327 __ std(R19, current_offs, R1_SP); current_offs -= 8; 3328 __ std(R18, current_offs, R1_SP); current_offs -= 8; 3329 __ std(R17, current_offs, R1_SP); current_offs -= 8; 3330 __ std(R16, current_offs, R1_SP); current_offs -= 8; 3331 __ std(R15, current_offs, R1_SP); current_offs -= 8; 3332 __ std(R14, current_offs, R1_SP); 3333 3334 // Store the squares, right shifted one bit (i.e., divided by 2) 3335 __ subi (out_aux, out, 8); 3336 __ subi (in_aux, in, 4); 3337 __ cmpwi (CCR0, in_len, 0); 3338 // Initialize lplw outside of the loop 3339 __ xorr (lplw, lplw, lplw); 3340 __ ble (CCR0, SKIP_LOOP_SQUARE); // in_len <= 0 3341 __ mtctr (in_len); 3342 3343 __ bind(LOOP_SQUARE); 3344 __ lwzu (piece, 4, in_aux); 3345 __ mulld (product, piece, piece); 3346 // shift left 63 bits and only keep the MSB 3347 __ rldic (lplw_s, lplw, 63, 0); 3348 __ mr (lplw, product); 3349 // shift right 1 bit without sign extension 3350 __ srdi (product, product, 1); 3351 // join them to the same register and store it 3352 __ orr (product, lplw_s, product); 3353 #ifdef VM_LITTLE_ENDIAN 3354 // Swap low and high words for little endian 3355 __ rldicl (product, product, 32, 0); 3356 #endif 3357 __ stdu (product, 8, out_aux); 3358 __ bdnz (LOOP_SQUARE); 3359 3360 __ bind(SKIP_LOOP_SQUARE); 3361 3362 // Add in off-diagonal sums 3363 __ cmpwi (CCR0, in_len, 0); 3364 __ ble (CCR0, SKIP_DIAGONAL_SUM); 3365 // Avoid CTR usage here in order to use it at mulAdd 3366 __ subi (i_minus1, in_len, 1); 3367 __ li (offset, 4); 3368 3369 __ bind(LOOP_DIAGONAL_SUM); 3370 3371 __ sldi (off_aux, out_len, 2); 3372 __ sub (off_aux, off_aux, offset); 3373 3374 __ mr (len, i_minus1); 3375 __ sldi (mlen, i_minus1, 2); 3376 __ lwzx (t, in, mlen); 3377 3378 __ muladd (out, in, off_aux, len, t, a, b, carry); 3379 3380 // begin<addOne> 3381 // off_aux = out_len*4 - 4 - mlen - offset*4 - 4; 3382 __ addi (mlen, mlen, 4); 3383 __ sldi (a, out_len, 2); 3384 __ subi (a, a, 4); 3385 __ sub (a, a, mlen); 3386 __ subi (off_aux, offset, 4); 3387 __ sub (off_aux, a, off_aux); 3388 3389 __ lwzx (b, off_aux, out); 3390 __ add (b, b, carry); 3391 __ stwx (b, off_aux, out); 3392 3393 // if (((uint64_t)s >> 32) != 0) { 3394 __ srdi_ (a, b, 32); 3395 __ beq (CCR0, SKIP_ADDONE); 3396 3397 // while (--mlen >= 0) { 3398 __ bind(LOOP_ADDONE); 3399 __ subi (mlen, mlen, 4); 3400 __ cmpwi (CCR0, mlen, 0); 3401 __ beq (CCR0, SKIP_ADDONE); 3402 3403 // if (--offset_aux < 0) { // Carry out of number 3404 __ subi (off_aux, off_aux, 4); 3405 __ cmpwi (CCR0, off_aux, 0); 3406 __ blt (CCR0, SKIP_ADDONE); 3407 3408 // } else { 3409 __ lwzx (b, off_aux, out); 3410 __ addi (b, b, 1); 3411 __ stwx (b, off_aux, out); 3412 __ cmpwi (CCR0, b, 0); 3413 __ bne (CCR0, SKIP_ADDONE); 3414 __ b (LOOP_ADDONE); 3415 3416 __ bind(SKIP_ADDONE); 3417 // } } } end<addOne> 3418 3419 __ addi (offset, offset, 8); 3420 __ subi (i_minus1, i_minus1, 1); 3421 __ cmpwi (CCR0, i_minus1, 0); 3422 __ 
bge (CCR0, LOOP_DIAGONAL_SUM); 3423 3424 __ bind(SKIP_DIAGONAL_SUM); 3425 3426 // Shift back up and set low bit 3427 // Shifts 1 bit left up to len positions. Assumes no leading zeros 3428 // begin<primitiveLeftShift> 3429 __ cmpwi (CCR0, out_len, 0); 3430 __ ble (CCR0, SKIP_LSHIFT); 3431 __ li (i, 0); 3432 __ lwz (c, 0, out); 3433 __ subi (b, out_len, 1); 3434 __ mtctr (b); 3435 3436 __ bind(LOOP_LSHIFT); 3437 __ mr (b, c); 3438 __ addi (cs, i, 4); 3439 __ lwzx (c, out, cs); 3440 3441 __ sldi (b, b, 1); 3442 __ srwi (cs, c, 31); 3443 __ orr (b, b, cs); 3444 __ stwx (b, i, out); 3445 3446 __ addi (i, i, 4); 3447 __ bdnz (LOOP_LSHIFT); 3448 3449 __ sldi (c, out_len, 2); 3450 __ subi (c, c, 4); 3451 __ lwzx (b, out, c); 3452 __ sldi (b, b, 1); 3453 __ stwx (b, out, c); 3454 3455 __ bind(SKIP_LSHIFT); 3456 // end<primitiveLeftShift> 3457 3458 // Set low bit 3459 __ sldi (i, in_len, 2); 3460 __ subi (i, i, 4); 3461 __ lwzx (i, in, i); 3462 __ sldi (c, out_len, 2); 3463 __ subi (c, c, 4); 3464 __ lwzx (b, out, c); 3465 3466 __ andi (i, i, 1); 3467 __ orr (i, b, i); 3468 3469 __ stwx (i, out, c); 3470 3471 // Restore non-volatile regs. 3472 current_offs = -8; 3473 __ ld(R28, current_offs, R1_SP); current_offs -= 8; 3474 __ ld(R27, current_offs, R1_SP); current_offs -= 8; 3475 __ ld(R26, current_offs, R1_SP); current_offs -= 8; 3476 __ ld(R25, current_offs, R1_SP); current_offs -= 8; 3477 __ ld(R24, current_offs, R1_SP); current_offs -= 8; 3478 __ ld(R23, current_offs, R1_SP); current_offs -= 8; 3479 __ ld(R22, current_offs, R1_SP); current_offs -= 8; 3480 __ ld(R21, current_offs, R1_SP); current_offs -= 8; 3481 __ ld(R20, current_offs, R1_SP); current_offs -= 8; 3482 __ ld(R19, current_offs, R1_SP); current_offs -= 8; 3483 __ ld(R18, current_offs, R1_SP); current_offs -= 8; 3484 __ ld(R17, current_offs, R1_SP); current_offs -= 8; 3485 __ ld(R16, current_offs, R1_SP); current_offs -= 8; 3486 __ ld(R15, current_offs, R1_SP); current_offs -= 8; 3487 __ ld(R14, current_offs, R1_SP); 3488 3489 __ mr(ret, out); 3490 __ blr(); 3491 3492 return start; 3493 } 3494 3495 /** 3496 * Arguments: 3497 * 3498 * Inputs: 3499 * R3_ARG1 - int crc 3500 * R4_ARG2 - byte* buf 3501 * R5_ARG3 - int length (of buffer) 3502 * 3503 * scratch: 3504 * R2, R6-R12 3505 * 3506 * Ouput: 3507 * R3_RET - int crc result 3508 */ 3509 // Compute CRC32 function. 3510 address generate_CRC32_updateBytes(bool is_crc32c) { 3511 __ align(CodeEntryAlignment); 3512 StubCodeMark mark(this, "StubRoutines", is_crc32c ? "CRC32C_updateBytes" : "CRC32_updateBytes"); 3513 address start = __ function_entry(); // Remember stub start address (is rtn value). 3514 __ crc32(R3_ARG1, R4_ARG2, R5_ARG3, R2, R6, R7, R8, R9, R10, R11, R12, is_crc32c); 3515 __ blr(); 3516 return start; 3517 } 3518 3519 // Initialization 3520 void generate_initial() { 3521 // Generates all stubs and initializes the entry points 3522 3523 // Entry points that exist in all platforms. 3524 // Note: This is code that could be shared among different platforms - however the 3525 // benefit seems to be smaller than the disadvantage of having a 3526 // much more complicated generator structure. See also comment in 3527 // stubRoutines.hpp. 3528 3529 StubRoutines::_forward_exception_entry = generate_forward_exception(); 3530 StubRoutines::_call_stub_entry = generate_call_stub(StubRoutines::_call_stub_return_address); 3531 StubRoutines::_catch_exception_entry = generate_catch_exception(); 3532 3533 // Build this early so it's available for the interpreter. 
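// (Interpreter stack-overflow checks branch to this entry when the stack limit is hit.)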
  // Initialization
  void generate_initial() {
    // Generates all stubs and initializes the entry points

    // Entry points that exist in all platforms.
    // Note: This is code that could be shared among different platforms - however the
    //       benefit seems to be smaller than the disadvantage of having a
    //       much more complicated generator structure. See also comment in
    //       stubRoutines.hpp.

    StubRoutines::_forward_exception_entry = generate_forward_exception();
    StubRoutines::_call_stub_entry         = generate_call_stub(StubRoutines::_call_stub_return_address);
    StubRoutines::_catch_exception_entry   = generate_catch_exception();

    // Build this early so it's available for the interpreter.
    StubRoutines::_throw_StackOverflowError_entry =
      generate_throw_exception("StackOverflowError throw_exception",
                               CAST_FROM_FN_PTR(address, SharedRuntime::throw_StackOverflowError), false);
    StubRoutines::_throw_delayed_StackOverflowError_entry =
      generate_throw_exception("delayed StackOverflowError throw_exception",
                               CAST_FROM_FN_PTR(address, SharedRuntime::throw_delayed_StackOverflowError), false);

    // CRC32 Intrinsics.
    if (UseCRC32Intrinsics) {
      StubRoutines::_crc_table_adr    = StubRoutines::generate_crc_constants(REVERSE_CRC32_POLY);
      StubRoutines::_updateBytesCRC32 = generate_CRC32_updateBytes(false);
    }

    // CRC32C Intrinsics.
    if (UseCRC32CIntrinsics) {
      StubRoutines::_crc32c_table_addr = StubRoutines::generate_crc_constants(REVERSE_CRC32C_POLY);
      StubRoutines::_updateBytesCRC32C = generate_CRC32_updateBytes(true);
    }
  }

  void generate_all() {
    // Generates all stubs and initializes the entry points

    // These entry points require SharedInfo::stack0 to be set up in
    // non-core builds
    StubRoutines::_throw_AbstractMethodError_entry          = generate_throw_exception("AbstractMethodError throw_exception", CAST_FROM_FN_PTR(address, SharedRuntime::throw_AbstractMethodError), false);
    // Handle IncompatibleClassChangeError in itable stubs.
    StubRoutines::_throw_IncompatibleClassChangeError_entry = generate_throw_exception("IncompatibleClassChangeError throw_exception", CAST_FROM_FN_PTR(address, SharedRuntime::throw_IncompatibleClassChangeError), false);
    StubRoutines::_throw_NullPointerException_at_call_entry = generate_throw_exception("NullPointerException at call throw_exception", CAST_FROM_FN_PTR(address, SharedRuntime::throw_NullPointerException_at_call), false);

    // support for verify_oop (must happen after universe_init)
    StubRoutines::_verify_oop_subroutine_entry = generate_verify_oop();

    // arraycopy stubs used by compilers
    generate_arraycopy_stubs();

    // Safefetch stubs.
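    // SafeFetch32/SafeFetchN load a value from an address that is not known to
    // be valid; if the access faults, the signal handler resumes at the recorded
    // continuation pc and the caller-supplied error value is returned instead.
    // Conceptually (illustrative sketch only, the real stubs are a single load
    // plus the fault-pc/continuation-pc bookkeeping set up below):
    //
    //   int SafeFetch32(int* adr, int errValue) {
    //     return *adr;   // on a fault: errValue is returned instead
    //   }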
    generate_safefetch("SafeFetch32", sizeof(int),      &StubRoutines::_safefetch32_entry,
                                                         &StubRoutines::_safefetch32_fault_pc,
                                                         &StubRoutines::_safefetch32_continuation_pc);
    generate_safefetch("SafeFetchN",  sizeof(intptr_t), &StubRoutines::_safefetchN_entry,
                                                         &StubRoutines::_safefetchN_fault_pc,
                                                         &StubRoutines::_safefetchN_continuation_pc);

#ifdef COMPILER2
    if (UseMultiplyToLenIntrinsic) {
      StubRoutines::_multiplyToLen = generate_multiplyToLen();
    }
#endif

    if (UseSquareToLenIntrinsic) {
      StubRoutines::_squareToLen = generate_squareToLen();
    }
    if (UseMulAddIntrinsic) {
      StubRoutines::_mulAdd = generate_mulAdd();
    }
    if (UseMontgomeryMultiplyIntrinsic) {
      StubRoutines::_montgomeryMultiply
        = CAST_FROM_FN_PTR(address, SharedRuntime::montgomery_multiply);
    }
    if (UseMontgomerySquareIntrinsic) {
      StubRoutines::_montgomerySquare
        = CAST_FROM_FN_PTR(address, SharedRuntime::montgomery_square);
    }

    if (UseAESIntrinsics) {
      StubRoutines::_aescrypt_encryptBlock = generate_aescrypt_encryptBlock();
      StubRoutines::_aescrypt_decryptBlock = generate_aescrypt_decryptBlock();
    }

    if (UseSHA256Intrinsics) {
      StubRoutines::_sha256_implCompress   = generate_sha256_implCompress(false, "sha256_implCompress");
      StubRoutines::_sha256_implCompressMB = generate_sha256_implCompress(true,  "sha256_implCompressMB");
    }
    if (UseSHA512Intrinsics) {
      StubRoutines::_sha512_implCompress   = generate_sha512_implCompress(false, "sha512_implCompress");
      StubRoutines::_sha512_implCompressMB = generate_sha512_implCompress(true,  "sha512_implCompressMB");
    }
  }

 public:
  StubGenerator(CodeBuffer* code, bool all) : StubCodeGenerator(code) {
    // replace the standard masm with a special one:
    _masm = new MacroAssembler(code);
    if (all) {
      generate_all();
    } else {
      generate_initial();
    }
  }
};

#define UCM_TABLE_MAX_ENTRIES 8
void StubGenerator_generate(CodeBuffer* code, bool all) {
  if (UnsafeCopyMemory::_table == NULL) {
    UnsafeCopyMemory::create_table(UCM_TABLE_MAX_ENTRIES);
  }
  StubGenerator g(code, all);
}
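// Note: StubGenerator_generate() is the platform hook the shared runtime uses to
// fill the stub code buffers. It is typically driven in two phases from shared
// code (e.g. stubRoutines.cpp); a rough usage sketch, with the exact call sites
// living outside this file:
//
//   StubGenerator_generate(code, false);  // early startup: call stub, CRC32, ...
//   StubGenerator_generate(code, true);   // after universe init: remaining stubs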