/*
 * Copyright (c) 1999, 2018, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "asm/assembler.hpp"
#include "c1/c1_Defs.hpp"
#include "c1/c1_MacroAssembler.hpp"
#include "c1/c1_Runtime1.hpp"
#include "ci/ciUtilities.hpp"
#include "gc/shared/cardTable.hpp"
#include "gc/shared/cardTableBarrierSet.hpp"
#include "interpreter/interpreter.hpp"
#include "nativeInst_x86.hpp"
#include "oops/compiledICHolder.hpp"
#include "oops/oop.inline.hpp"
#include "prims/jvmtiExport.hpp"
#include "register_x86.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/signature.hpp"
#include "runtime/vframeArray.hpp"
#include "utilities/macros.hpp"
#include "vmreg_x86.inline.hpp"

// Implementation of StubAssembler

int StubAssembler::call_RT(Register oop_result1, Register metadata_result, address entry, int args_size) {
  // setup registers
  const Register thread = NOT_LP64(rdi) LP64_ONLY(r15_thread); // is callee-saved register (Visual C++ calling conventions)
  assert(!(oop_result1->is_valid() || metadata_result->is_valid()) || oop_result1 != metadata_result, "registers must be different");
  assert(oop_result1 != thread && metadata_result != thread, "registers must be different");
  assert(args_size >= 0, "illegal args_size");
  bool align_stack = false;
#ifdef _LP64
  // At a method handle call, the stack may not be properly aligned
  // when returning with an exception.
  align_stack = (stub_id() == Runtime1::handle_exception_from_callee_id);
#endif

#ifdef _LP64
  mov(c_rarg0, thread);
  set_num_rt_args(0); // Nothing on stack
#else
  set_num_rt_args(1 + args_size);

  // push java thread (becomes first argument of C function)
  get_thread(thread);
  push(thread);
#endif // _LP64

  int call_offset;
  if (!align_stack) {
    set_last_Java_frame(thread, noreg, rbp, NULL);
  } else {
    address the_pc = pc();
    call_offset = offset();
    set_last_Java_frame(thread, noreg, rbp, the_pc);
    andptr(rsp, -(StackAlignmentInBytes));    // Align stack
  }

  // do the call
  call(RuntimeAddress(entry));
  if (!align_stack) {
    call_offset = offset();
  }
  // verify callee-saved register
#ifdef ASSERT
  guarantee(thread != rax, "change this code");
  push(rax);
  { Label L;
    get_thread(rax);
    cmpptr(thread, rax);
    jcc(Assembler::equal, L);
    int3();
    stop("StubAssembler::call_RT: rdi not callee saved?");
    bind(L);
  }
  pop(rax);
#endif
  reset_last_Java_frame(thread, true);

  // discard thread and arguments
  NOT_LP64(addptr(rsp, num_rt_args()*BytesPerWord));

  // check for pending exceptions
  { Label L;
    cmpptr(Address(thread, Thread::pending_exception_offset()), (int32_t)NULL_WORD);
    jcc(Assembler::equal, L);
    // exception pending => remove activation and forward to exception handler
    movptr(rax, Address(thread, Thread::pending_exception_offset()));
    // make sure that the vm_results are cleared
    if (oop_result1->is_valid()) {
      movptr(Address(thread, JavaThread::vm_result_offset()), NULL_WORD);
    }
    if (metadata_result->is_valid()) {
      movptr(Address(thread, JavaThread::vm_result_2_offset()), NULL_WORD);
    }
    if (frame_size() == no_frame_size) {
      leave();
      jump(RuntimeAddress(StubRoutines::forward_exception_entry()));
    } else if (_stub_id == Runtime1::forward_exception_id) {
      should_not_reach_here();
    } else {
      jump(RuntimeAddress(Runtime1::entry_for(Runtime1::forward_exception_id)));
    }
    bind(L);
  }
  // get oop results if there are any and reset the values in the thread
  if (oop_result1->is_valid()) {
    get_vm_result(oop_result1, thread);
  }
  if (metadata_result->is_valid()) {
    get_vm_result_2(metadata_result, thread);
  }
  return call_offset;
}


int StubAssembler::call_RT(Register oop_result1, Register metadata_result, address entry, Register arg1) {
#ifdef _LP64
  mov(c_rarg1, arg1);
#else
  push(arg1);
#endif // _LP64
  return call_RT(oop_result1, metadata_result, entry, 1);
}


int StubAssembler::call_RT(Register oop_result1, Register metadata_result, address entry, Register arg1, Register arg2) {
#ifdef _LP64
  if (c_rarg1 == arg2) {
    if (c_rarg2 == arg1) {
      xchgq(arg1, arg2);
    } else {
      mov(c_rarg2, arg2);
      mov(c_rarg1, arg1);
    }
  } else {
    mov(c_rarg1, arg1);
    mov(c_rarg2, arg2);
  }
#else
  push(arg2);
  push(arg1);
#endif // _LP64
  return call_RT(oop_result1, metadata_result, entry, 2);
}


int StubAssembler::call_RT(Register oop_result1, Register metadata_result, address entry, Register arg1, Register arg2, Register arg3) {
#ifdef _LP64
  // if there is any conflict use the stack
  if (arg1 == c_rarg2 || arg1 == c_rarg3 ||
      arg2 == c_rarg1 || arg2 == c_rarg3 ||
      arg3 == c_rarg1 || arg3 == c_rarg2) {
    push(arg3);
    push(arg2);
    push(arg1);
    pop(c_rarg1);
    pop(c_rarg2);
    pop(c_rarg3);
  } else {
    mov(c_rarg1, arg1);
    mov(c_rarg2, arg2);
    mov(c_rarg3, arg3);
  }
#else
  push(arg3);
  push(arg2);
  push(arg1);
#endif // _LP64
  return call_RT(oop_result1, metadata_result, entry, 3);
}
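// Illustrative note (not part of the original file): the conflict handling in
// the two- and three-argument call_RT variants matters when the incoming
// registers are cross-assigned to the C calling convention. For example, a
// hypothetical call
//
//   call_RT(noreg, noreg, entry, c_rarg2, c_rarg1);
//
// cannot be lowered naively as "mov(c_rarg1, arg1); mov(c_rarg2, arg2);"
// because the first mov would clobber arg2 before it is read; hence the xchgq
// above, or, for three arguments, the push/pop shuffle through the stack.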


// Implementation of StubFrame

class StubFrame: public StackObj {
 private:
  StubAssembler* _sasm;

 public:
  StubFrame(StubAssembler* sasm, const char* name, bool must_gc_arguments);
  void load_argument(int offset_in_words, Register reg);

  ~StubFrame();
};

void StubAssembler::prologue(const char* name, bool must_gc_arguments) {
  set_info(name, must_gc_arguments);
  enter();
}

void StubAssembler::epilogue() {
  leave();
  ret(0);
}

#define __ _sasm->

StubFrame::StubFrame(StubAssembler* sasm, const char* name, bool must_gc_arguments) {
  _sasm = sasm;
  __ prologue(name, must_gc_arguments);
}

// load parameters that were stored with LIR_Assembler::store_parameter
// Note: offsets for store_parameter and load_argument must match
void StubFrame::load_argument(int offset_in_words, Register reg) {
  __ load_parameter(offset_in_words, reg);
}


StubFrame::~StubFrame() {
  __ epilogue();
}

#undef __
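// Typical use (illustrative sketch, mirroring the monitorenter stub further
// below): StubFrame is a RAII helper, so prologue and epilogue bracket the
// scoped block:
//
//   { StubFrame f(sasm, "monitorenter", dont_gc_arguments);
//     f.load_argument(1, rax);   // object
//     f.load_argument(0, rbx);   // lock address
//     ...                        // call into the runtime
//   }                            // ~StubFrame emits leave(); ret(0);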


// Implementation of Runtime1

const int float_regs_as_doubles_size_in_slots = pd_nof_fpu_regs_frame_map * 2;
const int xmm_regs_as_doubles_size_in_slots = FrameMap::nof_xmm_regs * 2;

// Stack layout for saving/restoring all the registers needed during a runtime
// call (this includes deoptimization).
// Note that users of this frame may well have arguments to some runtime call
// while these values are on the stack. These positions neglect those arguments,
// but the code in save_live_registers will take the argument count into
// account.
//
#ifdef _LP64
  #define SLOT2(x) x,
  #define SLOT_PER_WORD 2
#else
  #define SLOT2(x)
  #define SLOT_PER_WORD 1
#endif // _LP64

enum reg_save_layout {
  // 64bit needs to keep stack 16 byte aligned. So we add some alignment dummies to make that
  // happen and will assert if the stack size we create is misaligned
#ifdef _LP64
  align_dummy_0, align_dummy_1,
#endif // _LP64
#ifdef _WIN64
  // Windows always allocates space for its argument registers (see
  // frame::arg_reg_save_area_bytes).
  arg_reg_save_1, arg_reg_save_1H,                                                          // 0, 4
  arg_reg_save_2, arg_reg_save_2H,                                                          // 8, 12
  arg_reg_save_3, arg_reg_save_3H,                                                          // 16, 20
  arg_reg_save_4, arg_reg_save_4H,                                                          // 24, 28
#endif // _WIN64
  xmm_regs_as_doubles_off,                                                                  // 32
  float_regs_as_doubles_off = xmm_regs_as_doubles_off + xmm_regs_as_doubles_size_in_slots,  // 160
  fpu_state_off = float_regs_as_doubles_off + float_regs_as_doubles_size_in_slots,          // 224
  // fpu_state_end_off is exclusive
  fpu_state_end_off = fpu_state_off + (FPUStateSizeInWords / SLOT_PER_WORD),                // 352
  marker = fpu_state_end_off, SLOT2(markerH)                                                // 352, 356
  extra_space_offset,                                                                       // 360
#ifdef _LP64
  r15_off = extra_space_offset, r15H_off,                                                   // 360, 364
  r14_off, r14H_off,                                                                        // 368, 372
  r13_off, r13H_off,                                                                        // 376, 380
  r12_off, r12H_off,                                                                        // 384, 388
  r11_off, r11H_off,                                                                        // 392, 396
  r10_off, r10H_off,                                                                        // 400, 404
  r9_off, r9H_off,                                                                          // 408, 412
  r8_off, r8H_off,                                                                          // 416, 420
  rdi_off, rdiH_off,                                                                        // 424, 428
#else
  rdi_off = extra_space_offset,
#endif // _LP64
  rsi_off, SLOT2(rsiH_off)                                                                  // 432, 436
  rbp_off, SLOT2(rbpH_off)                                                                  // 440, 444
  rsp_off, SLOT2(rspH_off)                                                                  // 448, 452
  rbx_off, SLOT2(rbxH_off)                                                                  // 456, 460
  rdx_off, SLOT2(rdxH_off)                                                                  // 464, 468
  rcx_off, SLOT2(rcxH_off)                                                                  // 472, 476
  rax_off, SLOT2(raxH_off)                                                                  // 480, 484
  saved_rbp_off, SLOT2(saved_rbpH_off)                                                      // 488, 492
  return_off, SLOT2(returnH_off)                                                            // 496, 500
  reg_save_frame_size   // As noted: neglects any parameters to runtime                     // 504
};
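// Illustrative note (not part of the original file): each enumerator above is
// a stack-slot index, so after save_live_registers the saved copy of a
// register lives at a fixed offset from rsp, e.g.
//
//   Address saved_rax(rsp, rax_off * VMRegImpl::stack_slot_size);
//
// and generate_oop_map (below) describes the same slot to the GC via
// VMRegImpl::stack2reg(rax_off + num_rt_args).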

// Save off registers which might be killed by calls into the runtime.
// Tries to be smart about FP registers.  In particular we separate
// saving and describing the FPU registers for deoptimization since we
// have to save the FPU registers twice if we describe them and on P4
// saving FPU registers which don't contain anything appears
// expensive.  The deopt blob is the only thing which needs to
// describe FPU registers.  In all other cases it should be sufficient
// to simply save their current value.

static OopMap* generate_oop_map(StubAssembler* sasm, int num_rt_args,
                                bool save_fpu_registers = true) {

  // In 64bit all the args are in regs so there are no additional stack slots
  LP64_ONLY(num_rt_args = 0);
  LP64_ONLY(assert((reg_save_frame_size * VMRegImpl::stack_slot_size) % 16 == 0, "must be 16 byte aligned");)
  int frame_size_in_slots = reg_save_frame_size + num_rt_args; // args + thread
  sasm->set_frame_size(frame_size_in_slots / VMRegImpl::slots_per_word);

  // record saved value locations in an OopMap
  // locations are offsets from sp after runtime call; num_rt_args is number of arguments in call, including thread
  OopMap* map = new OopMap(frame_size_in_slots, 0);
  map->set_callee_saved(VMRegImpl::stack2reg(rax_off + num_rt_args), rax->as_VMReg());
  map->set_callee_saved(VMRegImpl::stack2reg(rcx_off + num_rt_args), rcx->as_VMReg());
  map->set_callee_saved(VMRegImpl::stack2reg(rdx_off + num_rt_args), rdx->as_VMReg());
  map->set_callee_saved(VMRegImpl::stack2reg(rbx_off + num_rt_args), rbx->as_VMReg());
  map->set_callee_saved(VMRegImpl::stack2reg(rsi_off + num_rt_args), rsi->as_VMReg());
  map->set_callee_saved(VMRegImpl::stack2reg(rdi_off + num_rt_args), rdi->as_VMReg());
#ifdef _LP64
  map->set_callee_saved(VMRegImpl::stack2reg(r8_off + num_rt_args),  r8->as_VMReg());
  map->set_callee_saved(VMRegImpl::stack2reg(r9_off + num_rt_args),  r9->as_VMReg());
  map->set_callee_saved(VMRegImpl::stack2reg(r10_off + num_rt_args), r10->as_VMReg());
  map->set_callee_saved(VMRegImpl::stack2reg(r11_off + num_rt_args), r11->as_VMReg());
  map->set_callee_saved(VMRegImpl::stack2reg(r12_off + num_rt_args), r12->as_VMReg());
  map->set_callee_saved(VMRegImpl::stack2reg(r13_off + num_rt_args), r13->as_VMReg());
  map->set_callee_saved(VMRegImpl::stack2reg(r14_off + num_rt_args), r14->as_VMReg());
  map->set_callee_saved(VMRegImpl::stack2reg(r15_off + num_rt_args), r15->as_VMReg());

  // This is stupid but needed: the high halves of the 64-bit
  // registers have to be described as well.
  map->set_callee_saved(VMRegImpl::stack2reg(raxH_off + num_rt_args), rax->as_VMReg()->next());
  map->set_callee_saved(VMRegImpl::stack2reg(rcxH_off + num_rt_args), rcx->as_VMReg()->next());
  map->set_callee_saved(VMRegImpl::stack2reg(rdxH_off + num_rt_args), rdx->as_VMReg()->next());
  map->set_callee_saved(VMRegImpl::stack2reg(rbxH_off + num_rt_args), rbx->as_VMReg()->next());
  map->set_callee_saved(VMRegImpl::stack2reg(rsiH_off + num_rt_args), rsi->as_VMReg()->next());
  map->set_callee_saved(VMRegImpl::stack2reg(rdiH_off + num_rt_args), rdi->as_VMReg()->next());

  map->set_callee_saved(VMRegImpl::stack2reg(r8H_off + num_rt_args),  r8->as_VMReg()->next());
  map->set_callee_saved(VMRegImpl::stack2reg(r9H_off + num_rt_args),  r9->as_VMReg()->next());
  map->set_callee_saved(VMRegImpl::stack2reg(r10H_off + num_rt_args), r10->as_VMReg()->next());
  map->set_callee_saved(VMRegImpl::stack2reg(r11H_off + num_rt_args), r11->as_VMReg()->next());
  map->set_callee_saved(VMRegImpl::stack2reg(r12H_off + num_rt_args), r12->as_VMReg()->next());
  map->set_callee_saved(VMRegImpl::stack2reg(r13H_off + num_rt_args), r13->as_VMReg()->next());
  map->set_callee_saved(VMRegImpl::stack2reg(r14H_off + num_rt_args), r14->as_VMReg()->next());
  map->set_callee_saved(VMRegImpl::stack2reg(r15H_off + num_rt_args), r15->as_VMReg()->next());
#endif // _LP64

  int xmm_bypass_limit = FrameMap::nof_xmm_regs;
#ifdef _LP64
  if (UseAVX < 3) {
    xmm_bypass_limit = xmm_bypass_limit / 2;
  }
#endif

  if (save_fpu_registers) {
    if (UseSSE < 2) {
      int fpu_off = float_regs_as_doubles_off;
      for (int n = 0; n < FrameMap::nof_fpu_regs; n++) {
        VMReg fpu_name_0 = FrameMap::fpu_regname(n);
        map->set_callee_saved(VMRegImpl::stack2reg(fpu_off + num_rt_args), fpu_name_0);
        // %%% This is really a waste but we'll keep things as they were for now
        if (true) {
          map->set_callee_saved(VMRegImpl::stack2reg(fpu_off + 1 + num_rt_args), fpu_name_0->next());
        }
        fpu_off += 2;
      }
      assert(fpu_off == fpu_state_off, "incorrect number of fpu stack slots");
    }

    if (UseSSE >= 2) {
      int xmm_off = xmm_regs_as_doubles_off;
      for (int n = 0; n < FrameMap::nof_xmm_regs; n++) {
        if (n < xmm_bypass_limit) {
          VMReg xmm_name_0 = as_XMMRegister(n)->as_VMReg();
          map->set_callee_saved(VMRegImpl::stack2reg(xmm_off + num_rt_args), xmm_name_0);
          // %%% This is really a waste but we'll keep things as they were for now
          if (true) {
            map->set_callee_saved(VMRegImpl::stack2reg(xmm_off + 1 + num_rt_args), xmm_name_0->next());
          }
        }
        xmm_off += 2;
      }
      assert(xmm_off == float_regs_as_doubles_off, "incorrect number of xmm registers");

    } else if (UseSSE == 1) {
      int xmm_off = xmm_regs_as_doubles_off;
      for (int n = 0; n < FrameMap::nof_fpu_regs; n++) {
        VMReg xmm_name_0 = as_XMMRegister(n)->as_VMReg();
        map->set_callee_saved(VMRegImpl::stack2reg(xmm_off + num_rt_args), xmm_name_0);
        xmm_off += 2;
      }
      assert(xmm_off == float_regs_as_doubles_off, "incorrect number of xmm registers");
    }
  }

  return map;
}

#define __ this->

void C1_MacroAssembler::save_live_registers_no_oop_map(bool save_fpu_registers) {
  __ block_comment("save_live_registers");

  __ pusha();         // integer registers

  // assert(float_regs_as_doubles_off % 2 == 0, "misaligned offset");
  // assert(xmm_regs_as_doubles_off % 2 == 0, "misaligned offset");

  __ subptr(rsp, extra_space_offset * VMRegImpl::stack_slot_size);

#ifdef ASSERT
  __ movptr(Address(rsp, marker * VMRegImpl::stack_slot_size), (int32_t)0xfeedbeef);
#endif

  if (save_fpu_registers) {
    if (UseSSE < 2) {
      // save FPU stack
      __ fnsave(Address(rsp, fpu_state_off * VMRegImpl::stack_slot_size));
      __ fwait();

#ifdef ASSERT
      Label ok;
      __ cmpw(Address(rsp, fpu_state_off * VMRegImpl::stack_slot_size), StubRoutines::fpu_cntrl_wrd_std());
      __ jccb(Assembler::equal, ok);
      __ stop("corrupted control word detected");
      __ bind(ok);
#endif

      // Reset the control word to guard against exceptions being unmasked
      // since fstp_d can cause FPU stack underflow exceptions.  Write it
      // into the on stack copy and then reload that to make sure that the
      // current and future values are correct.
      __ movw(Address(rsp, fpu_state_off * VMRegImpl::stack_slot_size), StubRoutines::fpu_cntrl_wrd_std());
      __ frstor(Address(rsp, fpu_state_off * VMRegImpl::stack_slot_size));

      // Save the FPU registers in de-opt-able form
      int offset = 0;
      for (int n = 0; n < FrameMap::nof_fpu_regs; n++) {
        __ fstp_d(Address(rsp, float_regs_as_doubles_off * VMRegImpl::stack_slot_size + offset));
        offset += 8;
      }
    }

    if (UseSSE >= 2) {
      // save XMM registers
      // XMM registers can contain float or double values, but this is not known here,
      // so always save them as doubles.
      // note that float values are _not_ converted automatically, so for float values
      // the second word contains only garbage data.
      int xmm_bypass_limit = FrameMap::nof_xmm_regs;
      int offset = 0;
#ifdef _LP64
      if (UseAVX < 3) {
        xmm_bypass_limit = xmm_bypass_limit / 2;
      }
#endif
      for (int n = 0; n < xmm_bypass_limit; n++) {
        XMMRegister xmm_name = as_XMMRegister(n);
        __ movdbl(Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + offset), xmm_name);
        offset += 8;
      }
    } else if (UseSSE == 1) {
      // save XMM registers as float because double is not supported without SSE2 (num MMX == num fpu)
      int offset = 0;
      for (int n = 0; n < FrameMap::nof_fpu_regs; n++) {
        XMMRegister xmm_name = as_XMMRegister(n);
        __ movflt(Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + offset), xmm_name);
        offset += 8;
      }
    }
  }

  // FPU stack must be empty now
  __ verify_FPU(0, "save_live_registers");
}

#undef __
#define __ sasm->

static void restore_fpu(C1_MacroAssembler* sasm, bool restore_fpu_registers) {
  if (restore_fpu_registers) {
    if (UseSSE >= 2) {
      // restore XMM registers
      int xmm_bypass_limit = FrameMap::nof_xmm_regs;
#ifdef _LP64
      if (UseAVX < 3) {
        xmm_bypass_limit = xmm_bypass_limit / 2;
      }
#endif
      int offset = 0;
      for (int n = 0; n < xmm_bypass_limit; n++) {
        XMMRegister xmm_name = as_XMMRegister(n);
        __ movdbl(xmm_name, Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + offset));
        offset += 8;
      }
    } else if (UseSSE == 1) {
      // restore XMM registers (num MMX == num fpu)
      int offset = 0;
      for (int n = 0; n < FrameMap::nof_fpu_regs; n++) {
        XMMRegister xmm_name = as_XMMRegister(n);
        __ movflt(xmm_name, Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + offset));
        offset += 8;
      }
    }

    if (UseSSE < 2) {
      __ frstor(Address(rsp, fpu_state_off * VMRegImpl::stack_slot_size));
    } else {
      // check that FPU stack is really empty
      __ verify_FPU(0, "restore_live_registers");
    }

  } else {
    // check that FPU stack is really empty
    __ verify_FPU(0, "restore_live_registers");
  }

#ifdef ASSERT
  {
    Label ok;
    __ cmpptr(Address(rsp, marker * VMRegImpl::stack_slot_size), (int32_t)0xfeedbeef);
    __ jcc(Assembler::equal, ok);
    __ stop("bad offsets in frame");
    __ bind(ok);
  }
#endif // ASSERT

  __ addptr(rsp, extra_space_offset * VMRegImpl::stack_slot_size);
}

#undef __
#define __ this->

void C1_MacroAssembler::restore_live_registers(bool restore_fpu_registers) {
  __ block_comment("restore_live_registers");

  restore_fpu(this, restore_fpu_registers);
  __ popa();
}


void C1_MacroAssembler::restore_live_registers_except_rax(bool restore_fpu_registers) {
  __ block_comment("restore_live_registers_except_rax");

  restore_fpu(this, restore_fpu_registers);

#ifdef _LP64
  __ movptr(r15, Address(rsp, 0));
  __ movptr(r14, Address(rsp, wordSize));
  __ movptr(r13, Address(rsp, 2 * wordSize));
  __ movptr(r12, Address(rsp, 3 * wordSize));
  __ movptr(r11, Address(rsp, 4 * wordSize));
  __ movptr(r10, Address(rsp, 5 * wordSize));
  __ movptr(r9,  Address(rsp, 6 * wordSize));
  __ movptr(r8,  Address(rsp, 7 * wordSize));
  __ movptr(rdi, Address(rsp, 8 * wordSize));
  __ movptr(rsi, Address(rsp, 9 * wordSize));
  __ movptr(rbp, Address(rsp, 10 * wordSize));
  // skip rsp
  __ movptr(rbx, Address(rsp, 12 * wordSize));
  __ movptr(rdx, Address(rsp, 13 * wordSize));
  __ movptr(rcx, Address(rsp, 14 * wordSize));

  __ addptr(rsp, 16 * wordSize);
#else

  __ pop(rdi);
  __ pop(rsi);
  __ pop(rbp);
  __ pop(rbx); // skip this value
  __ pop(rbx);
  __ pop(rdx);
  __ pop(rcx);
  __ addptr(rsp, BytesPerWord);
#endif // _LP64
}

#undef __
#define __ sasm->

static OopMap* save_live_registers(StubAssembler* sasm, int num_rt_args,
                                   bool save_fpu_registers = true) {
  __ save_live_registers_no_oop_map(save_fpu_registers);
  return generate_oop_map(sasm, num_rt_args, save_fpu_registers);
}

static void restore_live_registers(StubAssembler* sasm, bool restore_fpu_registers = true) {
  __ restore_live_registers(restore_fpu_registers);
}

static void restore_live_registers_except_rax(StubAssembler* sasm, bool restore_fpu_registers = true) {
  sasm->restore_live_registers_except_rax(restore_fpu_registers);
}
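// Illustrative pairing (not part of the original file): the stubs in
// generate_code_for below use these helpers in a fixed pattern, e.g.
//
//   OopMap* map = save_live_registers(sasm, num_rt_args);
//   int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, some_runtime_entry), ...);
//   oop_maps->add_gc_map(call_offset, map);
//   restore_live_registers(sasm);
//
// where some_runtime_entry stands in for the stub's actual target.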

void Runtime1::initialize_pd() {
  // nothing to do
}


// Target: the entry point of the method that creates and posts the exception oop.
// has_argument: true if the exception needs arguments (passed on the stack because
//               registers must be preserved).
OopMapSet* Runtime1::generate_exception_throw(StubAssembler* sasm, address target, bool has_argument) {
  // Preserve all registers.
  int num_rt_args = has_argument ? (2 + 1) : 1;
  OopMap* oop_map = save_live_registers(sasm, num_rt_args);

  // Now all registers are saved and can be used freely.
  // Verify that no old value is used accidentally.
  __ invalidate_registers(true, true, true, true, true, true);

  // Registers used by this stub.
  const Register temp_reg = rbx;

  // Load arguments for exception that are passed as arguments into the stub.
  if (has_argument) {
#ifdef _LP64
    __ movptr(c_rarg1, Address(rbp, 2*BytesPerWord));
    __ movptr(c_rarg2, Address(rbp, 3*BytesPerWord));
#else
    __ movptr(temp_reg, Address(rbp, 3*BytesPerWord));
    __ push(temp_reg);
    __ movptr(temp_reg, Address(rbp, 2*BytesPerWord));
    __ push(temp_reg);
#endif // _LP64
  }
  int call_offset = __ call_RT(noreg, noreg, target, num_rt_args - 1);

  OopMapSet* oop_maps = new OopMapSet();
  oop_maps->add_gc_map(call_offset, oop_map);

  __ stop("should not reach here");

  return oop_maps;
}
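// Illustrative call site (taken from the throw_*_id stubs in generate_code_for
// below), shown here to make the target/has_argument contract concrete:
//
//   oop_maps = generate_exception_throw(
//       sasm, CAST_FROM_FN_PTR(address, throw_range_check_exception), true);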


OopMapSet* Runtime1::generate_handle_exception(StubID id, StubAssembler *sasm) {
  __ block_comment("generate_handle_exception");

  // incoming parameters
  const Register exception_oop = rax;
  const Register exception_pc  = rdx;
  // other registers used in this stub
  const Register thread = NOT_LP64(rdi) LP64_ONLY(r15_thread);

  // Save registers, if required.
  OopMapSet* oop_maps = new OopMapSet();
  OopMap* oop_map = NULL;
  switch (id) {
  case forward_exception_id:
    // We're handling an exception in the context of a compiled frame.
    // The registers have been saved in the standard places.  Perform
    // an exception lookup in the caller and dispatch to the handler
    // if found.  Otherwise unwind and dispatch to the callers
    // exception handler.
    oop_map = generate_oop_map(sasm, 1 /*thread*/);

    // load and clear pending exception oop into RAX
    __ movptr(exception_oop, Address(thread, Thread::pending_exception_offset()));
    __ movptr(Address(thread, Thread::pending_exception_offset()), NULL_WORD);

    // load issuing PC (the return address for this stub) into rdx
    __ movptr(exception_pc, Address(rbp, 1*BytesPerWord));

    // make sure that the vm_results are cleared (may be unnecessary)
    __ movptr(Address(thread, JavaThread::vm_result_offset()),   NULL_WORD);
    __ movptr(Address(thread, JavaThread::vm_result_2_offset()), NULL_WORD);
    break;
  case handle_exception_nofpu_id:
  case handle_exception_id:
    // At this point all registers MAY be live.
    oop_map = save_live_registers(sasm, 1 /*thread*/, id != handle_exception_nofpu_id);
    break;
  case handle_exception_from_callee_id: {
    // At this point all registers except exception oop (RAX) and
    // exception pc (RDX) are dead.
    const int frame_size = 2 /*BP, return address*/ NOT_LP64(+ 1 /*thread*/) WIN64_ONLY(+ frame::arg_reg_save_area_bytes / BytesPerWord);
    oop_map = new OopMap(frame_size * VMRegImpl::slots_per_word, 0);
    sasm->set_frame_size(frame_size);
    WIN64_ONLY(__ subq(rsp, frame::arg_reg_save_area_bytes));
    break;
  }
  default:  ShouldNotReachHere();
  }

#ifdef TIERED
  // C2 can leave the fpu stack dirty
  if (UseSSE < 2) {
    __ empty_FPU_stack();
  }
#endif // TIERED

  // verify that only rax and rdx are valid at this time
  __ invalidate_registers(false, true, true, false, true, true);
  // verify that rax contains a valid exception
  __ verify_not_null_oop(exception_oop);

  // load address of JavaThread object for thread-local data
  NOT_LP64(__ get_thread(thread);)

#ifdef ASSERT
  // check that fields in JavaThread for exception oop and issuing pc are
  // empty before writing to them
  Label oop_empty;
  __ cmpptr(Address(thread, JavaThread::exception_oop_offset()), (int32_t) NULL_WORD);
  __ jcc(Assembler::equal, oop_empty);
  __ stop("exception oop already set");
  __ bind(oop_empty);

  Label pc_empty;
  __ cmpptr(Address(thread, JavaThread::exception_pc_offset()), 0);
  __ jcc(Assembler::equal, pc_empty);
  __ stop("exception pc already set");
  __ bind(pc_empty);
#endif

  // save exception oop and issuing pc into JavaThread
  // (exception handler will load it from here)
  __ movptr(Address(thread, JavaThread::exception_oop_offset()), exception_oop);
  __ movptr(Address(thread, JavaThread::exception_pc_offset()),  exception_pc);

  // patch throwing pc into return address (has bci & oop map)
  __ movptr(Address(rbp, 1*BytesPerWord), exception_pc);

  // compute the exception handler.
  // the exception oop and the throwing pc are read from the fields in JavaThread
  int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, exception_handler_for_pc));
  oop_maps->add_gc_map(call_offset, oop_map);

  // rax: handler address
  //      will be the deopt blob if the nmethod was deoptimized while we looked up
  //      the handler, regardless of whether a handler existed in the nmethod.

  // only rax is valid at this time; all other registers have been destroyed by the runtime call
  __ invalidate_registers(false, true, true, true, true, true);

  // patch the return address, this stub will directly return to the exception handler
  __ movptr(Address(rbp, 1*BytesPerWord), rax);

  switch (id) {
  case forward_exception_id:
  case handle_exception_nofpu_id:
  case handle_exception_id:
    // Restore the registers that were saved at the beginning.
    restore_live_registers(sasm, id != handle_exception_nofpu_id);
    break;
  case handle_exception_from_callee_id:
    // WIN64_ONLY: No need to add frame::arg_reg_save_area_bytes to SP
    // since we do a leave anyway.

    // Pop the return address.
    __ leave();
    __ pop(rcx);
    __ jmp(rcx);  // jump to exception handler
    break;
  default:  ShouldNotReachHere();
  }

  return oop_maps;
}


void Runtime1::generate_unwind_exception(StubAssembler *sasm) {
  // incoming parameters
  const Register exception_oop = rax;
  // callee-saved copy of exception_oop during runtime call
  const Register exception_oop_callee_saved = NOT_LP64(rsi) LP64_ONLY(r14);
  // other registers used in this stub
  const Register exception_pc = rdx;
  const Register handler_addr = rbx;
  const Register thread = NOT_LP64(rdi) LP64_ONLY(r15_thread);

  // verify that only rax is valid at this time
  __ invalidate_registers(false, true, true, true, true, true);

#ifdef ASSERT
  // check that fields in JavaThread for exception oop and issuing pc are empty
  NOT_LP64(__ get_thread(thread);)
  Label oop_empty;
  __ cmpptr(Address(thread, JavaThread::exception_oop_offset()), 0);
  __ jcc(Assembler::equal, oop_empty);
  __ stop("exception oop must be empty");
  __ bind(oop_empty);

  Label pc_empty;
  __ cmpptr(Address(thread, JavaThread::exception_pc_offset()), 0);
  __ jcc(Assembler::equal, pc_empty);
  __ stop("exception pc must be empty");
  __ bind(pc_empty);
#endif

  // clear the FPU stack in case any FPU results are left behind
  __ empty_FPU_stack();

  // save exception_oop in callee-saved register to preserve it during runtime calls
  __ verify_not_null_oop(exception_oop);
  __ movptr(exception_oop_callee_saved, exception_oop);

  NOT_LP64(__ get_thread(thread);)
  // Get return address (is on top of stack after leave).
  __ movptr(exception_pc, Address(rsp, 0));

  // search the exception handler address of the caller (using the return address)
  __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::exception_handler_for_return_address), thread, exception_pc);
  // rax: exception handler address of the caller

  // Only RAX and RSI are valid at this time, all other registers have been destroyed by the call.
  __ invalidate_registers(false, true, true, true, false, true);

  // move result of call into correct register
  __ movptr(handler_addr, rax);

  // Restore exception oop to RAX (required convention of exception handler).
  __ movptr(exception_oop, exception_oop_callee_saved);

  // verify that there is really a valid exception in rax
  __ verify_not_null_oop(exception_oop);

  // get throwing pc (= return address).
  // rdx has been destroyed by the call, so it must be set again
  // the pop is also necessary to simulate the effect of a ret(0)
  __ pop(exception_pc);

  // continue at exception handler (return address removed)
  // note: do *not* remove arguments when unwinding the
  //       activation since the caller assumes having
  //       all arguments on the stack when entering the
  //       runtime to determine the exception handler
  //       (GC happens at call site with arguments!)
  // rax: exception oop
  // rdx: throwing pc
  // rbx: exception handler
  __ jmp(handler_addr);
}


OopMapSet* Runtime1::generate_patching(StubAssembler* sasm, address target) {
  // use the maximum number of runtime-arguments here because it is difficult to
  // distinguish each RT-Call.
  // Note: This number also affects the RT-Call in generate_handle_exception because
  //       the oop-map is shared for all calls.
  const int num_rt_args = 2;  // thread + dummy

  DeoptimizationBlob* deopt_blob = SharedRuntime::deopt_blob();
  assert(deopt_blob != NULL, "deoptimization blob must have been created");

  OopMap* oop_map = save_live_registers(sasm, num_rt_args);

#ifdef _LP64
  const Register thread = r15_thread;
  // No need to worry about dummy
  __ mov(c_rarg0, thread);
#else
  __ push(rax); // push dummy

  const Register thread = rdi; // is callee-saved register (Visual C++ calling conventions)
  // push java thread (becomes first argument of C function)
  __ get_thread(thread);
  __ push(thread);
#endif // _LP64
  __ set_last_Java_frame(thread, noreg, rbp, NULL);
  // do the call
  __ call(RuntimeAddress(target));
  OopMapSet* oop_maps = new OopMapSet();
  oop_maps->add_gc_map(__ offset(), oop_map);
  // verify callee-saved register
#ifdef ASSERT
  guarantee(thread != rax, "change this code");
  __ push(rax);
  { Label L;
    __ get_thread(rax);
    __ cmpptr(thread, rax);
    __ jcc(Assembler::equal, L);
    __ stop("StubAssembler::call_RT: rdi/r15 not callee saved?");
    __ bind(L);
  }
  __ pop(rax);
#endif
  __ reset_last_Java_frame(thread, true);
#ifndef _LP64
  __ pop(rcx); // discard thread arg
  __ pop(rcx); // discard dummy
#endif // _LP64

  // check for pending exceptions
  { Label L;
    __ cmpptr(Address(thread, Thread::pending_exception_offset()), (int32_t)NULL_WORD);
    __ jcc(Assembler::equal, L);
    // exception pending => remove activation and forward to exception handler

    __ testptr(rax, rax);                                   // have we deoptimized?
    __ jump_cc(Assembler::equal,
               RuntimeAddress(Runtime1::entry_for(Runtime1::forward_exception_id)));

    // the deopt blob expects exceptions in the special fields of
    // JavaThread, so copy and clear pending exception.

    // load and clear pending exception
    __ movptr(rax, Address(thread, Thread::pending_exception_offset()));
    __ movptr(Address(thread, Thread::pending_exception_offset()), NULL_WORD);

    // check that there is really a valid exception
    __ verify_not_null_oop(rax);

    // load throwing pc: this is the return address of the stub
    __ movptr(rdx, Address(rsp, return_off * VMRegImpl::stack_slot_size));

#ifdef ASSERT
    // check that fields in JavaThread for exception oop and issuing pc are empty
    Label oop_empty;
    __ cmpptr(Address(thread, JavaThread::exception_oop_offset()), (int32_t)NULL_WORD);
    __ jcc(Assembler::equal, oop_empty);
    __ stop("exception oop must be empty");
    __ bind(oop_empty);

    Label pc_empty;
    __ cmpptr(Address(thread, JavaThread::exception_pc_offset()), (int32_t)NULL_WORD);
    __ jcc(Assembler::equal, pc_empty);
    __ stop("exception pc must be empty");
    __ bind(pc_empty);
#endif

    // store exception oop and throwing pc to JavaThread
    __ movptr(Address(thread, JavaThread::exception_oop_offset()), rax);
    __ movptr(Address(thread, JavaThread::exception_pc_offset()), rdx);

    restore_live_registers(sasm);

    __ leave();
    __ addptr(rsp, BytesPerWord);  // remove return address from stack

    // Forward the exception directly to the deopt blob.  We can blow no
    // registers and must leave the throwing pc on the stack.  A patch may
    // have values live in registers, so we use the entry point that takes
    // the exception in TLS.
    __ jump(RuntimeAddress(deopt_blob->unpack_with_exception_in_tls()));

    __ bind(L);
  }


  // Runtime will return true if the nmethod has been deoptimized during
  // the patching process.  In that case we must do a deopt reexecute instead.

  Label cont;

  __ testptr(rax, rax);                                 // have we deoptimized?
  __ jcc(Assembler::equal, cont);                       // no

  // Will reexecute.  The proper return address is already on the stack; we just
  // restore registers, pop all of our frame but the return address, and jump to
  // the deopt blob.
  restore_live_registers(sasm);
  __ leave();
  __ jump(RuntimeAddress(deopt_blob->unpack_with_reexecution()));

  __ bind(cont);
  restore_live_registers(sasm);
  __ leave();
  __ ret(0);

  return oop_maps;
}
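// Illustrative call site (taken from generate_code_for below): all four
// patching stubs share this helper, e.g.
//
//   oop_maps = generate_patching(
//       sasm, CAST_FROM_FN_PTR(address, access_field_patching));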


OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {

  // for better readability
  const bool must_gc_arguments = true;
  const bool dont_gc_arguments = false;

  // default value; overwritten for some optimized stubs that are called from methods that do not use the fpu
  bool save_fpu_registers = true;

  // stub code & info for the different stubs
  OopMapSet* oop_maps = NULL;
  switch (id) {
    case forward_exception_id:
      {
        oop_maps = generate_handle_exception(id, sasm);
        __ leave();
        __ ret(0);
      }
      break;

    case new_instance_id:
    case fast_new_instance_id:
    case fast_new_instance_init_check_id:
      {
        Register klass = rdx; // Incoming
        Register obj   = rax; // Result

        if (id == new_instance_id) {
          __ set_info("new_instance", dont_gc_arguments);
        } else if (id == fast_new_instance_id) {
          __ set_info("fast new_instance", dont_gc_arguments);
        } else {
          assert(id == fast_new_instance_init_check_id, "bad StubID");
          __ set_info("fast new_instance init check", dont_gc_arguments);
        }

        // If TLAB is disabled, see if there is support for inlining contiguous
        // allocations.
        // Otherwise, just go to the slow path.
        if ((id == fast_new_instance_id || id == fast_new_instance_init_check_id) && !UseTLAB
            && Universe::heap()->supports_inline_contig_alloc()) {
          Label slow_path;
          Register obj_size = rcx;
          Register t1       = rbx;
          Register t2       = rsi;
          assert_different_registers(klass, obj, obj_size, t1, t2);

          __ push(rdi);
          __ push(rbx);

          if (id == fast_new_instance_init_check_id) {
            // make sure the klass is initialized
            __ cmpb(Address(klass, InstanceKlass::init_state_offset()), InstanceKlass::fully_initialized);
            __ jcc(Assembler::notEqual, slow_path);
          }

#ifdef ASSERT
          // assert object can be fast path allocated
          {
            Label ok, not_ok;
            __ movl(obj_size, Address(klass, Klass::layout_helper_offset()));
            __ cmpl(obj_size, 0);  // make sure it's an instance (LH > 0)
            __ jcc(Assembler::lessEqual, not_ok);
            __ testl(obj_size, Klass::_lh_instance_slow_path_bit);
            __ jcc(Assembler::zero, ok);
            __ bind(not_ok);
            __ stop("assert(can be fast path allocated)");
            __ should_not_reach_here();
            __ bind(ok);
          }
#endif // ASSERT

          const Register thread = NOT_LP64(rdi) LP64_ONLY(r15_thread);
          NOT_LP64(__ get_thread(thread));

          // get the instance size (size is positive so movl is fine for 64bit)
          __ movl(obj_size, Address(klass, Klass::layout_helper_offset()));

          __ eden_allocate(thread, obj, obj_size, 0, t1, slow_path);

          __ initialize_object(obj, klass, obj_size, 0, t1, t2, /* is_tlab_allocated */ false);
          __ verify_oop(obj);
          __ pop(rbx);
          __ pop(rdi);
          __ ret(0);

          __ bind(slow_path);
          __ pop(rbx);
          __ pop(rdi);
        }

        __ enter();
        OopMap* map = save_live_registers(sasm, 2);
        int call_offset = __ call_RT(obj, noreg, CAST_FROM_FN_PTR(address, new_instance), klass);
        oop_maps = new OopMapSet();
        oop_maps->add_gc_map(call_offset, map);
        restore_live_registers_except_rax(sasm);
        __ verify_oop(obj);
        __ leave();
        __ ret(0);

        // rax,: new instance
      }

      break;

    case counter_overflow_id:
      {
        Register bci = rax, method = rbx;
        __ enter();
        OopMap* map = save_live_registers(sasm, 3);
        // Retrieve bci
        __ movl(bci, Address(rbp, 2*BytesPerWord));
        // And a pointer to the Method*
        __ movptr(method, Address(rbp, 3*BytesPerWord));
        int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, counter_overflow), bci, method);
        oop_maps = new OopMapSet();
        oop_maps->add_gc_map(call_offset, map);
        restore_live_registers(sasm);
        __ leave();
        __ ret(0);
      }
      break;

    case new_type_array_id:
    case new_object_array_id:
    case new_value_array_id:
      {
        Register length = rbx; // Incoming
        Register klass  = rdx; // Incoming
        Register obj    = rax; // Result

        if (id == new_type_array_id) {
          __ set_info("new_type_array", dont_gc_arguments);
        } else if (id == new_object_array_id) {
          __ set_info("new_object_array", dont_gc_arguments);
        } else {
          __ set_info("new_value_array", dont_gc_arguments);
        }

#ifdef ASSERT
        // assert object type is really an array of the proper kind
        {
          Label ok;
          Register t0 = obj;
          __ movl(t0, Address(klass, Klass::layout_helper_offset()));
          __ sarl(t0, Klass::_lh_array_tag_shift);
          switch (id) {
            case new_type_array_id:   __ cmpl(t0, Klass::_lh_array_tag_type_value); break;
            case new_object_array_id: __ cmpl(t0, Klass::_lh_array_tag_obj_value);  break;
            case new_value_array_id:  __ cmpl(t0, Klass::_lh_array_tag_vt_value);   break;
            default: ShouldNotReachHere();
          }
          __ jcc(Assembler::equal, ok);
          __ stop("assert(is an array klass)");
          __ should_not_reach_here();
          __ bind(ok);
        }
#endif // ASSERT

        // If TLAB is disabled, see if there is support for inlining contiguous
        // allocations.
        // Otherwise, just go to the slow path.
        if (!UseTLAB && Universe::heap()->supports_inline_contig_alloc()) {
          Register arr_size = rsi;
          Register t1       = rcx;  // must be rcx for use as shift count
          Register t2       = rdi;
          Label slow_path;

          // get the allocation size: round_up(hdr + length << (layout_helper & 0x1F))
          // since size is positive movl does right thing on 64bit
          __ movl(t1, Address(klass, Klass::layout_helper_offset()));
          // since size is positive movl does right thing on 64bit
          __ movl(arr_size, length);
          assert(t1 == rcx, "fixed register usage");
          __ shlptr(arr_size /* by t1=rcx, mod 32 */);
          __ shrptr(t1, Klass::_lh_header_size_shift);
          __ andptr(t1, Klass::_lh_header_size_mask);
          __ addptr(arr_size, t1);
          __ addptr(arr_size, MinObjAlignmentInBytesMask); // align up
          __ andptr(arr_size, ~MinObjAlignmentInBytesMask);

          // Using t2 for non 64-bit.
          const Register thread = NOT_LP64(t2) LP64_ONLY(r15_thread);
          NOT_LP64(__ get_thread(thread));
          __ eden_allocate(thread, obj, arr_size, 0, t1, slow_path);  // preserves arr_size

          __ initialize_header(obj, klass, length, t1, t2);
          __ movb(t1, Address(klass, in_bytes(Klass::layout_helper_offset()) + (Klass::_lh_header_size_shift / BitsPerByte)));
          assert(Klass::_lh_header_size_shift % BitsPerByte == 0, "bytewise");
          assert(Klass::_lh_header_size_mask <= 0xFF, "bytewise");
          __ andptr(t1, Klass::_lh_header_size_mask);
          __ subptr(arr_size, t1);  // body length
          __ addptr(t1, obj);       // body start
          __ initialize_body(t1, arr_size, 0, t2);
          __ verify_oop(obj);
          __ ret(0);

          __ bind(slow_path);
        }

        __ enter();
        OopMap* map = save_live_registers(sasm, 3);
        int call_offset;
        if (id == new_type_array_id) {
          call_offset = __ call_RT(obj, noreg, CAST_FROM_FN_PTR(address, new_type_array), klass, length);
        } else {
          // Runtime1::new_object_array handles both object and value arrays.
          // new_value_array_id is needed only for the ASSERT block above.
          call_offset = __ call_RT(obj, noreg, CAST_FROM_FN_PTR(address, new_object_array), klass, length);
        }

        oop_maps = new OopMapSet();
        oop_maps->add_gc_map(call_offset, map);
        restore_live_registers_except_rax(sasm);

        __ verify_oop(obj);
        __ leave();
        __ ret(0);

        // rax,: new array
      }
      break;

    case new_multi_array_id:
      { StubFrame f(sasm, "new_multi_array", dont_gc_arguments);
        // rax,: klass
        // rbx,: rank
        // rcx: address of 1st dimension
        OopMap* map = save_live_registers(sasm, 4);
        int call_offset = __ call_RT(rax, noreg, CAST_FROM_FN_PTR(address, new_multi_array), rax, rbx, rcx);

        oop_maps = new OopMapSet();
        oop_maps->add_gc_map(call_offset, map);
        restore_live_registers_except_rax(sasm);

        // rax,: new multi array
        __ verify_oop(rax);
      }
      break;

    case load_flattened_array_id:
      {
        StubFrame f(sasm, "load_flattened_array", dont_gc_arguments);
        OopMap* map = save_live_registers(sasm, 3);

        // Called with store_parameter and not C abi

        f.load_argument(1, rax); // rax,: array
        f.load_argument(0, rbx); // rbx,: index
        int call_offset = __ call_RT(rax, noreg, CAST_FROM_FN_PTR(address, load_flattened_array), rax, rbx);

        oop_maps = new OopMapSet();
        oop_maps->add_gc_map(call_offset, map);
        restore_live_registers_except_rax(sasm);

        // rax,: loaded element at array[index]
        __ verify_oop(rax);
      }
      break;

    case store_flattened_array_id:
      {
        StubFrame f(sasm, "store_flattened_array", dont_gc_arguments);
        OopMap* map = save_live_registers(sasm, 4);

        // Called with store_parameter and not C abi

        f.load_argument(2, rax); // rax,: array
        f.load_argument(1, rbx); // rbx,: index
        f.load_argument(0, rcx); // rcx,: value
        int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, store_flattened_array), rax, rbx, rcx);

        oop_maps = new OopMapSet();
        oop_maps->add_gc_map(call_offset, map);
        restore_live_registers_except_rax(sasm);
      }
      break;

    case register_finalizer_id:
      {
        __ set_info("register_finalizer", dont_gc_arguments);

        // This is called via call_runtime, so the arguments
        // will be placed in C ABI locations.

#ifdef _LP64
        __ verify_oop(c_rarg0);
        __ mov(rax, c_rarg0);
#else
        // The object is passed on the stack and we haven't pushed a
        // frame yet, so it's one word away from the top of the stack.
        __ movptr(rax, Address(rsp, 1 * BytesPerWord));
        __ verify_oop(rax);
#endif // _LP64

        // load the klass and check the has finalizer flag
        Label register_finalizer;
        Register t = rsi;
        __ load_klass(t, rax);
        __ movl(t, Address(t, Klass::access_flags_offset()));
        __ testl(t, JVM_ACC_HAS_FINALIZER);
        __ jcc(Assembler::notZero, register_finalizer);
        __ ret(0);

        __ bind(register_finalizer);
        __ enter();
        OopMap* oop_map = save_live_registers(sasm, 2 /*num_rt_args */);
        int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, SharedRuntime::register_finalizer), rax);
        oop_maps = new OopMapSet();
        oop_maps->add_gc_map(call_offset, oop_map);

        // Now restore all the live registers
        restore_live_registers(sasm);

        __ leave();
        __ ret(0);
      }
      break;

    case throw_range_check_failed_id:
      { StubFrame f(sasm, "range_check_failed", dont_gc_arguments);
        oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_range_check_exception), true);
      }
      break;

    case throw_index_exception_id:
      { StubFrame f(sasm, "index_range_check_failed", dont_gc_arguments);
        oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_index_exception), true);
      }
      break;

    case throw_div0_exception_id:
      { StubFrame f(sasm, "throw_div0_exception", dont_gc_arguments);
        oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_div0_exception), false);
      }
      break;

    case throw_null_pointer_exception_id:
      { StubFrame f(sasm, "throw_null_pointer_exception", dont_gc_arguments);
        oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_null_pointer_exception), false);
      }
      break;

    case handle_exception_nofpu_id:
    case handle_exception_id:
      { StubFrame f(sasm, "handle_exception", dont_gc_arguments);
        oop_maps = generate_handle_exception(id, sasm);
      }
      break;

    case handle_exception_from_callee_id:
      { StubFrame f(sasm, "handle_exception_from_callee", dont_gc_arguments);
        oop_maps = generate_handle_exception(id, sasm);
      }
      break;

    case unwind_exception_id:
      { __ set_info("unwind_exception", dont_gc_arguments);
        // note: no stubframe since we are about to leave the current
        //       activation and we are calling a leaf VM function only.
        generate_unwind_exception(sasm);
      }
      break;

    case throw_array_store_exception_id:
      { StubFrame f(sasm, "throw_array_store_exception", dont_gc_arguments);
        // tos + 0: link
        //     + 1: return address
        oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_array_store_exception), true);
      }
      break;

    case throw_class_cast_exception_id:
      { StubFrame f(sasm, "throw_class_cast_exception", dont_gc_arguments);
        oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_class_cast_exception), true);
      }
      break;

    case throw_incompatible_class_change_error_id:
      { StubFrame f(sasm, "throw_incompatible_class_change_error", dont_gc_arguments);
        oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_incompatible_class_change_error), false);
      }
      break;

    case throw_illegal_monitor_state_exception_id:
      { StubFrame f(sasm, "throw_illegal_monitor_state_exception", dont_gc_arguments);
        oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_illegal_monitor_state_exception), false);
      }
      break;

    case slow_subtype_check_id:
      {
        // Typical calling sequence:
        // __ push(klass_RInfo);  // object klass or other subclass
        // __ push(sup_k_RInfo);  // array element klass or other superclass
        // __ call(slow_subtype_check);
        // Note that the subclass is pushed first, and is therefore deepest.
        // Previous versions of this code reversed the names 'sub' and 'super'.
        // This was operationally harmless but made the code unreadable.
        enum layout {
          rax_off, SLOT2(raxH_off)
          rcx_off, SLOT2(rcxH_off)
          rsi_off, SLOT2(rsiH_off)
          rdi_off, SLOT2(rdiH_off)
          // saved_rbp_off, SLOT2(saved_rbpH_off)
          return_off, SLOT2(returnH_off)
          sup_k_off, SLOT2(sup_kH_off)
          klass_off, SLOT2(superH_off)
          framesize,
          result_off = klass_off  // deepest argument is also the return value
        };

        __ set_info("slow_subtype_check", dont_gc_arguments);
        __ push(rdi);
        __ push(rsi);
        __ push(rcx);
        __ push(rax);

        // This is called by pushing args and not with C abi
        __ movptr(rsi, Address(rsp, (klass_off) * VMRegImpl::stack_slot_size)); // subclass
        __ movptr(rax, Address(rsp, (sup_k_off) * VMRegImpl::stack_slot_size)); // superclass

        Label miss;
        __ check_klass_subtype_slow_path(rsi, rax, rcx, rdi, NULL, &miss);

        // fallthrough on success:
        __ movptr(Address(rsp, (result_off) * VMRegImpl::stack_slot_size), 1); // result
        __ pop(rax);
        __ pop(rcx);
        __ pop(rsi);
        __ pop(rdi);
        __ ret(0);

        __ bind(miss);
        __ movptr(Address(rsp, (result_off) * VMRegImpl::stack_slot_size), NULL_WORD); // result
        __ pop(rax);
        __ pop(rcx);
        __ pop(rsi);
        __ pop(rdi);
        __ ret(0);
      }
      break;

    case monitorenter_nofpu_id:
      save_fpu_registers = false;
      // fall through
    case monitorenter_id:
      {
        StubFrame f(sasm, "monitorenter", dont_gc_arguments);
        OopMap* map = save_live_registers(sasm, 3, save_fpu_registers);

        // Called with store_parameter and not C abi

        f.load_argument(1, rax); // rax,: object
        f.load_argument(0, rbx); // rbx,: lock address

        int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, monitorenter), rax, rbx);

        oop_maps = new OopMapSet();
        oop_maps->add_gc_map(call_offset, map);
        restore_live_registers(sasm, save_fpu_registers);
      }
      break;

    case monitorexit_nofpu_id:
      save_fpu_registers = false;
      // fall through
    case monitorexit_id:
      {
        StubFrame f(sasm, "monitorexit", dont_gc_arguments);
        OopMap* map = save_live_registers(sasm, 2, save_fpu_registers);

        // Called with store_parameter and not C abi

        f.load_argument(0, rax); // rax,: lock address

        // note: really a leaf routine but must set up last java sp
        //       => use call_RT for now (speed can be improved by
        //       doing last java sp setup manually)
        int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, monitorexit), rax);

        oop_maps = new OopMapSet();
        oop_maps->add_gc_map(call_offset, map);
        restore_live_registers(sasm, save_fpu_registers);
      }
      break;

    case deoptimize_id:
      {
        StubFrame f(sasm, "deoptimize", dont_gc_arguments);
        const int num_rt_args = 2;  // thread, trap_request
        OopMap* oop_map = save_live_registers(sasm, num_rt_args);
        f.load_argument(0, rax);
        int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, deoptimize), rax);
        oop_maps = new OopMapSet();
        oop_maps->add_gc_map(call_offset, oop_map);
        restore_live_registers(sasm);
        DeoptimizationBlob* deopt_blob = SharedRuntime::deopt_blob();
        assert(deopt_blob != NULL, "deoptimization blob must have been created");
        __ leave();
        __ jump(RuntimeAddress(deopt_blob->unpack_with_reexecution()));
      }
      break;

    case access_field_patching_id:
      { StubFrame f(sasm, "access_field_patching", dont_gc_arguments);
        // we should set up register map
        oop_maps = generate_patching(sasm, CAST_FROM_FN_PTR(address, access_field_patching));
      }
      break;

    case load_klass_patching_id:
      { StubFrame f(sasm, "load_klass_patching", dont_gc_arguments);
        // we should set up register map
        oop_maps = generate_patching(sasm, CAST_FROM_FN_PTR(address, move_klass_patching));
      }
      break;

    case load_mirror_patching_id:
      { StubFrame f(sasm, "load_mirror_patching", dont_gc_arguments);
        // we should set up register map
        oop_maps = generate_patching(sasm, CAST_FROM_FN_PTR(address, move_mirror_patching));
      }
      break;

    case load_appendix_patching_id:
      { StubFrame f(sasm, "load_appendix_patching", dont_gc_arguments);
        // we should set up register map
        oop_maps = generate_patching(sasm, CAST_FROM_FN_PTR(address, move_appendix_patching));
      }
      break;

    case dtrace_object_alloc_id:
      { // rax,: object
        StubFrame f(sasm, "dtrace_object_alloc", dont_gc_arguments);
        // we can't gc here so skip the oopmap but make sure that all
        // the live registers get saved.
        save_live_registers(sasm, 1);

        __ NOT_LP64(push(rax)) LP64_ONLY(mov(c_rarg0, rax));
        __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_object_alloc)));
        NOT_LP64(__ pop(rax));

        restore_live_registers(sasm);
      }
      break;

    case fpu2long_stub_id:
      {
        // rax and rdx are destroyed, but should be free since the result is returned there
        // preserve rsi, rcx
        __ push(rsi);
        __ push(rcx);
        LP64_ONLY(__ push(rdx);)

        // check for NaN
        Label return0, do_return, return_min_jlong, do_convert;

        Address value_high_word(rsp, wordSize + 4);
        Address value_low_word(rsp, wordSize);
        Address result_high_word(rsp, 3*wordSize + 4);
        Address result_low_word(rsp, 3*wordSize);

        __ subptr(rsp, 32);                    // more than enough on 32bit
        __ fst_d(value_low_word);
        __ movl(rax, value_high_word);
        __ andl(rax, 0x7ff00000);
        __ cmpl(rax, 0x7ff00000);
        __ jcc(Assembler::notEqual, do_convert);
        __ movl(rax, value_high_word);
        __ andl(rax, 0xfffff);
        __ orl(rax, value_low_word);
        __ jcc(Assembler::notZero, return0);

        __ bind(do_convert);
        // Set the x87 rounding control to "round toward zero" (RC bits = 11,
        // hence the 0xc00 mask) to get Java's truncating (long) cast semantics.
        __ fnstcw(Address(rsp, 0));
        __ movzwl(rax, Address(rsp, 0));
        __ orl(rax, 0xc00);
        __ movw(Address(rsp, 2), rax);
        __ fldcw(Address(rsp, 2));
        __ fwait();
        __ fistp_d(result_low_word);
        __ fldcw(Address(rsp, 0));
        __ fwait();
        // This gets the entire long in rax on 64bit
        __ movptr(rax, result_low_word);
        // testing of high bits
        __ movl(rdx, result_high_word);
        __ mov(rcx, rax);
        // What the heck is the point of the next instruction???
        __ xorl(rcx, 0x0);
        __ movl(rsi, 0x80000000);
        __ xorl(rsi, rdx);
        __ orl(rcx, rsi);
        __ jcc(Assembler::notEqual, do_return);
        __ fldz();
        __ fcomp_d(value_low_word);
        __ fnstsw_ax();
#ifdef _LP64
        __ testl(rax, 0x4100);  // ZF & CF == 0
        __ jcc(Assembler::equal, return_min_jlong);
#else
        __ sahf();
        __ jcc(Assembler::above, return_min_jlong);
#endif // _LP64
        // return max_jlong
#ifndef _LP64
        __ movl(rdx, 0x7fffffff);
        __ movl(rax, 0xffffffff);
#else
        __ mov64(rax, CONST64(0x7fffffffffffffff));
#endif // _LP64
        __ jmp(do_return);

        __ bind(return_min_jlong);
#ifndef _LP64
        __ movl(rdx, 0x80000000);
        __ xorl(rax, rax);
#else
        __ mov64(rax, UCONST64(0x8000000000000000));
#endif // _LP64
        __ jmp(do_return);

        __ bind(return0);
        __ fpop();
#ifndef _LP64
        __ xorptr(rdx, rdx);
        __ xorptr(rax, rax);
#else
        __ xorptr(rax, rax);
#endif // _LP64

        __ bind(do_return);
        __ addptr(rsp, 32);
        LP64_ONLY(__ pop(rdx);)
        __ pop(rcx);
        __ pop(rsi);
        __ ret(0);
      }
      break;
f(sasm, "unimplemented entry", dont_gc_arguments); 1642 __ movptr(rax, (int)id); 1643 __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, unimplemented_entry), rax); 1644 __ should_not_reach_here(); 1645 } 1646 break; 1647 } 1648 return oop_maps; 1649 } 1650 1651 #undef __ 1652 1653 const char *Runtime1::pd_name_for_address(address entry) { 1654 return "<unknown function>"; 1655 }