rev 56556 : 8232151: Minimal VM build broken after JDK-8232050
Reviewed-by: dholmes, clanger, redestad
1 /*
2 * Copyright (c) 2003, 2019, Oracle and/or its affiliates. All rights reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 #include "precompiled.hpp"
26 #ifndef _WINDOWS
27 #include "alloca.h"
28 #endif
29 #include "asm/macroAssembler.hpp"
30 #include "asm/macroAssembler.inline.hpp"
31 #include "code/debugInfoRec.hpp"
32 #include "code/icBuffer.hpp"
33 #include "code/nativeInst.hpp"
34 #include "code/vtableStubs.hpp"
35 #include "gc/shared/collectedHeap.hpp"
36 #include "gc/shared/gcLocker.hpp"
37 #include "gc/shared/barrierSet.hpp"
38 #include "gc/shared/barrierSetAssembler.hpp"
39 #include "interpreter/interpreter.hpp"
40 #include "logging/log.hpp"
41 #include "memory/resourceArea.hpp"
42 #include "memory/universe.hpp"
43 #include "oops/compiledICHolder.hpp"
44 #include "oops/klass.inline.hpp"
45 #include "runtime/safepointMechanism.hpp"
46 #include "runtime/sharedRuntime.hpp"
47 #include "runtime/vframeArray.hpp"
48 #include "utilities/align.hpp"
49 #include "utilities/formatBuffer.hpp"
50 #include "vm_version_x86.hpp"
51 #include "vmreg_x86.inline.hpp"
52 #ifdef COMPILER1
53 #include "c1/c1_Runtime1.hpp"
54 #endif
55 #ifdef COMPILER2
56 #include "opto/runtime.hpp"
57 #endif
58 #if INCLUDE_JVMCI
59 #include "jvmci/jvmciJavaClasses.hpp"
60 #endif
61
62 #define __ masm->
63
64 const int StackAlignmentInSlots = StackAlignmentInBytes / VMRegImpl::stack_slot_size;
65
66 class SimpleRuntimeFrame {
67
68 public:
69
70 // Most of the runtime stubs have this simple frame layout.
71 // This class exists to make the layout shared in one place.
72 // Offsets are for compiler stack slots, which are jints.
73 enum layout {
74 // The frame sender code expects that rbp will be in the "natural" place and
75 // will override any oopMap setting for it. We must therefore force the layout
76 // so that it agrees with the frame sender code.
77 rbp_off = frame::arg_reg_save_area_bytes/BytesPerInt,
78 rbp_off2,
79 return_off, return_off2,
80 framesize
81 };
82 };
83
84 class RegisterSaver {
85 // Capture info about frame layout. Layout offsets are in jint
86 // units because compiler frame slots are jints.
87 #define XSAVE_AREA_BEGIN 160
88 #define XSAVE_AREA_YMM_BEGIN 576
89 #define XSAVE_AREA_ZMM_BEGIN 1152
90 #define XSAVE_AREA_UPPERBANK 1664
91 #define DEF_XMM_OFFS(regnum) xmm ## regnum ## _off = xmm_off + (regnum)*16/BytesPerInt, xmm ## regnum ## H_off
92 #define DEF_YMM_OFFS(regnum) ymm ## regnum ## _off = ymm_off + (regnum)*16/BytesPerInt, ymm ## regnum ## H_off
93 #define DEF_ZMM_OFFS(regnum) zmm ## regnum ## _off = zmm_off + (regnum-16)*64/BytesPerInt, zmm ## regnum ## H_off
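// Note on the XSAVE_AREA_* constants above (illustrative, values assumed fixed here): 160 is
// where xmm0..xmm15 live inside the legacy FXSAVE image, while 576, 1152 and 1664 are the
// customary XSAVE extended-region offsets for the YMM upper halves, the ZMM upper halves and
// zmm16..zmm31. Real hardware enumerates these offsets via CPUID; this layout simply bakes in
// the usual values.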
94 enum layout {
95 fpu_state_off = frame::arg_reg_save_area_bytes/BytesPerInt, // fxsave save area
96 xmm_off = fpu_state_off + XSAVE_AREA_BEGIN/BytesPerInt, // offset in fxsave save area
97 DEF_XMM_OFFS(0),
98 DEF_XMM_OFFS(1),
99 // 2..15 are implied in range usage
100 ymm_off = xmm_off + (XSAVE_AREA_YMM_BEGIN - XSAVE_AREA_BEGIN)/BytesPerInt,
101 DEF_YMM_OFFS(0),
102 DEF_YMM_OFFS(1),
103 // 2..15 are implied in range usage
104 zmm_high = xmm_off + (XSAVE_AREA_ZMM_BEGIN - XSAVE_AREA_BEGIN)/BytesPerInt,
105 zmm_off = xmm_off + (XSAVE_AREA_UPPERBANK - XSAVE_AREA_BEGIN)/BytesPerInt,
106 DEF_ZMM_OFFS(16),
107 DEF_ZMM_OFFS(17),
108 // 18..31 are implied in range usage
109 fpu_state_end = fpu_state_off + ((FPUStateSizeInWords-1)*wordSize / BytesPerInt),
110 fpu_stateH_end,
111 r15_off, r15H_off,
112 r14_off, r14H_off,
113 r13_off, r13H_off,
114 r12_off, r12H_off,
115 r11_off, r11H_off,
116 r10_off, r10H_off,
117 r9_off, r9H_off,
118 r8_off, r8H_off,
119 rdi_off, rdiH_off,
120 rsi_off, rsiH_off,
121 ignore_off, ignoreH_off, // extra copy of rbp
122 rsp_off, rspH_off,
123 rbx_off, rbxH_off,
124 rdx_off, rdxH_off,
125 rcx_off, rcxH_off,
126 rax_off, raxH_off,
127 // 16-byte stack alignment fill word: see MacroAssembler::push/pop_IU_state
128 align_off, alignH_off,
129 flags_off, flagsH_off,
130 // The frame sender code expects that rbp will be in the "natural" place and
131 // will override any oopMap setting for it. We must therefore force the layout
132 // so that it agrees with the frame sender code.
133 rbp_off, rbpH_off, // copy of rbp we will restore
134 return_off, returnH_off, // slot for return address
135 reg_save_size // size in compiler stack slots
136 };
137
138 public:
139 static OopMap* save_live_registers(MacroAssembler* masm, int additional_frame_words, int* total_frame_words, bool save_vectors = false);
140 static void restore_live_registers(MacroAssembler* masm, bool restore_vectors = false);
141
142 // Offsets into the register save area
143 // Used by deoptimization when it is managing result register
144 // values on its own
145
146 static int rax_offset_in_bytes(void) { return BytesPerInt * rax_off; }
147 static int rdx_offset_in_bytes(void) { return BytesPerInt * rdx_off; }
148 static int rbx_offset_in_bytes(void) { return BytesPerInt * rbx_off; }
149 static int xmm0_offset_in_bytes(void) { return BytesPerInt * xmm0_off; }
150 static int return_offset_in_bytes(void) { return BytesPerInt * return_off; }
151
152 // During deoptimization only the result registers need to be restored,
153 // all the other values have already been extracted.
154 static void restore_result_registers(MacroAssembler* masm);
155 };
156
157 OopMap* RegisterSaver::save_live_registers(MacroAssembler* masm, int additional_frame_words, int* total_frame_words, bool save_vectors) {
158 int off = 0;
159 int num_xmm_regs = XMMRegisterImpl::number_of_registers;
160 if (UseAVX < 3) {
161 num_xmm_regs = num_xmm_regs/2;
162 }
163 #if COMPILER2_OR_JVMCI
164 if (save_vectors) {
165 assert(UseAVX > 0, "Vectors larger than 16 byte long are supported only with AVX");
166 assert(MaxVectorSize <= 64, "Only up to 64 byte long vectors are supported");
167 }
168 #else
169 assert(!save_vectors, "vectors are generated only by C2 and JVMCI");
170 #endif
171
172   // Always make the frame size 16-byte aligned; both vector and non-vector stacks are always allocated
173 int frame_size_in_bytes = align_up(reg_save_size*BytesPerInt, num_xmm_regs);
174 // OopMap frame size is in compiler stack slots (jint's) not bytes or words
175 int frame_size_in_slots = frame_size_in_bytes / BytesPerInt;
176 // CodeBlob frame size is in words.
177 int frame_size_in_words = frame_size_in_bytes / wordSize;
178 *total_frame_words = frame_size_in_words;
179
180 // Save registers, fpu state, and flags.
181 // We assume caller has already pushed the return address onto the
182 // stack, so rsp is 8-byte aligned here.
183   // We push rbp twice in this sequence because we want the real rbp
184 // to be under the return like a normal enter.
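// Illustrative picture of the frame built below (high addresses to low, assumed from the code):
//   [return address] [saved rbp from enter()] [push_CPU_state area, laid out per the
//   RegisterSaver enum above] [optional arg_reg_save_area]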
185
186 __ enter(); // rsp becomes 16-byte aligned here
187 __ push_CPU_state(); // Push a multiple of 16 bytes
188
189   // push_CPU_state handles this on EVEX-enabled targets
190 if (save_vectors) {
191 // Save upper half of YMM registers(0..15)
192 int base_addr = XSAVE_AREA_YMM_BEGIN;
193 for (int n = 0; n < 16; n++) {
194 __ vextractf128_high(Address(rsp, base_addr+n*16), as_XMMRegister(n));
195 }
196 if (VM_Version::supports_evex()) {
197 // Save upper half of ZMM registers(0..15)
198 base_addr = XSAVE_AREA_ZMM_BEGIN;
199 for (int n = 0; n < 16; n++) {
200 __ vextractf64x4_high(Address(rsp, base_addr+n*32), as_XMMRegister(n));
201 }
202 // Save full ZMM registers(16..num_xmm_regs)
203 base_addr = XSAVE_AREA_UPPERBANK;
204 off = 0;
205 int vector_len = Assembler::AVX_512bit;
206 for (int n = 16; n < num_xmm_regs; n++) {
207 __ evmovdqul(Address(rsp, base_addr+(off++*64)), as_XMMRegister(n), vector_len);
208 }
209 }
210 } else {
211 if (VM_Version::supports_evex()) {
212 // Save upper bank of ZMM registers(16..31) for double/float usage
213 int base_addr = XSAVE_AREA_UPPERBANK;
214 off = 0;
215 for (int n = 16; n < num_xmm_regs; n++) {
216 __ movsd(Address(rsp, base_addr+(off++*64)), as_XMMRegister(n));
217 }
218 }
219 }
220 __ vzeroupper();
221 if (frame::arg_reg_save_area_bytes != 0) {
222 // Allocate argument register save area
223 __ subptr(rsp, frame::arg_reg_save_area_bytes);
224 }
225
226 // Set an oopmap for the call site. This oopmap will map all
227 // oop-registers and debug-info registers as callee-saved. This
228 // will allow deoptimization at this safepoint to find all possible
229 // debug-info recordings, as well as let GC find all oops.
230
231 OopMapSet *oop_maps = new OopMapSet();
232 OopMap* map = new OopMap(frame_size_in_slots, 0);
233
234 #define STACK_OFFSET(x) VMRegImpl::stack2reg((x))
235
236 map->set_callee_saved(STACK_OFFSET( rax_off ), rax->as_VMReg());
237 map->set_callee_saved(STACK_OFFSET( rcx_off ), rcx->as_VMReg());
238 map->set_callee_saved(STACK_OFFSET( rdx_off ), rdx->as_VMReg());
239 map->set_callee_saved(STACK_OFFSET( rbx_off ), rbx->as_VMReg());
240 // rbp location is known implicitly by the frame sender code, needs no oopmap
241   // and the location where rbp was saved is ignored
242 map->set_callee_saved(STACK_OFFSET( rsi_off ), rsi->as_VMReg());
243 map->set_callee_saved(STACK_OFFSET( rdi_off ), rdi->as_VMReg());
244 map->set_callee_saved(STACK_OFFSET( r8_off ), r8->as_VMReg());
245 map->set_callee_saved(STACK_OFFSET( r9_off ), r9->as_VMReg());
246 map->set_callee_saved(STACK_OFFSET( r10_off ), r10->as_VMReg());
247 map->set_callee_saved(STACK_OFFSET( r11_off ), r11->as_VMReg());
248 map->set_callee_saved(STACK_OFFSET( r12_off ), r12->as_VMReg());
249 map->set_callee_saved(STACK_OFFSET( r13_off ), r13->as_VMReg());
250 map->set_callee_saved(STACK_OFFSET( r14_off ), r14->as_VMReg());
251 map->set_callee_saved(STACK_OFFSET( r15_off ), r15->as_VMReg());
252   // For both AVX and EVEX we will use the legacy FXSAVE area for xmm0..xmm15;
253   // on EVEX-enabled targets it is included in the xsave area
254 off = xmm0_off;
255 int delta = xmm1_off - off;
256 for (int n = 0; n < 16; n++) {
257 XMMRegister xmm_name = as_XMMRegister(n);
258 map->set_callee_saved(STACK_OFFSET(off), xmm_name->as_VMReg());
259 off += delta;
260 }
261 if(UseAVX > 2) {
262 // Obtain xmm16..xmm31 from the XSAVE area on EVEX enabled targets
263 off = zmm16_off;
264 delta = zmm17_off - off;
265 for (int n = 16; n < num_xmm_regs; n++) {
266 XMMRegister zmm_name = as_XMMRegister(n);
267 map->set_callee_saved(STACK_OFFSET(off), zmm_name->as_VMReg());
268 off += delta;
269 }
270 }
271
272 #if COMPILER2_OR_JVMCI
273 if (save_vectors) {
274 off = ymm0_off;
275 int delta = ymm1_off - off;
276 for (int n = 0; n < 16; n++) {
277 XMMRegister ymm_name = as_XMMRegister(n);
278 map->set_callee_saved(STACK_OFFSET(off), ymm_name->as_VMReg()->next(4));
279 off += delta;
280 }
281 }
282 #endif // COMPILER2_OR_JVMCI
283
284 // %%% These should all be a waste but we'll keep things as they were for now
285 if (true) {
286 map->set_callee_saved(STACK_OFFSET( raxH_off ), rax->as_VMReg()->next());
287 map->set_callee_saved(STACK_OFFSET( rcxH_off ), rcx->as_VMReg()->next());
288 map->set_callee_saved(STACK_OFFSET( rdxH_off ), rdx->as_VMReg()->next());
289 map->set_callee_saved(STACK_OFFSET( rbxH_off ), rbx->as_VMReg()->next());
290 // rbp location is known implicitly by the frame sender code, needs no oopmap
291 map->set_callee_saved(STACK_OFFSET( rsiH_off ), rsi->as_VMReg()->next());
292 map->set_callee_saved(STACK_OFFSET( rdiH_off ), rdi->as_VMReg()->next());
293 map->set_callee_saved(STACK_OFFSET( r8H_off ), r8->as_VMReg()->next());
294 map->set_callee_saved(STACK_OFFSET( r9H_off ), r9->as_VMReg()->next());
295 map->set_callee_saved(STACK_OFFSET( r10H_off ), r10->as_VMReg()->next());
296 map->set_callee_saved(STACK_OFFSET( r11H_off ), r11->as_VMReg()->next());
297 map->set_callee_saved(STACK_OFFSET( r12H_off ), r12->as_VMReg()->next());
298 map->set_callee_saved(STACK_OFFSET( r13H_off ), r13->as_VMReg()->next());
299 map->set_callee_saved(STACK_OFFSET( r14H_off ), r14->as_VMReg()->next());
300 map->set_callee_saved(STACK_OFFSET( r15H_off ), r15->as_VMReg()->next());
301     // For both AVX and EVEX we will use the legacy FXSAVE area for xmm0..xmm15;
302     // on EVEX-enabled targets it is included in the xsave area
303 off = xmm0H_off;
304 delta = xmm1H_off - off;
305 for (int n = 0; n < 16; n++) {
306 XMMRegister xmm_name = as_XMMRegister(n);
307 map->set_callee_saved(STACK_OFFSET(off), xmm_name->as_VMReg()->next());
308 off += delta;
309 }
310 if (UseAVX > 2) {
311 // Obtain xmm16..xmm31 from the XSAVE area on EVEX enabled targets
312 off = zmm16H_off;
313 delta = zmm17H_off - off;
314 for (int n = 16; n < num_xmm_regs; n++) {
315 XMMRegister zmm_name = as_XMMRegister(n);
316 map->set_callee_saved(STACK_OFFSET(off), zmm_name->as_VMReg()->next());
317 off += delta;
318 }
319 }
320 }
321
322 return map;
323 }
324
325 void RegisterSaver::restore_live_registers(MacroAssembler* masm, bool restore_vectors) {
326 int num_xmm_regs = XMMRegisterImpl::number_of_registers;
327 if (UseAVX < 3) {
328 num_xmm_regs = num_xmm_regs/2;
329 }
330 if (frame::arg_reg_save_area_bytes != 0) {
331 // Pop arg register save area
332 __ addptr(rsp, frame::arg_reg_save_area_bytes);
333 }
334
335 #if COMPILER2_OR_JVMCI
336 if (restore_vectors) {
337 assert(UseAVX > 0, "Vectors larger than 16 byte long are supported only with AVX");
338 assert(MaxVectorSize <= 64, "Only up to 64 byte long vectors are supported");
339 }
340 #else
341 assert(!restore_vectors, "vectors are generated only by C2");
342 #endif
343
344 __ vzeroupper();
345
346 // On EVEX enabled targets everything is handled in pop fpu state
347 if (restore_vectors) {
348 // Restore upper half of YMM registers (0..15)
349 int base_addr = XSAVE_AREA_YMM_BEGIN;
350 for (int n = 0; n < 16; n++) {
351 __ vinsertf128_high(as_XMMRegister(n), Address(rsp, base_addr+n*16));
352 }
353 if (VM_Version::supports_evex()) {
354 // Restore upper half of ZMM registers (0..15)
355 base_addr = XSAVE_AREA_ZMM_BEGIN;
356 for (int n = 0; n < 16; n++) {
357 __ vinsertf64x4_high(as_XMMRegister(n), Address(rsp, base_addr+n*32));
358 }
359 // Restore full ZMM registers(16..num_xmm_regs)
360 base_addr = XSAVE_AREA_UPPERBANK;
361 int vector_len = Assembler::AVX_512bit;
362 int off = 0;
363 for (int n = 16; n < num_xmm_regs; n++) {
364 __ evmovdqul(as_XMMRegister(n), Address(rsp, base_addr+(off++*64)), vector_len);
365 }
366 }
367 } else {
368 if (VM_Version::supports_evex()) {
369 // Restore upper bank of ZMM registers(16..31) for double/float usage
370 int base_addr = XSAVE_AREA_UPPERBANK;
371 int off = 0;
372 for (int n = 16; n < num_xmm_regs; n++) {
373 __ movsd(as_XMMRegister(n), Address(rsp, base_addr+(off++*64)));
374 }
375 }
376 }
377
378 // Recover CPU state
379 __ pop_CPU_state();
380 // Get the rbp described implicitly by the calling convention (no oopMap)
381 __ pop(rbp);
382 }
383
384 void RegisterSaver::restore_result_registers(MacroAssembler* masm) {
385
386   // Just restore the result registers. Only used by deoptimization. By
387   // now any callee-save register that needs to be restored to a c2
388   // caller of the deoptee has been extracted into the vframeArray
389   // and will be stuffed into the c2i adapter we create for later
390   // restoration, so only result registers need to be restored here.
391
392 // Restore fp result register
393 __ movdbl(xmm0, Address(rsp, xmm0_offset_in_bytes()));
394 // Restore integer result register
395 __ movptr(rax, Address(rsp, rax_offset_in_bytes()));
396 __ movptr(rdx, Address(rsp, rdx_offset_in_bytes()));
397
398   // Pop all of the register save area off the stack except the return address
399 __ addptr(rsp, return_offset_in_bytes());
400 }
401
402 // Is the vector's size (in bytes) bigger than the size saved by default?
403 // 16-byte XMM registers are saved by default using fxsave/fxrstor instructions.
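// (so 32-byte YMM and 64-byte ZMM vectors count as wide here; 16-byte XMM vectors do not)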
404 bool SharedRuntime::is_wide_vector(int size) {
405 return size > 16;
406 }
407
408 size_t SharedRuntime::trampoline_size() {
409 return 16;
410 }
411
412 void SharedRuntime::generate_trampoline(MacroAssembler *masm, address destination) {
413 __ jump(RuntimeAddress(destination));
414 }
415
416 // The java_calling_convention describes stack locations as ideal slots on
417 // a frame with no abi restrictions. Since we must observe abi restrictions
418 // (like the placement of the register window) the slots must be biased by
419 // the following value.
420 static int reg2offset_in(VMReg r) {
421 // Account for saved rbp and return address
422 // This should really be in_preserve_stack_slots
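// (saved rbp + return address = 2 words = 16 bytes = 4 jint-sized stack slots, hence the +4)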
423 return (r->reg2stack() + 4) * VMRegImpl::stack_slot_size;
424 }
425
426 static int reg2offset_out(VMReg r) {
427 return (r->reg2stack() + SharedRuntime::out_preserve_stack_slots()) * VMRegImpl::stack_slot_size;
428 }
429
430 // ---------------------------------------------------------------------------
431 // Read the array of BasicTypes from a signature, and compute where the
432 // arguments should go. Values in the VMRegPair regs array refer to 4-byte
433 // quantities. Values less than VMRegImpl::stack0 are registers, those above
434 // refer to 4-byte stack slots. All stack slots are based off of the stack pointer
435 // as framesizes are fixed.
436 // VMRegImpl::stack0 refers to the first slot 0(sp),
437 // and VMRegImpl::stack0+1 refers to the memory word 4 bytes higher. Registers
438 // 0 up to RegisterImpl::number_of_registers are the 64-bit
439 // integer registers.
440
441 // Note: the INPUTS in sig_bt are in units of Java argument words, which are
442 // either 32-bit or 64-bit depending on the build. The OUTPUTS are in 32-bit
443 // units regardless of build. Of course for i486 there is no 64-bit build.
444
445 // The Java calling convention is a "shifted" version of the C ABI.
446 // By skipping the first C ABI register we can call non-static jni methods
447 // with small numbers of arguments without having to shuffle the arguments
448 // at all. Since we control the java ABI we ought to at least get some
449 // advantage out of it.
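// Illustrative example (assumed) of the mapping computed below: for a Java signature
// (int, long, Object, double) the arguments land in j_rarg0, j_rarg1, j_rarg2 and j_farg0
// respectively, the T_VOID halves that follow T_LONG/T_DOUBLE are set_bad(), and stk_args
// stays 0 because everything fits in registers.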
450
451 int SharedRuntime::java_calling_convention(const BasicType *sig_bt,
452 VMRegPair *regs,
453 int total_args_passed,
454 int is_outgoing) {
455
456 // Create the mapping between argument positions and
457 // registers.
458 static const Register INT_ArgReg[Argument::n_int_register_parameters_j] = {
459 j_rarg0, j_rarg1, j_rarg2, j_rarg3, j_rarg4, j_rarg5
460 };
461 static const XMMRegister FP_ArgReg[Argument::n_float_register_parameters_j] = {
462 j_farg0, j_farg1, j_farg2, j_farg3,
463 j_farg4, j_farg5, j_farg6, j_farg7
464 };
465
466
467 uint int_args = 0;
468 uint fp_args = 0;
469 uint stk_args = 0; // inc by 2 each time
470
471 for (int i = 0; i < total_args_passed; i++) {
472 switch (sig_bt[i]) {
473 case T_BOOLEAN:
474 case T_CHAR:
475 case T_BYTE:
476 case T_SHORT:
477 case T_INT:
478 if (int_args < Argument::n_int_register_parameters_j) {
479 regs[i].set1(INT_ArgReg[int_args++]->as_VMReg());
480 } else {
481 regs[i].set1(VMRegImpl::stack2reg(stk_args));
482 stk_args += 2;
483 }
484 break;
485 case T_VOID:
486 // halves of T_LONG or T_DOUBLE
487 assert(i != 0 && (sig_bt[i - 1] == T_LONG || sig_bt[i - 1] == T_DOUBLE), "expecting half");
488 regs[i].set_bad();
489 break;
490 case T_LONG:
491 assert((i + 1) < total_args_passed && sig_bt[i + 1] == T_VOID, "expecting half");
492 // fall through
493 case T_OBJECT:
494 case T_ARRAY:
495 case T_ADDRESS:
496 if (int_args < Argument::n_int_register_parameters_j) {
497 regs[i].set2(INT_ArgReg[int_args++]->as_VMReg());
498 } else {
499 regs[i].set2(VMRegImpl::stack2reg(stk_args));
500 stk_args += 2;
501 }
502 break;
503 case T_FLOAT:
504 if (fp_args < Argument::n_float_register_parameters_j) {
505 regs[i].set1(FP_ArgReg[fp_args++]->as_VMReg());
506 } else {
507 regs[i].set1(VMRegImpl::stack2reg(stk_args));
508 stk_args += 2;
509 }
510 break;
511 case T_DOUBLE:
512 assert((i + 1) < total_args_passed && sig_bt[i + 1] == T_VOID, "expecting half");
513 if (fp_args < Argument::n_float_register_parameters_j) {
514 regs[i].set2(FP_ArgReg[fp_args++]->as_VMReg());
515 } else {
516 regs[i].set2(VMRegImpl::stack2reg(stk_args));
517 stk_args += 2;
518 }
519 break;
520 default:
521 ShouldNotReachHere();
522 break;
523 }
524 }
525
526 return align_up(stk_args, 2);
527 }
528
529 // Patch the caller's callsite with the entry to compiled code, if it exists.
530 static void patch_callers_callsite(MacroAssembler *masm) {
531 Label L;
532 __ cmpptr(Address(rbx, in_bytes(Method::code_offset())), (int32_t)NULL_WORD);
533 __ jcc(Assembler::equal, L);
534
535 // Save the current stack pointer
536 __ mov(r13, rsp);
537 // Schedule the branch target address early.
538 // Call into the VM to patch the caller, then jump to compiled callee
539 // rax isn't live so capture return address while we easily can
540 __ movptr(rax, Address(rsp, 0));
541
542 // align stack so push_CPU_state doesn't fault
543 __ andptr(rsp, -(StackAlignmentInBytes));
544 __ push_CPU_state();
545 __ vzeroupper();
546 // VM needs caller's callsite
547 // VM needs target method
548 // This needs to be a long call since we will relocate this adapter to
549 // the codeBuffer and it may not reach
550
551 // Allocate argument register save area
552 if (frame::arg_reg_save_area_bytes != 0) {
553 __ subptr(rsp, frame::arg_reg_save_area_bytes);
554 }
555 __ mov(c_rarg0, rbx);
556 __ mov(c_rarg1, rax);
557 __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, SharedRuntime::fixup_callers_callsite)));
558
559 // De-allocate argument register save area
560 if (frame::arg_reg_save_area_bytes != 0) {
561 __ addptr(rsp, frame::arg_reg_save_area_bytes);
562 }
563
564 __ vzeroupper();
565 __ pop_CPU_state();
566 // restore sp
567 __ mov(rsp, r13);
568 __ bind(L);
569 }
570
571
572 static void gen_c2i_adapter(MacroAssembler *masm,
573 int total_args_passed,
574 int comp_args_on_stack,
575 const BasicType *sig_bt,
576 const VMRegPair *regs,
577 Label& skip_fixup) {
578 // Before we get into the guts of the C2I adapter, see if we should be here
579 // at all. We've come from compiled code and are attempting to jump to the
580 // interpreter, which means the caller made a static call to get here
581 // (vcalls always get a compiled target if there is one). Check for a
582 // compiled target. If there is one, we need to patch the caller's call.
583 patch_callers_callsite(masm);
584
585 __ bind(skip_fixup);
586
587 // Since all args are passed on the stack, total_args_passed *
588   // Interpreter::stackElementSize is the space we need. Plus one word because
589   // we also account for the return address location, since
590   // we store it first rather than hold it in rax across all the shuffling.
591
592 int extraspace = (total_args_passed * Interpreter::stackElementSize) + wordSize;
593
594 // stack is aligned, keep it that way
595 extraspace = align_up(extraspace, 2*wordSize);
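// Illustrative arithmetic (assuming stackElementSize == wordSize == 8): with 4 args this is
// 4*8 + 8 = 40 bytes, rounded up to 48 so the interpreter area keeps the stack 16-byte aligned.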
596
597 // Get return address
598 __ pop(rax);
599
600 // set senderSP value
601 __ mov(r13, rsp);
602
603 __ subptr(rsp, extraspace);
604
605 // Store the return address in the expected location
606 __ movptr(Address(rsp, 0), rax);
607
608 // Now write the args into the outgoing interpreter space
609 for (int i = 0; i < total_args_passed; i++) {
610 if (sig_bt[i] == T_VOID) {
611 assert(i > 0 && (sig_bt[i-1] == T_LONG || sig_bt[i-1] == T_DOUBLE), "missing half");
612 continue;
613 }
614
615 // offset to start parameters
616 int st_off = (total_args_passed - i) * Interpreter::stackElementSize;
617 int next_off = st_off - Interpreter::stackElementSize;
618
619 // Say 4 args:
620 // i st_off
621 // 0 32 T_LONG
622 // 1 24 T_VOID
623 // 2 16 T_OBJECT
624 // 3 8 T_BOOL
625 // - 0 return address
626 //
627     // However, to make things extra confusing: because we can fit a long/double in
628     // a single slot on a 64-bit VM and it would be silly to break them up, the interpreter
629     // leaves one slot empty and only stores to a single slot. In this case the
630     // slot that is occupied is the T_VOID slot. See, I said it was confusing.
631
632 VMReg r_1 = regs[i].first();
633 VMReg r_2 = regs[i].second();
634 if (!r_1->is_valid()) {
635 assert(!r_2->is_valid(), "");
636 continue;
637 }
638 if (r_1->is_stack()) {
639 // memory to memory use rax
640 int ld_off = r_1->reg2stack() * VMRegImpl::stack_slot_size + extraspace;
641 if (!r_2->is_valid()) {
642 // sign extend??
643 __ movl(rax, Address(rsp, ld_off));
644 __ movptr(Address(rsp, st_off), rax);
645
646 } else {
647
648 __ movq(rax, Address(rsp, ld_off));
649
650         // Two VMRegs|OptoRegs can be T_OBJECT, T_ADDRESS, T_DOUBLE, T_LONG
651 // T_DOUBLE and T_LONG use two slots in the interpreter
652 if ( sig_bt[i] == T_LONG || sig_bt[i] == T_DOUBLE) {
653 // ld_off == LSW, ld_off+wordSize == MSW
654 // st_off == MSW, next_off == LSW
655 __ movq(Address(rsp, next_off), rax);
656 #ifdef ASSERT
657 // Overwrite the unused slot with known junk
658 __ mov64(rax, CONST64(0xdeadffffdeadaaaa));
659 __ movptr(Address(rsp, st_off), rax);
660 #endif /* ASSERT */
661 } else {
662 __ movq(Address(rsp, st_off), rax);
663 }
664 }
665 } else if (r_1->is_Register()) {
666 Register r = r_1->as_Register();
667 if (!r_2->is_valid()) {
668         // must be only an int (or less), so move only 32 bits to the slot
669 // why not sign extend??
670 __ movl(Address(rsp, st_off), r);
671 } else {
672         // Two VMRegs|OptoRegs can be T_OBJECT, T_ADDRESS, T_DOUBLE, T_LONG
673 // T_DOUBLE and T_LONG use two slots in the interpreter
674 if ( sig_bt[i] == T_LONG || sig_bt[i] == T_DOUBLE) {
675 // long/double in gpr
676 #ifdef ASSERT
677 // Overwrite the unused slot with known junk
678 __ mov64(rax, CONST64(0xdeadffffdeadaaab));
679 __ movptr(Address(rsp, st_off), rax);
680 #endif /* ASSERT */
681 __ movq(Address(rsp, next_off), r);
682 } else {
683 __ movptr(Address(rsp, st_off), r);
684 }
685 }
686 } else {
687 assert(r_1->is_XMMRegister(), "");
688 if (!r_2->is_valid()) {
689         // only a float, use just part of the slot
690 __ movflt(Address(rsp, st_off), r_1->as_XMMRegister());
691 } else {
692 #ifdef ASSERT
693 // Overwrite the unused slot with known junk
694 __ mov64(rax, CONST64(0xdeadffffdeadaaac));
695 __ movptr(Address(rsp, st_off), rax);
696 #endif /* ASSERT */
697 __ movdbl(Address(rsp, next_off), r_1->as_XMMRegister());
698 }
699 }
700 }
701
702 // Schedule the branch target address early.
703 __ movptr(rcx, Address(rbx, in_bytes(Method::interpreter_entry_offset())));
704 __ jmp(rcx);
705 }
706
707 static void range_check(MacroAssembler* masm, Register pc_reg, Register temp_reg,
708 address code_start, address code_end,
709 Label& L_ok) {
710 Label L_fail;
711 __ lea(temp_reg, ExternalAddress(code_start));
712 __ cmpptr(pc_reg, temp_reg);
713 __ jcc(Assembler::belowEqual, L_fail);
714 __ lea(temp_reg, ExternalAddress(code_end));
715 __ cmpptr(pc_reg, temp_reg);
716 __ jcc(Assembler::below, L_ok);
717 __ bind(L_fail);
718 }
719
720 void SharedRuntime::gen_i2c_adapter(MacroAssembler *masm,
721 int total_args_passed,
722 int comp_args_on_stack,
723 const BasicType *sig_bt,
724 const VMRegPair *regs) {
725
726 // Note: r13 contains the senderSP on entry. We must preserve it since
727   // we may do an i2c -> c2i transition if we lose a race where compiled
728   // code goes non-entrant while we get args ready.
729   // In addition we use r13 to locate all the interpreter args because
730   // we must align the stack to 16 bytes on an i2c entry; otherwise we
731   // lose the alignment expected in all compiled code, and the register
732   // save code can segv when fxsave instructions find an improperly
733   // aligned stack pointer.
734
735 // Adapters can be frameless because they do not require the caller
736 // to perform additional cleanup work, such as correcting the stack pointer.
737 // An i2c adapter is frameless because the *caller* frame, which is interpreted,
738 // routinely repairs its own stack pointer (from interpreter_frame_last_sp),
739 // even if a callee has modified the stack pointer.
740 // A c2i adapter is frameless because the *callee* frame, which is interpreted,
741 // routinely repairs its caller's stack pointer (from sender_sp, which is set
742 // up via the senderSP register).
743 // In other words, if *either* the caller or callee is interpreted, we can
744 // get the stack pointer repaired after a call.
745 // This is why c2i and i2c adapters cannot be indefinitely composed.
746 // In particular, if a c2i adapter were to somehow call an i2c adapter,
747 // both caller and callee would be compiled methods, and neither would
748 // clean up the stack pointer changes performed by the two adapters.
749 // If this happens, control eventually transfers back to the compiled
750 // caller, but with an uncorrected stack, causing delayed havoc.
751
752 // Pick up the return address
753 __ movptr(rax, Address(rsp, 0));
754
755 if (VerifyAdapterCalls &&
756 (Interpreter::code() != NULL || StubRoutines::code1() != NULL)) {
757 // So, let's test for cascading c2i/i2c adapters right now.
758 // assert(Interpreter::contains($return_addr) ||
759 // StubRoutines::contains($return_addr),
760 // "i2c adapter must return to an interpreter frame");
761 __ block_comment("verify_i2c { ");
762 Label L_ok;
763 if (Interpreter::code() != NULL)
764 range_check(masm, rax, r11,
765 Interpreter::code()->code_start(), Interpreter::code()->code_end(),
766 L_ok);
767 if (StubRoutines::code1() != NULL)
768 range_check(masm, rax, r11,
769 StubRoutines::code1()->code_begin(), StubRoutines::code1()->code_end(),
770 L_ok);
771 if (StubRoutines::code2() != NULL)
772 range_check(masm, rax, r11,
773 StubRoutines::code2()->code_begin(), StubRoutines::code2()->code_end(),
774 L_ok);
775 const char* msg = "i2c adapter must return to an interpreter frame";
776 __ block_comment(msg);
777 __ stop(msg);
778 __ bind(L_ok);
779 __ block_comment("} verify_i2ce ");
780 }
781
782 // Must preserve original SP for loading incoming arguments because
783 // we need to align the outgoing SP for compiled code.
784 __ movptr(r11, rsp);
785
786 // Cut-out for having no stack args. Since up to 2 int/oop args are passed
787 // in registers, we will occasionally have no stack args.
788 int comp_words_on_stack = 0;
789 if (comp_args_on_stack) {
790 // Sig words on the stack are greater-than VMRegImpl::stack0. Those in
791 // registers are below. By subtracting stack0, we either get a negative
792 // number (all values in registers) or the maximum stack slot accessed.
793
794 // Convert 4-byte c2 stack slots to words.
795 comp_words_on_stack = align_up(comp_args_on_stack*VMRegImpl::stack_slot_size, wordSize)>>LogBytesPerWord;
796     // Round up to minimum stack alignment, in wordSize
797 comp_words_on_stack = align_up(comp_words_on_stack, 2);
798 __ subptr(rsp, comp_words_on_stack * wordSize);
799 }
800
801
802 // Ensure compiled code always sees stack at proper alignment
803 __ andptr(rsp, -16);
804
805   // Push the return address and misalign the stack so that the youngest frame always sees
806   // the return address where a call instruction would have placed it.
807 __ push(rax);
808
809 // Put saved SP in another register
810 const Register saved_sp = rax;
811 __ movptr(saved_sp, r11);
812
813 // Will jump to the compiled code just as if compiled code was doing it.
814 // Pre-load the register-jump target early, to schedule it better.
815 __ movptr(r11, Address(rbx, in_bytes(Method::from_compiled_offset())));
816
817 #if INCLUDE_JVMCI
818 if (EnableJVMCI || UseAOT) {
819 // check if this call should be routed towards a specific entry point
820 __ cmpptr(Address(r15_thread, in_bytes(JavaThread::jvmci_alternate_call_target_offset())), 0);
821 Label no_alternative_target;
822 __ jcc(Assembler::equal, no_alternative_target);
823 __ movptr(r11, Address(r15_thread, in_bytes(JavaThread::jvmci_alternate_call_target_offset())));
824 __ movptr(Address(r15_thread, in_bytes(JavaThread::jvmci_alternate_call_target_offset())), 0);
825 __ bind(no_alternative_target);
826 }
827 #endif // INCLUDE_JVMCI
828
829 // Now generate the shuffle code. Pick up all register args and move the
830 // rest through the floating point stack top.
831 for (int i = 0; i < total_args_passed; i++) {
832 if (sig_bt[i] == T_VOID) {
833 // Longs and doubles are passed in native word order, but misaligned
834 // in the 32-bit build.
835 assert(i > 0 && (sig_bt[i-1] == T_LONG || sig_bt[i-1] == T_DOUBLE), "missing half");
836 continue;
837 }
838
839 // Pick up 0, 1 or 2 words from SP+offset.
840
841 assert(!regs[i].second()->is_valid() || regs[i].first()->next() == regs[i].second(),
842 "scrambled load targets?");
843 // Load in argument order going down.
844 int ld_off = (total_args_passed - i)*Interpreter::stackElementSize;
845 // Point to interpreter value (vs. tag)
846 int next_off = ld_off - Interpreter::stackElementSize;
847 //
848 //
849 //
850 VMReg r_1 = regs[i].first();
851 VMReg r_2 = regs[i].second();
852 if (!r_1->is_valid()) {
853 assert(!r_2->is_valid(), "");
854 continue;
855 }
856 if (r_1->is_stack()) {
857 // Convert stack slot to an SP offset (+ wordSize to account for return address )
858 int st_off = regs[i].first()->reg2stack()*VMRegImpl::stack_slot_size + wordSize;
859
860 // We can use r13 as a temp here because compiled code doesn't need r13 as an input
861       // and if we end up going through a c2i because of a miss, a reasonable value of r13
862 // will be generated.
863 if (!r_2->is_valid()) {
864 // sign extend???
865 __ movl(r13, Address(saved_sp, ld_off));
866 __ movptr(Address(rsp, st_off), r13);
867 } else {
868 //
869         // We are using two optoregs. This can be either T_OBJECT, T_ADDRESS, T_LONG, or T_DOUBLE;
870         // the interpreter allocates two slots but only uses one for the T_LONG or T_DOUBLE case,
871         // so we must adjust where to pick up the data to match the interpreter.
872 //
873 // Interpreter local[n] == MSW, local[n+1] == LSW however locals
874 // are accessed as negative so LSW is at LOW address
875
876 // ld_off is MSW so get LSW
877 const int offset = (sig_bt[i]==T_LONG||sig_bt[i]==T_DOUBLE)?
878 next_off : ld_off;
879 __ movq(r13, Address(saved_sp, offset));
880 // st_off is LSW (i.e. reg.first())
881 __ movq(Address(rsp, st_off), r13);
882 }
883 } else if (r_1->is_Register()) { // Register argument
884 Register r = r_1->as_Register();
885 assert(r != rax, "must be different");
886 if (r_2->is_valid()) {
887 //
888         // We are using two VMRegs. This can be either T_OBJECT, T_ADDRESS, T_LONG, or T_DOUBLE;
889         // the interpreter allocates two slots but only uses one for the T_LONG or T_DOUBLE case,
890         // so we must adjust where to pick up the data to match the interpreter.
891
892 const int offset = (sig_bt[i]==T_LONG||sig_bt[i]==T_DOUBLE)?
893 next_off : ld_off;
894
895 // this can be a misaligned move
896 __ movq(r, Address(saved_sp, offset));
897 } else {
898 // sign extend and use a full word?
899 __ movl(r, Address(saved_sp, ld_off));
900 }
901 } else {
902 if (!r_2->is_valid()) {
903 __ movflt(r_1->as_XMMRegister(), Address(saved_sp, ld_off));
904 } else {
905 __ movdbl(r_1->as_XMMRegister(), Address(saved_sp, next_off));
906 }
907 }
908 }
909
910 // 6243940 We might end up in handle_wrong_method if
911   // the callee is deoptimized as we race through here. If that
912 // happens we don't want to take a safepoint because the
913 // caller frame will look interpreted and arguments are now
914 // "compiled" so it is much better to make this transition
915 // invisible to the stack walking code. Unfortunately if
916 // we try and find the callee by normal means a safepoint
917 // is possible. So we stash the desired callee in the thread
918   // and the VM will find it there should this case occur.
919
920 __ movptr(Address(r15_thread, JavaThread::callee_target_offset()), rbx);
921
922   // Put Method* where a c2i would expect it, should we end up there.
923   // Only needed because c2's resolve stubs return the Method* as a result in
924   // rax.
925 __ mov(rax, rbx);
926 __ jmp(r11);
927 }
928
929 // ---------------------------------------------------------------
930 AdapterHandlerEntry* SharedRuntime::generate_i2c2i_adapters(MacroAssembler *masm,
931 int total_args_passed,
932 int comp_args_on_stack,
933 const BasicType *sig_bt,
934 const VMRegPair *regs,
935 AdapterFingerPrint* fingerprint) {
936 address i2c_entry = __ pc();
937
938 gen_i2c_adapter(masm, total_args_passed, comp_args_on_stack, sig_bt, regs);
939
940 // -------------------------------------------------------------------------
941 // Generate a C2I adapter. On entry we know rbx holds the Method* during calls
942 // to the interpreter. The args start out packed in the compiled layout. They
943 // need to be unpacked into the interpreter layout. This will almost always
944 // require some stack space. We grow the current (compiled) stack, then repack
945 // the args. We finally end in a jump to the generic interpreter entry point.
946 // On exit from the interpreter, the interpreter will restore our SP (lest the
947   // compiled code, which relies solely on SP and not RBP, get sick).
948
949 address c2i_unverified_entry = __ pc();
950 Label skip_fixup;
951 Label ok;
952
953 Register holder = rax;
954 Register receiver = j_rarg0;
955 Register temp = rbx;
956
957 {
958 __ load_klass(temp, receiver);
959 __ cmpptr(temp, Address(holder, CompiledICHolder::holder_klass_offset()));
960 __ movptr(rbx, Address(holder, CompiledICHolder::holder_metadata_offset()));
961 __ jcc(Assembler::equal, ok);
962 __ jump(RuntimeAddress(SharedRuntime::get_ic_miss_stub()));
963
964 __ bind(ok);
965 // Method might have been compiled since the call site was patched to
966     // interpreted; if that is the case, treat it as a miss so we can get
967 // the call site corrected.
968 __ cmpptr(Address(rbx, in_bytes(Method::code_offset())), (int32_t)NULL_WORD);
969 __ jcc(Assembler::equal, skip_fixup);
970 __ jump(RuntimeAddress(SharedRuntime::get_ic_miss_stub()));
971 }
972
973 address c2i_entry = __ pc();
974
975 // Class initialization barrier for static methods
976 address c2i_no_clinit_check_entry = NULL;
977 if (VM_Version::supports_fast_class_init_checks()) {
978 Label L_skip_barrier;
979 Register method = rbx;
980
981 { // Bypass the barrier for non-static methods
982 Register flags = rscratch1;
983 __ movl(flags, Address(method, Method::access_flags_offset()));
984 __ testl(flags, JVM_ACC_STATIC);
985 __ jcc(Assembler::zero, L_skip_barrier); // non-static
986 }
987
988 Register klass = rscratch1;
989 __ load_method_holder(klass, method);
990 __ clinit_barrier(klass, r15_thread, &L_skip_barrier /*L_fast_path*/);
991
992 __ jump(RuntimeAddress(SharedRuntime::get_handle_wrong_method_stub())); // slow path
993
994 __ bind(L_skip_barrier);
995 c2i_no_clinit_check_entry = __ pc();
996 }
997
998 BarrierSetAssembler* bs = BarrierSet::barrier_set()->barrier_set_assembler();
999 bs->c2i_entry_barrier(masm);
1000
1001 gen_c2i_adapter(masm, total_args_passed, comp_args_on_stack, sig_bt, regs, skip_fixup);
1002
1003 __ flush();
1004 return AdapterHandlerLibrary::new_entry(fingerprint, i2c_entry, c2i_entry, c2i_unverified_entry, c2i_no_clinit_check_entry);
1005 }
1006
1007 int SharedRuntime::c_calling_convention(const BasicType *sig_bt,
1008 VMRegPair *regs,
1009 VMRegPair *regs2,
1010 int total_args_passed) {
1011 assert(regs2 == NULL, "not needed on x86");
1012 // We return the amount of VMRegImpl stack slots we need to reserve for all
1013 // the arguments NOT counting out_preserve_stack_slots.
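// Illustrative example (assumed) of the mapping computed below for a C signature
// (int, long, float): on the SysV ABI the args go in c_rarg0, c_rarg1 and c_farg0 with
// stk_args == 0; on Windows the int/fp registers are consumed positionally, giving
// c_rarg0, c_rarg1 and c_farg2, and stk_args is bumped to the 8-slot minimum shadow space.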
1014
1015 // NOTE: These arrays will have to change when c1 is ported
1016 #ifdef _WIN64
1017 static const Register INT_ArgReg[Argument::n_int_register_parameters_c] = {
1018 c_rarg0, c_rarg1, c_rarg2, c_rarg3
1019 };
1020 static const XMMRegister FP_ArgReg[Argument::n_float_register_parameters_c] = {
1021 c_farg0, c_farg1, c_farg2, c_farg3
1022 };
1023 #else
1024 static const Register INT_ArgReg[Argument::n_int_register_parameters_c] = {
1025 c_rarg0, c_rarg1, c_rarg2, c_rarg3, c_rarg4, c_rarg5
1026 };
1027 static const XMMRegister FP_ArgReg[Argument::n_float_register_parameters_c] = {
1028 c_farg0, c_farg1, c_farg2, c_farg3,
1029 c_farg4, c_farg5, c_farg6, c_farg7
1030 };
1031 #endif // _WIN64
1032
1033
1034 uint int_args = 0;
1035 uint fp_args = 0;
1036 uint stk_args = 0; // inc by 2 each time
1037
1038 for (int i = 0; i < total_args_passed; i++) {
1039 switch (sig_bt[i]) {
1040 case T_BOOLEAN:
1041 case T_CHAR:
1042 case T_BYTE:
1043 case T_SHORT:
1044 case T_INT:
1045 if (int_args < Argument::n_int_register_parameters_c) {
1046 regs[i].set1(INT_ArgReg[int_args++]->as_VMReg());
1047 #ifdef _WIN64
1048 fp_args++;
1049         // Allocate slots for the callee to stuff register args onto the stack.
1050 stk_args += 2;
1051 #endif
1052 } else {
1053 regs[i].set1(VMRegImpl::stack2reg(stk_args));
1054 stk_args += 2;
1055 }
1056 break;
1057 case T_LONG:
1058 assert((i + 1) < total_args_passed && sig_bt[i + 1] == T_VOID, "expecting half");
1059 // fall through
1060 case T_OBJECT:
1061 case T_ARRAY:
1062 case T_ADDRESS:
1063 case T_METADATA:
1064 if (int_args < Argument::n_int_register_parameters_c) {
1065 regs[i].set2(INT_ArgReg[int_args++]->as_VMReg());
1066 #ifdef _WIN64
1067 fp_args++;
1068 stk_args += 2;
1069 #endif
1070 } else {
1071 regs[i].set2(VMRegImpl::stack2reg(stk_args));
1072 stk_args += 2;
1073 }
1074 break;
1075 case T_FLOAT:
1076 if (fp_args < Argument::n_float_register_parameters_c) {
1077 regs[i].set1(FP_ArgReg[fp_args++]->as_VMReg());
1078 #ifdef _WIN64
1079 int_args++;
1080         // Allocate slots for the callee to stuff register args onto the stack.
1081 stk_args += 2;
1082 #endif
1083 } else {
1084 regs[i].set1(VMRegImpl::stack2reg(stk_args));
1085 stk_args += 2;
1086 }
1087 break;
1088 case T_DOUBLE:
1089 assert((i + 1) < total_args_passed && sig_bt[i + 1] == T_VOID, "expecting half");
1090 if (fp_args < Argument::n_float_register_parameters_c) {
1091 regs[i].set2(FP_ArgReg[fp_args++]->as_VMReg());
1092 #ifdef _WIN64
1093 int_args++;
1094         // Allocate slots for the callee to stuff register args onto the stack.
1095 stk_args += 2;
1096 #endif
1097 } else {
1098 regs[i].set2(VMRegImpl::stack2reg(stk_args));
1099 stk_args += 2;
1100 }
1101 break;
1102 case T_VOID: // Halves of longs and doubles
1103 assert(i != 0 && (sig_bt[i - 1] == T_LONG || sig_bt[i - 1] == T_DOUBLE), "expecting half");
1104 regs[i].set_bad();
1105 break;
1106 default:
1107 ShouldNotReachHere();
1108 break;
1109 }
1110 }
1111 #ifdef _WIN64
1112   // The Windows ABI requires that we always allocate enough stack space
1113   // for four 64-bit register arguments to be stored down.
1114 if (stk_args < 8) {
1115 stk_args = 8;
1116 }
1117 #endif // _WIN64
1118
1119 return stk_args;
1120 }
1121
1122 // On 64-bit we will store integer-like items to the stack as
1123 // 64-bit items (SPARC ABI) even though Java would only store
1124 // 32 bits for a parameter. On 32-bit it will simply be 32 bits,
1125 // so this routine will do 32->32 on 32-bit and 32->64 on 64-bit.
1126 static void move32_64(MacroAssembler* masm, VMRegPair src, VMRegPair dst) {
1127 if (src.first()->is_stack()) {
1128 if (dst.first()->is_stack()) {
1129 // stack to stack
1130 __ movslq(rax, Address(rbp, reg2offset_in(src.first())));
1131 __ movq(Address(rsp, reg2offset_out(dst.first())), rax);
1132 } else {
1133 // stack to reg
1134 __ movslq(dst.first()->as_Register(), Address(rbp, reg2offset_in(src.first())));
1135 }
1136 } else if (dst.first()->is_stack()) {
1137 // reg to stack
1138 // Do we really have to sign extend???
1139 // __ movslq(src.first()->as_Register(), src.first()->as_Register());
1140 __ movq(Address(rsp, reg2offset_out(dst.first())), src.first()->as_Register());
1141 } else {
1142 // Do we really have to sign extend???
1143 // __ movslq(dst.first()->as_Register(), src.first()->as_Register());
1144 if (dst.first() != src.first()) {
1145 __ movq(dst.first()->as_Register(), src.first()->as_Register());
1146 }
1147 }
1148 }
1149
1150 static void move_ptr(MacroAssembler* masm, VMRegPair src, VMRegPair dst) {
1151 if (src.first()->is_stack()) {
1152 if (dst.first()->is_stack()) {
1153 // stack to stack
1154 __ movq(rax, Address(rbp, reg2offset_in(src.first())));
1155 __ movq(Address(rsp, reg2offset_out(dst.first())), rax);
1156 } else {
1157 // stack to reg
1158 __ movq(dst.first()->as_Register(), Address(rbp, reg2offset_in(src.first())));
1159 }
1160 } else if (dst.first()->is_stack()) {
1161 // reg to stack
1162 __ movq(Address(rsp, reg2offset_out(dst.first())), src.first()->as_Register());
1163 } else {
1164 if (dst.first() != src.first()) {
1165 __ movq(dst.first()->as_Register(), src.first()->as_Register());
1166 }
1167 }
1168 }
1169
1170 // An oop arg. Must pass a handle, not the oop itself.
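// Illustrative summary (taken from the code below): the handle is simply the address of the
// stack slot holding the oop (either the caller's argument slot or a reserved oop_handle slot),
// and a NULL oop is passed as a NULL handle via the conditional move.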
1171 static void object_move(MacroAssembler* masm,
1172 OopMap* map,
1173 int oop_handle_offset,
1174 int framesize_in_slots,
1175 VMRegPair src,
1176 VMRegPair dst,
1177 bool is_receiver,
1178 int* receiver_offset) {
1179
1180 // must pass a handle. First figure out the location we use as a handle
1181
1182 Register rHandle = dst.first()->is_stack() ? rax : dst.first()->as_Register();
1183
1184   // See if oop is NULL; if it is we need no handle
1185
1186 if (src.first()->is_stack()) {
1187
1188 // Oop is already on the stack as an argument
1189 int offset_in_older_frame = src.first()->reg2stack() + SharedRuntime::out_preserve_stack_slots();
1190 map->set_oop(VMRegImpl::stack2reg(offset_in_older_frame + framesize_in_slots));
1191 if (is_receiver) {
1192 *receiver_offset = (offset_in_older_frame + framesize_in_slots) * VMRegImpl::stack_slot_size;
1193 }
1194
1195 __ cmpptr(Address(rbp, reg2offset_in(src.first())), (int32_t)NULL_WORD);
1196 __ lea(rHandle, Address(rbp, reg2offset_in(src.first())));
1197 // conditionally move a NULL
1198 __ cmovptr(Assembler::equal, rHandle, Address(rbp, reg2offset_in(src.first())));
1199 } else {
1200
1201     // Oop is in a register; we must store it to the space we reserve
1202     // on the stack for oop_handles and pass a handle if the oop is non-NULL.
1203
1204 const Register rOop = src.first()->as_Register();
1205 int oop_slot;
1206 if (rOop == j_rarg0)
1207 oop_slot = 0;
1208 else if (rOop == j_rarg1)
1209 oop_slot = 1;
1210 else if (rOop == j_rarg2)
1211 oop_slot = 2;
1212 else if (rOop == j_rarg3)
1213 oop_slot = 3;
1214 else if (rOop == j_rarg4)
1215 oop_slot = 4;
1216 else {
1217 assert(rOop == j_rarg5, "wrong register");
1218 oop_slot = 5;
1219 }
1220
1221 oop_slot = oop_slot * VMRegImpl::slots_per_word + oop_handle_offset;
1222 int offset = oop_slot*VMRegImpl::stack_slot_size;
1223
1224 map->set_oop(VMRegImpl::stack2reg(oop_slot));
1225 // Store oop in handle area, may be NULL
1226 __ movptr(Address(rsp, offset), rOop);
1227 if (is_receiver) {
1228 *receiver_offset = offset;
1229 }
1230
1231 __ cmpptr(rOop, (int32_t)NULL_WORD);
1232 __ lea(rHandle, Address(rsp, offset));
1233 // conditionally move a NULL from the handle area where it was just stored
1234 __ cmovptr(Assembler::equal, rHandle, Address(rsp, offset));
1235 }
1236
1237   // If arg is on the stack then place it, otherwise it is already in the correct reg.
1238 if (dst.first()->is_stack()) {
1239 __ movptr(Address(rsp, reg2offset_out(dst.first())), rHandle);
1240 }
1241 }
1242
1243 // A float arg may have to do float reg to int reg conversion
1244 static void float_move(MacroAssembler* masm, VMRegPair src, VMRegPair dst) {
1245 assert(!src.second()->is_valid() && !dst.second()->is_valid(), "bad float_move");
1246
1247   // The calling convention assures us that each VMRegPair is either
1248 // all really one physical register or adjacent stack slots.
1249 // This greatly simplifies the cases here compared to sparc.
1250
1251 if (src.first()->is_stack()) {
1252 if (dst.first()->is_stack()) {
1253 __ movl(rax, Address(rbp, reg2offset_in(src.first())));
1254 __ movptr(Address(rsp, reg2offset_out(dst.first())), rax);
1255 } else {
1256 // stack to reg
1257 assert(dst.first()->is_XMMRegister(), "only expect xmm registers as parameters");
1258 __ movflt(dst.first()->as_XMMRegister(), Address(rbp, reg2offset_in(src.first())));
1259 }
1260 } else if (dst.first()->is_stack()) {
1261 // reg to stack
1262 assert(src.first()->is_XMMRegister(), "only expect xmm registers as parameters");
1263 __ movflt(Address(rsp, reg2offset_out(dst.first())), src.first()->as_XMMRegister());
1264 } else {
1265 // reg to reg
1266 // In theory these overlap but the ordering is such that this is likely a nop
1267 if ( src.first() != dst.first()) {
1268 __ movdbl(dst.first()->as_XMMRegister(), src.first()->as_XMMRegister());
1269 }
1270 }
1271 }
1272
1273 // A long move
1274 static void long_move(MacroAssembler* masm, VMRegPair src, VMRegPair dst) {
1275
1276   // The calling convention assures us that each VMRegPair is either
1277 // all really one physical register or adjacent stack slots.
1278 // This greatly simplifies the cases here compared to sparc.
1279
1280 if (src.is_single_phys_reg() ) {
1281 if (dst.is_single_phys_reg()) {
1282 if (dst.first() != src.first()) {
1283 __ mov(dst.first()->as_Register(), src.first()->as_Register());
1284 }
1285 } else {
1286 assert(dst.is_single_reg(), "not a stack pair");
1287 __ movq(Address(rsp, reg2offset_out(dst.first())), src.first()->as_Register());
1288 }
1289 } else if (dst.is_single_phys_reg()) {
1290 assert(src.is_single_reg(), "not a stack pair");
1291 __ movq(dst.first()->as_Register(), Address(rbp, reg2offset_out(src.first())));
1292 } else {
1293 assert(src.is_single_reg() && dst.is_single_reg(), "not stack pairs");
1294 __ movq(rax, Address(rbp, reg2offset_in(src.first())));
1295 __ movq(Address(rsp, reg2offset_out(dst.first())), rax);
1296 }
1297 }
1298
1299 // A double move
1300 static void double_move(MacroAssembler* masm, VMRegPair src, VMRegPair dst) {
1301
1302   // The calling convention assures us that each VMRegPair is either
1303 // all really one physical register or adjacent stack slots.
1304 // This greatly simplifies the cases here compared to sparc.
1305
1306 if (src.is_single_phys_reg() ) {
1307 if (dst.is_single_phys_reg()) {
1308 // In theory these overlap but the ordering is such that this is likely a nop
1309 if ( src.first() != dst.first()) {
1310 __ movdbl(dst.first()->as_XMMRegister(), src.first()->as_XMMRegister());
1311 }
1312 } else {
1313 assert(dst.is_single_reg(), "not a stack pair");
1314 __ movdbl(Address(rsp, reg2offset_out(dst.first())), src.first()->as_XMMRegister());
1315 }
1316 } else if (dst.is_single_phys_reg()) {
1317 assert(src.is_single_reg(), "not a stack pair");
1318 __ movdbl(dst.first()->as_XMMRegister(), Address(rbp, reg2offset_out(src.first())));
1319 } else {
1320 assert(src.is_single_reg() && dst.is_single_reg(), "not stack pairs");
1321 __ movq(rax, Address(rbp, reg2offset_in(src.first())));
1322 __ movq(Address(rsp, reg2offset_out(dst.first())), rax);
1323 }
1324 }
1325
1326
1327 void SharedRuntime::save_native_result(MacroAssembler *masm, BasicType ret_type, int frame_slots) {
1328 // We always ignore the frame_slots arg and just use the space just below frame pointer
1329 // which by this time is free to use
1330 switch (ret_type) {
1331 case T_FLOAT:
1332 __ movflt(Address(rbp, -wordSize), xmm0);
1333 break;
1334 case T_DOUBLE:
1335 __ movdbl(Address(rbp, -wordSize), xmm0);
1336 break;
1337 case T_VOID: break;
1338 default: {
1339 __ movptr(Address(rbp, -wordSize), rax);
1340 }
1341 }
1342 }
1343
1344 void SharedRuntime::restore_native_result(MacroAssembler *masm, BasicType ret_type, int frame_slots) {
1345 // We always ignore the frame_slots arg and just use the space just below frame pointer
1346 // which by this time is free to use
1347 switch (ret_type) {
1348 case T_FLOAT:
1349 __ movflt(xmm0, Address(rbp, -wordSize));
1350 break;
1351 case T_DOUBLE:
1352 __ movdbl(xmm0, Address(rbp, -wordSize));
1353 break;
1354 case T_VOID: break;
1355 default: {
1356 __ movptr(rax, Address(rbp, -wordSize));
1357 }
1358 }
1359 }
1360
1361 static void save_args(MacroAssembler *masm, int arg_count, int first_arg, VMRegPair *args) {
1362 for ( int i = first_arg ; i < arg_count ; i++ ) {
1363 if (args[i].first()->is_Register()) {
1364 __ push(args[i].first()->as_Register());
1365 } else if (args[i].first()->is_XMMRegister()) {
1366 __ subptr(rsp, 2*wordSize);
1367 __ movdbl(Address(rsp, 0), args[i].first()->as_XMMRegister());
1368 }
1369 }
1370 }
1371
1372 static void restore_args(MacroAssembler *masm, int arg_count, int first_arg, VMRegPair *args) {
1373 for ( int i = arg_count - 1 ; i >= first_arg ; i-- ) {
1374 if (args[i].first()->is_Register()) {
1375 __ pop(args[i].first()->as_Register());
1376 } else if (args[i].first()->is_XMMRegister()) {
1377 __ movdbl(args[i].first()->as_XMMRegister(), Address(rsp, 0));
1378 __ addptr(rsp, 2*wordSize);
1379 }
1380 }
1381 }
1382
1383
1384 static void save_or_restore_arguments(MacroAssembler* masm,
1385 const int stack_slots,
1386 const int total_in_args,
1387 const int arg_save_area,
1388 OopMap* map,
1389 VMRegPair* in_regs,
1390 BasicType* in_sig_bt) {
1391 // if map is non-NULL then the code should store the values,
1392 // otherwise it should load them.
1393 int slot = arg_save_area;
1394 // Save down double word first
1395 for ( int i = 0; i < total_in_args; i++) {
1396 if (in_regs[i].first()->is_XMMRegister() && in_sig_bt[i] == T_DOUBLE) {
1397 int offset = slot * VMRegImpl::stack_slot_size;
1398 slot += VMRegImpl::slots_per_word;
1399 assert(slot <= stack_slots, "overflow");
1400 if (map != NULL) {
1401 __ movdbl(Address(rsp, offset), in_regs[i].first()->as_XMMRegister());
1402 } else {
1403 __ movdbl(in_regs[i].first()->as_XMMRegister(), Address(rsp, offset));
1404 }
1405 }
1406 if (in_regs[i].first()->is_Register() &&
1407 (in_sig_bt[i] == T_LONG || in_sig_bt[i] == T_ARRAY)) {
1408 int offset = slot * VMRegImpl::stack_slot_size;
1409 if (map != NULL) {
1410 __ movq(Address(rsp, offset), in_regs[i].first()->as_Register());
1411 if (in_sig_bt[i] == T_ARRAY) {
1412         map->set_oop(VMRegImpl::stack2reg(slot));
1413 }
1414 } else {
1415 __ movq(in_regs[i].first()->as_Register(), Address(rsp, offset));
1416 }
1417 slot += VMRegImpl::slots_per_word;
1418 }
1419 }
1420 // Save or restore single word registers
1421 for ( int i = 0; i < total_in_args; i++) {
1422 if (in_regs[i].first()->is_Register()) {
1423 int offset = slot * VMRegImpl::stack_slot_size;
1424 slot++;
1425 assert(slot <= stack_slots, "overflow");
1426
1427       // Value is in an input register; we must flush it to the stack.
1428 const Register reg = in_regs[i].first()->as_Register();
1429 switch (in_sig_bt[i]) {
1430 case T_BOOLEAN:
1431 case T_CHAR:
1432 case T_BYTE:
1433 case T_SHORT:
1434 case T_INT:
1435 if (map != NULL) {
1436 __ movl(Address(rsp, offset), reg);
1437 } else {
1438 __ movl(reg, Address(rsp, offset));
1439 }
1440 break;
1441 case T_ARRAY:
1442 case T_LONG:
1443 // handled above
1444 break;
1445 case T_OBJECT:
1446 default: ShouldNotReachHere();
1447 }
1448 } else if (in_regs[i].first()->is_XMMRegister()) {
1449 if (in_sig_bt[i] == T_FLOAT) {
1450 int offset = slot * VMRegImpl::stack_slot_size;
1451 slot++;
1452 assert(slot <= stack_slots, "overflow");
1453 if (map != NULL) {
1454 __ movflt(Address(rsp, offset), in_regs[i].first()->as_XMMRegister());
1455 } else {
1456 __ movflt(in_regs[i].first()->as_XMMRegister(), Address(rsp, offset));
1457 }
1458 }
1459 } else if (in_regs[i].first()->is_stack()) {
1460 if (in_sig_bt[i] == T_ARRAY && map != NULL) {
1461 int offset_in_older_frame = in_regs[i].first()->reg2stack() + SharedRuntime::out_preserve_stack_slots();
1462 map->set_oop(VMRegImpl::stack2reg(offset_in_older_frame + stack_slots));
1463 }
1464 }
1465 }
1466 }
1467
1468 // Pin object, return pinned object or null in rax
1469 static void gen_pin_object(MacroAssembler* masm,
1470 VMRegPair reg) {
1471 __ block_comment("gen_pin_object {");
1472
1473 // rax always contains oop, either incoming or
1474 // pinned.
1475 Register tmp_reg = rax;
1476
1477 Label is_null;
1478 VMRegPair tmp;
1479 VMRegPair in_reg = reg;
1480
1481 tmp.set_ptr(tmp_reg->as_VMReg());
1482 if (reg.first()->is_stack()) {
1483 // Load the arg up from the stack
1484 move_ptr(masm, reg, tmp);
1485 reg = tmp;
1486 } else {
1487 __ movptr(rax, reg.first()->as_Register());
1488 }
1489 __ testptr(reg.first()->as_Register(), reg.first()->as_Register());
1490 __ jccb(Assembler::equal, is_null);
1491
1492 if (reg.first()->as_Register() != c_rarg1) {
1493 __ movptr(c_rarg1, reg.first()->as_Register());
1494 }
1495
1496 __ call_VM_leaf(
1497 CAST_FROM_FN_PTR(address, SharedRuntime::pin_object),
1498 r15_thread, c_rarg1);
1499
1500 __ bind(is_null);
1501 __ block_comment("} gen_pin_object");
1502 }
1503
1504 // Unpin object
1505 static void gen_unpin_object(MacroAssembler* masm,
1506 VMRegPair reg) {
1507 __ block_comment("gen_unpin_object {");
1508 Label is_null;
1509
1510 if (reg.first()->is_stack()) {
1511 __ movptr(c_rarg1, Address(rbp, reg2offset_in(reg.first())));
1512 } else if (reg.first()->as_Register() != c_rarg1) {
1513 __ movptr(c_rarg1, reg.first()->as_Register());
1514 }
1515
1516 __ testptr(c_rarg1, c_rarg1);
1517 __ jccb(Assembler::equal, is_null);
1518
1519 __ call_VM_leaf(
1520 CAST_FROM_FN_PTR(address, SharedRuntime::unpin_object),
1521 r15_thread, c_rarg1);
1522
1523 __ bind(is_null);
1524 __ block_comment("} gen_unpin_object");
1525 }
1526
1527 // Check GCLocker::needs_gc and enter the runtime if it's true. This
1528 // keeps a new JNI critical region from starting until a GC has been
1529 // forced. Save down any oops in registers and describe them in an
1530 // OopMap.
1531 static void check_needs_gc_for_critical_native(MacroAssembler* masm,
1532 int stack_slots,
1533 int total_c_args,
1534 int total_in_args,
1535 int arg_save_area,
1536 OopMapSet* oop_maps,
1537 VMRegPair* in_regs,
1538 BasicType* in_sig_bt) {
1539 __ block_comment("check GCLocker::needs_gc");
1540 Label cont;
1541 __ cmp8(ExternalAddress((address)GCLocker::needs_gc_address()), false);
1542 __ jcc(Assembler::equal, cont);
1543
1544 // Save down any incoming oops and call into the runtime to halt for a GC
1545
1546 OopMap* map = new OopMap(stack_slots * 2, 0 /* arg_slots*/);
1547 save_or_restore_arguments(masm, stack_slots, total_in_args,
1548 arg_save_area, map, in_regs, in_sig_bt);
1549
1550 address the_pc = __ pc();
1551 oop_maps->add_gc_map( __ offset(), map);
1552 __ set_last_Java_frame(rsp, noreg, the_pc);
1553
1554 __ block_comment("block_for_jni_critical");
1555 __ movptr(c_rarg0, r15_thread);
1556 __ mov(r12, rsp); // remember sp
1557 __ subptr(rsp, frame::arg_reg_save_area_bytes); // windows
1558 __ andptr(rsp, -16); // align stack as required by ABI
1559 __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, SharedRuntime::block_for_jni_critical)));
1560 __ mov(rsp, r12); // restore sp
1561 __ reinit_heapbase();
1562
1563 __ reset_last_Java_frame(false);
1564
1565 save_or_restore_arguments(masm, stack_slots, total_in_args,
1566 arg_save_area, NULL, in_regs, in_sig_bt);
1567 __ bind(cont);
1568 #ifdef ASSERT
1569 if (StressCriticalJNINatives) {
1570 // Stress register saving
1571 OopMap* map = new OopMap(stack_slots * 2, 0 /* arg_slots*/);
1572 save_or_restore_arguments(masm, stack_slots, total_in_args,
1573 arg_save_area, map, in_regs, in_sig_bt);
1574 // Destroy argument registers
1575 for (int i = 0; i < total_in_args - 1; i++) {
1576 if (in_regs[i].first()->is_Register()) {
1577 const Register reg = in_regs[i].first()->as_Register();
1578 __ xorptr(reg, reg);
1579 } else if (in_regs[i].first()->is_XMMRegister()) {
1580 __ xorpd(in_regs[i].first()->as_XMMRegister(), in_regs[i].first()->as_XMMRegister());
1581 } else if (in_regs[i].first()->is_FloatRegister()) {
1582 ShouldNotReachHere();
1583 } else if (in_regs[i].first()->is_stack()) {
1584 // Nothing to do
1585 } else {
1586 ShouldNotReachHere();
1587 }
1588 if (in_sig_bt[i] == T_LONG || in_sig_bt[i] == T_DOUBLE) {
1589 i++;
1590 }
1591 }
1592
1593 save_or_restore_arguments(masm, stack_slots, total_in_args,
1594 arg_save_area, NULL, in_regs, in_sig_bt);
1595 }
1596 #endif
1597 }
1598
1599 // Unpack an array argument into a pointer to the body and the length
1600 // if the array is non-null, otherwise pass 0 for both.
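// For example (illustrative only): a critical native taking an int[] receives the
// argument as a (jint length, jint* body) pair, so a null array is simply seen by the
// callee as (0, NULL).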
1601 static void unpack_array_argument(MacroAssembler* masm, VMRegPair reg, BasicType in_elem_type, VMRegPair body_arg, VMRegPair length_arg) {
1602 Register tmp_reg = rax;
1603 assert(!body_arg.first()->is_Register() || body_arg.first()->as_Register() != tmp_reg,
1604 "possible collision");
1605 assert(!length_arg.first()->is_Register() || length_arg.first()->as_Register() != tmp_reg,
1606 "possible collision");
1607
1608 __ block_comment("unpack_array_argument {");
1609
1610 // Pass the length, ptr pair
1611 Label is_null, done;
1612 VMRegPair tmp;
1613 tmp.set_ptr(tmp_reg->as_VMReg());
1614 if (reg.first()->is_stack()) {
1615 // Load the arg up from the stack
1616 move_ptr(masm, reg, tmp);
1617 reg = tmp;
1618 }
1619 __ testptr(reg.first()->as_Register(), reg.first()->as_Register());
1620 __ jccb(Assembler::equal, is_null);
1621 __ lea(tmp_reg, Address(reg.first()->as_Register(), arrayOopDesc::base_offset_in_bytes(in_elem_type)));
1622 move_ptr(masm, tmp, body_arg);
1623 // load the length relative to the body.
1624 __ movl(tmp_reg, Address(tmp_reg, arrayOopDesc::length_offset_in_bytes() -
1625 arrayOopDesc::base_offset_in_bytes(in_elem_type)));
1626 move32_64(masm, tmp, length_arg);
1627 __ jmpb(done);
1628 __ bind(is_null);
1629 // Pass zeros
1630 __ xorptr(tmp_reg, tmp_reg);
1631 move_ptr(masm, tmp, body_arg);
1632 move32_64(masm, tmp, length_arg);
1633 __ bind(done);
1634
1635 __ block_comment("} unpack_array_argument");
1636 }
1637
1638
1639 // Different signatures may require very different orders for the move
1640 // to avoid clobbering other arguments. There's no simple way to
1641 // order them safely. Compute a safe order for issuing stores and
1642 // break any cycles in those stores. This code is fairly general but
1643 // it's not necessary on the other platforms so we keep it in the
1644 // platform dependent code instead of moving it into a shared file.
1645 // (See bugs 7013347 & 7145024.)
1646 // Note that this code is specific to LP64.
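// As a sketch of the problem: if one argument requires rsi -> rdx while another
// requires rdx -> rsi, neither store can be issued first without clobbering the
// other's source. break_cycle() below parks one value in the caller-supplied temp
// register and appends an extra store at the end of the chain. (Registers chosen
// purely for illustration.)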
1647 class ComputeMoveOrder: public StackObj {
1648 class MoveOperation: public ResourceObj {
1649 friend class ComputeMoveOrder;
1650 private:
1651 VMRegPair _src;
1652 VMRegPair _dst;
1653 int _src_index;
1654 int _dst_index;
1655 bool _processed;
1656 MoveOperation* _next;
1657 MoveOperation* _prev;
1658
1659 static int get_id(VMRegPair r) {
1660 return r.first()->value();
1661 }
1662
1663 public:
1664 MoveOperation(int src_index, VMRegPair src, int dst_index, VMRegPair dst):
1665 _src(src)
1666 , _dst(dst)
1667 , _src_index(src_index)
1668 , _dst_index(dst_index)
1669 , _processed(false)
1670 , _next(NULL)
1671 , _prev(NULL) {
1672 }
1673
1674 VMRegPair src() const { return _src; }
1675 int src_id() const { return get_id(src()); }
1676 int src_index() const { return _src_index; }
1677 VMRegPair dst() const { return _dst; }
1678 void set_dst(int i, VMRegPair dst) { _dst_index = i, _dst = dst; }
1679 int dst_index() const { return _dst_index; }
1680 int dst_id() const { return get_id(dst()); }
1681 MoveOperation* next() const { return _next; }
1682 MoveOperation* prev() const { return _prev; }
1683 void set_processed() { _processed = true; }
1684 bool is_processed() const { return _processed; }
1685
1686 // insert
1687 void break_cycle(VMRegPair temp_register) {
1688 // create a new store following the last store
1689 // to move from the temp_register to the original
1690 MoveOperation* new_store = new MoveOperation(-1, temp_register, dst_index(), dst());
1691
1692 // break the cycle of links and insert new_store at the end
1693 // break the reverse link.
1694 MoveOperation* p = prev();
1695 assert(p->next() == this, "must be");
1696 _prev = NULL;
1697 p->_next = new_store;
1698 new_store->_prev = p;
1699
1700       // change the original store to save its value in the temp.
1701 set_dst(-1, temp_register);
1702 }
1703
1704 void link(GrowableArray<MoveOperation*>& killer) {
1705       // link this store in front of the store that it depends on
1706 MoveOperation* n = killer.at_grow(src_id(), NULL);
1707 if (n != NULL) {
1708 assert(_next == NULL && n->_prev == NULL, "shouldn't have been set yet");
1709 _next = n;
1710 n->_prev = this;
1711 }
1712 }
1713 };
1714
1715 private:
1716 GrowableArray<MoveOperation*> edges;
1717
1718 public:
1719 ComputeMoveOrder(int total_in_args, VMRegPair* in_regs, int total_c_args, VMRegPair* out_regs,
1720 BasicType* in_sig_bt, GrowableArray<int>& arg_order, VMRegPair tmp_vmreg) {
1721 // Move operations where the dest is the stack can all be
1722 // scheduled first since they can't interfere with the other moves.
1723 for (int i = total_in_args - 1, c_arg = total_c_args - 1; i >= 0; i--, c_arg--) {
1724 if (in_sig_bt[i] == T_ARRAY) {
1725 c_arg--;
1726 if (out_regs[c_arg].first()->is_stack() &&
1727 out_regs[c_arg + 1].first()->is_stack()) {
1728 arg_order.push(i);
1729 arg_order.push(c_arg);
1730 } else {
1731 if (out_regs[c_arg].first()->is_stack() ||
1732 in_regs[i].first() == out_regs[c_arg].first()) {
1733 add_edge(i, in_regs[i].first(), c_arg, out_regs[c_arg + 1]);
1734 } else {
1735 add_edge(i, in_regs[i].first(), c_arg, out_regs[c_arg]);
1736 }
1737 }
1738 } else if (in_sig_bt[i] == T_VOID) {
1739 arg_order.push(i);
1740 arg_order.push(c_arg);
1741 } else {
1742 if (out_regs[c_arg].first()->is_stack() ||
1743 in_regs[i].first() == out_regs[c_arg].first()) {
1744 arg_order.push(i);
1745 arg_order.push(c_arg);
1746 } else {
1747 add_edge(i, in_regs[i].first(), c_arg, out_regs[c_arg]);
1748 }
1749 }
1750 }
1751     // Break any cycles in the register moves and emit them in the
1752 // proper order.
1753 GrowableArray<MoveOperation*>* stores = get_store_order(tmp_vmreg);
1754 for (int i = 0; i < stores->length(); i++) {
1755 arg_order.push(stores->at(i)->src_index());
1756 arg_order.push(stores->at(i)->dst_index());
1757 }
1758 }
1759
1760   // Collect all the move operations
1761 void add_edge(int src_index, VMRegPair src, int dst_index, VMRegPair dst) {
1762 if (src.first() == dst.first()) return;
1763 edges.append(new MoveOperation(src_index, src, dst_index, dst));
1764 }
1765
1766 // Walk the edges breaking cycles between moves. The result list
1767 // can be walked in order to produce the proper set of loads
1768 GrowableArray<MoveOperation*>* get_store_order(VMRegPair temp_register) {
1769 // Record which moves kill which values
1770 GrowableArray<MoveOperation*> killer;
1771 for (int i = 0; i < edges.length(); i++) {
1772 MoveOperation* s = edges.at(i);
1773 assert(killer.at_grow(s->dst_id(), NULL) == NULL, "only one killer");
1774 killer.at_put_grow(s->dst_id(), s, NULL);
1775 }
1776 assert(killer.at_grow(MoveOperation::get_id(temp_register), NULL) == NULL,
1777 "make sure temp isn't in the registers that are killed");
1778
1779 // create links between loads and stores
1780 for (int i = 0; i < edges.length(); i++) {
1781 edges.at(i)->link(killer);
1782 }
1783
1784 // at this point, all the move operations are chained together
1785 // in a doubly linked list. Processing it backwards finds
1786 // the beginning of the chain, forwards finds the end. If there's
1787 // a cycle it can be broken at any point, so pick an edge and walk
1788 // backward until the list ends or we end where we started.
1789 GrowableArray<MoveOperation*>* stores = new GrowableArray<MoveOperation*>();
1790 for (int e = 0; e < edges.length(); e++) {
1791 MoveOperation* s = edges.at(e);
1792 if (!s->is_processed()) {
1793 MoveOperation* start = s;
1794 // search for the beginning of the chain or cycle
1795 while (start->prev() != NULL && start->prev() != s) {
1796 start = start->prev();
1797 }
1798 if (start->prev() == s) {
1799 start->break_cycle(temp_register);
1800 }
1801 // walk the chain forward inserting to store list
1802 while (start != NULL) {
1803 stores->append(start);
1804 start->set_processed();
1805 start = start->next();
1806 }
1807 }
1808 }
1809 return stores;
1810 }
1811 };
1812
1813 static void verify_oop_args(MacroAssembler* masm,
1814 const methodHandle& method,
1815 const BasicType* sig_bt,
1816 const VMRegPair* regs) {
1817 Register temp_reg = rbx; // not part of any compiled calling seq
1818 if (VerifyOops) {
1819 for (int i = 0; i < method->size_of_parameters(); i++) {
1820 if (is_reference_type(sig_bt[i])) {
1821 VMReg r = regs[i].first();
1822 assert(r->is_valid(), "bad oop arg");
1823 if (r->is_stack()) {
1824 __ movptr(temp_reg, Address(rsp, r->reg2stack() * VMRegImpl::stack_slot_size + wordSize));
1825 __ verify_oop(temp_reg);
1826 } else {
1827 __ verify_oop(r->as_Register());
1828 }
1829 }
1830 }
1831 }
1832 }
1833
1834 static void gen_special_dispatch(MacroAssembler* masm,
1835 const methodHandle& method,
1836 const BasicType* sig_bt,
1837 const VMRegPair* regs) {
1838 verify_oop_args(masm, method, sig_bt, regs);
1839 vmIntrinsics::ID iid = method->intrinsic_id();
1840
1841 // Now write the args into the outgoing interpreter space
1842 bool has_receiver = false;
1843 Register receiver_reg = noreg;
1844 int member_arg_pos = -1;
1845 Register member_reg = noreg;
1846 int ref_kind = MethodHandles::signature_polymorphic_intrinsic_ref_kind(iid);
1847 if (ref_kind != 0) {
1848 member_arg_pos = method->size_of_parameters() - 1; // trailing MemberName argument
1849 member_reg = rbx; // known to be free at this point
1850 has_receiver = MethodHandles::ref_kind_has_receiver(ref_kind);
1851 } else if (iid == vmIntrinsics::_invokeBasic) {
1852 has_receiver = true;
1853 } else {
1854 fatal("unexpected intrinsic id %d", iid);
1855 }
1856
1857 if (member_reg != noreg) {
1858 // Load the member_arg into register, if necessary.
1859 SharedRuntime::check_member_name_argument_is_last_argument(method, sig_bt, regs);
1860 VMReg r = regs[member_arg_pos].first();
1861 if (r->is_stack()) {
1862 __ movptr(member_reg, Address(rsp, r->reg2stack() * VMRegImpl::stack_slot_size + wordSize));
1863 } else {
1864 // no data motion is needed
1865 member_reg = r->as_Register();
1866 }
1867 }
1868
1869 if (has_receiver) {
1870 // Make sure the receiver is loaded into a register.
1871 assert(method->size_of_parameters() > 0, "oob");
1872 assert(sig_bt[0] == T_OBJECT, "receiver argument must be an object");
1873 VMReg r = regs[0].first();
1874 assert(r->is_valid(), "bad receiver arg");
1875 if (r->is_stack()) {
1876 // Porting note: This assumes that compiled calling conventions always
1877 // pass the receiver oop in a register. If this is not true on some
1878 // platform, pick a temp and load the receiver from stack.
1879 fatal("receiver always in a register");
1880 receiver_reg = j_rarg0; // known to be free at this point
1881 __ movptr(receiver_reg, Address(rsp, r->reg2stack() * VMRegImpl::stack_slot_size + wordSize));
1882 } else {
1883 // no data motion is needed
1884 receiver_reg = r->as_Register();
1885 }
1886 }
1887
1888 // Figure out which address we are really jumping to:
1889 MethodHandles::generate_method_handle_dispatch(masm, iid,
1890 receiver_reg, member_reg, /*for_compiler_entry:*/ true);
1891 }
1892
1893 // ---------------------------------------------------------------------------
1894 // Generate a native wrapper for a given method. The method takes arguments
1895 // in the Java compiled code convention, marshals them to the native
1896 // convention (handlizes oops, etc), transitions to native, makes the call,
1897 // returns to java state (possibly blocking), unhandlizes any result and
1898 // returns.
1899 //
1900 // Critical native functions are a shorthand for the use of
1901 // GetPrimitiveArrayCritical and disallow the use of any other JNI
1902 // functions. The wrapper is expected to unpack the arguments before
1903 // passing them to the callee and to perform checks before and after the
1904 // native call to ensure that the GCLocker
1905 // lock_critical/unlock_critical semantics are followed. Some other
1906 // parts of JNI setup are skipped, like the tear down of the JNI handle
1907 // block and the check for pending exceptions, since it's impossible for them
1908 // to be thrown.
1909 //
1910 // They are roughly structured like this:
1911 // if (GCLocker::needs_gc())
1912 // SharedRuntime::block_for_jni_critical();
1913 //   transition to thread_in_native
1914 //   unpack array arguments and call native entry point
1915 //   check for safepoint in progress
1916 //   check if any thread suspend flags are set
1917 //     call into JVM and possibly unlock the JNI critical
1918 // if a GC was suppressed while in the critical native.
1919 // transition back to thread_in_Java
1920 // return to caller
1921 //
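// As an illustration (not something defined in this file): for
//   static native int sum(int[] a);
// in class p.C, the critical entry is looked up under the JavaCritical_ prefix and
// would be declared roughly as
//   JNIEXPORT jint JNICALL JavaCritical_p_C_sum(jint length, jint* body);
// i.e. no JNIEnv*/jclass arguments, with the array expanded into the (length, body)
// pair that this wrapper marshals.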
1922 nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
1923 const methodHandle& method,
1924 int compile_id,
1925 BasicType* in_sig_bt,
1926 VMRegPair* in_regs,
1927 BasicType ret_type,
1928 address critical_entry) {
1929 if (method->is_method_handle_intrinsic()) {
1930 vmIntrinsics::ID iid = method->intrinsic_id();
1931 intptr_t start = (intptr_t)__ pc();
1932 int vep_offset = ((intptr_t)__ pc()) - start;
1933 gen_special_dispatch(masm,
1934 method,
1935 in_sig_bt,
1936 in_regs);
1937 int frame_complete = ((intptr_t)__ pc()) - start; // not complete, period
1938 __ flush();
1939 int stack_slots = SharedRuntime::out_preserve_stack_slots(); // no out slots at all, actually
1940 return nmethod::new_native_nmethod(method,
1941 compile_id,
1942 masm->code(),
1943 vep_offset,
1944 frame_complete,
1945 stack_slots / VMRegImpl::slots_per_word,
1946 in_ByteSize(-1),
1947 in_ByteSize(-1),
1948 (OopMapSet*)NULL);
1949 }
1950 bool is_critical_native = true;
1951 address native_func = critical_entry;
1952 if (native_func == NULL) {
1953 native_func = method->native_function();
1954 is_critical_native = false;
1955 }
1956 assert(native_func != NULL, "must have function");
1957
1958 // An OopMap for lock (and class if static)
1959 OopMapSet *oop_maps = new OopMapSet();
1960 intptr_t start = (intptr_t)__ pc();
1961
1962   // We have received a description of where all the java args are located
1963 // on entry to the wrapper. We need to convert these args to where
1964 // the jni function will expect them. To figure out where they go
1965 // we convert the java signature to a C signature by inserting
1966 // the hidden arguments as arg[0] and possibly arg[1] (static method)
1967
1968 const int total_in_args = method->size_of_parameters();
1969 int total_c_args = total_in_args;
1970 if (!is_critical_native) {
1971 total_c_args += 1;
1972 if (method->is_static()) {
1973 total_c_args++;
1974 }
1975 } else {
1976 for (int i = 0; i < total_in_args; i++) {
1977 if (in_sig_bt[i] == T_ARRAY) {
1978 total_c_args++;
1979 }
1980 }
1981 }
1982
1983 BasicType* out_sig_bt = NEW_RESOURCE_ARRAY(BasicType, total_c_args);
1984 VMRegPair* out_regs = NEW_RESOURCE_ARRAY(VMRegPair, total_c_args);
1985 BasicType* in_elem_bt = NULL;
1986
1987 int argc = 0;
1988 if (!is_critical_native) {
1989 out_sig_bt[argc++] = T_ADDRESS;
1990 if (method->is_static()) {
1991 out_sig_bt[argc++] = T_OBJECT;
1992 }
1993
1994 for (int i = 0; i < total_in_args ; i++ ) {
1995 out_sig_bt[argc++] = in_sig_bt[i];
1996 }
1997 } else {
1998 in_elem_bt = NEW_RESOURCE_ARRAY(BasicType, total_in_args);
1999 SignatureStream ss(method->signature());
2000 for (int i = 0; i < total_in_args ; i++ ) {
2001 if (in_sig_bt[i] == T_ARRAY) {
2002 // Arrays are passed as int, elem* pair
2003 out_sig_bt[argc++] = T_INT;
2004 out_sig_bt[argc++] = T_ADDRESS;
2005 Symbol* atype = ss.as_symbol();
2006 const char* at = atype->as_C_string();
2007 if (strlen(at) == 2) {
2008 assert(at[0] == '[', "must be");
2009 switch (at[1]) {
2010 case 'B': in_elem_bt[i] = T_BYTE; break;
2011 case 'C': in_elem_bt[i] = T_CHAR; break;
2012 case 'D': in_elem_bt[i] = T_DOUBLE; break;
2013 case 'F': in_elem_bt[i] = T_FLOAT; break;
2014 case 'I': in_elem_bt[i] = T_INT; break;
2015 case 'J': in_elem_bt[i] = T_LONG; break;
2016 case 'S': in_elem_bt[i] = T_SHORT; break;
2017 case 'Z': in_elem_bt[i] = T_BOOLEAN; break;
2018 default: ShouldNotReachHere();
2019 }
2020 }
2021 } else {
2022 out_sig_bt[argc++] = in_sig_bt[i];
2023 in_elem_bt[i] = T_VOID;
2024 }
2025 if (in_sig_bt[i] != T_VOID) {
2026 assert(in_sig_bt[i] == ss.type(), "must match");
2027 ss.next();
2028 }
2029 }
2030 }
2031
2032 // Now figure out where the args must be stored and how much stack space
2033 // they require.
2034 int out_arg_slots;
2035 out_arg_slots = c_calling_convention(out_sig_bt, out_regs, NULL, total_c_args);
2036
2037 // Compute framesize for the wrapper. We need to handlize all oops in
2038 // incoming registers
2039
2040 // Calculate the total number of stack slots we will need.
2041
2042 // First count the abi requirement plus all of the outgoing args
2043 int stack_slots = SharedRuntime::out_preserve_stack_slots() + out_arg_slots;
2044
2045 // Now the space for the inbound oop handle area
2046 int total_save_slots = 6 * VMRegImpl::slots_per_word; // 6 arguments passed in registers
2047 if (is_critical_native) {
2048 // Critical natives may have to call out so they need a save area
2049 // for register arguments.
2050 int double_slots = 0;
2051 int single_slots = 0;
2052 for ( int i = 0; i < total_in_args; i++) {
2053 if (in_regs[i].first()->is_Register()) {
2054 const Register reg = in_regs[i].first()->as_Register();
2055 switch (in_sig_bt[i]) {
2056 case T_BOOLEAN:
2057 case T_BYTE:
2058 case T_SHORT:
2059 case T_CHAR:
2060 case T_INT: single_slots++; break;
2061 case T_ARRAY: // specific to LP64 (7145024)
2062 case T_LONG: double_slots++; break;
2063 default: ShouldNotReachHere();
2064 }
2065 } else if (in_regs[i].first()->is_XMMRegister()) {
2066 switch (in_sig_bt[i]) {
2067 case T_FLOAT: single_slots++; break;
2068 case T_DOUBLE: double_slots++; break;
2069 default: ShouldNotReachHere();
2070 }
2071 } else if (in_regs[i].first()->is_FloatRegister()) {
2072 ShouldNotReachHere();
2073 }
2074 }
2075 total_save_slots = double_slots * 2 + single_slots;
2076 // align the save area
2077 if (double_slots != 0) {
2078 stack_slots = align_up(stack_slots, 2);
2079 }
2080 }
2081
2082 int oop_handle_offset = stack_slots;
2083 stack_slots += total_save_slots;
2084
2085 // Now any space we need for handlizing a klass if static method
2086
2087 int klass_slot_offset = 0;
2088 int klass_offset = -1;
2089 int lock_slot_offset = 0;
2090 bool is_static = false;
2091
2092 if (method->is_static()) {
2093 klass_slot_offset = stack_slots;
2094 stack_slots += VMRegImpl::slots_per_word;
2095 klass_offset = klass_slot_offset * VMRegImpl::stack_slot_size;
2096 is_static = true;
2097 }
2098
2099 // Plus a lock if needed
2100
2101 if (method->is_synchronized()) {
2102 lock_slot_offset = stack_slots;
2103 stack_slots += VMRegImpl::slots_per_word;
2104 }
2105
2106 // Now a place (+2) to save return values or temp during shuffling
2107 // + 4 for return address (which we own) and saved rbp
2108 stack_slots += 6;
2109
2110 // Ok The space we have allocated will look like:
2111 //
2112 //
2113 // FP-> | |
2114 // |---------------------|
2115 // | 2 slots for moves |
2116 // |---------------------|
2117 // | lock box (if sync) |
2118 // |---------------------| <- lock_slot_offset
2119 // | klass (if static) |
2120 // |---------------------| <- klass_slot_offset
2121 // | oopHandle area |
2122 // |---------------------| <- oop_handle_offset (6 java arg registers)
2123 // | outbound memory |
2124 // | based arguments |
2125 // | |
2126 // |---------------------|
2127 // | |
2128 // SP-> | out_preserved_slots |
2129 //
2130 //
2131
2132
2133 // Now compute actual number of stack words we need rounding to make
2134 // stack properly aligned.
2135 stack_slots = align_up(stack_slots, StackAlignmentInSlots);
2136
2137 int stack_size = stack_slots * VMRegImpl::stack_slot_size;
2138
2139 // First thing make an ic check to see if we should even be here
2140
2141 // We are free to use all registers as temps without saving them and
2142 // restoring them except rbp. rbp is the only callee save register
2143 // as far as the interpreter and the compiler(s) are concerned.
2144
2145
2146 const Register ic_reg = rax;
2147 const Register receiver = j_rarg0;
2148
2149 Label hit;
2150 Label exception_pending;
2151
2152 assert_different_registers(ic_reg, receiver, rscratch1);
2153 __ verify_oop(receiver);
2154 __ load_klass(rscratch1, receiver);
2155 __ cmpq(ic_reg, rscratch1);
2156 __ jcc(Assembler::equal, hit);
2157
2158 __ jump(RuntimeAddress(SharedRuntime::get_ic_miss_stub()));
2159
2160 // Verified entry point must be aligned
2161 __ align(8);
2162
2163 __ bind(hit);
2164
2165 int vep_offset = ((intptr_t)__ pc()) - start;
2166
2167 if (VM_Version::supports_fast_class_init_checks() && method->needs_clinit_barrier()) {
2168 Label L_skip_barrier;
2169 Register klass = r10;
2170 __ mov_metadata(klass, method->method_holder()); // InstanceKlass*
2171 __ clinit_barrier(klass, r15_thread, &L_skip_barrier /*L_fast_path*/);
2172
2173 __ jump(RuntimeAddress(SharedRuntime::get_handle_wrong_method_stub())); // slow path
2174
2175 __ bind(L_skip_barrier);
2176 }
2177
2178 #ifdef COMPILER1
2179 // For Object.hashCode, System.identityHashCode try to pull hashCode from object header if available.
2180 if ((InlineObjectHash && method->intrinsic_id() == vmIntrinsics::_hashCode) || (method->intrinsic_id() == vmIntrinsics::_identityHashCode)) {
2181 inline_check_hashcode_from_object_header(masm, method, j_rarg0 /*obj_reg*/, rax /*result*/);
2182 }
2183 #endif // COMPILER1
2184
2185 // The instruction at the verified entry point must be 5 bytes or longer
2186 // because it can be patched on the fly by make_non_entrant. The stack bang
2187 // instruction fits that requirement.
2188
2189 // Generate stack overflow check
2190
2191 if (UseStackBanging) {
2192 __ bang_stack_with_offset((int)JavaThread::stack_shadow_zone_size());
2193 } else {
2194 // need a 5 byte instruction to allow MT safe patching to non-entrant
2195 __ fat_nop();
2196 }
2197
2198 // Generate a new frame for the wrapper.
2199 __ enter();
2200 // -2 because return address is already present and so is saved rbp
2201 __ subptr(rsp, stack_size - 2*wordSize);
2202
2203 BarrierSetAssembler* bs = BarrierSet::barrier_set()->barrier_set_assembler();
2204 bs->nmethod_entry_barrier(masm);
2205
2206 // Frame is now completed as far as size and linkage.
2207 int frame_complete = ((intptr_t)__ pc()) - start;
2208
2209 if (UseRTMLocking) {
2210 // Abort RTM transaction before calling JNI
2211 // because critical section will be large and will be
2212 // aborted anyway. Also nmethod could be deoptimized.
2213 __ xabort(0);
2214 }
2215
2216 #ifdef ASSERT
2217 {
2218 Label L;
2219 __ mov(rax, rsp);
2220 __ andptr(rax, -16); // must be 16 byte boundary (see amd64 ABI)
2221 __ cmpptr(rax, rsp);
2222 __ jcc(Assembler::equal, L);
2223 __ stop("improperly aligned stack");
2224 __ bind(L);
2225 }
2226 #endif /* ASSERT */
2227
2228
2229 // We use r14 as the oop handle for the receiver/klass
2230 // It is callee save so it survives the call to native
2231
2232 const Register oop_handle_reg = r14;
2233
2234 if (is_critical_native && !Universe::heap()->supports_object_pinning()) {
2235 check_needs_gc_for_critical_native(masm, stack_slots, total_c_args, total_in_args,
2236 oop_handle_offset, oop_maps, in_regs, in_sig_bt);
2237 }
2238
2239 //
2240 // We immediately shuffle the arguments so that any vm call we have to
2241 // make from here on out (sync slow path, jvmti, etc.) we will have
2242 // captured the oops from our caller and have a valid oopMap for
2243 // them.
2244
2245 // -----------------
2246 // The Grand Shuffle
2247
2248 // The Java calling convention is either equal (linux) or denser (win64) than the
2249   // c calling convention. However, because of the jni_env argument, the c calling
2250 // convention always has at least one more (and two for static) arguments than Java.
2251 // Therefore if we move the args from java -> c backwards then we will never have
2252 // a register->register conflict and we don't have to build a dependency graph
2253 // and figure out how to break any cycles.
2254 //
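  // (On Linux, for example, j_rarg0 is rsi, which is also c_rarg1; with JNIEnv* taking
  //  c_rarg0, Java arg i generally lands in C arg i+1, so walking the args from last to
  //  first never overwrites a register that still holds an unread Java argument.
  //  Register names are illustrative of the Linux ABI; see the platform register
  //  definitions.)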
2255
2256 // Record esp-based slot for receiver on stack for non-static methods
2257 int receiver_offset = -1;
2258
2259 // This is a trick. We double the stack slots so we can claim
2260 // the oops in the caller's frame. Since we are sure to have
2261   // more args than the caller, doubling is enough to make
2262 // sure we can capture all the incoming oop args from the
2263 // caller.
2264 //
2265 OopMap* map = new OopMap(stack_slots * 2, 0 /* arg_slots*/);
2266
2267 // Mark location of rbp (someday)
2268 // map->set_callee_saved(VMRegImpl::stack2reg( stack_slots - 2), stack_slots * 2, 0, vmreg(rbp));
2269
2270 // Use eax, ebx as temporaries during any memory-memory moves we have to do
2271 // All inbound args are referenced based on rbp and all outbound args via rsp.
2272
2273
2274 #ifdef ASSERT
2275 bool reg_destroyed[RegisterImpl::number_of_registers];
2276 bool freg_destroyed[XMMRegisterImpl::number_of_registers];
2277 for ( int r = 0 ; r < RegisterImpl::number_of_registers ; r++ ) {
2278 reg_destroyed[r] = false;
2279 }
2280 for ( int f = 0 ; f < XMMRegisterImpl::number_of_registers ; f++ ) {
2281 freg_destroyed[f] = false;
2282 }
2283
2284 #endif /* ASSERT */
2285
2286 // This may iterate in two different directions depending on the
2287 // kind of native it is. The reason is that for regular JNI natives
2288 // the incoming and outgoing registers are offset upwards and for
2289 // critical natives they are offset down.
2290 GrowableArray<int> arg_order(2 * total_in_args);
2291 // Inbound arguments that need to be pinned for critical natives
2292 GrowableArray<int> pinned_args(total_in_args);
2293 // Current stack slot for storing register based array argument
2294 int pinned_slot = oop_handle_offset;
2295
2296 VMRegPair tmp_vmreg;
2297 tmp_vmreg.set2(rbx->as_VMReg());
2298
2299 if (!is_critical_native) {
2300 for (int i = total_in_args - 1, c_arg = total_c_args - 1; i >= 0; i--, c_arg--) {
2301 arg_order.push(i);
2302 arg_order.push(c_arg);
2303 }
2304 } else {
2305 // Compute a valid move order, using tmp_vmreg to break any cycles
2306 ComputeMoveOrder cmo(total_in_args, in_regs, total_c_args, out_regs, in_sig_bt, arg_order, tmp_vmreg);
2307 }
2308
2309 int temploc = -1;
2310 for (int ai = 0; ai < arg_order.length(); ai += 2) {
2311 int i = arg_order.at(ai);
2312 int c_arg = arg_order.at(ai + 1);
2313 __ block_comment(err_msg("move %d -> %d", i, c_arg));
2314 if (c_arg == -1) {
2315 assert(is_critical_native, "should only be required for critical natives");
2316 // This arg needs to be moved to a temporary
2317 __ mov(tmp_vmreg.first()->as_Register(), in_regs[i].first()->as_Register());
2318 in_regs[i] = tmp_vmreg;
2319 temploc = i;
2320 continue;
2321 } else if (i == -1) {
2322 assert(is_critical_native, "should only be required for critical natives");
2323 // Read from the temporary location
2324 assert(temploc != -1, "must be valid");
2325 i = temploc;
2326 temploc = -1;
2327 }
2328 #ifdef ASSERT
2329 if (in_regs[i].first()->is_Register()) {
2330 assert(!reg_destroyed[in_regs[i].first()->as_Register()->encoding()], "destroyed reg!");
2331 } else if (in_regs[i].first()->is_XMMRegister()) {
2332 assert(!freg_destroyed[in_regs[i].first()->as_XMMRegister()->encoding()], "destroyed reg!");
2333 }
2334 if (out_regs[c_arg].first()->is_Register()) {
2335 reg_destroyed[out_regs[c_arg].first()->as_Register()->encoding()] = true;
2336 } else if (out_regs[c_arg].first()->is_XMMRegister()) {
2337 freg_destroyed[out_regs[c_arg].first()->as_XMMRegister()->encoding()] = true;
2338 }
2339 #endif /* ASSERT */
2340 switch (in_sig_bt[i]) {
2341 case T_ARRAY:
2342 if (is_critical_native) {
2343 // pin before unpack
2344 if (Universe::heap()->supports_object_pinning()) {
2345 save_args(masm, total_c_args, 0, out_regs);
2346 gen_pin_object(masm, in_regs[i]);
2347 pinned_args.append(i);
2348 restore_args(masm, total_c_args, 0, out_regs);
2349
2350 // rax has pinned array
2351 VMRegPair result_reg;
2352 result_reg.set_ptr(rax->as_VMReg());
2353 move_ptr(masm, result_reg, in_regs[i]);
2354 if (!in_regs[i].first()->is_stack()) {
2355 assert(pinned_slot <= stack_slots, "overflow");
2356 move_ptr(masm, result_reg, VMRegImpl::stack2reg(pinned_slot));
2357 pinned_slot += VMRegImpl::slots_per_word;
2358 }
2359 }
2360 unpack_array_argument(masm, in_regs[i], in_elem_bt[i], out_regs[c_arg + 1], out_regs[c_arg]);
2361 c_arg++;
2362 #ifdef ASSERT
2363 if (out_regs[c_arg].first()->is_Register()) {
2364 reg_destroyed[out_regs[c_arg].first()->as_Register()->encoding()] = true;
2365 } else if (out_regs[c_arg].first()->is_XMMRegister()) {
2366 freg_destroyed[out_regs[c_arg].first()->as_XMMRegister()->encoding()] = true;
2367 }
2368 #endif
2369 break;
2370 }
2371 case T_OBJECT:
2372 assert(!is_critical_native, "no oop arguments");
2373 object_move(masm, map, oop_handle_offset, stack_slots, in_regs[i], out_regs[c_arg],
2374 ((i == 0) && (!is_static)),
2375 &receiver_offset);
2376 break;
2377 case T_VOID:
2378 break;
2379
2380 case T_FLOAT:
2381 float_move(masm, in_regs[i], out_regs[c_arg]);
2382 break;
2383
2384 case T_DOUBLE:
2385 assert( i + 1 < total_in_args &&
2386 in_sig_bt[i + 1] == T_VOID &&
2387 out_sig_bt[c_arg+1] == T_VOID, "bad arg list");
2388 double_move(masm, in_regs[i], out_regs[c_arg]);
2389 break;
2390
2391 case T_LONG :
2392 long_move(masm, in_regs[i], out_regs[c_arg]);
2393 break;
2394
2395 case T_ADDRESS: assert(false, "found T_ADDRESS in java args");
2396
2397 default:
2398 move32_64(masm, in_regs[i], out_regs[c_arg]);
2399 }
2400 }
2401
2402 int c_arg;
2403
2404 // Pre-load a static method's oop into r14. Used both by locking code and
2405 // the normal JNI call code.
2406 if (!is_critical_native) {
2407 // point c_arg at the first arg that is already loaded in case we
2408 // need to spill before we call out
2409 c_arg = total_c_args - total_in_args;
2410
2411 if (method->is_static()) {
2412
2413 // load oop into a register
2414 __ movoop(oop_handle_reg, JNIHandles::make_local(method->method_holder()->java_mirror()));
2415
2416     // Now handlize the static class mirror; it's known not-null.
2417 __ movptr(Address(rsp, klass_offset), oop_handle_reg);
2418 map->set_oop(VMRegImpl::stack2reg(klass_slot_offset));
2419
2420 // Now get the handle
2421 __ lea(oop_handle_reg, Address(rsp, klass_offset));
2422 // store the klass handle as second argument
2423 __ movptr(c_rarg1, oop_handle_reg);
2424 // and protect the arg if we must spill
2425 c_arg--;
2426 }
2427 } else {
2428 // For JNI critical methods we need to save all registers in save_args.
2429 c_arg = 0;
2430 }
2431
2432 // Change state to native (we save the return address in the thread, since it might not
2433   // be pushed on the stack when we do a stack traversal). It is enough that the pc()
2434 // points into the right code segment. It does not have to be the correct return pc.
2435 // We use the same pc/oopMap repeatedly when we call out
2436
2437 intptr_t the_pc = (intptr_t) __ pc();
2438 oop_maps->add_gc_map(the_pc - start, map);
2439
2440 __ set_last_Java_frame(rsp, noreg, (address)the_pc);
2441
2442
2443   // We have all of the arguments set up at this point. We must not touch any register
2444   // argument registers from here on (if we had to save/restore them there would be no oop map describing them).
2445
2446 {
2447 SkipIfEqual skip(masm, &DTraceMethodProbes, false);
2448 // protect the args we've loaded
2449 save_args(masm, total_c_args, c_arg, out_regs);
2450 __ mov_metadata(c_rarg1, method());
2451 __ call_VM_leaf(
2452 CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_entry),
2453 r15_thread, c_rarg1);
2454 restore_args(masm, total_c_args, c_arg, out_regs);
2455 }
2456
2457 // RedefineClasses() tracing support for obsolete method entry
2458 if (log_is_enabled(Trace, redefine, class, obsolete)) {
2459 // protect the args we've loaded
2460 save_args(masm, total_c_args, c_arg, out_regs);
2461 __ mov_metadata(c_rarg1, method());
2462 __ call_VM_leaf(
2463 CAST_FROM_FN_PTR(address, SharedRuntime::rc_trace_method_entry),
2464 r15_thread, c_rarg1);
2465 restore_args(masm, total_c_args, c_arg, out_regs);
2466 }
2467
2468 // Lock a synchronized method
2469
2470 // Register definitions used by locking and unlocking
2471
2472 const Register swap_reg = rax; // Must use rax for cmpxchg instruction
2473 const Register obj_reg = rbx; // Will contain the oop
2474 const Register lock_reg = r13; // Address of compiler lock object (BasicLock)
2475 const Register old_hdr = r13; // value of old header at unlock time
2476
2477 Label slow_path_lock;
2478 Label lock_done;
2479
2480 if (method->is_synchronized()) {
2481 assert(!is_critical_native, "unhandled");
2482
2483
2484 const int mark_word_offset = BasicLock::displaced_header_offset_in_bytes();
2485
2486 // Get the handle (the 2nd argument)
2487 __ mov(oop_handle_reg, c_rarg1);
2488
2489 // Get address of the box
2490
2491 __ lea(lock_reg, Address(rsp, lock_slot_offset * VMRegImpl::stack_slot_size));
2492
2493 // Load the oop from the handle
2494 __ movptr(obj_reg, Address(oop_handle_reg, 0));
2495
2496 __ resolve(IS_NOT_NULL, obj_reg);
2497 if (UseBiasedLocking) {
2498 __ biased_locking_enter(lock_reg, obj_reg, swap_reg, rscratch1, false, lock_done, &slow_path_lock);
2499 }
2500
2501 // Load immediate 1 into swap_reg %rax
2502 __ movl(swap_reg, 1);
2503
2504 // Load (object->mark() | 1) into swap_reg %rax
2505 __ orptr(swap_reg, Address(obj_reg, oopDesc::mark_offset_in_bytes()));
2506
2507 // Save (object->mark() | 1) into BasicLock's displaced header
2508 __ movptr(Address(lock_reg, mark_word_offset), swap_reg);
2509
2510 // src -> dest iff dest == rax else rax <- dest
2511 __ lock();
2512 __ cmpxchgptr(lock_reg, Address(obj_reg, oopDesc::mark_offset_in_bytes()));
2513 __ jcc(Assembler::equal, lock_done);
2514
2515 // Hmm should this move to the slow path code area???
2516
2517 // Test if the oopMark is an obvious stack pointer, i.e.,
2518 // 1) (mark & 3) == 0, and
2519     // 2) rsp <= mark < rsp + os::pagesize()
2520 // These 3 tests can be done by evaluating the following
2521 // expression: ((mark - rsp) & (3 - os::vm_page_size())),
2522 // assuming both stack pointer and pagesize have their
2523 // least significant 2 bits clear.
2524 // NOTE: the oopMark is in swap_reg %rax as the result of cmpxchg
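    // For instance, with a 4K page 3 - os::vm_page_size() == -4093 == 0x...f003, so the
    // andptr below yields zero exactly when (mark - rsp) has its low two bits clear and
    // is an unsigned value smaller than the page size, i.e. the mark points into this
    // thread's own stack. (Illustrative arithmetic only.)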
2525
2526 __ subptr(swap_reg, rsp);
2527 __ andptr(swap_reg, 3 - os::vm_page_size());
2528
2529 // Save the test result, for recursive case, the result is zero
2530 __ movptr(Address(lock_reg, mark_word_offset), swap_reg);
2531 __ jcc(Assembler::notEqual, slow_path_lock);
2532
2533 // Slow path will re-enter here
2534
2535 __ bind(lock_done);
2536 }
2537
2538
2539 // Finally just about ready to make the JNI call
2540
2541
2542 // get JNIEnv* which is first argument to native
2543 if (!is_critical_native) {
2544 __ lea(c_rarg0, Address(r15_thread, in_bytes(JavaThread::jni_environment_offset())));
2545 }
2546
2547 // Now set thread in native
2548 __ movl(Address(r15_thread, JavaThread::thread_state_offset()), _thread_in_native);
2549
2550 __ call(RuntimeAddress(native_func));
2551
2552 // Verify or restore cpu control state after JNI call
2553 __ restore_cpu_control_state_after_jni();
2554
2555 // Unpack native results.
2556 switch (ret_type) {
2557 case T_BOOLEAN: __ c2bool(rax); break;
2558 case T_CHAR : __ movzwl(rax, rax); break;
2559 case T_BYTE : __ sign_extend_byte (rax); break;
2560 case T_SHORT : __ sign_extend_short(rax); break;
2561 case T_INT : /* nothing to do */ break;
2562 case T_DOUBLE :
2563 case T_FLOAT :
2564 // Result is in xmm0 we'll save as needed
2565 break;
2566 case T_ARRAY: // Really a handle
2567 case T_OBJECT: // Really a handle
2568 break; // can't de-handlize until after safepoint check
2569 case T_VOID: break;
2570 case T_LONG: break;
2571 default : ShouldNotReachHere();
2572 }
2573
2574 // unpin pinned arguments
2575 pinned_slot = oop_handle_offset;
2576 if (pinned_args.length() > 0) {
2577 // save return value that may be overwritten otherwise.
2578 save_native_result(masm, ret_type, stack_slots);
2579 for (int index = 0; index < pinned_args.length(); index ++) {
2580 int i = pinned_args.at(index);
2581 assert(pinned_slot <= stack_slots, "overflow");
2582 if (!in_regs[i].first()->is_stack()) {
2583 int offset = pinned_slot * VMRegImpl::stack_slot_size;
2584 __ movq(in_regs[i].first()->as_Register(), Address(rsp, offset));
2585 pinned_slot += VMRegImpl::slots_per_word;
2586 }
2587 gen_unpin_object(masm, in_regs[i]);
2588 }
2589 restore_native_result(masm, ret_type, stack_slots);
2590 }
2591
2592 // Switch thread to "native transition" state before reading the synchronization state.
2593 // This additional state is necessary because reading and testing the synchronization
2594 // state is not atomic w.r.t. GC, as this scenario demonstrates:
2595 // Java thread A, in _thread_in_native state, loads _not_synchronized and is preempted.
2596 // VM thread changes sync state to synchronizing and suspends threads for GC.
2597 // Thread A is resumed to finish this native method, but doesn't block here since it
2598     //     didn't see any synchronization in progress, and escapes.
2599 __ movl(Address(r15_thread, JavaThread::thread_state_offset()), _thread_in_native_trans);
2600
2601 // Force this write out before the read below
2602 __ membar(Assembler::Membar_mask_bits(
2603 Assembler::LoadLoad | Assembler::LoadStore |
2604 Assembler::StoreLoad | Assembler::StoreStore));
2605
2606 Label after_transition;
2607
2608 // check for safepoint operation in progress and/or pending suspend requests
2609 {
2610 Label Continue;
2611 Label slow_path;
2612
2613 __ safepoint_poll(slow_path, r15_thread, rscratch1);
2614
2615 __ cmpl(Address(r15_thread, JavaThread::suspend_flags_offset()), 0);
2616 __ jcc(Assembler::equal, Continue);
2617 __ bind(slow_path);
2618
2619 // Don't use call_VM as it will see a possible pending exception and forward it
2620 // and never return here preventing us from clearing _last_native_pc down below.
2621 // Also can't use call_VM_leaf either as it will check to see if rsi & rdi are
2622 // preserved and correspond to the bcp/locals pointers. So we do a runtime call
2623 // by hand.
2624 //
2625 __ vzeroupper();
2626 save_native_result(masm, ret_type, stack_slots);
2627 __ mov(c_rarg0, r15_thread);
2628 __ mov(r12, rsp); // remember sp
2629 __ subptr(rsp, frame::arg_reg_save_area_bytes); // windows
2630 __ andptr(rsp, -16); // align stack as required by ABI
2631 if (!is_critical_native) {
2632 __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, JavaThread::check_special_condition_for_native_trans)));
2633 } else {
2634 __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, JavaThread::check_special_condition_for_native_trans_and_transition)));
2635 }
2636 __ mov(rsp, r12); // restore sp
2637 __ reinit_heapbase();
2638 // Restore any method result value
2639 restore_native_result(masm, ret_type, stack_slots);
2640
2641 if (is_critical_native) {
2642 // The call above performed the transition to thread_in_Java so
2643 // skip the transition logic below.
2644 __ jmpb(after_transition);
2645 }
2646
2647 __ bind(Continue);
2648 }
2649
2650 // change thread state
2651 __ movl(Address(r15_thread, JavaThread::thread_state_offset()), _thread_in_Java);
2652 __ bind(after_transition);
2653
2654 Label reguard;
2655 Label reguard_done;
2656 __ cmpl(Address(r15_thread, JavaThread::stack_guard_state_offset()), JavaThread::stack_guard_yellow_reserved_disabled);
2657 __ jcc(Assembler::equal, reguard);
2658 __ bind(reguard_done);
2659
2660 // native result if any is live
2661
2662 // Unlock
2663 Label unlock_done;
2664 Label slow_path_unlock;
2665 if (method->is_synchronized()) {
2666
2667 // Get locked oop from the handle we passed to jni
2668 __ movptr(obj_reg, Address(oop_handle_reg, 0));
2669 __ resolve(IS_NOT_NULL, obj_reg);
2670
2671 Label done;
2672
2673 if (UseBiasedLocking) {
2674 __ biased_locking_exit(obj_reg, old_hdr, done);
2675 }
2676
2677 // Simple recursive lock?
2678
2679 __ cmpptr(Address(rsp, lock_slot_offset * VMRegImpl::stack_slot_size), (int32_t)NULL_WORD);
2680 __ jcc(Assembler::equal, done);
2681
2682     // Must save rax if it is live now because cmpxchg must use it
2683 if (ret_type != T_FLOAT && ret_type != T_DOUBLE && ret_type != T_VOID) {
2684 save_native_result(masm, ret_type, stack_slots);
2685 }
2686
2687
2688 // get address of the stack lock
2689 __ lea(rax, Address(rsp, lock_slot_offset * VMRegImpl::stack_slot_size));
2690 // get old displaced header
2691 __ movptr(old_hdr, Address(rax, 0));
2692
2693 // Atomic swap old header if oop still contains the stack lock
2694 __ lock();
2695 __ cmpxchgptr(old_hdr, Address(obj_reg, oopDesc::mark_offset_in_bytes()));
2696 __ jcc(Assembler::notEqual, slow_path_unlock);
2697
2698 // slow path re-enters here
2699 __ bind(unlock_done);
2700 if (ret_type != T_FLOAT && ret_type != T_DOUBLE && ret_type != T_VOID) {
2701 restore_native_result(masm, ret_type, stack_slots);
2702 }
2703
2704 __ bind(done);
2705
2706 }
2707 {
2708 SkipIfEqual skip(masm, &DTraceMethodProbes, false);
2709 save_native_result(masm, ret_type, stack_slots);
2710 __ mov_metadata(c_rarg1, method());
2711 __ call_VM_leaf(
2712 CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_exit),
2713 r15_thread, c_rarg1);
2714 restore_native_result(masm, ret_type, stack_slots);
2715 }
2716
2717 __ reset_last_Java_frame(false);
2718
2719 // Unbox oop result, e.g. JNIHandles::resolve value.
2720 if (is_reference_type(ret_type)) {
2721 __ resolve_jobject(rax /* value */,
2722 r15_thread /* thread */,
2723 rcx /* tmp */);
2724 }
2725
2726 if (CheckJNICalls) {
2727 // clear_pending_jni_exception_check
2728 __ movptr(Address(r15_thread, JavaThread::pending_jni_exception_check_fn_offset()), NULL_WORD);
2729 }
2730
2731 if (!is_critical_native) {
2732 // reset handle block
2733 __ movptr(rcx, Address(r15_thread, JavaThread::active_handles_offset()));
2734 __ movl(Address(rcx, JNIHandleBlock::top_offset_in_bytes()), (int32_t)NULL_WORD);
2735 }
2736
2737 // pop our frame
2738
2739 __ leave();
2740
2741 if (!is_critical_native) {
2742 // Any exception pending?
2743 __ cmpptr(Address(r15_thread, in_bytes(Thread::pending_exception_offset())), (int32_t)NULL_WORD);
2744 __ jcc(Assembler::notEqual, exception_pending);
2745 }
2746
2747 // Return
2748
2749 __ ret(0);
2750
2751 // Unexpected paths are out of line and go here
2752
2753 if (!is_critical_native) {
2754 // forward the exception
2755 __ bind(exception_pending);
2756
2757 // and forward the exception
2758 __ jump(RuntimeAddress(StubRoutines::forward_exception_entry()));
2759 }
2760
2761 // Slow path locking & unlocking
2762 if (method->is_synchronized()) {
2763
2764 // BEGIN Slow path lock
2765 __ bind(slow_path_lock);
2766
2767 // has last_Java_frame setup. No exceptions so do vanilla call not call_VM
2768 // args are (oop obj, BasicLock* lock, JavaThread* thread)
2769
2770 // protect the args we've loaded
2771 save_args(masm, total_c_args, c_arg, out_regs);
2772
2773 __ mov(c_rarg0, obj_reg);
2774 __ mov(c_rarg1, lock_reg);
2775 __ mov(c_rarg2, r15_thread);
2776
2777 // Not a leaf but we have last_Java_frame setup as we want
2778 __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::complete_monitor_locking_C), 3);
2779 restore_args(masm, total_c_args, c_arg, out_regs);
2780
2781 #ifdef ASSERT
2782 { Label L;
2783 __ cmpptr(Address(r15_thread, in_bytes(Thread::pending_exception_offset())), (int32_t)NULL_WORD);
2784 __ jcc(Assembler::equal, L);
2785 __ stop("no pending exception allowed on exit from monitorenter");
2786 __ bind(L);
2787 }
2788 #endif
2789 __ jmp(lock_done);
2790
2791 // END Slow path lock
2792
2793 // BEGIN Slow path unlock
2794 __ bind(slow_path_unlock);
2795
2796 // If we haven't already saved the native result we must save it now as xmm registers
2797 // are still exposed.
2798 __ vzeroupper();
2799 if (ret_type == T_FLOAT || ret_type == T_DOUBLE ) {
2800 save_native_result(masm, ret_type, stack_slots);
2801 }
2802
2803 __ lea(c_rarg1, Address(rsp, lock_slot_offset * VMRegImpl::stack_slot_size));
2804
2805 __ mov(c_rarg0, obj_reg);
2806 __ mov(c_rarg2, r15_thread);
2807 __ mov(r12, rsp); // remember sp
2808 __ subptr(rsp, frame::arg_reg_save_area_bytes); // windows
2809 __ andptr(rsp, -16); // align stack as required by ABI
2810
2811 // Save pending exception around call to VM (which contains an EXCEPTION_MARK)
2812 // NOTE that obj_reg == rbx currently
2813 __ movptr(rbx, Address(r15_thread, in_bytes(Thread::pending_exception_offset())));
2814 __ movptr(Address(r15_thread, in_bytes(Thread::pending_exception_offset())), (int32_t)NULL_WORD);
2815
2816 // args are (oop obj, BasicLock* lock, JavaThread* thread)
2817 __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, SharedRuntime::complete_monitor_unlocking_C)));
2818 __ mov(rsp, r12); // restore sp
2819 __ reinit_heapbase();
2820 #ifdef ASSERT
2821 {
2822 Label L;
2823 __ cmpptr(Address(r15_thread, in_bytes(Thread::pending_exception_offset())), (int)NULL_WORD);
2824 __ jcc(Assembler::equal, L);
2825 __ stop("no pending exception allowed on exit complete_monitor_unlocking_C");
2826 __ bind(L);
2827 }
2828 #endif /* ASSERT */
2829
2830 __ movptr(Address(r15_thread, in_bytes(Thread::pending_exception_offset())), rbx);
2831
2832 if (ret_type == T_FLOAT || ret_type == T_DOUBLE ) {
2833 restore_native_result(masm, ret_type, stack_slots);
2834 }
2835 __ jmp(unlock_done);
2836
2837 // END Slow path unlock
2838
2839 } // synchronized
2840
2841 // SLOW PATH Reguard the stack if needed
2842
2843 __ bind(reguard);
2844 __ vzeroupper();
2845 save_native_result(masm, ret_type, stack_slots);
2846 __ mov(r12, rsp); // remember sp
2847 __ subptr(rsp, frame::arg_reg_save_area_bytes); // windows
2848 __ andptr(rsp, -16); // align stack as required by ABI
2849 __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, SharedRuntime::reguard_yellow_pages)));
2850 __ mov(rsp, r12); // restore sp
2851 __ reinit_heapbase();
2852 restore_native_result(masm, ret_type, stack_slots);
2853 // and continue
2854 __ jmp(reguard_done);
2855
2856
2857
2858 __ flush();
2859
2860 nmethod *nm = nmethod::new_native_nmethod(method,
2861 compile_id,
2862 masm->code(),
2863 vep_offset,
2864 frame_complete,
2865 stack_slots / VMRegImpl::slots_per_word,
2866 (is_static ? in_ByteSize(klass_offset) : in_ByteSize(receiver_offset)),
2867 in_ByteSize(lock_slot_offset*VMRegImpl::stack_slot_size),
2868 oop_maps);
2869
2870 if (is_critical_native) {
2871 nm->set_lazy_critical_native(true);
2872 }
2873
2874 return nm;
2875
2876 }
2877
2878 // this function returns the adjustment (in number of words) applied to a c2i adapter
2879 // activation for use during deoptimization
2880 int Deoptimization::last_frame_adjust(int callee_parameters, int callee_locals ) {
2881 return (callee_locals - callee_parameters) * Interpreter::stackElementWords;
2882 }
2883
2884
2885 uint SharedRuntime::out_preserve_stack_slots() {
2886 return 0;
2887 }
2888
2889 //------------------------------generate_deopt_blob----------------------------
2890 void SharedRuntime::generate_deopt_blob() {
2891 // Allocate space for the code
2892 ResourceMark rm;
2893 // Setup code generation tools
2894 int pad = 0;
2895 #if INCLUDE_JVMCI
2896 if (EnableJVMCI || UseAOT) {
2897 pad += 512; // Increase the buffer size when compiling for JVMCI
2898 }
2899 #endif
2900 CodeBuffer buffer("deopt_blob", 2048+pad, 1024);
2901 MacroAssembler* masm = new MacroAssembler(&buffer);
2902 int frame_size_in_words;
2903 OopMap* map = NULL;
2904 OopMapSet *oop_maps = new OopMapSet();
2905
2906 // -------------
2907 // This code enters when returning to a de-optimized nmethod. A return
2908 // address has been pushed on the stack, and return values are in
2909 // registers.
2910 // If we are doing a normal deopt then we were called from the patched
2911 // nmethod from the point we returned to the nmethod. So the return
2912 // address on the stack is wrong by NativeCall::instruction_size
2913 // We will adjust the value so it looks like we have the original return
2914 // address on the stack (like when we eagerly deoptimized).
2915 // In the case of an exception pending when deoptimizing, we enter
2916 // with a return address on the stack that points after the call we patched
2917 // into the exception handler. We have the following register state from,
2918 // e.g., the forward exception stub (see stubGenerator_x86_64.cpp).
2919 // rax: exception oop
2920 // rbx: exception handler
2921 // rdx: throwing pc
2922 // So in this case we simply jam rdx into the useless return address and
2923 // the stack looks just like we want.
2924 //
2925 // At this point we need to de-opt. We save the argument return
2926 // registers. We call the first C routine, fetch_unroll_info(). This
2927 // routine captures the return values and returns a structure which
2928 // describes the current frame size and the sizes of all replacement frames.
2929 // The current frame is compiled code and may contain many inlined
2930 // functions, each with their own JVM state. We pop the current frame, then
2931 // push all the new frames. Then we call the C routine unpack_frames() to
2932 // populate these frames. Finally unpack_frames() returns us the new target
2933 // address. Notice that callee-save registers are BLOWN here; they have
2934 // already been captured in the vframeArray at the time the return PC was
2935 // patched.
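// As a purely illustrative example: deoptimizing a compiled frame that had inlined
// a() -> b() -> c() replaces that single frame with three interpreter frames, one per
// JVM state captured in the vframeArray, which unpack_frames() then populates.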
2936 address start = __ pc();
2937 Label cont;
2938
2939 // Prolog for non exception case!
2940
2941 // Save everything in sight.
2942 map = RegisterSaver::save_live_registers(masm, 0, &frame_size_in_words);
2943
2944 // Normal deoptimization. Save exec mode for unpack_frames.
2945 __ movl(r14, Deoptimization::Unpack_deopt); // callee-saved
2946 __ jmp(cont);
2947
2948 int reexecute_offset = __ pc() - start;
2949 #if INCLUDE_JVMCI && !defined(COMPILER1)
2950 if (EnableJVMCI && UseJVMCICompiler) {
2951 // JVMCI does not use this kind of deoptimization
2952 __ should_not_reach_here();
2953 }
2954 #endif
2955
2956 // Reexecute case
2957   // return address is the pc that describes what bci to re-execute at
2958
2959 // No need to update map as each call to save_live_registers will produce identical oopmap
2960 (void) RegisterSaver::save_live_registers(masm, 0, &frame_size_in_words);
2961
2962 __ movl(r14, Deoptimization::Unpack_reexecute); // callee-saved
2963 __ jmp(cont);
2964
2965 #if INCLUDE_JVMCI
2966 Label after_fetch_unroll_info_call;
2967 int implicit_exception_uncommon_trap_offset = 0;
2968 int uncommon_trap_offset = 0;
2969
2970 if (EnableJVMCI || UseAOT) {
2971 implicit_exception_uncommon_trap_offset = __ pc() - start;
2972
2973 __ pushptr(Address(r15_thread, in_bytes(JavaThread::jvmci_implicit_exception_pc_offset())));
2974 __ movptr(Address(r15_thread, in_bytes(JavaThread::jvmci_implicit_exception_pc_offset())), (int32_t)NULL_WORD);
2975
2976 uncommon_trap_offset = __ pc() - start;
2977
2978 // Save everything in sight.
2979 RegisterSaver::save_live_registers(masm, 0, &frame_size_in_words);
2980 // fetch_unroll_info needs to call last_java_frame()
2981 __ set_last_Java_frame(noreg, noreg, NULL);
2982
2983 __ movl(c_rarg1, Address(r15_thread, in_bytes(JavaThread::pending_deoptimization_offset())));
2984 __ movl(Address(r15_thread, in_bytes(JavaThread::pending_deoptimization_offset())), -1);
2985
2986 __ movl(r14, (int32_t)Deoptimization::Unpack_reexecute);
2987 __ mov(c_rarg0, r15_thread);
2988 __ movl(c_rarg2, r14); // exec mode
2989 __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, Deoptimization::uncommon_trap)));
2990 oop_maps->add_gc_map( __ pc()-start, map->deep_copy());
2991
2992 __ reset_last_Java_frame(false);
2993
2994 __ jmp(after_fetch_unroll_info_call);
2995 } // EnableJVMCI
2996 #endif // INCLUDE_JVMCI
2997
2998 int exception_offset = __ pc() - start;
2999
3000 // Prolog for exception case
3001
3002 // all registers are dead at this entry point, except for rax, and
3003 // rdx which contain the exception oop and exception pc
3004 // respectively. Set them in TLS and fall thru to the
3005 // unpack_with_exception_in_tls entry point.
3006
3007 __ movptr(Address(r15_thread, JavaThread::exception_pc_offset()), rdx);
3008 __ movptr(Address(r15_thread, JavaThread::exception_oop_offset()), rax);
3009
3010 int exception_in_tls_offset = __ pc() - start;
3011
3012 // new implementation because exception oop is now passed in JavaThread
3013
3014 // Prolog for exception case
3015 // All registers must be preserved because they might be used by LinearScan
3016   // Exception oop and throwing PC are passed in JavaThread
3017 // tos: stack at point of call to method that threw the exception (i.e. only
3018 // args are on the stack, no return address)
3019
3020 // make room on stack for the return address
3021 // It will be patched later with the throwing pc. The correct value is not
3022 // available now because loading it from memory would destroy registers.
3023 __ push(0);
3024
3025 // Save everything in sight.
3026 map = RegisterSaver::save_live_registers(masm, 0, &frame_size_in_words);
3027
3028 // Now it is safe to overwrite any register
3029
3030 // Deopt during an exception. Save exec mode for unpack_frames.
3031 __ movl(r14, Deoptimization::Unpack_exception); // callee-saved
3032
3033 // load throwing pc from JavaThread and patch it as the return address
3034 // of the current frame. Then clear the field in JavaThread
3035
3036 __ movptr(rdx, Address(r15_thread, JavaThread::exception_pc_offset()));
3037 __ movptr(Address(rbp, wordSize), rdx);
3038 __ movptr(Address(r15_thread, JavaThread::exception_pc_offset()), (int32_t)NULL_WORD);
3039
3040 #ifdef ASSERT
3041 // verify that there is really an exception oop in JavaThread
3042 __ movptr(rax, Address(r15_thread, JavaThread::exception_oop_offset()));
3043 __ verify_oop(rax);
3044
3045 // verify that there is no pending exception
3046 Label no_pending_exception;
3047 __ movptr(rax, Address(r15_thread, Thread::pending_exception_offset()));
3048 __ testptr(rax, rax);
3049 __ jcc(Assembler::zero, no_pending_exception);
3050 __ stop("must not have pending exception here");
3051 __ bind(no_pending_exception);
3052 #endif
3053
3054 __ bind(cont);
3055
3056 // Call C code. Need thread and this frame, but NOT official VM entry
3057 // crud. We cannot block on this call, no GC can happen.
3058 //
3059 // UnrollBlock* fetch_unroll_info(JavaThread* thread, int exec_mode)
3060
3061 // fetch_unroll_info needs to call last_java_frame().
3062
3063 __ set_last_Java_frame(noreg, noreg, NULL);
3064 #ifdef ASSERT
3065 { Label L;
3066 __ cmpptr(Address(r15_thread,
3067 JavaThread::last_Java_fp_offset()),
3068 (int32_t)0);
3069 __ jcc(Assembler::equal, L);
3070 __ stop("SharedRuntime::generate_deopt_blob: last_Java_fp not cleared");
3071 __ bind(L);
3072 }
3073 #endif // ASSERT
3074 __ mov(c_rarg0, r15_thread);
3075 __ movl(c_rarg1, r14); // exec_mode
3076 __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, Deoptimization::fetch_unroll_info)));
3077
3078 // Need to have an oopmap that tells fetch_unroll_info where to
3079 // find any register it might need.
3080 oop_maps->add_gc_map(__ pc() - start, map);
3081
3082 __ reset_last_Java_frame(false);
3083
3084 #if INCLUDE_JVMCI
3085 if (EnableJVMCI || UseAOT) {
3086 __ bind(after_fetch_unroll_info_call);
3087 }
3088 #endif
3089
3090 // Load UnrollBlock* into rdi
3091 __ mov(rdi, rax);
3092
3093 __ movl(r14, Address(rdi, Deoptimization::UnrollBlock::unpack_kind_offset_in_bytes()));
3094 Label noException;
3095 __ cmpl(r14, Deoptimization::Unpack_exception); // Was exception pending?
3096 __ jcc(Assembler::notEqual, noException);
3097 __ movptr(rax, Address(r15_thread, JavaThread::exception_oop_offset()));
3098 // QQQ this is useless; it was NULL above
3099 __ movptr(rdx, Address(r15_thread, JavaThread::exception_pc_offset()));
3100 __ movptr(Address(r15_thread, JavaThread::exception_oop_offset()), (int32_t)NULL_WORD);
3101 __ movptr(Address(r15_thread, JavaThread::exception_pc_offset()), (int32_t)NULL_WORD);
3102
3103 __ verify_oop(rax);
3104
3105 // Overwrite the result registers with the exception results.
3106 __ movptr(Address(rsp, RegisterSaver::rax_offset_in_bytes()), rax);
3107 // I think this is useless
3108 __ movptr(Address(rsp, RegisterSaver::rdx_offset_in_bytes()), rdx);
3109
3110 __ bind(noException);
3111
3112 // Only register save data is on the stack.
3113 // Now restore the result registers. Everything else is either dead
3114 // or captured in the vframeArray.
3115 RegisterSaver::restore_result_registers(masm);
3116
3117 // All of the register save area has been popped off the stack. Only the
3118 // return address remains.
3119
3120 // Pop all the frames we must move/replace.
3121 //
3122 // Frame picture (youngest to oldest)
3123 // 1: self-frame (no frame link)
3124 // 2: deopting frame (no frame link)
3125 // 3: caller of deopting frame (could be compiled/interpreted).
3126 //
3127 // Note: by leaving the return address of the self-frame on the stack
3128 // and using the size of frame 2 to adjust the stack,
3129 // the return to frame 3 will still be on the stack when we are done.
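// In place of frames 1 and 2 the loop below pushes one skeletal interpreter
// frame for each entry in the UnrollBlock's frame_sizes/frame_pcs arrays, and
// then re-pushes a self-frame for the call to unpack_frames.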
3130
3131 // Pop deoptimized frame
3132 __ movl(rcx, Address(rdi, Deoptimization::UnrollBlock::size_of_deoptimized_frame_offset_in_bytes()));
3133 __ addptr(rsp, rcx);
3134
3135 // rsp should be pointing at the return address to the caller (3)
3136
3137 // Pick up the initial fp we should save
3138 // restore rbp before stack bang because if stack overflow is thrown it needs to be pushed (and preserved)
3139 __ movptr(rbp, Address(rdi, Deoptimization::UnrollBlock::initial_info_offset_in_bytes()));
3140
3141 #ifdef ASSERT
3142 // Compilers generate code that bangs the stack by as much as the
3143 // interpreter would need. So this stack banging should never
3144 // trigger a fault. Verify that it does not on non-product builds.
3145 if (UseStackBanging) {
3146 __ movl(rbx, Address(rdi, Deoptimization::UnrollBlock::total_frame_sizes_offset_in_bytes()));
3147 __ bang_stack_size(rbx, rcx);
3148 }
3149 #endif
3150
3151 // Load address of array of frame pcs into rcx
3152 __ movptr(rcx, Address(rdi, Deoptimization::UnrollBlock::frame_pcs_offset_in_bytes()));
3153
3154 // Trash the old pc
3155 __ addptr(rsp, wordSize);
3156
3157 // Load address of array of frame sizes into rsi
3158 __ movptr(rsi, Address(rdi, Deoptimization::UnrollBlock::frame_sizes_offset_in_bytes()));
3159
3160 // Load counter into rdx
3161 __ movl(rdx, Address(rdi, Deoptimization::UnrollBlock::number_of_frames_offset_in_bytes()));
3162
3163 // Now adjust the caller's stack to make up for the extra locals,
3164 // but record the original sp so that we can save it in the skeletal interpreter
3165 // frame; the stack walking of interpreter_sender will then get the unextended sp
3166 // value rather than the "real" sp value.
3167
3168 const Register sender_sp = r8;
3169
3170 __ mov(sender_sp, rsp);
3171 __ movl(rbx, Address(rdi,
3172 Deoptimization::UnrollBlock::
3173 caller_adjustment_offset_in_bytes()));
3174 __ subptr(rsp, rbx);
3175
3176 // Push interpreter frames in a loop
3177 Label loop;
3178 __ bind(loop);
3179 __ movptr(rbx, Address(rsi, 0)); // Load frame size
3180 __ subptr(rbx, 2*wordSize); // We'll push pc and ebp by hand
3181 __ pushptr(Address(rcx, 0)); // Save return address
3182 __ enter(); // Save old & set new ebp
3183 __ subptr(rsp, rbx); // Prolog
3184 // This value is corrected by layout_activation_impl
3185 __ movptr(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize), (int32_t)NULL_WORD );
3186 __ movptr(Address(rbp, frame::interpreter_frame_sender_sp_offset * wordSize), sender_sp); // Make it walkable
3187 __ mov(sender_sp, rsp); // Pass sender_sp to next frame
3188 __ addptr(rsi, wordSize); // Bump array pointer (sizes)
3189 __ addptr(rcx, wordSize); // Bump array pointer (pcs)
3190 __ decrementl(rdx); // Decrement counter
3191 __ jcc(Assembler::notZero, loop);
3192 __ pushptr(Address(rcx, 0)); // Save final return address
3193
3194 // Re-push self-frame
3195 __ enter(); // Save old & set new ebp
3196
3197 // Allocate a full sized register save area.
3198 // Return address and rbp are in place, so we allocate two fewer words.
3199 __ subptr(rsp, (frame_size_in_words - 2) * wordSize);
3200
3201 // Restore frame locals after moving the frame
3202 __ movdbl(Address(rsp, RegisterSaver::xmm0_offset_in_bytes()), xmm0);
3203 __ movptr(Address(rsp, RegisterSaver::rax_offset_in_bytes()), rax);
3204
3205 // Call C code. Need thread but NOT official VM entry
3206 // crud. We cannot block on this call, no GC can happen. Call should
3207 // restore return values to their stack-slots with the new SP.
3208 //
3209 // void Deoptimization::unpack_frames(JavaThread* thread, int exec_mode)
3210
3211 // Use rbp because the frames look interpreted now
3212 // Save "the_pc" since it cannot easily be retrieved using the last_java_SP after we aligned SP.
3213 // Don't need the precise return PC here, just precise enough to point into this code blob.
3214 address the_pc = __ pc();
3215 __ set_last_Java_frame(noreg, rbp, the_pc);
3216
3217 __ andptr(rsp, -(StackAlignmentInBytes)); // Fix stack alignment as required by ABI
3218 __ mov(c_rarg0, r15_thread);
3219 __ movl(c_rarg1, r14); // second arg: exec_mode
3220 __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, Deoptimization::unpack_frames)));
3221 // Revert SP alignment after call since we're going to do some SP relative addressing below
3222 __ movptr(rsp, Address(r15_thread, JavaThread::last_Java_sp_offset()));
3223
3224 // Set an oopmap for the call site
3225 // Use the same PC we used for the last java frame
3226 oop_maps->add_gc_map(the_pc - start,
3227 new OopMap( frame_size_in_words, 0 ));
3228
3229 // Clear fp AND pc
3230 __ reset_last_Java_frame(true);
3231
3232 // Collect return values
3233 __ movdbl(xmm0, Address(rsp, RegisterSaver::xmm0_offset_in_bytes()));
3234 __ movptr(rax, Address(rsp, RegisterSaver::rax_offset_in_bytes()));
3235 // I think this is useless (throwing pc?)
3236 __ movptr(rdx, Address(rsp, RegisterSaver::rdx_offset_in_bytes()));
3237
3238 // Pop self-frame.
3239 __ leave(); // Epilog
3240
3241 // Jump to interpreter
3242 __ ret(0);
3243
3244 // Make sure all code is generated
3245 masm->flush();
3246
3247 _deopt_blob = DeoptimizationBlob::create(&buffer, oop_maps, 0, exception_offset, reexecute_offset, frame_size_in_words);
3248 _deopt_blob->set_unpack_with_exception_in_tls_offset(exception_in_tls_offset);
3249 #if INCLUDE_JVMCI
3250 if (EnableJVMCI || UseAOT) {
3251 _deopt_blob->set_uncommon_trap_offset(uncommon_trap_offset);
3252 _deopt_blob->set_implicit_exception_uncommon_trap_offset(implicit_exception_uncommon_trap_offset);
3253 }
3254 #endif
3255 }
3256
3257 #ifdef COMPILER2
3258 //------------------------------generate_uncommon_trap_blob--------------------
3259 void SharedRuntime::generate_uncommon_trap_blob() {
3260 // Allocate space for the code
3261 ResourceMark rm;
3262 // Setup code generation tools
3263 CodeBuffer buffer("uncommon_trap_blob", 2048, 1024);
3264 MacroAssembler* masm = new MacroAssembler(&buffer);
3265
3266 assert(SimpleRuntimeFrame::framesize % 4 == 0, "sp not 16-byte aligned");
3267
3268 address start = __ pc();
3269
3270 if (UseRTMLocking) {
3271 // Abort RTM transaction before possible nmethod deoptimization.
3272 __ xabort(0);
3273 }
3274
3275 // Push self-frame. We get here with a return address on the
3276 // stack, so rsp is 8-byte aligned until we allocate our frame.
3277 __ subptr(rsp, SimpleRuntimeFrame::return_off << LogBytesPerInt); // Prolog!
3278
3279 // No callee saved registers. rbp is assumed implicitly saved
3280 __ movptr(Address(rsp, SimpleRuntimeFrame::rbp_off << LogBytesPerInt), rbp);
3281
3282 // The compiler left unloaded_class_index in j_rarg0; move it to where the
3283 // runtime expects it.
3284 __ movl(c_rarg1, j_rarg0);
3285
3286 __ set_last_Java_frame(noreg, noreg, NULL);
3287
3288 // Call C code. Need thread but NOT official VM entry
3289 // crud. We cannot block on this call, no GC can happen. Call should
3290 // capture callee-saved registers as well as return values.
3291 // Thread is in rdi already.
3292 //
3293 // UnrollBlock* uncommon_trap(JavaThread* thread, jint unloaded_class_index, jint exec_mode);
3294
3295 __ mov(c_rarg0, r15_thread);
3296 __ movl(c_rarg2, Deoptimization::Unpack_uncommon_trap);
3297 __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, Deoptimization::uncommon_trap)));
3298
3299 // Set an oopmap for the call site
3300 OopMapSet* oop_maps = new OopMapSet();
3301 OopMap* map = new OopMap(SimpleRuntimeFrame::framesize, 0);
3302
3303 // location of rbp is known implicitly by the frame sender code
3304
3305 oop_maps->add_gc_map(__ pc() - start, map);
3306
3307 __ reset_last_Java_frame(false);
3308
3309 // Load UnrollBlock* into rdi
3310 __ mov(rdi, rax);
3311
3312 #ifdef ASSERT
3313 { Label L;
3314 __ cmpptr(Address(rdi, Deoptimization::UnrollBlock::unpack_kind_offset_in_bytes()),
3315 (int32_t)Deoptimization::Unpack_uncommon_trap);
3316 __ jcc(Assembler::equal, L);
3317 __ stop("SharedRuntime::generate_deopt_blob: expected Unpack_uncommon_trap");
3318 __ bind(L);
3319 }
3320 #endif
3321
3322 // Pop all the frames we must move/replace.
3323 //
3324 // Frame picture (youngest to oldest)
3325 // 1: self-frame (no frame link)
3326 // 2: deopting frame (no frame link)
3327 // 3: caller of deopting frame (could be compiled/interpreted).
3328
3329 // Pop self-frame. We have no frame, and must rely only on rax and rsp.
3330 __ addptr(rsp, (SimpleRuntimeFrame::framesize - 2) << LogBytesPerInt); // Epilog!
3331
3332 // Pop deoptimized frame (int)
3333 __ movl(rcx, Address(rdi,
3334 Deoptimization::UnrollBlock::
3335 size_of_deoptimized_frame_offset_in_bytes()));
3336 __ addptr(rsp, rcx);
3337
3338 // rsp should be pointing at the return address to the caller (3)
3339
3340 // Pick up the initial fp we should save
3341 // restore rbp before stack bang because if stack overflow is thrown it needs to be pushed (and preserved)
3342 __ movptr(rbp, Address(rdi, Deoptimization::UnrollBlock::initial_info_offset_in_bytes()));
3343
3344 #ifdef ASSERT
3345 // Compilers generate code that bangs the stack by as much as the
3346 // interpreter would need. So this stack banging should never
3347 // trigger a fault. Verify that it does not on non-product builds.
3348 if (UseStackBanging) {
3349 __ movl(rbx, Address(rdi, Deoptimization::UnrollBlock::total_frame_sizes_offset_in_bytes()));
3350 __ bang_stack_size(rbx, rcx);
3351 }
3352 #endif
3353
3354 // Load address of array of frame pcs into rcx (address*)
3355 __ movptr(rcx, Address(rdi, Deoptimization::UnrollBlock::frame_pcs_offset_in_bytes()));
3356
3357 // Trash the return pc
3358 __ addptr(rsp, wordSize);
3359
3360 // Load address of array of frame sizes into rsi (intptr_t*)
3361 __ movptr(rsi, Address(rdi, Deoptimization::UnrollBlock::frame_sizes_offset_in_bytes()));
3362
3363 // Counter
3364 __ movl(rdx, Address(rdi, Deoptimization::UnrollBlock::number_of_frames_offset_in_bytes())); // (int)
3365
3366 // Now adjust the caller's stack to make up for the extra locals, but
3367 // record the original sp so that we can save it in the skeletal
3368 // interpreter frame; the stack walking of interpreter_sender
3369 // will then get the unextended sp value rather than the "real" sp value.
3370
3371 const Register sender_sp = r8;
3372
3373 __ mov(sender_sp, rsp);
3374 __ movl(rbx, Address(rdi, Deoptimization::UnrollBlock::caller_adjustment_offset_in_bytes())); // (int)
3375 __ subptr(rsp, rbx);
3376
3377 // Push interpreter frames in a loop
3378 Label loop;
3379 __ bind(loop);
3380 __ movptr(rbx, Address(rsi, 0)); // Load frame size
3381 __ subptr(rbx, 2 * wordSize); // We'll push pc and rbp by hand
3382 __ pushptr(Address(rcx, 0)); // Save return address
3383 __ enter(); // Save old & set new rbp
3384 __ subptr(rsp, rbx); // Prolog
3385 __ movptr(Address(rbp, frame::interpreter_frame_sender_sp_offset * wordSize),
3386 sender_sp); // Make it walkable
3387 // This value is corrected by layout_activation_impl
3388 __ movptr(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize), (int32_t)NULL_WORD );
3389 __ mov(sender_sp, rsp); // Pass sender_sp to next frame
3390 __ addptr(rsi, wordSize); // Bump array pointer (sizes)
3391 __ addptr(rcx, wordSize); // Bump array pointer (pcs)
3392 __ decrementl(rdx); // Decrement counter
3393 __ jcc(Assembler::notZero, loop);
3394 __ pushptr(Address(rcx, 0)); // Save final return address
3395
3396 // Re-push self-frame
3397 __ enter(); // Save old & set new rbp
3398 __ subptr(rsp, (SimpleRuntimeFrame::framesize - 4) << LogBytesPerInt);
3399 // Prolog
3400
3401 // Use rbp because the frames look interpreted now
3402 // Save "the_pc" since it cannot easily be retrieved using the last_java_SP after we aligned SP.
3403 // Don't need the precise return PC here, just precise enough to point into this code blob.
3404 address the_pc = __ pc();
3405 __ set_last_Java_frame(noreg, rbp, the_pc);
3406
3407 // Call C code. Need thread but NOT official VM entry
3408 // crud. We cannot block on this call, no GC can happen. Call should
3409 // restore return values to their stack-slots with the new SP.
3410 // Thread is in rdi already.
3411 //
3412 // BasicType unpack_frames(JavaThread* thread, int exec_mode);
3413
3414 __ andptr(rsp, -(StackAlignmentInBytes)); // Align SP as required by ABI
3415 __ mov(c_rarg0, r15_thread);
3416 __ movl(c_rarg1, Deoptimization::Unpack_uncommon_trap);
3417 __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, Deoptimization::unpack_frames)));
3418
3419 // Set an oopmap for the call site
3420 // Use the same PC we used for the last java frame
3421 oop_maps->add_gc_map(the_pc - start, new OopMap(SimpleRuntimeFrame::framesize, 0));
3422
3423 // Clear fp AND pc
3424 __ reset_last_Java_frame(true);
3425
3426 // Pop self-frame.
3427 __ leave(); // Epilog
3428
3429 // Jump to interpreter
3430 __ ret(0);
3431
3432 // Make sure all code is generated
3433 masm->flush();
3434
3435 _uncommon_trap_blob = UncommonTrapBlob::create(&buffer, oop_maps,
3436 SimpleRuntimeFrame::framesize >> 1);
3437 }
3438 #endif // COMPILER2
3439
3440
3441 //------------------------------generate_handler_blob------
3442 //
3443 // Generate a special Compile2Runtime blob that saves all registers,
3444 // and sets up an oopmap.
3445 //
3446 SafepointBlob* SharedRuntime::generate_handler_blob(address call_ptr, int poll_type) {
3447 assert(StubRoutines::forward_exception_entry() != NULL,
3448 "must be generated before");
3449
3450 ResourceMark rm;
3451 OopMapSet *oop_maps = new OopMapSet();
3452 OopMap* map;
3453
3454 // Allocate space for the code. Setup code generation tools.
3455 CodeBuffer buffer("handler_blob", 2048, 1024);
3456 MacroAssembler* masm = new MacroAssembler(&buffer);
3457
3458 address start = __ pc();
3459 address call_pc = NULL;
3460 int frame_size_in_words;
3461 bool cause_return = (poll_type == POLL_AT_RETURN);
3462 bool save_vectors = (poll_type == POLL_AT_VECTOR_LOOP);
3463
3464 if (UseRTMLocking) {
3465 // Abort RTM transaction before calling runtime
3466 // because the critical section will be large and will be
3467 // aborted anyway. Also, the nmethod could be deoptimized.
3468 __ xabort(0);
3469 }
3470
3471 // Make room for return address (or push it again)
3472 if (!cause_return) {
3473 __ push(rbx);
3474 }
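  // When the poll is not at a return (cause_return == false) the top of stack
  // does not yet hold a return address for this frame, so rbx is pushed here
  // purely as a placeholder; the slot is overwritten below with the pc of the
  // poll instruction that the signal handler saved in the thread.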
3475
3476 // Save registers, fpu state, and flags
3477 map = RegisterSaver::save_live_registers(masm, 0, &frame_size_in_words, save_vectors);
3478
3479 // The following is basically a call_VM. However, we need the precise
3480 // address of the call in order to generate an oopmap. Hence, we do all the
3481 // work ourselves.
3482
3483 __ set_last_Java_frame(noreg, noreg, NULL);
3484
3485 // The return address must always be correct so that the frame constructor never
3486 // sees an invalid pc.
3487
3488 if (!cause_return) {
3489 // Get the return pc saved by the signal handler and stash it in its appropriate place on the stack.
3490 // Additionally, rbx is a callee saved register and we can look at it later to determine
3491 // if someone changed the return address for us!
3492 __ movptr(rbx, Address(r15_thread, JavaThread::saved_exception_pc_offset()));
3493 __ movptr(Address(rbp, wordSize), rbx);
3494 }
3495
3496 // Do the call
3497 __ mov(c_rarg0, r15_thread);
3498 __ call(RuntimeAddress(call_ptr));
3499
3500 // Set an oopmap for the call site. This oopmap will map all
3501 // oop-registers and debug-info registers as callee-saved. This
3502 // will allow deoptimization at this safepoint to find all possible
3503 // debug-info recordings, as well as let GC find all oops.
3504
3505 oop_maps->add_gc_map( __ pc() - start, map);
3506
3507 Label noException;
3508
3509 __ reset_last_Java_frame(false);
3510
3511 __ cmpptr(Address(r15_thread, Thread::pending_exception_offset()), (int32_t)NULL_WORD);
3512 __ jcc(Assembler::equal, noException);
3513
3514 // Exception pending
3515
3516 RegisterSaver::restore_live_registers(masm, save_vectors);
3517
3518 __ jump(RuntimeAddress(StubRoutines::forward_exception_entry()));
3519
3520 // No exception case
3521 __ bind(noException);
3522
3523 Label no_adjust;
3524 #ifdef ASSERT
3525 Label bail;
3526 #endif
3527 if (SafepointMechanism::uses_thread_local_poll() && !cause_return) {
3528 Label no_prefix, not_special;
3529
3530 // If our stashed return pc was modified by the runtime we avoid touching it
3531 __ cmpptr(rbx, Address(rbp, wordSize));
3532 __ jccb(Assembler::notEqual, no_adjust);
3533
3534 // Skip over the poll instruction.
3535 // See NativeInstruction::is_safepoint_poll()
3536 // Possible encodings:
3537 // 85 00 test %eax,(%rax)
3538 // 85 01 test %eax,(%rcx)
3539 // 85 02 test %eax,(%rdx)
3540 // 85 03 test %eax,(%rbx)
3541 // 85 06 test %eax,(%rsi)
3542 // 85 07 test %eax,(%rdi)
3543 //
3544 // 41 85 00 test %eax,(%r8)
3545 // 41 85 01 test %eax,(%r9)
3546 // 41 85 02 test %eax,(%r10)
3547 // 41 85 03 test %eax,(%r11)
3548 // 41 85 06 test %eax,(%r14)
3549 // 41 85 07 test %eax,(%r15)
3550 //
3551 // 85 04 24 test %eax,(%rsp)
3552 // 41 85 04 24 test %eax,(%r12)
3553 // 85 45 00 test %eax,0x0(%rbp)
3554 // 41 85 45 00 test %eax,0x0(%r13)
3555
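  // Each of these encodings is the two-byte test (opcode 0x85 plus a ModRM byte),
  // optionally preceded by a REX.B prefix (0x41) and optionally followed by one
  // extra byte: a SIB byte for an rsp/r12 base or a zero displacement byte for an
  // rbp/r13 base. The code below advances rbx past the prefix and the extra byte
  // when present, then adds 2 for the opcode and ModRM byte itself.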
3556 __ cmpb(Address(rbx, 0), NativeTstRegMem::instruction_rex_b_prefix);
3557 __ jcc(Assembler::notEqual, no_prefix);
3558 __ addptr(rbx, 1);
3559 __ bind(no_prefix);
3560 #ifdef ASSERT
3561 __ movptr(rax, rbx); // remember where 0x85 should be, for verification below
3562 #endif
3563 // r12/r13/rsp/rbp base encoding takes 3 bytes with the following register values:
3564 // r12/rsp 0x04
3565 // r13/rbp 0x05
3566 __ movzbq(rcx, Address(rbx, 1));
3567 __ andptr(rcx, 0x07); // looking for 0x04 .. 0x05
3568 __ subptr(rcx, 4); // looking for 0x00 .. 0x01
3569 __ cmpptr(rcx, 1);
3570 __ jcc(Assembler::above, not_special);
3571 __ addptr(rbx, 1);
3572 __ bind(not_special);
3573 #ifdef ASSERT
3574 // Verify the correct encoding of the poll we're about to skip.
3575 __ cmpb(Address(rax, 0), NativeTstRegMem::instruction_code_memXregl);
3576 __ jcc(Assembler::notEqual, bail);
3577 // Mask out the modrm bits
3578 __ testb(Address(rax, 1), NativeTstRegMem::modrm_mask);
3579 // rax encodes to 0, so if the bits are nonzero it's incorrect
3580 __ jcc(Assembler::notZero, bail);
3581 #endif
3582 // Adjust return pc forward to step over the safepoint poll instruction
3583 __ addptr(rbx, 2);
3584 __ movptr(Address(rbp, wordSize), rbx);
3585 }
3586
3587 __ bind(no_adjust);
3588 // Normal exit, restore registers and exit.
3589 RegisterSaver::restore_live_registers(masm, save_vectors);
3590 __ ret(0);
3591
3592 #ifdef ASSERT
3593 __ bind(bail);
3594 __ stop("Attempting to adjust pc to skip safepoint poll but the return point is not what we expected");
3595 #endif
3596
3597 // Make sure all code is generated
3598 masm->flush();
3599
3600 // Fill-out other meta info
3601 return SafepointBlob::create(&buffer, oop_maps, frame_size_in_words);
3602 }
3603
3604 //
3605 // generate_resolve_blob - call resolution (static/virtual/opt-virtual/ic-miss)
3606 //
3607 // Generate a stub that calls into the VM to find out the proper destination
3608 // of a Java call. All the argument registers are live at this point,
3609 // but since this is generic code we don't know what they are, and the caller
3610 // must do any GC of the args.
3611 //
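// The call below returns the resolved code entry in rax and leaves the callee
// Method* in the thread's vm_result_2; both are written back into the register
// save area so that restore_live_registers() reloads them (rax as the jump
// target, rbx presumably as the Method* register expected by the callee's
// adapter) before the final jmp.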
3612 RuntimeStub* SharedRuntime::generate_resolve_blob(address destination, const char* name) {
3613 assert (StubRoutines::forward_exception_entry() != NULL, "must be generated before");
3614
3615 // allocate space for the code
3616 ResourceMark rm;
3617
3618 CodeBuffer buffer(name, 1000, 512);
3619 MacroAssembler* masm = new MacroAssembler(&buffer);
3620
3621 int frame_size_in_words;
3622
3623 OopMapSet *oop_maps = new OopMapSet();
3624 OopMap* map = NULL;
3625
3626 int start = __ offset();
3627
3628 map = RegisterSaver::save_live_registers(masm, 0, &frame_size_in_words);
3629
3630 int frame_complete = __ offset();
3631
3632 __ set_last_Java_frame(noreg, noreg, NULL);
3633
3634 __ mov(c_rarg0, r15_thread);
3635
3636 __ call(RuntimeAddress(destination));
3637
3638
3639 // Set an oopmap for the call site.
3640 // We need this not only for callee-saved registers, but also for volatile
3641 // registers that the compiler might be keeping live across a safepoint.
3642
3643 oop_maps->add_gc_map( __ offset() - start, map);
3644
3645 // rax contains the address we are going to jump to, assuming no exception got installed
3646
3647 // clear last_Java_sp
3648 __ reset_last_Java_frame(false);
3649 // check for pending exceptions
3650 Label pending;
3651 __ cmpptr(Address(r15_thread, Thread::pending_exception_offset()), (int32_t)NULL_WORD);
3652 __ jcc(Assembler::notEqual, pending);
3653
3654 // get the returned Method*
3655 __ get_vm_result_2(rbx, r15_thread);
3656 __ movptr(Address(rsp, RegisterSaver::rbx_offset_in_bytes()), rbx);
3657
3658 __ movptr(Address(rsp, RegisterSaver::rax_offset_in_bytes()), rax);
3659
3660 RegisterSaver::restore_live_registers(masm);
3661
3662 // We are back to the original state on entry and ready to go.
3663
3664 __ jmp(rax);
3665
3666 // Pending exception after the safepoint
3667
3668 __ bind(pending);
3669
3670 RegisterSaver::restore_live_registers(masm);
3671
3672 // exception pending => remove activation and forward to exception handler
3673
3674 __ movptr(Address(r15_thread, JavaThread::vm_result_offset()), (int)NULL_WORD);
3675
3676 __ movptr(rax, Address(r15_thread, Thread::pending_exception_offset()));
3677 __ jump(RuntimeAddress(StubRoutines::forward_exception_entry()));
3678
3679 // -------------
3680 // make sure all code is generated
3681 masm->flush();
3682
3683 // return the blob
3684 // frame_size_words or bytes??
3685 return RuntimeStub::new_runtime_stub(name, &buffer, frame_complete, frame_size_in_words, oop_maps, true);
3686 }
3687
3688
3689 //------------------------------Montgomery multiplication------------------------
3690 //
3691
3692 #ifndef _WINDOWS
3693
3694 #define ASM_SUBTRACT
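// When ASM_SUBTRACT is defined, the multi-word subtraction below is done with an
// inline-assembly sbb loop; otherwise a portable fallback using a GCC 128-bit
// integer type is compiled instead (see the #else branch).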
3695
3696 #ifdef ASM_SUBTRACT
3697 // Subtract 0:b from carry:a. Return carry.
3698 static unsigned long
3699 sub(unsigned long a[], unsigned long b[], unsigned long carry, long len) {
3700 long i = 0, cnt = len;
3701 unsigned long tmp;
3702 asm volatile("clc; "
3703 "0: ; "
3704 "mov (%[b], %[i], 8), %[tmp]; "
3705 "sbb %[tmp], (%[a], %[i], 8); "
3706 "inc %[i]; dec %[cnt]; "
3707 "jne 0b; "
3708 "mov %[carry], %[tmp]; sbb $0, %[tmp]; "
3709 : [i]"+r"(i), [cnt]"+r"(cnt), [tmp]"=&r"(tmp)
3710 : [a]"r"(a), [b]"r"(b), [carry]"r"(carry)
3711 : "memory");
3712 return tmp;
3713 }
3714 #else // ASM_SUBTRACT
3715 typedef int __attribute__((mode(TI))) int128;
3716
3717 // Subtract 0:b from carry:a. Return carry.
3718 static unsigned long
3719 sub(unsigned long a[], unsigned long b[], unsigned long carry, int len) {
3720 int128 tmp = 0;
3721 int i;
3722 for (i = 0; i < len; i++) {
3723 tmp += a[i];
3724 tmp -= b[i];
3725 a[i] = tmp;
3726 tmp >>= 64;
3727 assert(-1 <= tmp && tmp <= 0, "invariant");
3728 }
3729 return tmp + carry;
3730 }
3731 #endif // ! ASM_SUBTRACT
3732
3733 // Multiply (unsigned) Long A by Long B, accumulating the double-
3734 // length result into the accumulator formed of T0, T1, and T2.
3735 #define MACC(A, B, T0, T1, T2) \
3736 do { \
3737 unsigned long hi, lo; \
3738 __asm__ ("mul %5; add %%rax, %2; adc %%rdx, %3; adc $0, %4" \
3739 : "=&d"(hi), "=a"(lo), "+r"(T0), "+r"(T1), "+g"(T2) \
3740 : "r"(A), "a"(B) : "cc"); \
3741 } while(0)
3742
3743 // As above, but add twice the double-length result into the
3744 // accumulator.
3745 #define MACC2(A, B, T0, T1, T2) \
3746 do { \
3747 unsigned long hi, lo; \
3748 __asm__ ("mul %5; add %%rax, %2; adc %%rdx, %3; adc $0, %4; " \
3749 "add %%rax, %2; adc %%rdx, %3; adc $0, %4" \
3750 : "=&d"(hi), "=a"(lo), "+r"(T0), "+r"(T1), "+g"(T2) \
3751 : "r"(A), "a"(B) : "cc"); \
3752 } while(0)
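// Both macros treat T2:T1:T0 as a 192-bit accumulator: mul leaves the 128-bit
// product A*B in rdx:rax, which is then added into the accumulator with carry
// propagation, once for MACC and twice for MACC2. MACC2 is used below for the
// off-diagonal a[j]*a[i-j] terms of a square, which occur twice in the product.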
3753
3754 // Fast Montgomery multiplication. The derivation of the algorithm is
3755 // in A Cryptographic Library for the Motorola DSP56000,
3756 // Dusse and Kaliski, Proc. EUROCRYPT 90, pp. 230-237.
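// In the usual Montgomery formulation this computes m = a * b * R^-1 mod n with
// R = 2^(64*len), where inv is expected to be -n^-1 mod 2^64 (checked by the
// assert below); the word-by-word reduction is interleaved with the multiplication.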
3757
3758 static void __attribute__((noinline))
3759 montgomery_multiply(unsigned long a[], unsigned long b[], unsigned long n[],
3760 unsigned long m[], unsigned long inv, int len) {
3761 unsigned long t0 = 0, t1 = 0, t2 = 0; // Triple-precision accumulator
3762 int i;
3763
3764 assert(inv * n[0] == -1UL, "broken inverse in Montgomery multiply");
3765
3766 for (i = 0; i < len; i++) {
3767 int j;
3768 for (j = 0; j < i; j++) {
3769 MACC(a[j], b[i-j], t0, t1, t2);
3770 MACC(m[j], n[i-j], t0, t1, t2);
3771 }
3772 MACC(a[i], b[0], t0, t1, t2);
3773 m[i] = t0 * inv;
3774 MACC(m[i], n[0], t0, t1, t2);
3775
3776 assert(t0 == 0, "broken Montgomery multiply");
3777
3778 t0 = t1; t1 = t2; t2 = 0;
3779 }
3780
3781 for (i = len; i < 2*len; i++) {
3782 int j;
3783 for (j = i-len+1; j < len; j++) {
3784 MACC(a[j], b[i-j], t0, t1, t2);
3785 MACC(m[j], n[i-j], t0, t1, t2);
3786 }
3787 m[i-len] = t0;
3788 t0 = t1; t1 = t2; t2 = 0;
3789 }
3790
3791 while (t0)
3792 t0 = sub(m, n, t0, len);
3793 }
3794
3795 // Fast Montgomery squaring. This uses asymptotically 25% fewer
3796 // multiplies so it should be up to 25% faster than Montgomery
3797 // multiplication. However, its loop control is more complex and it
3798 // may actually run slower on some machines.
3799
3800 static void __attribute__((noinline))
3801 montgomery_square(unsigned long a[], unsigned long n[],
3802 unsigned long m[], unsigned long inv, int len) {
3803 unsigned long t0 = 0, t1 = 0, t2 = 0; // Triple-precision accumulator
3804 int i;
3805
3806 assert(inv * n[0] == -1UL, "broken inverse in Montgomery multiply");
3807
3808 for (i = 0; i < len; i++) {
3809 int j;
3810 int end = (i+1)/2;
3811 for (j = 0; j < end; j++) {
3812 MACC2(a[j], a[i-j], t0, t1, t2);
3813 MACC(m[j], n[i-j], t0, t1, t2);
3814 }
3815 if ((i & 1) == 0) {
3816 MACC(a[j], a[j], t0, t1, t2);
3817 }
3818 for (; j < i; j++) {
3819 MACC(m[j], n[i-j], t0, t1, t2);
3820 }
3821 m[i] = t0 * inv;
3822 MACC(m[i], n[0], t0, t1, t2);
3823
3824 assert(t0 == 0, "broken Montgomery square");
3825
3826 t0 = t1; t1 = t2; t2 = 0;
3827 }
3828
3829 for (i = len; i < 2*len; i++) {
3830 int start = i-len+1;
3831 int end = start + (len - start)/2;
3832 int j;
3833 for (j = start; j < end; j++) {
3834 MACC2(a[j], a[i-j], t0, t1, t2);
3835 MACC(m[j], n[i-j], t0, t1, t2);
3836 }
3837 if ((i & 1) == 0) {
3838 MACC(a[j], a[j], t0, t1, t2);
3839 }
3840 for (; j < len; j++) {
3841 MACC(m[j], n[i-j], t0, t1, t2);
3842 }
3843 m[i-len] = t0;
3844 t0 = t1; t1 = t2; t2 = 0;
3845 }
3846
3847 while (t0)
3848 t0 = sub(m, n, t0, len);
3849 }
3850
3851 // Swap words in a longword.
3852 static unsigned long swap(unsigned long x) {
3853 return (x << 32) | (x >> 32);
3854 }
3855
3856 // Copy len longwords from s to d, word-swapping as we go. The
3857 // destination array is reversed.
3858 static void reverse_words(unsigned long *s, unsigned long *d, int len) {
3859 d += len;
3860 while(len-- > 0) {
3861 d--;
3862 *d = swap(*s);
3863 s++;
3864 }
3865 }
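// The jint operands arrive with the most significant 32-bit word first (as in
// BigInteger's magnitude arrays), whereas the routines above work on arrays of
// 64-bit words with the least significant word first; reverse_words converts
// between the two layouts (and back again for the result).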
3866
3867 // The threshold at which squaring is advantageous was determined
3868 // experimentally on an i7-3930K (Ivy Bridge) CPU @ 3.5GHz.
3869 #define MONTGOMERY_SQUARING_THRESHOLD 64
3870
3871 void SharedRuntime::montgomery_multiply(jint *a_ints, jint *b_ints, jint *n_ints,
3872 jint len, jlong inv,
3873 jint *m_ints) {
3874 assert(len % 2 == 0, "array length in montgomery_multiply must be even");
3875 int longwords = len/2;
3876
3877 // Make very sure we don't use so much space that the stack might
3878 // overflow. 512 jints corresponds to a 16384-bit integer and
3879 // will use a total of 8k bytes of stack space here.
3880 int total_allocation = longwords * sizeof (unsigned long) * 4;
3881 guarantee(total_allocation <= 8192, "must be");
3882 unsigned long *scratch = (unsigned long *)alloca(total_allocation);
3883
3884 // Local scratch arrays
3885 unsigned long
3886 *a = scratch + 0 * longwords,
3887 *b = scratch + 1 * longwords,
3888 *n = scratch + 2 * longwords,
3889 *m = scratch + 3 * longwords;
3890
3891 reverse_words((unsigned long *)a_ints, a, longwords);
3892 reverse_words((unsigned long *)b_ints, b, longwords);
3893 reverse_words((unsigned long *)n_ints, n, longwords);
3894
3895 ::montgomery_multiply(a, b, n, m, (unsigned long)inv, longwords);
3896
3897 reverse_words(m, (unsigned long *)m_ints, longwords);
3898 }
3899
3900 void SharedRuntime::montgomery_square(jint *a_ints, jint *n_ints,
3901 jint len, jlong inv,
3902 jint *m_ints) {
3903 assert(len % 2 == 0, "array length in montgomery_square must be even");
3904 int longwords = len/2;
3905
3906 // Make very sure we don't use so much space that the stack might
3907 // overflow. 512 jints corresponds to a 16384-bit integer and
3908 // will use a total of 6k bytes of stack space here.
3909 int total_allocation = longwords * sizeof (unsigned long) * 3;
3910 guarantee(total_allocation <= 8192, "must be");
3911 unsigned long *scratch = (unsigned long *)alloca(total_allocation);
3912
3913 // Local scratch arrays
3914 unsigned long
3915 *a = scratch + 0 * longwords,
3916 *n = scratch + 1 * longwords,
3917 *m = scratch + 2 * longwords;
3918
3919 reverse_words((unsigned long *)a_ints, a, longwords);
3920 reverse_words((unsigned long *)n_ints, n, longwords);
3921
3922 if (len >= MONTGOMERY_SQUARING_THRESHOLD) {
3923 ::montgomery_square(a, n, m, (unsigned long)inv, longwords);
3924 } else {
3925 ::montgomery_multiply(a, a, n, m, (unsigned long)inv, longwords);
3926 }
3927
3928 reverse_words(m, (unsigned long *)m_ints, longwords);
3929 }
3930
3931 #endif // !_WINDOWS
3932
3933 #ifdef COMPILER2
3934 // This is here instead of runtime_x86_64.cpp because it uses SimpleRuntimeFrame
3935 //
3936 //------------------------------generate_exception_blob---------------------------
3937 // creates exception blob at the end
3938 // Using exception blob, this code is jumped from a compiled method.
3939 // (see emit_exception_handler in x86_64.ad file)
3940 //
3941 // Given an exception pc at a call, we call into the runtime for the
3942 // handler in this method. This handler might merely restore state
3943 // (i.e. callee-saved registers), unwind the frame, and jump to the
3944 // exception handler for the nmethod if there is no Java-level handler
3945 // for the nmethod.
3946 //
3947 // This code is entered with a jmp.
3948 //
3949 // Arguments:
3950 // rax: exception oop
3951 // rdx: exception pc
3952 //
3953 // Results:
3954 // rax: exception oop
3955 // rdx: exception pc in caller or ???
3956 // destination: exception handler of caller
3957 //
3958 // Note: the exception pc MUST be at a call (precise debug information)
3959 // Registers rax, rdx, rcx, rsi, rdi, r8-r11 are not callee saved.
3960 //
3961
3962 void OptoRuntime::generate_exception_blob() {
3963 assert(!OptoRuntime::is_callee_saved_register(RDX_num), "");
3964 assert(!OptoRuntime::is_callee_saved_register(RAX_num), "");
3965 assert(!OptoRuntime::is_callee_saved_register(RCX_num), "");
3966
3967 assert(SimpleRuntimeFrame::framesize % 4 == 0, "sp not 16-byte aligned");
3968
3969 // Allocate space for the code
3970 ResourceMark rm;
3971 // Setup code generation tools
3972 CodeBuffer buffer("exception_blob", 2048, 1024);
3973 MacroAssembler* masm = new MacroAssembler(&buffer);
3974
3975
3976 address start = __ pc();
3977
3978 // Exception pc is 'return address' for stack walker
3979 __ push(rdx);
3980 __ subptr(rsp, SimpleRuntimeFrame::return_off << LogBytesPerInt); // Prolog
3981
3982 // Save callee-saved registers. See x86_64.ad.
3983
3984 // rbp is an implicitly saved callee-saved register (i.e., the calling
3985 // convention will save/restore it in the prolog/epilog). Other than that
3986 // there are no callee-saved registers now that adapter frames are gone.
3987
3988 __ movptr(Address(rsp, SimpleRuntimeFrame::rbp_off << LogBytesPerInt), rbp);
3989
3990 // Store exception in Thread object. We cannot pass any arguments to the
3991 // handle_exception call, since we do not want to make any assumption
3992 // about the size of the frame where the exception happened in.
3993 // c_rarg0 is either rdi (Linux) or rcx (Windows).
3994 __ movptr(Address(r15_thread, JavaThread::exception_oop_offset()),rax);
3995 __ movptr(Address(r15_thread, JavaThread::exception_pc_offset()), rdx);
3996
3997 // This call does all the hard work. It checks if an exception handler
3998 // exists in the method.
3999 // If so, it returns the handler address.
4000 // If not, it prepares for stack-unwinding, restoring the callee-save
4001 // registers of the frame being removed.
4002 //
4003 // address OptoRuntime::handle_exception_C(JavaThread* thread)
4004
4005 // At a method handle call, the stack may not be properly aligned
4006 // when returning with an exception.
4007 address the_pc = __ pc();
4008 __ set_last_Java_frame(noreg, noreg, the_pc);
4009 __ mov(c_rarg0, r15_thread);
4010 __ andptr(rsp, -(StackAlignmentInBytes)); // Align stack
4011 __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, OptoRuntime::handle_exception_C)));
4012
4013 // Set an oopmap for the call site. This oopmap will only be used if we
4014 // are unwinding the stack. Hence, all locations will be dead.
4015 // Callee-saved registers will be the same as the frame above (i.e.,
4016 // handle_exception_stub), since they were restored when we got the
4017 // exception.
4018
4019 OopMapSet* oop_maps = new OopMapSet();
4020
4021 oop_maps->add_gc_map(the_pc - start, new OopMap(SimpleRuntimeFrame::framesize, 0));
4022
4023 __ reset_last_Java_frame(false);
4024
4025 // Restore callee-saved registers
4026
4027 // rbp is an implicitly saved callee-saved register (i.e., the calling
4028 // convention will save/restore it in the prolog/epilog). Other than that
4029 // there are no callee-saved registers now that adapter frames are gone.
4030
4031 __ movptr(rbp, Address(rsp, SimpleRuntimeFrame::rbp_off << LogBytesPerInt));
4032
4033 __ addptr(rsp, SimpleRuntimeFrame::return_off << LogBytesPerInt); // Epilog
4034 __ pop(rdx); // No need for exception pc anymore
4035
4036 // rax: exception handler
4037
4038 // We have a handler in rax (could be deopt blob).
4039 __ mov(r8, rax);
4040
4041 // Get the exception oop
4042 __ movptr(rax, Address(r15_thread, JavaThread::exception_oop_offset()));
4043 // Get the exception pc in case we are deoptimized
4044 __ movptr(rdx, Address(r15_thread, JavaThread::exception_pc_offset()));
4045 #ifdef ASSERT
4046 __ movptr(Address(r15_thread, JavaThread::exception_handler_pc_offset()), (int)NULL_WORD);
4047 __ movptr(Address(r15_thread, JavaThread::exception_pc_offset()), (int)NULL_WORD);
4048 #endif
4049 // Clear the exception oop so GC no longer processes it as a root.
4050 __ movptr(Address(r15_thread, JavaThread::exception_oop_offset()), (int)NULL_WORD);
4051
4052 // rax: exception oop
4053 // r8: exception handler
4054 // rdx: exception pc
4055 // Jump to handler
4056
4057 __ jmp(r8);
4058
4059 // Make sure all code is generated
4060 masm->flush();
4061
4062 // Set exception blob
4063 _exception_blob = ExceptionBlob::create(&buffer, oop_maps, SimpleRuntimeFrame::framesize >> 1);
4064 }
4065 #endif // COMPILER2
--- EOF ---