1 /*
2 * Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 #include "precompiled.hpp"
26 #include "asm/macroAssembler.hpp"
27 #include "interpreter/interpreter.hpp"
28 #include "interpreter/interpreterRuntime.hpp"
29 #include "interpreter/interp_masm.hpp"
30 #include "interpreter/templateTable.hpp"
31 #include "memory/universe.inline.hpp"
32 #include "oops/methodData.hpp"
33 #include "oops/objArrayKlass.hpp"
34 #include "oops/oop.inline.hpp"
35 #include "prims/methodHandles.hpp"
36 #include "runtime/sharedRuntime.hpp"
37 #include "runtime/stubRoutines.hpp"
38 #include "runtime/synchronizer.hpp"
39 #include "utilities/macros.hpp"
40
41 #define __ _masm->
42
43 // Global Register Names
44 static const Register rbcp = LP64_ONLY(r13) NOT_LP64(rsi);
45 static const Register rlocals = LP64_ONLY(r14) NOT_LP64(rdi);
46
47 // Platform-dependent initialization
48 void TemplateTable::pd_initialize() {
49 // No x86 specific initialization
50 }
51
52 // Address Computation: local variables
53 static inline Address iaddress(int n) {
54 return Address(rlocals, Interpreter::local_offset_in_bytes(n));
55 }
56
57 static inline Address laddress(int n) {
58 return iaddress(n + 1);
59 }
60
61 #ifndef _LP64
62 static inline Address haddress(int n) {
63 return iaddress(n + 0);
64 }
65 #endif
66
67 static inline Address faddress(int n) {
68 return iaddress(n);
69 }
70
71 static inline Address daddress(int n) {
72 return laddress(n);
73 }
74
75 static inline Address aaddress(int n) {
76 return iaddress(n);
77 }
78
79 static inline Address iaddress(Register r) {
80 return Address(rlocals, r, Address::times_ptr);
81 }
82
83 static inline Address laddress(Register r) {
84 return Address(rlocals, r, Address::times_ptr, Interpreter::local_offset_in_bytes(1));
85 }
86
87 #ifndef _LP64
88 static inline Address haddress(Register r) {
89 return Address(rlocals, r, Interpreter::stackElementScale(), Interpreter::local_offset_in_bytes(0));
90 }
91 #endif
92
93 static inline Address faddress(Register r) {
94 return iaddress(r);
95 }
96
97 static inline Address daddress(Register r) {
98 return laddress(r);
99 }
100
101 static inline Address aaddress(Register r) {
102 return iaddress(r);
103 }
104
105
106 // expression stack
107 // (Note: Must not use symmetric equivalents at_rsp_m1/2 since they store
108 // data beyond the rsp which is potentially unsafe in an MT environment;
109 // an interrupt may overwrite that data.)
110 static inline Address at_rsp () {
111 return Address(rsp, 0);
112 }
113
114 // At top of Java expression stack which may be different than esp(). It
115 // isn't for category 1 objects.
116 static inline Address at_tos () {
117 return Address(rsp, Interpreter::expr_offset_in_bytes(0));
118 }
119
120 static inline Address at_tos_p1() {
121 return Address(rsp, Interpreter::expr_offset_in_bytes(1));
122 }
123
124 static inline Address at_tos_p2() {
125 return Address(rsp, Interpreter::expr_offset_in_bytes(2));
126 }
127
128 // Condition conversion
129 static Assembler::Condition j_not(TemplateTable::Condition cc) {
130 switch (cc) {
131 case TemplateTable::equal : return Assembler::notEqual;
132 case TemplateTable::not_equal : return Assembler::equal;
133 case TemplateTable::less : return Assembler::greaterEqual;
134 case TemplateTable::less_equal : return Assembler::greater;
135 case TemplateTable::greater : return Assembler::lessEqual;
136 case TemplateTable::greater_equal: return Assembler::less;
137 }
138 ShouldNotReachHere();
139 return Assembler::zero;
140 }
141
142
143
144 // Miscelaneous helper routines
145 // Store an oop (or NULL) at the address described by obj.
146 // If val == noreg this means store a NULL
147
148
149 static void do_oop_store(InterpreterMacroAssembler* _masm,
150 Address obj,
151 Register val,
152 BarrierSet::Name barrier,
153 bool precise) {
154 assert(val == noreg || val == rax, "parameter is just for looks");
155 switch (barrier) {
156 #if INCLUDE_ALL_GCS
157 case BarrierSet::G1SATBCTLogging:
158 {
159 // flatten object address if needed
160 // We do it regardless of precise because we need the registers
161 if (obj.index() == noreg && obj.disp() == 0) {
162 if (obj.base() != rdx) {
163 __ movptr(rdx, obj.base());
164 }
165 } else {
166 __ lea(rdx, obj);
167 }
168
169 Register rtmp = LP64_ONLY(r8) NOT_LP64(rsi);
170 Register rthread = LP64_ONLY(r15_thread) NOT_LP64(rcx);
171
172 NOT_LP64(__ get_thread(rcx));
173 NOT_LP64(__ save_bcp());
174
175 __ g1_write_barrier_pre(rdx /* obj */,
176 rbx /* pre_val */,
177 rthread /* thread */,
178 rtmp /* tmp */,
179 val != noreg /* tosca_live */,
180 false /* expand_call */);
181 if (val == noreg) {
182 __ store_heap_oop_null(Address(rdx, 0));
183 } else {
184 // G1 barrier needs uncompressed oop for region cross check.
185 Register new_val = val;
186 if (UseCompressedOops) {
187 new_val = rbx;
188 __ movptr(new_val, val);
189 }
190 __ store_heap_oop(Address(rdx, 0), val);
191 __ g1_write_barrier_post(rdx /* store_adr */,
192 new_val /* new_val */,
193 rthread /* thread */,
194 rtmp /* tmp */,
195 rbx /* tmp2 */);
196 }
197 NOT_LP64( __ restore_bcp());
198 }
199 break;
200 #endif // INCLUDE_ALL_GCS
201 case BarrierSet::CardTableForRS:
202 case BarrierSet::CardTableExtension:
203 {
204 if (val == noreg) {
205 __ store_heap_oop_null(obj);
206 } else {
207 __ store_heap_oop(obj, val);
208 // flatten object address if needed
209 if (!precise || (obj.index() == noreg && obj.disp() == 0)) {
210 __ store_check(obj.base());
211 } else {
212 __ lea(rdx, obj);
213 __ store_check(rdx);
214 }
215 }
216 }
217 break;
218 case BarrierSet::ModRef:
219 if (val == noreg) {
220 __ store_heap_oop_null(obj);
221 } else {
222 __ store_heap_oop(obj, val);
223 }
224 break;
225 default :
226 ShouldNotReachHere();
227
228 }
229 }
230
231 Address TemplateTable::at_bcp(int offset) {
232 assert(_desc->uses_bcp(), "inconsistent uses_bcp information");
233 return Address(rbcp, offset);
234 }
235
236
237 void TemplateTable::patch_bytecode(Bytecodes::Code bc, Register bc_reg,
238 Register temp_reg, bool load_bc_into_bc_reg/*=true*/,
239 int byte_no) {
240 if (!RewriteBytecodes) return;
241 Label L_patch_done;
242
243 switch (bc) {
244 case Bytecodes::_fast_aputfield:
245 case Bytecodes::_fast_qputfield:
246 case Bytecodes::_fast_bputfield:
247 case Bytecodes::_fast_cputfield:
248 case Bytecodes::_fast_dputfield:
249 case Bytecodes::_fast_fputfield:
250 case Bytecodes::_fast_iputfield:
251 case Bytecodes::_fast_lputfield:
252 case Bytecodes::_fast_sputfield:
253 {
254 // We skip bytecode quickening for putfield instructions when
255 // the put_code written to the constant pool cache is zero.
256 // This is required so that every execution of this instruction
257 // calls out to InterpreterRuntime::resolve_get_put to do
258 // additional, required work.
259 assert(byte_no == f1_byte || byte_no == f2_byte, "byte_no out of range");
260 assert(load_bc_into_bc_reg, "we use bc_reg as temp");
261 __ get_cache_and_index_and_bytecode_at_bcp(temp_reg, bc_reg, temp_reg, byte_no, 1);
262 __ movl(bc_reg, bc);
263 __ cmpl(temp_reg, (int) 0);
264 __ jcc(Assembler::zero, L_patch_done); // don't patch
265 }
266 break;
267 default:
268 assert(byte_no == -1, "sanity");
269 // the pair bytecodes have already done the load.
270 if (load_bc_into_bc_reg) {
271 __ movl(bc_reg, bc);
272 }
273 }
274
275 if (JvmtiExport::can_post_breakpoint()) {
276 Label L_fast_patch;
277 // if a breakpoint is present we can't rewrite the stream directly
278 __ movzbl(temp_reg, at_bcp(0));
279 __ cmpl(temp_reg, Bytecodes::_breakpoint);
280 __ jcc(Assembler::notEqual, L_fast_patch);
281 __ get_method(temp_reg);
282 // Let breakpoint table handling rewrite to quicker bytecode
283 __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::set_original_bytecode_at), temp_reg, rbcp, bc_reg);
284 #ifndef ASSERT
285 __ jmpb(L_patch_done);
286 #else
287 __ jmp(L_patch_done);
288 #endif
289 __ bind(L_fast_patch);
290 }
291
292 #ifdef ASSERT
293 Label L_okay;
294 __ load_unsigned_byte(temp_reg, at_bcp(0));
295 __ cmpl(temp_reg, (int) Bytecodes::java_code(bc));
296 __ jcc(Assembler::equal, L_okay);
297 __ cmpl(temp_reg, bc_reg);
298 __ jcc(Assembler::equal, L_okay);
299 __ stop("patching the wrong bytecode");
300 __ bind(L_okay);
301 #endif
302
303 // patch bytecode
304 __ movb(at_bcp(0), bc_reg);
305 __ bind(L_patch_done);
306 }
307 // Individual instructions
308
309
310 void TemplateTable::nop() {
311 transition(vtos, vtos);
312 // nothing to do
313 }
314
315 void TemplateTable::shouldnotreachhere() {
316 transition(vtos, vtos);
317 __ stop("shouldnotreachhere bytecode");
318 }
319
320 void TemplateTable::aconst_null() {
321 transition(vtos, atos);
322 __ xorl(rax, rax);
323 }
324
325 void TemplateTable::iconst(int value) {
326 transition(vtos, itos);
327 if (value == 0) {
328 __ xorl(rax, rax);
329 } else {
330 __ movl(rax, value);
331 }
332 }
333
334 void TemplateTable::lconst(int value) {
335 transition(vtos, ltos);
336 if (value == 0) {
337 __ xorl(rax, rax);
338 } else {
339 __ movl(rax, value);
340 }
341 #ifndef _LP64
342 assert(value >= 0, "check this code");
343 __ xorptr(rdx, rdx);
344 #endif
345 }
346
347
348
349 void TemplateTable::fconst(int value) {
350 transition(vtos, ftos);
351 if (UseSSE >= 1) {
352 static float one = 1.0f, two = 2.0f;
353 switch (value) {
354 case 0:
355 __ xorps(xmm0, xmm0);
356 break;
357 case 1:
358 __ movflt(xmm0, ExternalAddress((address) &one));
359 break;
360 case 2:
361 __ movflt(xmm0, ExternalAddress((address) &two));
362 break;
363 default:
364 ShouldNotReachHere();
365 break;
366 }
367 } else {
368 #ifdef _LP64
369 ShouldNotReachHere();
370 #else
371 if (value == 0) { __ fldz();
372 } else if (value == 1) { __ fld1();
373 } else if (value == 2) { __ fld1(); __ fld1(); __ faddp(); // should do a better solution here
374 } else { ShouldNotReachHere();
375 }
376 #endif // _LP64
377 }
378 }
379
380 void TemplateTable::dconst(int value) {
381 transition(vtos, dtos);
382 if (UseSSE >= 2) {
383 static double one = 1.0;
384 switch (value) {
385 case 0:
386 __ xorpd(xmm0, xmm0);
387 break;
388 case 1:
389 __ movdbl(xmm0, ExternalAddress((address) &one));
390 break;
391 default:
392 ShouldNotReachHere();
393 break;
394 }
395 } else {
396 #ifdef _LP64
397 ShouldNotReachHere();
398 #else
399 if (value == 0) { __ fldz();
400 } else if (value == 1) { __ fld1();
401 } else { ShouldNotReachHere();
402 }
403 #endif
404 }
405 }
406
407 void TemplateTable::bipush() {
408 transition(vtos, itos);
409 __ load_signed_byte(rax, at_bcp(1));
410 }
411
412 void TemplateTable::sipush() {
413 transition(vtos, itos);
414 __ load_unsigned_short(rax, at_bcp(1));
415 __ bswapl(rax);
416 __ sarl(rax, 16);
417 }
418
419 void TemplateTable::ldc(bool wide) {
420 transition(vtos, vtos);
421 Register rarg = NOT_LP64(rcx) LP64_ONLY(c_rarg1);
422 Label call_ldc, notFloat, notClass, Done;
423
424 if (wide) {
425 __ get_unsigned_2_byte_index_at_bcp(rbx, 1);
426 } else {
427 __ load_unsigned_byte(rbx, at_bcp(1));
428 }
429
430 __ get_cpool_and_tags(rcx, rax);
431 const int base_offset = ConstantPool::header_size() * wordSize;
432 const int tags_offset = Array<u1>::base_offset_in_bytes();
433
434 // get type
435 __ movzbl(rdx, Address(rax, rbx, Address::times_1, tags_offset));
436
437 // unresolved class - get the resolved class
438 __ cmpl(rdx, JVM_CONSTANT_UnresolvedClass);
439 __ jccb(Assembler::equal, call_ldc);
440
441 // unresolved class in error state - call into runtime to throw the error
442 // from the first resolution attempt
443 __ cmpl(rdx, JVM_CONSTANT_UnresolvedClassInError);
444 __ jccb(Assembler::equal, call_ldc);
445
446 // resolved class - need to call vm to get java mirror of the class
447 __ cmpl(rdx, JVM_CONSTANT_Class);
448 __ jcc(Assembler::notEqual, notClass);
449
450 __ bind(call_ldc);
451
452 __ movl(rarg, wide);
453 call_VM(rax, CAST_FROM_FN_PTR(address, InterpreterRuntime::ldc), rarg);
454
455 __ push(atos);
456 __ jmp(Done);
457
458 __ bind(notClass);
459 __ cmpl(rdx, JVM_CONSTANT_Float);
460 __ jccb(Assembler::notEqual, notFloat);
461
462 // ftos
463 __ load_float(Address(rcx, rbx, Address::times_ptr, base_offset));
464 __ push(ftos);
465 __ jmp(Done);
466
467 __ bind(notFloat);
468 #ifdef ASSERT
469 {
470 Label L;
471 __ cmpl(rdx, JVM_CONSTANT_Integer);
472 __ jcc(Assembler::equal, L);
473 // String and Object are rewritten to fast_aldc
474 __ stop("unexpected tag type in ldc");
475 __ bind(L);
476 }
477 #endif
478 // itos JVM_CONSTANT_Integer only
479 __ movl(rax, Address(rcx, rbx, Address::times_ptr, base_offset));
480 __ push(itos);
481 __ bind(Done);
482 }
483
484 // Fast path for caching oop constants.
485 void TemplateTable::fast_aldc(bool wide) {
486 transition(vtos, atos);
487
488 Register result = rax;
489 Register tmp = rdx;
490 int index_size = wide ? sizeof(u2) : sizeof(u1);
491
492 Label resolved;
493
494 // We are resolved if the resolved reference cache entry contains a
495 // non-null object (String, MethodType, etc.)
496 assert_different_registers(result, tmp);
497 __ get_cache_index_at_bcp(tmp, 1, index_size);
498 __ load_resolved_reference_at_index(result, tmp);
499 __ testl(result, result);
500 __ jcc(Assembler::notZero, resolved);
501
502 address entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_ldc);
503
504 // first time invocation - must resolve first
505 __ movl(tmp, (int)bytecode());
506 __ call_VM(result, entry, tmp);
507
508 __ bind(resolved);
509
510 if (VerifyOops) {
511 __ verify_oop(result);
512 }
513 }
514
515 void TemplateTable::ldc2_w() {
516 transition(vtos, vtos);
517 Label Long, Done;
518 __ get_unsigned_2_byte_index_at_bcp(rbx, 1);
519
520 __ get_cpool_and_tags(rcx, rax);
521 const int base_offset = ConstantPool::header_size() * wordSize;
522 const int tags_offset = Array<u1>::base_offset_in_bytes();
523
524 // get type
525 __ cmpb(Address(rax, rbx, Address::times_1, tags_offset),
526 JVM_CONSTANT_Double);
527 __ jccb(Assembler::notEqual, Long);
528
529 // dtos
530 __ load_double(Address(rcx, rbx, Address::times_ptr, base_offset));
531 __ push(dtos);
532
533 __ jmpb(Done);
534 __ bind(Long);
535
536 // ltos
537 __ movptr(rax, Address(rcx, rbx, Address::times_ptr, base_offset + 0 * wordSize));
538 NOT_LP64(__ movptr(rdx, Address(rcx, rbx, Address::times_ptr, base_offset + 1 * wordSize)));
539 __ push(ltos);
540
541 __ bind(Done);
542 }
543
544 void TemplateTable::locals_index(Register reg, int offset) {
545 __ load_unsigned_byte(reg, at_bcp(offset));
546 __ negptr(reg);
547 }
548
549 void TemplateTable::iload() {
550 iload_internal();
551 }
552
553 void TemplateTable::nofast_iload() {
554 iload_internal(may_not_rewrite);
555 }
556
557 void TemplateTable::iload_internal(RewriteControl rc) {
558 transition(vtos, itos);
559 if (RewriteFrequentPairs && rc == may_rewrite) {
560 Label rewrite, done;
561 const Register bc = LP64_ONLY(c_rarg3) NOT_LP64(rcx);
562 LP64_ONLY(assert(rbx != bc, "register damaged"));
563
564 // get next byte
565 __ load_unsigned_byte(rbx,
566 at_bcp(Bytecodes::length_for(Bytecodes::_iload)));
567 // if _iload, wait to rewrite to iload2. We only want to rewrite the
568 // last two iloads in a pair. Comparing against fast_iload means that
569 // the next bytecode is neither an iload or a caload, and therefore
570 // an iload pair.
571 __ cmpl(rbx, Bytecodes::_iload);
572 __ jcc(Assembler::equal, done);
573
574 __ cmpl(rbx, Bytecodes::_fast_iload);
575 __ movl(bc, Bytecodes::_fast_iload2);
576
577 __ jccb(Assembler::equal, rewrite);
578
579 // if _caload, rewrite to fast_icaload
580 __ cmpl(rbx, Bytecodes::_caload);
581 __ movl(bc, Bytecodes::_fast_icaload);
582 __ jccb(Assembler::equal, rewrite);
583
584 // rewrite so iload doesn't check again.
585 __ movl(bc, Bytecodes::_fast_iload);
586
587 // rewrite
588 // bc: fast bytecode
589 __ bind(rewrite);
590 patch_bytecode(Bytecodes::_iload, bc, rbx, false);
591 __ bind(done);
592 }
593
594 // Get the local value into tos
595 locals_index(rbx);
596 __ movl(rax, iaddress(rbx));
597 }
598
599 void TemplateTable::fast_iload2() {
600 transition(vtos, itos);
601 locals_index(rbx);
602 __ movl(rax, iaddress(rbx));
603 __ push(itos);
604 locals_index(rbx, 3);
605 __ movl(rax, iaddress(rbx));
606 }
607
608 void TemplateTable::fast_iload() {
609 transition(vtos, itos);
610 locals_index(rbx);
611 __ movl(rax, iaddress(rbx));
612 }
613
614 void TemplateTable::lload() {
615 transition(vtos, ltos);
616 locals_index(rbx);
617 __ movptr(rax, laddress(rbx));
618 NOT_LP64(__ movl(rdx, haddress(rbx)));
619 }
620
621 void TemplateTable::fload() {
622 transition(vtos, ftos);
623 locals_index(rbx);
624 __ load_float(faddress(rbx));
625 }
626
627 void TemplateTable::dload() {
628 transition(vtos, dtos);
629 locals_index(rbx);
630 __ load_double(daddress(rbx));
631 }
632
633 void TemplateTable::aload() {
634 transition(vtos, atos);
635 locals_index(rbx);
636 __ movptr(rax, aaddress(rbx));
637 }
638
639 void TemplateTable::vload() {
640 transition(vtos, qtos);
641 locals_index(rbx);
642 __ movptr(rax, aaddress(rbx));
643 }
644
645 void TemplateTable::locals_index_wide(Register reg) {
646 __ load_unsigned_short(reg, at_bcp(2));
647 __ bswapl(reg);
648 __ shrl(reg, 16);
649 __ negptr(reg);
650 }
651
652 void TemplateTable::wide_iload() {
653 transition(vtos, itos);
654 locals_index_wide(rbx);
655 __ movl(rax, iaddress(rbx));
656 }
657
658 void TemplateTable::wide_lload() {
659 transition(vtos, ltos);
660 locals_index_wide(rbx);
661 __ movptr(rax, laddress(rbx));
662 NOT_LP64(__ movl(rdx, haddress(rbx)));
663 }
664
665 void TemplateTable::wide_fload() {
666 transition(vtos, ftos);
667 locals_index_wide(rbx);
668 __ load_float(faddress(rbx));
669 }
670
671 void TemplateTable::wide_dload() {
672 transition(vtos, dtos);
673 locals_index_wide(rbx);
674 __ load_double(daddress(rbx));
675 }
676
677 void TemplateTable::wide_aload() {
678 transition(vtos, atos);
679 locals_index_wide(rbx);
680 __ movptr(rax, aaddress(rbx));
681 }
682
683 void TemplateTable::wide_vload() {
684 transition(vtos, qtos);
685 locals_index_wide(rbx);
686 __ movptr(rax, aaddress(rbx));
687 }
688
689 void TemplateTable::index_check(Register array, Register index) {
690 // Pop ptr into array
691 __ pop_ptr(array);
692 index_check_without_pop(array, index);
693 }
694
695 void TemplateTable::index_check_without_pop(Register array, Register index) {
696 // destroys rbx
697 // check array
698 __ null_check(array, arrayOopDesc::length_offset_in_bytes());
699 // sign extend index for use by indexed load
700 __ movl2ptr(index, index);
701 // check index
702 __ cmpl(index, Address(array, arrayOopDesc::length_offset_in_bytes()));
703 if (index != rbx) {
704 // ??? convention: move aberrant index into rbx for exception message
705 assert(rbx != array, "different registers");
706 __ movl(rbx, index);
707 }
708 __ jump_cc(Assembler::aboveEqual,
709 ExternalAddress(Interpreter::_throw_ArrayIndexOutOfBoundsException_entry));
710 }
711
712
713 void TemplateTable::iaload() {
714 transition(itos, itos);
715 // rax: index
716 // rdx: array
717 index_check(rdx, rax); // kills rbx
718 __ movl(rax, Address(rdx, rax,
719 Address::times_4,
720 arrayOopDesc::base_offset_in_bytes(T_INT)));
721 }
722
723 void TemplateTable::laload() {
724 transition(itos, ltos);
725 // rax: index
726 // rdx: array
727 index_check(rdx, rax); // kills rbx
728 NOT_LP64(__ mov(rbx, rax));
729 // rbx,: index
730 __ movptr(rax, Address(rdx, rbx, Address::times_8, arrayOopDesc::base_offset_in_bytes(T_LONG) + 0 * wordSize));
731 NOT_LP64(__ movl(rdx, Address(rdx, rbx, Address::times_8, arrayOopDesc::base_offset_in_bytes(T_LONG) + 1 * wordSize)));
732 }
733
734
735
736 void TemplateTable::faload() {
737 transition(itos, ftos);
738 // rax: index
739 // rdx: array
740 index_check(rdx, rax); // kills rbx
741 __ load_float(Address(rdx, rax,
742 Address::times_4,
743 arrayOopDesc::base_offset_in_bytes(T_FLOAT)));
744 }
745
746 void TemplateTable::daload() {
747 transition(itos, dtos);
748 // rax: index
749 // rdx: array
750 index_check(rdx, rax); // kills rbx
751 __ load_double(Address(rdx, rax,
752 Address::times_8,
753 arrayOopDesc::base_offset_in_bytes(T_DOUBLE)));
754 }
755
756 void TemplateTable::aaload() {
757 transition(itos, atos);
758 // rax: index
759 // rdx: array
760 index_check(rdx, rax); // kills rbx
761 __ load_heap_oop(rax, Address(rdx, rax,
762 UseCompressedOops ? Address::times_4 : Address::times_ptr,
763 arrayOopDesc::base_offset_in_bytes(T_OBJECT)));
764 }
765
766 void TemplateTable::vaload() {
767 transition(itos, qtos);
768
769 Register array = rcx;
770 Register index = rax;
771
772 index_check(array, index); // kills rbx, pops array
773
774 __ call_VM(rax, CAST_FROM_FN_PTR(address, InterpreterRuntime::value_array_load) , array, index);
775 }
776
777 void TemplateTable::baload() {
778 transition(itos, itos);
779 // rax: index
780 // rdx: array
781 index_check(rdx, rax); // kills rbx
782 __ load_signed_byte(rax, Address(rdx, rax, Address::times_1, arrayOopDesc::base_offset_in_bytes(T_BYTE)));
783 }
784
785 void TemplateTable::caload() {
786 transition(itos, itos);
787 // rax: index
788 // rdx: array
789 index_check(rdx, rax); // kills rbx
790 __ load_unsigned_short(rax, Address(rdx, rax, Address::times_2, arrayOopDesc::base_offset_in_bytes(T_CHAR)));
791 }
792
793 // iload followed by caload frequent pair
794 void TemplateTable::fast_icaload() {
795 transition(vtos, itos);
796 // load index out of locals
797 locals_index(rbx);
798 __ movl(rax, iaddress(rbx));
799
800 // rax: index
801 // rdx: array
802 index_check(rdx, rax); // kills rbx
803 __ load_unsigned_short(rax,
804 Address(rdx, rax,
805 Address::times_2,
806 arrayOopDesc::base_offset_in_bytes(T_CHAR)));
807 }
808
809
810 void TemplateTable::saload() {
811 transition(itos, itos);
812 // rax: index
813 // rdx: array
814 index_check(rdx, rax); // kills rbx
815 __ load_signed_short(rax, Address(rdx, rax, Address::times_2, arrayOopDesc::base_offset_in_bytes(T_SHORT)));
816 }
817
818 void TemplateTable::iload(int n) {
819 transition(vtos, itos);
820 __ movl(rax, iaddress(n));
821 }
822
823 void TemplateTable::lload(int n) {
824 transition(vtos, ltos);
825 __ movptr(rax, laddress(n));
826 NOT_LP64(__ movptr(rdx, haddress(n)));
827 }
828
829 void TemplateTable::fload(int n) {
830 transition(vtos, ftos);
831 __ load_float(faddress(n));
832 }
833
834 void TemplateTable::dload(int n) {
835 transition(vtos, dtos);
836 __ load_double(daddress(n));
837 }
838
839 void TemplateTable::aload(int n) {
840 transition(vtos, atos);
841 __ movptr(rax, aaddress(n));
842 }
843
844 void TemplateTable::aload_0() {
845 aload_0_internal();
846 }
847
848 void TemplateTable::nofast_aload_0() {
849 aload_0_internal(may_not_rewrite);
850 }
851
852 void TemplateTable::aload_0_internal(RewriteControl rc) {
853 transition(vtos, atos);
854 // According to bytecode histograms, the pairs:
855 //
856 // _aload_0, _fast_igetfield
857 // _aload_0, _fast_agetfield
858 // _aload_0, _fast_fgetfield
859 //
860 // occur frequently. If RewriteFrequentPairs is set, the (slow)
861 // _aload_0 bytecode checks if the next bytecode is either
862 // _fast_igetfield, _fast_agetfield or _fast_fgetfield and then
863 // rewrites the current bytecode into a pair bytecode; otherwise it
864 // rewrites the current bytecode into _fast_aload_0 that doesn't do
865 // the pair check anymore.
866 //
867 // Note: If the next bytecode is _getfield, the rewrite must be
868 // delayed, otherwise we may miss an opportunity for a pair.
869 //
870 // Also rewrite frequent pairs
871 // aload_0, aload_1
872 // aload_0, iload_1
873 // These bytecodes with a small amount of code are most profitable
874 // to rewrite
875 if (RewriteFrequentPairs && rc == may_rewrite) {
876 Label rewrite, done;
877
878 const Register bc = LP64_ONLY(c_rarg3) NOT_LP64(rcx);
879 LP64_ONLY(assert(rbx != bc, "register damaged"));
880
881 // get next byte
882 __ load_unsigned_byte(rbx, at_bcp(Bytecodes::length_for(Bytecodes::_aload_0)));
883
884 // do actual aload_0
885 aload(0);
886
887 // if _getfield then wait with rewrite
888 __ cmpl(rbx, Bytecodes::_getfield);
889 __ jcc(Assembler::equal, done);
890
891 // if _igetfield then reqrite to _fast_iaccess_0
892 assert(Bytecodes::java_code(Bytecodes::_fast_iaccess_0) == Bytecodes::_aload_0, "fix bytecode definition");
893 __ cmpl(rbx, Bytecodes::_fast_igetfield);
894 __ movl(bc, Bytecodes::_fast_iaccess_0);
895 __ jccb(Assembler::equal, rewrite);
896
897 // if _agetfield then reqrite to _fast_aaccess_0
898 assert(Bytecodes::java_code(Bytecodes::_fast_aaccess_0) == Bytecodes::_aload_0, "fix bytecode definition");
899 __ cmpl(rbx, Bytecodes::_fast_agetfield);
900 __ movl(bc, Bytecodes::_fast_aaccess_0);
901 __ jccb(Assembler::equal, rewrite);
902
903 // if _fgetfield then reqrite to _fast_faccess_0
904 assert(Bytecodes::java_code(Bytecodes::_fast_faccess_0) == Bytecodes::_aload_0, "fix bytecode definition");
905 __ cmpl(rbx, Bytecodes::_fast_fgetfield);
906 __ movl(bc, Bytecodes::_fast_faccess_0);
907 __ jccb(Assembler::equal, rewrite);
908
909 // else rewrite to _fast_aload0
910 assert(Bytecodes::java_code(Bytecodes::_fast_aload_0) == Bytecodes::_aload_0, "fix bytecode definition");
911 __ movl(bc, Bytecodes::_fast_aload_0);
912
913 // rewrite
914 // bc: fast bytecode
915 __ bind(rewrite);
916 patch_bytecode(Bytecodes::_aload_0, bc, rbx, false);
917
918 __ bind(done);
919 } else {
920 aload(0);
921 }
922 }
923
924 void TemplateTable::istore() {
925 transition(itos, vtos);
926 locals_index(rbx);
927 __ movl(iaddress(rbx), rax);
928 }
929
930
931 void TemplateTable::lstore() {
932 transition(ltos, vtos);
933 locals_index(rbx);
934 __ movptr(laddress(rbx), rax);
935 NOT_LP64(__ movptr(haddress(rbx), rdx));
936 }
937
938 void TemplateTable::fstore() {
939 transition(ftos, vtos);
940 locals_index(rbx);
941 __ store_float(faddress(rbx));
942 }
943
944 void TemplateTable::dstore() {
945 transition(dtos, vtos);
946 locals_index(rbx);
947 __ store_double(daddress(rbx));
948 }
949
950 void TemplateTable::astore() {
951 transition(vtos, vtos);
952 __ pop_ptr(rax);
953 locals_index(rbx);
954 __ movptr(aaddress(rbx), rax);
955 }
956
957 void TemplateTable::vstore() {
958 transition(vtos, vtos);
959 __ pop_ptr(rax);
960 locals_index(rbx);
961 __ movptr(aaddress(rbx), rax);
962 }
963
964 void TemplateTable::wide_istore() {
965 transition(vtos, vtos);
966 __ pop_i();
967 locals_index_wide(rbx);
968 __ movl(iaddress(rbx), rax);
969 }
970
971 void TemplateTable::wide_lstore() {
972 transition(vtos, vtos);
973 NOT_LP64(__ pop_l(rax, rdx));
974 LP64_ONLY(__ pop_l());
975 locals_index_wide(rbx);
976 __ movptr(laddress(rbx), rax);
977 NOT_LP64(__ movl(haddress(rbx), rdx));
978 }
979
980 void TemplateTable::wide_fstore() {
981 #ifdef _LP64
982 transition(vtos, vtos);
983 __ pop_f(xmm0);
984 locals_index_wide(rbx);
985 __ movflt(faddress(rbx), xmm0);
986 #else
987 wide_istore();
988 #endif
989 }
990
991 void TemplateTable::wide_dstore() {
992 #ifdef _LP64
993 transition(vtos, vtos);
994 __ pop_d(xmm0);
995 locals_index_wide(rbx);
996 __ movdbl(daddress(rbx), xmm0);
997 #else
998 wide_lstore();
999 #endif
1000 }
1001
1002 void TemplateTable::wide_astore() {
1003 transition(vtos, vtos);
1004 __ pop_ptr(rax);
1005 locals_index_wide(rbx);
1006 __ movptr(aaddress(rbx), rax);
1007 }
1008
1009 void TemplateTable::wide_vstore() {
1010 transition(vtos, vtos);
1011 __ pop_ptr(rax);
1012 locals_index_wide(rbx);
1013 __ movptr(aaddress(rbx), rax);
1014 }
1015
1016 void TemplateTable::iastore() {
1017 transition(itos, vtos);
1018 __ pop_i(rbx);
1019 // rax: value
1020 // rbx: index
1021 // rdx: array
1022 index_check(rdx, rbx); // prefer index in rbx
1023 __ movl(Address(rdx, rbx,
1024 Address::times_4,
1025 arrayOopDesc::base_offset_in_bytes(T_INT)),
1026 rax);
1027 }
1028
1029 void TemplateTable::lastore() {
1030 transition(ltos, vtos);
1031 __ pop_i(rbx);
1032 // rax,: low(value)
1033 // rcx: array
1034 // rdx: high(value)
1035 index_check(rcx, rbx); // prefer index in rbx,
1036 // rbx,: index
1037 __ movptr(Address(rcx, rbx, Address::times_8, arrayOopDesc::base_offset_in_bytes(T_LONG) + 0 * wordSize), rax);
1038 NOT_LP64(__ movl(Address(rcx, rbx, Address::times_8, arrayOopDesc::base_offset_in_bytes(T_LONG) + 1 * wordSize), rdx));
1039 }
1040
1041
1042 void TemplateTable::fastore() {
1043 transition(ftos, vtos);
1044 __ pop_i(rbx);
1045 // value is in UseSSE >= 1 ? xmm0 : ST(0)
1046 // rbx: index
1047 // rdx: array
1048 index_check(rdx, rbx); // prefer index in rbx
1049 __ store_float(Address(rdx, rbx, Address::times_4, arrayOopDesc::base_offset_in_bytes(T_FLOAT)));
1050 }
1051
1052 void TemplateTable::dastore() {
1053 transition(dtos, vtos);
1054 __ pop_i(rbx);
1055 // value is in UseSSE >= 2 ? xmm0 : ST(0)
1056 // rbx: index
1057 // rdx: array
1058 index_check(rdx, rbx); // prefer index in rbx
1059 __ store_double(Address(rdx, rbx, Address::times_8, arrayOopDesc::base_offset_in_bytes(T_DOUBLE)));
1060 }
1061
1062 void TemplateTable::aastore() {
1063 Label is_null, ok_is_subtype, done;
1064 transition(vtos, vtos);
1065 // stack: ..., array, index, value
1066 __ movptr(rax, at_tos()); // value
1067 __ movl(rcx, at_tos_p1()); // index
1068 __ movptr(rdx, at_tos_p2()); // array
1069
1070 Address element_address(rdx, rcx,
1071 UseCompressedOops? Address::times_4 : Address::times_ptr,
1072 arrayOopDesc::base_offset_in_bytes(T_OBJECT));
1073
1074 index_check_without_pop(rdx, rcx); // kills rbx
1075 __ testptr(rax, rax);
1076 __ jcc(Assembler::zero, is_null);
1077
1078 // Move subklass into rbx
1079 __ load_klass(rbx, rax);
1080 // Move superklass into rax
1081 __ load_klass(rax, rdx);
1082 __ movptr(rax, Address(rax,
1083 ObjArrayKlass::element_klass_offset()));
1084 // Compress array + index*oopSize + 12 into a single register. Frees rcx.
1085 __ lea(rdx, element_address);
1086
1087 // Generate subtype check. Blows rcx, rdi
1088 // Superklass in rax. Subklass in rbx.
1089 __ gen_subtype_check(rbx, ok_is_subtype);
1090
1091 // Come here on failure
1092 // object is at TOS
1093 __ jump(ExternalAddress(Interpreter::_throw_ArrayStoreException_entry));
1094
1095 // Come here on success
1096 __ bind(ok_is_subtype);
1097
1098 // Get the value we will store
1099 __ movptr(rax, at_tos());
1100 // Now store using the appropriate barrier
1101 do_oop_store(_masm, Address(rdx, 0), rax, _bs->kind(), true);
1102 __ jmp(done);
1103
1104 // Have a NULL in rax, rdx=array, ecx=index. Store NULL at ary[idx]
1105 __ bind(is_null);
1106 __ profile_null_seen(rbx);
1107
1108 // Store a NULL
1109 do_oop_store(_masm, element_address, noreg, _bs->kind(), true);
1110
1111 // Pop stack arguments
1112 __ bind(done);
1113 __ addptr(rsp, 3 * Interpreter::stackElementSize);
1114 }
1115
1116 void TemplateTable::vastore() {
1117 transition(vtos, vtos);
1118
1119 Register value = rcx;
1120 Register index = rbx;
1121 Register array = rax;
1122
1123 // stack: ..., array, index, value
1124 __ pop_ptr(value);
1125 __ pop_i(index);
1126 __ pop_ptr(array);
1127
1128 index_check_without_pop(array, index);
1129
1130 __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::value_array_store), array, index, value);
1131 }
1132
1133 void TemplateTable::bastore() {
1134 transition(itos, vtos);
1135 __ pop_i(rbx);
1136 // rax: value
1137 // rbx: index
1138 // rdx: array
1139 index_check(rdx, rbx); // prefer index in rbx
1140 __ movb(Address(rdx, rbx,
1141 Address::times_1,
1142 arrayOopDesc::base_offset_in_bytes(T_BYTE)),
1143 rax);
1144 }
1145
1146 void TemplateTable::castore() {
1147 transition(itos, vtos);
1148 __ pop_i(rbx);
1149 // rax: value
1150 // rbx: index
1151 // rdx: array
1152 index_check(rdx, rbx); // prefer index in rbx
1153 __ movw(Address(rdx, rbx,
1154 Address::times_2,
1155 arrayOopDesc::base_offset_in_bytes(T_CHAR)),
1156 rax);
1157 }
1158
1159
1160 void TemplateTable::sastore() {
1161 castore();
1162 }
1163
1164 void TemplateTable::istore(int n) {
1165 transition(itos, vtos);
1166 __ movl(iaddress(n), rax);
1167 }
1168
1169 void TemplateTable::lstore(int n) {
1170 transition(ltos, vtos);
1171 __ movptr(laddress(n), rax);
1172 NOT_LP64(__ movptr(haddress(n), rdx));
1173 }
1174
1175 void TemplateTable::fstore(int n) {
1176 transition(ftos, vtos);
1177 __ store_float(faddress(n));
1178 }
1179
1180 void TemplateTable::dstore(int n) {
1181 transition(dtos, vtos);
1182 __ store_double(daddress(n));
1183 }
1184
1185
1186 void TemplateTable::astore(int n) {
1187 transition(vtos, vtos);
1188 __ pop_ptr(rax);
1189 __ movptr(aaddress(n), rax);
1190 }
1191
1192 void TemplateTable::pop() {
1193 transition(vtos, vtos);
1194 __ addptr(rsp, Interpreter::stackElementSize);
1195 }
1196
1197 void TemplateTable::pop2() {
1198 transition(vtos, vtos);
1199 __ addptr(rsp, 2 * Interpreter::stackElementSize);
1200 }
1201
1202
1203 void TemplateTable::dup() {
1204 transition(vtos, vtos);
1205 __ load_ptr(0, rax);
1206 __ push_ptr(rax);
1207 // stack: ..., a, a
1208 }
1209
1210 void TemplateTable::dup_x1() {
1211 transition(vtos, vtos);
1212 // stack: ..., a, b
1213 __ load_ptr( 0, rax); // load b
1214 __ load_ptr( 1, rcx); // load a
1215 __ store_ptr(1, rax); // store b
1216 __ store_ptr(0, rcx); // store a
1217 __ push_ptr(rax); // push b
1218 // stack: ..., b, a, b
1219 }
1220
1221 void TemplateTable::dup_x2() {
1222 transition(vtos, vtos);
1223 // stack: ..., a, b, c
1224 __ load_ptr( 0, rax); // load c
1225 __ load_ptr( 2, rcx); // load a
1226 __ store_ptr(2, rax); // store c in a
1227 __ push_ptr(rax); // push c
1228 // stack: ..., c, b, c, c
1229 __ load_ptr( 2, rax); // load b
1230 __ store_ptr(2, rcx); // store a in b
1231 // stack: ..., c, a, c, c
1232 __ store_ptr(1, rax); // store b in c
1233 // stack: ..., c, a, b, c
1234 }
1235
1236 void TemplateTable::dup2() {
1237 transition(vtos, vtos);
1238 // stack: ..., a, b
1239 __ load_ptr(1, rax); // load a
1240 __ push_ptr(rax); // push a
1241 __ load_ptr(1, rax); // load b
1242 __ push_ptr(rax); // push b
1243 // stack: ..., a, b, a, b
1244 }
1245
1246
1247 void TemplateTable::dup2_x1() {
1248 transition(vtos, vtos);
1249 // stack: ..., a, b, c
1250 __ load_ptr( 0, rcx); // load c
1251 __ load_ptr( 1, rax); // load b
1252 __ push_ptr(rax); // push b
1253 __ push_ptr(rcx); // push c
1254 // stack: ..., a, b, c, b, c
1255 __ store_ptr(3, rcx); // store c in b
1256 // stack: ..., a, c, c, b, c
1257 __ load_ptr( 4, rcx); // load a
1258 __ store_ptr(2, rcx); // store a in 2nd c
1259 // stack: ..., a, c, a, b, c
1260 __ store_ptr(4, rax); // store b in a
1261 // stack: ..., b, c, a, b, c
1262 }
1263
1264 void TemplateTable::dup2_x2() {
1265 transition(vtos, vtos);
1266 // stack: ..., a, b, c, d
1267 __ load_ptr( 0, rcx); // load d
1268 __ load_ptr( 1, rax); // load c
1269 __ push_ptr(rax); // push c
1270 __ push_ptr(rcx); // push d
1271 // stack: ..., a, b, c, d, c, d
1272 __ load_ptr( 4, rax); // load b
1273 __ store_ptr(2, rax); // store b in d
1274 __ store_ptr(4, rcx); // store d in b
1275 // stack: ..., a, d, c, b, c, d
1276 __ load_ptr( 5, rcx); // load a
1277 __ load_ptr( 3, rax); // load c
1278 __ store_ptr(3, rcx); // store a in c
1279 __ store_ptr(5, rax); // store c in a
1280 // stack: ..., c, d, a, b, c, d
1281 }
1282
1283 void TemplateTable::swap() {
1284 transition(vtos, vtos);
1285 // stack: ..., a, b
1286 __ load_ptr( 1, rcx); // load a
1287 __ load_ptr( 0, rax); // load b
1288 __ store_ptr(0, rcx); // store a in b
1289 __ store_ptr(1, rax); // store b in a
1290 // stack: ..., b, a
1291 }
1292
1293 void TemplateTable::iop2(Operation op) {
1294 transition(itos, itos);
1295 switch (op) {
1296 case add : __ pop_i(rdx); __ addl (rax, rdx); break;
1297 case sub : __ movl(rdx, rax); __ pop_i(rax); __ subl (rax, rdx); break;
1298 case mul : __ pop_i(rdx); __ imull(rax, rdx); break;
1299 case _and : __ pop_i(rdx); __ andl (rax, rdx); break;
1300 case _or : __ pop_i(rdx); __ orl (rax, rdx); break;
1301 case _xor : __ pop_i(rdx); __ xorl (rax, rdx); break;
1302 case shl : __ movl(rcx, rax); __ pop_i(rax); __ shll (rax); break;
1303 case shr : __ movl(rcx, rax); __ pop_i(rax); __ sarl (rax); break;
1304 case ushr : __ movl(rcx, rax); __ pop_i(rax); __ shrl (rax); break;
1305 default : ShouldNotReachHere();
1306 }
1307 }
1308
1309 void TemplateTable::lop2(Operation op) {
1310 transition(ltos, ltos);
1311 #ifdef _LP64
1312 switch (op) {
1313 case add : __ pop_l(rdx); __ addptr(rax, rdx); break;
1314 case sub : __ mov(rdx, rax); __ pop_l(rax); __ subptr(rax, rdx); break;
1315 case _and : __ pop_l(rdx); __ andptr(rax, rdx); break;
1316 case _or : __ pop_l(rdx); __ orptr (rax, rdx); break;
1317 case _xor : __ pop_l(rdx); __ xorptr(rax, rdx); break;
1318 default : ShouldNotReachHere();
1319 }
1320 #else
1321 __ pop_l(rbx, rcx);
1322 switch (op) {
1323 case add : __ addl(rax, rbx); __ adcl(rdx, rcx); break;
1324 case sub : __ subl(rbx, rax); __ sbbl(rcx, rdx);
1325 __ mov (rax, rbx); __ mov (rdx, rcx); break;
1326 case _and : __ andl(rax, rbx); __ andl(rdx, rcx); break;
1327 case _or : __ orl (rax, rbx); __ orl (rdx, rcx); break;
1328 case _xor : __ xorl(rax, rbx); __ xorl(rdx, rcx); break;
1329 default : ShouldNotReachHere();
1330 }
1331 #endif
1332 }
1333
1334 void TemplateTable::idiv() {
1335 transition(itos, itos);
1336 __ movl(rcx, rax);
1337 __ pop_i(rax);
1338 // Note: could xor rax and ecx and compare with (-1 ^ min_int). If
1339 // they are not equal, one could do a normal division (no correction
1340 // needed), which may speed up this implementation for the common case.
1341 // (see also JVM spec., p.243 & p.271)
1342 __ corrected_idivl(rcx);
1343 }
1344
1345 void TemplateTable::irem() {
1346 transition(itos, itos);
1347 __ movl(rcx, rax);
1348 __ pop_i(rax);
1349 // Note: could xor rax and ecx and compare with (-1 ^ min_int). If
1350 // they are not equal, one could do a normal division (no correction
1351 // needed), which may speed up this implementation for the common case.
1352 // (see also JVM spec., p.243 & p.271)
1353 __ corrected_idivl(rcx);
1354 __ movl(rax, rdx);
1355 }
1356
1357 void TemplateTable::lmul() {
1358 transition(ltos, ltos);
1359 #ifdef _LP64
1360 __ pop_l(rdx);
1361 __ imulq(rax, rdx);
1362 #else
1363 __ pop_l(rbx, rcx);
1364 __ push(rcx); __ push(rbx);
1365 __ push(rdx); __ push(rax);
1366 __ lmul(2 * wordSize, 0);
1367 __ addptr(rsp, 4 * wordSize); // take off temporaries
1368 #endif
1369 }
1370
1371 void TemplateTable::ldiv() {
1372 transition(ltos, ltos);
1373 #ifdef _LP64
1374 __ mov(rcx, rax);
1375 __ pop_l(rax);
1376 // generate explicit div0 check
1377 __ testq(rcx, rcx);
1378 __ jump_cc(Assembler::zero,
1379 ExternalAddress(Interpreter::_throw_ArithmeticException_entry));
1380 // Note: could xor rax and rcx and compare with (-1 ^ min_int). If
1381 // they are not equal, one could do a normal division (no correction
1382 // needed), which may speed up this implementation for the common case.
1383 // (see also JVM spec., p.243 & p.271)
1384 __ corrected_idivq(rcx); // kills rbx
1385 #else
1386 __ pop_l(rbx, rcx);
1387 __ push(rcx); __ push(rbx);
1388 __ push(rdx); __ push(rax);
1389 // check if y = 0
1390 __ orl(rax, rdx);
1391 __ jump_cc(Assembler::zero,
1392 ExternalAddress(Interpreter::_throw_ArithmeticException_entry));
1393 __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::ldiv));
1394 __ addptr(rsp, 4 * wordSize); // take off temporaries
1395 #endif
1396 }
1397
1398 void TemplateTable::lrem() {
1399 transition(ltos, ltos);
1400 #ifdef _LP64
1401 __ mov(rcx, rax);
1402 __ pop_l(rax);
1403 __ testq(rcx, rcx);
1404 __ jump_cc(Assembler::zero,
1405 ExternalAddress(Interpreter::_throw_ArithmeticException_entry));
1406 // Note: could xor rax and rcx and compare with (-1 ^ min_int). If
1407 // they are not equal, one could do a normal division (no correction
1408 // needed), which may speed up this implementation for the common case.
1409 // (see also JVM spec., p.243 & p.271)
1410 __ corrected_idivq(rcx); // kills rbx
1411 __ mov(rax, rdx);
1412 #else
1413 __ pop_l(rbx, rcx);
1414 __ push(rcx); __ push(rbx);
1415 __ push(rdx); __ push(rax);
1416 // check if y = 0
1417 __ orl(rax, rdx);
1418 __ jump_cc(Assembler::zero,
1419 ExternalAddress(Interpreter::_throw_ArithmeticException_entry));
1420 __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::lrem));
1421 __ addptr(rsp, 4 * wordSize);
1422 #endif
1423 }
1424
1425 void TemplateTable::lshl() {
1426 transition(itos, ltos);
1427 __ movl(rcx, rax); // get shift count
1428 #ifdef _LP64
1429 __ pop_l(rax); // get shift value
1430 __ shlq(rax);
1431 #else
1432 __ pop_l(rax, rdx); // get shift value
1433 __ lshl(rdx, rax);
1434 #endif
1435 }
1436
1437 void TemplateTable::lshr() {
1438 #ifdef _LP64
1439 transition(itos, ltos);
1440 __ movl(rcx, rax); // get shift count
1441 __ pop_l(rax); // get shift value
1442 __ sarq(rax);
1443 #else
1444 transition(itos, ltos);
1445 __ mov(rcx, rax); // get shift count
1446 __ pop_l(rax, rdx); // get shift value
1447 __ lshr(rdx, rax, true);
1448 #endif
1449 }
1450
1451 void TemplateTable::lushr() {
1452 transition(itos, ltos);
1453 #ifdef _LP64
1454 __ movl(rcx, rax); // get shift count
1455 __ pop_l(rax); // get shift value
1456 __ shrq(rax);
1457 #else
1458 __ mov(rcx, rax); // get shift count
1459 __ pop_l(rax, rdx); // get shift value
1460 __ lshr(rdx, rax);
1461 #endif
1462 }
1463
1464 void TemplateTable::fop2(Operation op) {
1465 transition(ftos, ftos);
1466
1467 if (UseSSE >= 1) {
1468 switch (op) {
1469 case add:
1470 __ addss(xmm0, at_rsp());
1471 __ addptr(rsp, Interpreter::stackElementSize);
1472 break;
1473 case sub:
1474 __ movflt(xmm1, xmm0);
1475 __ pop_f(xmm0);
1476 __ subss(xmm0, xmm1);
1477 break;
1478 case mul:
1479 __ mulss(xmm0, at_rsp());
1480 __ addptr(rsp, Interpreter::stackElementSize);
1481 break;
1482 case div:
1483 __ movflt(xmm1, xmm0);
1484 __ pop_f(xmm0);
1485 __ divss(xmm0, xmm1);
1486 break;
1487 case rem:
1488 // On x86_64 platforms the SharedRuntime::frem method is called to perform the
1489 // modulo operation. The frem method calls the function
1490 // double fmod(double x, double y) in math.h. The documentation of fmod states:
1491 // "If x or y is a NaN, a NaN is returned." without specifying what type of NaN
1492 // (signalling or quiet) is returned.
1493 //
1494 // On x86_32 platforms the FPU is used to perform the modulo operation. The
1495 // reason is that on 32-bit Windows the sign of modulo operations diverges from
1496 // what is considered the standard (e.g., -0.0f % -3.14f is 0.0f (and not -0.0f).
1497 // The fprem instruction used on x86_32 is functionally equivalent to
1498 // SharedRuntime::frem in that it returns a NaN.
1499 #ifdef _LP64
1500 __ movflt(xmm1, xmm0);
1501 __ pop_f(xmm0);
1502 __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::frem), 2);
1503 #else
1504 __ push_f(xmm0);
1505 __ pop_f();
1506 __ fld_s(at_rsp());
1507 __ fremr(rax);
1508 __ f2ieee();
1509 __ pop(rax); // pop second operand off the stack
1510 __ push_f();
1511 __ pop_f(xmm0);
1512 #endif
1513 break;
1514 default:
1515 ShouldNotReachHere();
1516 break;
1517 }
1518 } else {
1519 #ifdef _LP64
1520 ShouldNotReachHere();
1521 #else
1522 switch (op) {
1523 case add: __ fadd_s (at_rsp()); break;
1524 case sub: __ fsubr_s(at_rsp()); break;
1525 case mul: __ fmul_s (at_rsp()); break;
1526 case div: __ fdivr_s(at_rsp()); break;
1527 case rem: __ fld_s (at_rsp()); __ fremr(rax); break;
1528 default : ShouldNotReachHere();
1529 }
1530 __ f2ieee();
1531 __ pop(rax); // pop second operand off the stack
1532 #endif // _LP64
1533 }
1534 }
1535
1536 void TemplateTable::dop2(Operation op) {
1537 transition(dtos, dtos);
1538 if (UseSSE >= 2) {
1539 switch (op) {
1540 case add:
1541 __ addsd(xmm0, at_rsp());
1542 __ addptr(rsp, 2 * Interpreter::stackElementSize);
1543 break;
1544 case sub:
1545 __ movdbl(xmm1, xmm0);
1546 __ pop_d(xmm0);
1547 __ subsd(xmm0, xmm1);
1548 break;
1549 case mul:
1550 __ mulsd(xmm0, at_rsp());
1551 __ addptr(rsp, 2 * Interpreter::stackElementSize);
1552 break;
1553 case div:
1554 __ movdbl(xmm1, xmm0);
1555 __ pop_d(xmm0);
1556 __ divsd(xmm0, xmm1);
1557 break;
1558 case rem:
1559 // Similar to fop2(), the modulo operation is performed using the
1560 // SharedRuntime::drem method (on x86_64 platforms) or using the
1561 // FPU (on x86_32 platforms) for the same reasons as mentioned in fop2().
1562 #ifdef _LP64
1563 __ movdbl(xmm1, xmm0);
1564 __ pop_d(xmm0);
1565 __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::drem), 2);
1566 #else
1567 __ push_d(xmm0);
1568 __ pop_d();
1569 __ fld_d(at_rsp());
1570 __ fremr(rax);
1571 __ d2ieee();
1572 __ pop(rax);
1573 __ pop(rdx);
1574 __ push_d();
1575 __ pop_d(xmm0);
1576 #endif
1577 break;
1578 default:
1579 ShouldNotReachHere();
1580 break;
1581 }
1582 } else {
1583 #ifdef _LP64
1584 ShouldNotReachHere();
1585 #else
1586 switch (op) {
1587 case add: __ fadd_d (at_rsp()); break;
1588 case sub: __ fsubr_d(at_rsp()); break;
1589 case mul: {
1590 Label L_strict;
1591 Label L_join;
1592 const Address access_flags (rcx, Method::access_flags_offset());
1593 __ get_method(rcx);
1594 __ movl(rcx, access_flags);
1595 __ testl(rcx, JVM_ACC_STRICT);
1596 __ jccb(Assembler::notZero, L_strict);
1597 __ fmul_d (at_rsp());
1598 __ jmpb(L_join);
1599 __ bind(L_strict);
1600 __ fld_x(ExternalAddress(StubRoutines::addr_fpu_subnormal_bias1()));
1601 __ fmulp();
1602 __ fmul_d (at_rsp());
1603 __ fld_x(ExternalAddress(StubRoutines::addr_fpu_subnormal_bias2()));
1604 __ fmulp();
1605 __ bind(L_join);
1606 break;
1607 }
1608 case div: {
1609 Label L_strict;
1610 Label L_join;
1611 const Address access_flags (rcx, Method::access_flags_offset());
1612 __ get_method(rcx);
1613 __ movl(rcx, access_flags);
1614 __ testl(rcx, JVM_ACC_STRICT);
1615 __ jccb(Assembler::notZero, L_strict);
1616 __ fdivr_d(at_rsp());
1617 __ jmp(L_join);
1618 __ bind(L_strict);
1619 __ fld_x(ExternalAddress(StubRoutines::addr_fpu_subnormal_bias1()));
1620 __ fmul_d (at_rsp());
1621 __ fdivrp();
1622 __ fld_x(ExternalAddress(StubRoutines::addr_fpu_subnormal_bias2()));
1623 __ fmulp();
1624 __ bind(L_join);
1625 break;
1626 }
1627 case rem: __ fld_d (at_rsp()); __ fremr(rax); break;
1628 default : ShouldNotReachHere();
1629 }
1630 __ d2ieee();
1631 // Pop double precision number from rsp.
1632 __ pop(rax);
1633 __ pop(rdx);
1634 #endif
1635 }
1636 }
1637
1638 void TemplateTable::ineg() {
1639 transition(itos, itos);
1640 __ negl(rax);
1641 }
1642
1643 void TemplateTable::lneg() {
1644 transition(ltos, ltos);
1645 LP64_ONLY(__ negq(rax));
1646 NOT_LP64(__ lneg(rdx, rax));
1647 }
1648
1649 // Note: 'double' and 'long long' have 32-bits alignment on x86.
1650 static jlong* double_quadword(jlong *adr, jlong lo, jlong hi) {
1651 // Use the expression (adr)&(~0xF) to provide 128-bits aligned address
1652 // of 128-bits operands for SSE instructions.
1653 jlong *operand = (jlong*)(((intptr_t)adr)&((intptr_t)(~0xF)));
1654 // Store the value to a 128-bits operand.
1655 operand[0] = lo;
1656 operand[1] = hi;
1657 return operand;
1658 }
1659
1660 // Buffer for 128-bits masks used by SSE instructions.
1661 static jlong float_signflip_pool[2*2];
1662 static jlong double_signflip_pool[2*2];
1663
1664 void TemplateTable::fneg() {
1665 transition(ftos, ftos);
1666 if (UseSSE >= 1) {
1667 static jlong *float_signflip = double_quadword(&float_signflip_pool[1], CONST64(0x8000000080000000), CONST64(0x8000000080000000));
1668 __ xorps(xmm0, ExternalAddress((address) float_signflip));
1669 } else {
1670 LP64_ONLY(ShouldNotReachHere());
1671 NOT_LP64(__ fchs());
1672 }
1673 }
1674
1675 void TemplateTable::dneg() {
1676 transition(dtos, dtos);
1677 if (UseSSE >= 2) {
1678 static jlong *double_signflip =
1679 double_quadword(&double_signflip_pool[1], CONST64(0x8000000000000000), CONST64(0x8000000000000000));
1680 __ xorpd(xmm0, ExternalAddress((address) double_signflip));
1681 } else {
1682 #ifdef _LP64
1683 ShouldNotReachHere();
1684 #else
1685 __ fchs();
1686 #endif
1687 }
1688 }
1689
1690 void TemplateTable::iinc() {
1691 transition(vtos, vtos);
1692 __ load_signed_byte(rdx, at_bcp(2)); // get constant
1693 locals_index(rbx);
1694 __ addl(iaddress(rbx), rdx);
1695 }
1696
1697 void TemplateTable::wide_iinc() {
1698 transition(vtos, vtos);
1699 __ movl(rdx, at_bcp(4)); // get constant
1700 locals_index_wide(rbx);
1701 __ bswapl(rdx); // swap bytes & sign-extend constant
1702 __ sarl(rdx, 16);
1703 __ addl(iaddress(rbx), rdx);
1704 // Note: should probably use only one movl to get both
1705 // the index and the constant -> fix this
1706 }
1707
1708 void TemplateTable::convert() {
1709 #ifdef _LP64
1710 // Checking
1711 #ifdef ASSERT
1712 {
1713 TosState tos_in = ilgl;
1714 TosState tos_out = ilgl;
1715 switch (bytecode()) {
1716 case Bytecodes::_i2l: // fall through
1717 case Bytecodes::_i2f: // fall through
1718 case Bytecodes::_i2d: // fall through
1719 case Bytecodes::_i2b: // fall through
1720 case Bytecodes::_i2c: // fall through
1721 case Bytecodes::_i2s: tos_in = itos; break;
1722 case Bytecodes::_l2i: // fall through
1723 case Bytecodes::_l2f: // fall through
1724 case Bytecodes::_l2d: tos_in = ltos; break;
1725 case Bytecodes::_f2i: // fall through
1726 case Bytecodes::_f2l: // fall through
1727 case Bytecodes::_f2d: tos_in = ftos; break;
1728 case Bytecodes::_d2i: // fall through
1729 case Bytecodes::_d2l: // fall through
1730 case Bytecodes::_d2f: tos_in = dtos; break;
1731 default : ShouldNotReachHere();
1732 }
1733 switch (bytecode()) {
1734 case Bytecodes::_l2i: // fall through
1735 case Bytecodes::_f2i: // fall through
1736 case Bytecodes::_d2i: // fall through
1737 case Bytecodes::_i2b: // fall through
1738 case Bytecodes::_i2c: // fall through
1739 case Bytecodes::_i2s: tos_out = itos; break;
1740 case Bytecodes::_i2l: // fall through
1741 case Bytecodes::_f2l: // fall through
1742 case Bytecodes::_d2l: tos_out = ltos; break;
1743 case Bytecodes::_i2f: // fall through
1744 case Bytecodes::_l2f: // fall through
1745 case Bytecodes::_d2f: tos_out = ftos; break;
1746 case Bytecodes::_i2d: // fall through
1747 case Bytecodes::_l2d: // fall through
1748 case Bytecodes::_f2d: tos_out = dtos; break;
1749 default : ShouldNotReachHere();
1750 }
1751 transition(tos_in, tos_out);
1752 }
1753 #endif // ASSERT
1754
1755 static const int64_t is_nan = 0x8000000000000000L;
1756
1757 // Conversion
1758 switch (bytecode()) {
1759 case Bytecodes::_i2l:
1760 __ movslq(rax, rax);
1761 break;
1762 case Bytecodes::_i2f:
1763 __ cvtsi2ssl(xmm0, rax);
1764 break;
1765 case Bytecodes::_i2d:
1766 __ cvtsi2sdl(xmm0, rax);
1767 break;
1768 case Bytecodes::_i2b:
1769 __ movsbl(rax, rax);
1770 break;
1771 case Bytecodes::_i2c:
1772 __ movzwl(rax, rax);
1773 break;
1774 case Bytecodes::_i2s:
1775 __ movswl(rax, rax);
1776 break;
1777 case Bytecodes::_l2i:
1778 __ movl(rax, rax);
1779 break;
1780 case Bytecodes::_l2f:
1781 __ cvtsi2ssq(xmm0, rax);
1782 break;
1783 case Bytecodes::_l2d:
1784 __ cvtsi2sdq(xmm0, rax);
1785 break;
1786 case Bytecodes::_f2i:
1787 {
1788 Label L;
1789 __ cvttss2sil(rax, xmm0);
1790 __ cmpl(rax, 0x80000000); // NaN or overflow/underflow?
1791 __ jcc(Assembler::notEqual, L);
1792 __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::f2i), 1);
1793 __ bind(L);
1794 }
1795 break;
1796 case Bytecodes::_f2l:
1797 {
1798 Label L;
1799 __ cvttss2siq(rax, xmm0);
1800 // NaN or overflow/underflow?
1801 __ cmp64(rax, ExternalAddress((address) &is_nan));
1802 __ jcc(Assembler::notEqual, L);
1803 __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::f2l), 1);
1804 __ bind(L);
1805 }
1806 break;
1807 case Bytecodes::_f2d:
1808 __ cvtss2sd(xmm0, xmm0);
1809 break;
1810 case Bytecodes::_d2i:
1811 {
1812 Label L;
1813 __ cvttsd2sil(rax, xmm0);
1814 __ cmpl(rax, 0x80000000); // NaN or overflow/underflow?
1815 __ jcc(Assembler::notEqual, L);
1816 __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::d2i), 1);
1817 __ bind(L);
1818 }
1819 break;
1820 case Bytecodes::_d2l:
1821 {
1822 Label L;
1823 __ cvttsd2siq(rax, xmm0);
1824 // NaN or overflow/underflow?
1825 __ cmp64(rax, ExternalAddress((address) &is_nan));
1826 __ jcc(Assembler::notEqual, L);
1827 __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::d2l), 1);
1828 __ bind(L);
1829 }
1830 break;
1831 case Bytecodes::_d2f:
1832 __ cvtsd2ss(xmm0, xmm0);
1833 break;
1834 default:
1835 ShouldNotReachHere();
1836 }
1837 #else
1838 // Checking
1839 #ifdef ASSERT
1840 { TosState tos_in = ilgl;
1841 TosState tos_out = ilgl;
1842 switch (bytecode()) {
1843 case Bytecodes::_i2l: // fall through
1844 case Bytecodes::_i2f: // fall through
1845 case Bytecodes::_i2d: // fall through
1846 case Bytecodes::_i2b: // fall through
1847 case Bytecodes::_i2c: // fall through
1848 case Bytecodes::_i2s: tos_in = itos; break;
1849 case Bytecodes::_l2i: // fall through
1850 case Bytecodes::_l2f: // fall through
1851 case Bytecodes::_l2d: tos_in = ltos; break;
1852 case Bytecodes::_f2i: // fall through
1853 case Bytecodes::_f2l: // fall through
1854 case Bytecodes::_f2d: tos_in = ftos; break;
1855 case Bytecodes::_d2i: // fall through
1856 case Bytecodes::_d2l: // fall through
1857 case Bytecodes::_d2f: tos_in = dtos; break;
1858 default : ShouldNotReachHere();
1859 }
1860 switch (bytecode()) {
1861 case Bytecodes::_l2i: // fall through
1862 case Bytecodes::_f2i: // fall through
1863 case Bytecodes::_d2i: // fall through
1864 case Bytecodes::_i2b: // fall through
1865 case Bytecodes::_i2c: // fall through
1866 case Bytecodes::_i2s: tos_out = itos; break;
1867 case Bytecodes::_i2l: // fall through
1868 case Bytecodes::_f2l: // fall through
1869 case Bytecodes::_d2l: tos_out = ltos; break;
1870 case Bytecodes::_i2f: // fall through
1871 case Bytecodes::_l2f: // fall through
1872 case Bytecodes::_d2f: tos_out = ftos; break;
1873 case Bytecodes::_i2d: // fall through
1874 case Bytecodes::_l2d: // fall through
1875 case Bytecodes::_f2d: tos_out = dtos; break;
1876 default : ShouldNotReachHere();
1877 }
1878 transition(tos_in, tos_out);
1879 }
1880 #endif // ASSERT
1881
1882 // Conversion
1883 // (Note: use push(rcx)/pop(rcx) for 1/2-word stack-ptr manipulation)
1884 switch (bytecode()) {
1885 case Bytecodes::_i2l:
1886 __ extend_sign(rdx, rax);
1887 break;
1888 case Bytecodes::_i2f:
1889 if (UseSSE >= 1) {
1890 __ cvtsi2ssl(xmm0, rax);
1891 } else {
1892 __ push(rax); // store int on tos
1893 __ fild_s(at_rsp()); // load int to ST0
1894 __ f2ieee(); // truncate to float size
1895 __ pop(rcx); // adjust rsp
1896 }
1897 break;
1898 case Bytecodes::_i2d:
1899 if (UseSSE >= 2) {
1900 __ cvtsi2sdl(xmm0, rax);
1901 } else {
1902 __ push(rax); // add one slot for d2ieee()
1903 __ push(rax); // store int on tos
1904 __ fild_s(at_rsp()); // load int to ST0
1905 __ d2ieee(); // truncate to double size
1906 __ pop(rcx); // adjust rsp
1907 __ pop(rcx);
1908 }
1909 break;
1910 case Bytecodes::_i2b:
1911 __ shll(rax, 24); // truncate upper 24 bits
1912 __ sarl(rax, 24); // and sign-extend byte
1913 LP64_ONLY(__ movsbl(rax, rax));
1914 break;
1915 case Bytecodes::_i2c:
1916 __ andl(rax, 0xFFFF); // truncate upper 16 bits
1917 LP64_ONLY(__ movzwl(rax, rax));
1918 break;
1919 case Bytecodes::_i2s:
1920 __ shll(rax, 16); // truncate upper 16 bits
1921 __ sarl(rax, 16); // and sign-extend short
1922 LP64_ONLY(__ movswl(rax, rax));
1923 break;
1924 case Bytecodes::_l2i:
1925 /* nothing to do */
1926 break;
1927 case Bytecodes::_l2f:
1928 // On 64-bit platforms, the cvtsi2ssq instruction is used to convert
1929 // 64-bit long values to floats. On 32-bit platforms it is not possible
1930 // to use that instruction with 64-bit operands, therefore the FPU is
1931 // used to perform the conversion.
1932 __ push(rdx); // store long on tos
1933 __ push(rax);
1934 __ fild_d(at_rsp()); // load long to ST0
1935 __ f2ieee(); // truncate to float size
1936 __ pop(rcx); // adjust rsp
1937 __ pop(rcx);
1938 if (UseSSE >= 1) {
1939 __ push_f();
1940 __ pop_f(xmm0);
1941 }
1942 break;
1943 case Bytecodes::_l2d:
1944 // On 32-bit platforms the FPU is used for conversion because on
1945 // 32-bit platforms it is not not possible to use the cvtsi2sdq
1946 // instruction with 64-bit operands.
1947 __ push(rdx); // store long on tos
1948 __ push(rax);
1949 __ fild_d(at_rsp()); // load long to ST0
1950 __ d2ieee(); // truncate to double size
1951 __ pop(rcx); // adjust rsp
1952 __ pop(rcx);
1953 if (UseSSE >= 2) {
1954 __ push_d();
1955 __ pop_d(xmm0);
1956 }
1957 break;
1958 case Bytecodes::_f2i:
1959 // SharedRuntime::f2i does not differentiate between sNaNs and qNaNs
1960 // as it returns 0 for any NaN.
1961 if (UseSSE >= 1) {
1962 __ push_f(xmm0);
1963 } else {
1964 __ push(rcx); // reserve space for argument
1965 __ fstp_s(at_rsp()); // pass float argument on stack
1966 }
1967 __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::f2i), 1);
1968 break;
1969 case Bytecodes::_f2l:
1970 // SharedRuntime::f2l does not differentiate between sNaNs and qNaNs
1971 // as it returns 0 for any NaN.
1972 if (UseSSE >= 1) {
1973 __ push_f(xmm0);
1974 } else {
1975 __ push(rcx); // reserve space for argument
1976 __ fstp_s(at_rsp()); // pass float argument on stack
1977 }
1978 __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::f2l), 1);
1979 break;
1980 case Bytecodes::_f2d:
1981 if (UseSSE < 1) {
1982 /* nothing to do */
1983 } else if (UseSSE == 1) {
1984 __ push_f(xmm0);
1985 __ pop_f();
1986 } else { // UseSSE >= 2
1987 __ cvtss2sd(xmm0, xmm0);
1988 }
1989 break;
1990 case Bytecodes::_d2i:
1991 if (UseSSE >= 2) {
1992 __ push_d(xmm0);
1993 } else {
1994 __ push(rcx); // reserve space for argument
1995 __ push(rcx);
1996 __ fstp_d(at_rsp()); // pass double argument on stack
1997 }
1998 __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::d2i), 2);
1999 break;
2000 case Bytecodes::_d2l:
2001 if (UseSSE >= 2) {
2002 __ push_d(xmm0);
2003 } else {
2004 __ push(rcx); // reserve space for argument
2005 __ push(rcx);
2006 __ fstp_d(at_rsp()); // pass double argument on stack
2007 }
2008 __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::d2l), 2);
2009 break;
2010 case Bytecodes::_d2f:
2011 if (UseSSE <= 1) {
2012 __ push(rcx); // reserve space for f2ieee()
2013 __ f2ieee(); // truncate to float size
2014 __ pop(rcx); // adjust rsp
2015 if (UseSSE == 1) {
2016 // The cvtsd2ss instruction is not available if UseSSE==1, therefore
2017 // the conversion is performed using the FPU in this case.
2018 __ push_f();
2019 __ pop_f(xmm0);
2020 }
2021 } else { // UseSSE >= 2
2022 __ cvtsd2ss(xmm0, xmm0);
2023 }
2024 break;
2025 default :
2026 ShouldNotReachHere();
2027 }
2028 #endif
2029 }
2030
2031 void TemplateTable::lcmp() {
2032 transition(ltos, itos);
2033 #ifdef _LP64
2034 Label done;
2035 __ pop_l(rdx);
2036 __ cmpq(rdx, rax);
2037 __ movl(rax, -1);
2038 __ jccb(Assembler::less, done);
2039 __ setb(Assembler::notEqual, rax);
2040 __ movzbl(rax, rax);
2041 __ bind(done);
2042 #else
2043
2044 // y = rdx:rax
2045 __ pop_l(rbx, rcx); // get x = rcx:rbx
2046 __ lcmp2int(rcx, rbx, rdx, rax);// rcx := cmp(x, y)
2047 __ mov(rax, rcx);
2048 #endif
2049 }
2050
2051 void TemplateTable::float_cmp(bool is_float, int unordered_result) {
2052 if ((is_float && UseSSE >= 1) ||
2053 (!is_float && UseSSE >= 2)) {
2054 Label done;
2055 if (is_float) {
2056 // XXX get rid of pop here, use ... reg, mem32
2057 __ pop_f(xmm1);
2058 __ ucomiss(xmm1, xmm0);
2059 } else {
2060 // XXX get rid of pop here, use ... reg, mem64
2061 __ pop_d(xmm1);
2062 __ ucomisd(xmm1, xmm0);
2063 }
2064 if (unordered_result < 0) {
2065 __ movl(rax, -1);
2066 __ jccb(Assembler::parity, done);
2067 __ jccb(Assembler::below, done);
2068 __ setb(Assembler::notEqual, rdx);
2069 __ movzbl(rax, rdx);
2070 } else {
2071 __ movl(rax, 1);
2072 __ jccb(Assembler::parity, done);
2073 __ jccb(Assembler::above, done);
2074 __ movl(rax, 0);
2075 __ jccb(Assembler::equal, done);
2076 __ decrementl(rax);
2077 }
2078 __ bind(done);
2079 } else {
2080 #ifdef _LP64
2081 ShouldNotReachHere();
2082 #else
2083 if (is_float) {
2084 __ fld_s(at_rsp());
2085 } else {
2086 __ fld_d(at_rsp());
2087 __ pop(rdx);
2088 }
2089 __ pop(rcx);
2090 __ fcmp2int(rax, unordered_result < 0);
2091 #endif // _LP64
2092 }
2093 }
2094
2095 void TemplateTable::branch(bool is_jsr, bool is_wide) {
2096 __ get_method(rcx); // rcx holds method
2097 __ profile_taken_branch(rax, rbx); // rax holds updated MDP, rbx
2098 // holds bumped taken count
2099
2100 const ByteSize be_offset = MethodCounters::backedge_counter_offset() +
2101 InvocationCounter::counter_offset();
2102 const ByteSize inv_offset = MethodCounters::invocation_counter_offset() +
2103 InvocationCounter::counter_offset();
2104
2105 // Load up edx with the branch displacement
2106 if (is_wide) {
2107 __ movl(rdx, at_bcp(1));
2108 } else {
2109 __ load_signed_short(rdx, at_bcp(1));
2110 }
2111 __ bswapl(rdx);
2112
2113 if (!is_wide) {
2114 __ sarl(rdx, 16);
2115 }
2116 LP64_ONLY(__ movl2ptr(rdx, rdx));
2117
2118 // Handle all the JSR stuff here, then exit.
2119 // It's much shorter and cleaner than intermingling with the non-JSR
2120 // normal-branch stuff occurring below.
2121 if (is_jsr) {
2122 // Pre-load the next target bytecode into rbx
2123 __ load_unsigned_byte(rbx, Address(rbcp, rdx, Address::times_1, 0));
2124
2125 // compute return address as bci in rax
2126 __ lea(rax, at_bcp((is_wide ? 5 : 3) -
2127 in_bytes(ConstMethod::codes_offset())));
2128 __ subptr(rax, Address(rcx, Method::const_offset()));
2129 // Adjust the bcp in r13 by the displacement in rdx
2130 __ addptr(rbcp, rdx);
2131 // jsr returns atos that is not an oop
2132 __ push_i(rax);
2133 __ dispatch_only(vtos);
2134 return;
2135 }
2136
2137 // Normal (non-jsr) branch handling
2138
2139 // Adjust the bcp in r13 by the displacement in rdx
2140 __ addptr(rbcp, rdx);
2141
2142 assert(UseLoopCounter || !UseOnStackReplacement,
2143 "on-stack-replacement requires loop counters");
2144 Label backedge_counter_overflow;
2145 Label profile_method;
2146 Label dispatch;
2147 if (UseLoopCounter) {
2148 // increment backedge counter for backward branches
2149 // rax: MDO
2150 // rbx: MDO bumped taken-count
2151 // rcx: method
2152 // rdx: target offset
2153 // r13: target bcp
2154 // r14: locals pointer
2155 __ testl(rdx, rdx); // check if forward or backward branch
2156 __ jcc(Assembler::positive, dispatch); // count only if backward branch
2157
2158 // check if MethodCounters exists
2159 Label has_counters;
2160 __ movptr(rax, Address(rcx, Method::method_counters_offset()));
2161 __ testptr(rax, rax);
2162 __ jcc(Assembler::notZero, has_counters);
2163 __ push(rdx);
2164 __ push(rcx);
2165 __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::build_method_counters),
2166 rcx);
2167 __ pop(rcx);
2168 __ pop(rdx);
2169 __ movptr(rax, Address(rcx, Method::method_counters_offset()));
2170 __ testptr(rax, rax);
2171 __ jcc(Assembler::zero, dispatch);
2172 __ bind(has_counters);
2173
2174 if (TieredCompilation) {
2175 Label no_mdo;
2176 int increment = InvocationCounter::count_increment;
2177 if (ProfileInterpreter) {
2178 // Are we profiling?
2179 __ movptr(rbx, Address(rcx, in_bytes(Method::method_data_offset())));
2180 __ testptr(rbx, rbx);
2181 __ jccb(Assembler::zero, no_mdo);
2182 // Increment the MDO backedge counter
2183 const Address mdo_backedge_counter(rbx, in_bytes(MethodData::backedge_counter_offset()) +
2184 in_bytes(InvocationCounter::counter_offset()));
2185 const Address mask(rbx, in_bytes(MethodData::backedge_mask_offset()));
2186 __ increment_mask_and_jump(mdo_backedge_counter, increment, mask,
2187 rax, false, Assembler::zero, &backedge_counter_overflow);
2188 __ jmp(dispatch);
2189 }
2190 __ bind(no_mdo);
2191 // Increment backedge counter in MethodCounters*
2192 __ movptr(rcx, Address(rcx, Method::method_counters_offset()));
2193 const Address mask(rcx, in_bytes(MethodCounters::backedge_mask_offset()));
2194 __ increment_mask_and_jump(Address(rcx, be_offset), increment, mask,
2195 rax, false, Assembler::zero, &backedge_counter_overflow);
2196 } else { // not TieredCompilation
2197 // increment counter
2198 __ movptr(rcx, Address(rcx, Method::method_counters_offset()));
2199 __ movl(rax, Address(rcx, be_offset)); // load backedge counter
2200 __ incrementl(rax, InvocationCounter::count_increment); // increment counter
2201 __ movl(Address(rcx, be_offset), rax); // store counter
2202
2203 __ movl(rax, Address(rcx, inv_offset)); // load invocation counter
2204
2205 __ andl(rax, InvocationCounter::count_mask_value); // and the status bits
2206 __ addl(rax, Address(rcx, be_offset)); // add both counters
2207
2208 if (ProfileInterpreter) {
2209 // Test to see if we should create a method data oop
2210 __ cmp32(rax, Address(rcx, in_bytes(MethodCounters::interpreter_profile_limit_offset())));
2211 __ jcc(Assembler::less, dispatch);
2212
2213 // if no method data exists, go to profile method
2214 __ test_method_data_pointer(rax, profile_method);
2215
2216 if (UseOnStackReplacement) {
2217 // check for overflow against rbx which is the MDO taken count
2218 __ cmp32(rbx, Address(rcx, in_bytes(MethodCounters::interpreter_backward_branch_limit_offset())));
2219 __ jcc(Assembler::below, dispatch);
2220
2221 // When ProfileInterpreter is on, the backedge_count comes
2222 // from the MethodData*, which value does not get reset on
2223 // the call to frequency_counter_overflow(). To avoid
2224 // excessive calls to the overflow routine while the method is
2225 // being compiled, add a second test to make sure the overflow
2226 // function is called only once every overflow_frequency.
2227 const int overflow_frequency = 1024;
2228 __ andl(rbx, overflow_frequency - 1);
2229 __ jcc(Assembler::zero, backedge_counter_overflow);
2230
2231 }
2232 } else {
2233 if (UseOnStackReplacement) {
2234 // check for overflow against rax, which is the sum of the
2235 // counters
2236 __ cmp32(rax, Address(rcx, in_bytes(MethodCounters::interpreter_backward_branch_limit_offset())));
2237 __ jcc(Assembler::aboveEqual, backedge_counter_overflow);
2238
2239 }
2240 }
2241 }
2242 __ bind(dispatch);
2243 }
2244
2245 // Pre-load the next target bytecode into rbx
2246 __ load_unsigned_byte(rbx, Address(rbcp, 0));
2247
2248 // continue with the bytecode @ target
2249 // rax: return bci for jsr's, unused otherwise
2250 // rbx: target bytecode
2251 // r13: target bcp
2252 __ dispatch_only(vtos);
2253
2254 if (UseLoopCounter) {
2255 if (ProfileInterpreter) {
2256 // Out-of-line code to allocate method data oop.
2257 __ bind(profile_method);
2258 __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::profile_method));
2259 __ load_unsigned_byte(rbx, Address(rbcp, 0)); // restore target bytecode
2260 __ set_method_data_pointer_for_bcp();
2261 __ jmp(dispatch);
2262 }
2263
2264 if (UseOnStackReplacement) {
2265 // invocation counter overflow
2266 __ bind(backedge_counter_overflow);
2267 __ negptr(rdx);
2268 __ addptr(rdx, rbcp); // branch bcp
2269 // IcoResult frequency_counter_overflow([JavaThread*], address branch_bcp)
2270 __ call_VM(noreg,
2271 CAST_FROM_FN_PTR(address,
2272 InterpreterRuntime::frequency_counter_overflow),
2273 rdx);
2274 __ load_unsigned_byte(rbx, Address(rbcp, 0)); // restore target bytecode
2275
2276 // rax: osr nmethod (osr ok) or NULL (osr not possible)
2277 // rbx: target bytecode
2278 // rdx: scratch
2279 // r14: locals pointer
2280 // r13: bcp
2281 __ testptr(rax, rax); // test result
2282 __ jcc(Assembler::zero, dispatch); // no osr if null
2283 // nmethod may have been invalidated (VM may block upon call_VM return)
2284 __ cmpb(Address(rax, nmethod::state_offset()), nmethod::in_use);
2285 __ jcc(Assembler::notEqual, dispatch);
2286
2287 // We have the address of an on stack replacement routine in rax
2288 // We need to prepare to execute the OSR method. First we must
2289 // migrate the locals and monitors off of the stack.
2290
2291 LP64_ONLY(__ mov(r13, rax)); // save the nmethod
2292 NOT_LP64(__ mov(rbx, rax)); // save the nmethod
2293 NOT_LP64(__ get_thread(rcx));
2294
2295 call_VM(noreg, CAST_FROM_FN_PTR(address, SharedRuntime::OSR_migration_begin));
2296
2297 // rax is OSR buffer, move it to expected parameter location
2298 LP64_ONLY(__ mov(j_rarg0, rax));
2299 NOT_LP64(__ mov(rcx, rax));
2300 // We use j_rarg definitions here so that registers don't conflict as parameter
2301 // registers change across platforms as we are in the midst of a calling
2302 // sequence to the OSR nmethod and we don't want collision. These are NOT parameters.
2303
2304 const Register retaddr = LP64_ONLY(j_rarg2) NOT_LP64(rdi);
2305 const Register sender_sp = LP64_ONLY(j_rarg1) NOT_LP64(rdx);
2306
2307
2308 // pop the interpreter frame
2309 __ movptr(sender_sp, Address(rbp, frame::interpreter_frame_sender_sp_offset * wordSize)); // get sender sp
2310 __ leave(); // remove frame anchor
2311 __ pop(retaddr); // get return address
2312 __ mov(rsp, sender_sp); // set sp to sender sp
2313 // Ensure compiled code always sees stack at proper alignment
2314 __ andptr(rsp, -(StackAlignmentInBytes));
2315
2316 // unlike x86 we need no specialized return from compiled code
2317 // to the interpreter or the call stub.
2318
2319 // push the return address
2320 __ push(retaddr);
2321
2322 // and begin the OSR nmethod
2323 LP64_ONLY(__ jmp(Address(r13, nmethod::osr_entry_point_offset())));
2324 NOT_LP64(__ jmp(Address(rbx, nmethod::osr_entry_point_offset())));
2325 }
2326 }
2327 }
2328
2329 void TemplateTable::if_0cmp(Condition cc) {
2330 transition(itos, vtos);
2331 // assume branch is more often taken than not (loops use backward branches)
2332 Label not_taken;
2333 __ testl(rax, rax);
2334 __ jcc(j_not(cc), not_taken);
2335 branch(false, false);
2336 __ bind(not_taken);
2337 __ profile_not_taken_branch(rax);
2338 }
2339
2340 void TemplateTable::if_icmp(Condition cc) {
2341 transition(itos, vtos);
2342 // assume branch is more often taken than not (loops use backward branches)
2343 Label not_taken;
2344 __ pop_i(rdx);
2345 __ cmpl(rdx, rax);
2346 __ jcc(j_not(cc), not_taken);
2347 branch(false, false);
2348 __ bind(not_taken);
2349 __ profile_not_taken_branch(rax);
2350 }
2351
2352 void TemplateTable::if_nullcmp(Condition cc) {
2353 transition(atos, vtos);
2354 // assume branch is more often taken than not (loops use backward branches)
2355 Label not_taken;
2356 __ testptr(rax, rax);
2357 __ jcc(j_not(cc), not_taken);
2358 branch(false, false);
2359 __ bind(not_taken);
2360 __ profile_not_taken_branch(rax);
2361 }
2362
2363 void TemplateTable::if_acmp(Condition cc) {
2364 transition(atos, vtos);
2365 // assume branch is more often taken than not (loops use backward branches)
2366 Label not_taken;
2367 __ pop_ptr(rdx);
2368 __ cmpptr(rdx, rax);
2369 __ jcc(j_not(cc), not_taken);
2370 branch(false, false);
2371 __ bind(not_taken);
2372 __ profile_not_taken_branch(rax);
2373 }
2374
2375 void TemplateTable::ret() {
2376 transition(vtos, vtos);
2377 locals_index(rbx);
2378 LP64_ONLY(__ movslq(rbx, iaddress(rbx))); // get return bci, compute return bcp
2379 NOT_LP64(__ movptr(rbx, iaddress(rbx)));
2380 __ profile_ret(rbx, rcx);
2381 __ get_method(rax);
2382 __ movptr(rbcp, Address(rax, Method::const_offset()));
2383 __ lea(rbcp, Address(rbcp, rbx, Address::times_1,
2384 ConstMethod::codes_offset()));
2385 __ dispatch_next(vtos);
2386 }
2387
2388 void TemplateTable::wide_ret() {
2389 transition(vtos, vtos);
2390 locals_index_wide(rbx);
2391 __ movptr(rbx, aaddress(rbx)); // get return bci, compute return bcp
2392 __ profile_ret(rbx, rcx);
2393 __ get_method(rax);
2394 __ movptr(rbcp, Address(rax, Method::const_offset()));
2395 __ lea(rbcp, Address(rbcp, rbx, Address::times_1, ConstMethod::codes_offset()));
2396 __ dispatch_next(vtos);
2397 }
2398
2399 void TemplateTable::tableswitch() {
2400 Label default_case, continue_execution;
2401 transition(itos, vtos);
2402
2403 // align r13/rsi
2404 __ lea(rbx, at_bcp(BytesPerInt));
2405 __ andptr(rbx, -BytesPerInt);
2406 // load lo & hi
2407 __ movl(rcx, Address(rbx, BytesPerInt));
2408 __ movl(rdx, Address(rbx, 2 * BytesPerInt));
2409 __ bswapl(rcx);
2410 __ bswapl(rdx);
2411 // check against lo & hi
2412 __ cmpl(rax, rcx);
2413 __ jcc(Assembler::less, default_case);
2414 __ cmpl(rax, rdx);
2415 __ jcc(Assembler::greater, default_case);
2416 // lookup dispatch offset
2417 __ subl(rax, rcx);
2418 __ movl(rdx, Address(rbx, rax, Address::times_4, 3 * BytesPerInt));
2419 __ profile_switch_case(rax, rbx, rcx);
2420 // continue execution
2421 __ bind(continue_execution);
2422 __ bswapl(rdx);
2423 LP64_ONLY(__ movl2ptr(rdx, rdx));
2424 __ load_unsigned_byte(rbx, Address(rbcp, rdx, Address::times_1));
2425 __ addptr(rbcp, rdx);
2426 __ dispatch_only(vtos);
2427 // handle default
2428 __ bind(default_case);
2429 __ profile_switch_default(rax);
2430 __ movl(rdx, Address(rbx, 0));
2431 __ jmp(continue_execution);
2432 }
2433
2434 void TemplateTable::lookupswitch() {
2435 transition(itos, itos);
2436 __ stop("lookupswitch bytecode should have been rewritten");
2437 }
2438
2439 void TemplateTable::fast_linearswitch() {
2440 transition(itos, vtos);
2441 Label loop_entry, loop, found, continue_execution;
2442 // bswap rax so we can avoid bswapping the table entries
2443 __ bswapl(rax);
2444 // align r13
2445 __ lea(rbx, at_bcp(BytesPerInt)); // btw: should be able to get rid of
2446 // this instruction (change offsets
2447 // below)
2448 __ andptr(rbx, -BytesPerInt);
2449 // set counter
2450 __ movl(rcx, Address(rbx, BytesPerInt));
2451 __ bswapl(rcx);
2452 __ jmpb(loop_entry);
2453 // table search
2454 __ bind(loop);
2455 __ cmpl(rax, Address(rbx, rcx, Address::times_8, 2 * BytesPerInt));
2456 __ jcc(Assembler::equal, found);
2457 __ bind(loop_entry);
2458 __ decrementl(rcx);
2459 __ jcc(Assembler::greaterEqual, loop);
2460 // default case
2461 __ profile_switch_default(rax);
2462 __ movl(rdx, Address(rbx, 0));
2463 __ jmp(continue_execution);
2464 // entry found -> get offset
2465 __ bind(found);
2466 __ movl(rdx, Address(rbx, rcx, Address::times_8, 3 * BytesPerInt));
2467 __ profile_switch_case(rcx, rax, rbx);
2468 // continue execution
2469 __ bind(continue_execution);
2470 __ bswapl(rdx);
2471 __ movl2ptr(rdx, rdx);
2472 __ load_unsigned_byte(rbx, Address(rbcp, rdx, Address::times_1));
2473 __ addptr(rbcp, rdx);
2474 __ dispatch_only(vtos);
2475 }
2476
2477 void TemplateTable::fast_binaryswitch() {
2478 transition(itos, vtos);
2479 // Implementation using the following core algorithm:
2480 //
2481 // int binary_search(int key, LookupswitchPair* array, int n) {
2482 // // Binary search according to "Methodik des Programmierens" by
2483 // // Edsger W. Dijkstra and W.H.J. Feijen, Addison Wesley Germany 1985.
2484 // int i = 0;
2485 // int j = n;
2486 // while (i+1 < j) {
2487 // // invariant P: 0 <= i < j <= n and (a[i] <= key < a[j] or Q)
2488 // // with Q: for all i: 0 <= i < n: key < a[i]
2489 // // where a stands for the array and assuming that the (inexisting)
2490 // // element a[n] is infinitely big.
2491 // int h = (i + j) >> 1;
2492 // // i < h < j
2493 // if (key < array[h].fast_match()) {
2494 // j = h;
2495 // } else {
2496 // i = h;
2497 // }
2498 // }
2499 // // R: a[i] <= key < a[i+1] or Q
2500 // // (i.e., if key is within array, i is the correct index)
2501 // return i;
2502 // }
2503
2504 // Register allocation
2505 const Register key = rax; // already set (tosca)
2506 const Register array = rbx;
2507 const Register i = rcx;
2508 const Register j = rdx;
2509 const Register h = rdi;
2510 const Register temp = rsi;
2511
2512 // Find array start
2513 NOT_LP64(__ save_bcp());
2514
2515 __ lea(array, at_bcp(3 * BytesPerInt)); // btw: should be able to
2516 // get rid of this
2517 // instruction (change
2518 // offsets below)
2519 __ andptr(array, -BytesPerInt);
2520
2521 // Initialize i & j
2522 __ xorl(i, i); // i = 0;
2523 __ movl(j, Address(array, -BytesPerInt)); // j = length(array);
2524
2525 // Convert j into native byteordering
2526 __ bswapl(j);
2527
2528 // And start
2529 Label entry;
2530 __ jmp(entry);
2531
2532 // binary search loop
2533 {
2534 Label loop;
2535 __ bind(loop);
2536 // int h = (i + j) >> 1;
2537 __ leal(h, Address(i, j, Address::times_1)); // h = i + j;
2538 __ sarl(h, 1); // h = (i + j) >> 1;
2539 // if (key < array[h].fast_match()) {
2540 // j = h;
2541 // } else {
2542 // i = h;
2543 // }
2544 // Convert array[h].match to native byte-ordering before compare
2545 __ movl(temp, Address(array, h, Address::times_8));
2546 __ bswapl(temp);
2547 __ cmpl(key, temp);
2548 // j = h if (key < array[h].fast_match())
2549 __ cmov32(Assembler::less, j, h);
2550 // i = h if (key >= array[h].fast_match())
2551 __ cmov32(Assembler::greaterEqual, i, h);
2552 // while (i+1 < j)
2553 __ bind(entry);
2554 __ leal(h, Address(i, 1)); // i+1
2555 __ cmpl(h, j); // i+1 < j
2556 __ jcc(Assembler::less, loop);
2557 }
2558
2559 // end of binary search, result index is i (must check again!)
2560 Label default_case;
2561 // Convert array[i].match to native byte-ordering before compare
2562 __ movl(temp, Address(array, i, Address::times_8));
2563 __ bswapl(temp);
2564 __ cmpl(key, temp);
2565 __ jcc(Assembler::notEqual, default_case);
2566
2567 // entry found -> j = offset
2568 __ movl(j , Address(array, i, Address::times_8, BytesPerInt));
2569 __ profile_switch_case(i, key, array);
2570 __ bswapl(j);
2571 LP64_ONLY(__ movslq(j, j));
2572
2573 NOT_LP64(__ restore_bcp());
2574 NOT_LP64(__ restore_locals()); // restore rdi
2575
2576 __ load_unsigned_byte(rbx, Address(rbcp, j, Address::times_1));
2577 __ addptr(rbcp, j);
2578 __ dispatch_only(vtos);
2579
2580 // default case -> j = default offset
2581 __ bind(default_case);
2582 __ profile_switch_default(i);
2583 __ movl(j, Address(array, -2 * BytesPerInt));
2584 __ bswapl(j);
2585 LP64_ONLY(__ movslq(j, j));
2586
2587 NOT_LP64(__ restore_bcp());
2588 NOT_LP64(__ restore_locals());
2589
2590 __ load_unsigned_byte(rbx, Address(rbcp, j, Address::times_1));
2591 __ addptr(rbcp, j);
2592 __ dispatch_only(vtos);
2593 }
2594
2595 void TemplateTable::_return(TosState state) {
2596 transition(state, state);
2597
2598 Register robj = LP64_ONLY(c_rarg1) NOT_LP64(rax);
2599
2600 assert(_desc->calls_vm(),
2601 "inconsistent calls_vm information"); // call in remove_activation
2602
2603 if (_desc->bytecode() == Bytecodes::_return_register_finalizer) {
2604 assert(state == vtos, "only valid state");
2605 __ movptr(robj, aaddress(0));
2606 __ load_klass(rdi, robj);
2607 __ movl(rdi, Address(rdi, Klass::access_flags_offset()));
2608 __ testl(rdi, JVM_ACC_HAS_FINALIZER);
2609 Label skip_register_finalizer;
2610 __ jcc(Assembler::zero, skip_register_finalizer);
2611
2612 __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::register_finalizer), robj);
2613
2614 __ bind(skip_register_finalizer);
2615 }
2616
2617 __ remove_activation(state, rbcp);
2618 __ jmp(rbcp);
2619 }
2620
2621 // ----------------------------------------------------------------------------
2622 // Volatile variables demand their effects be made known to all CPU's
2623 // in order. Store buffers on most chips allow reads & writes to
2624 // reorder; the JMM's ReadAfterWrite.java test fails in -Xint mode
2625 // without some kind of memory barrier (i.e., it's not sufficient that
2626 // the interpreter does not reorder volatile references, the hardware
2627 // also must not reorder them).
2628 //
2629 // According to the new Java Memory Model (JMM):
2630 // (1) All volatiles are serialized wrt to each other. ALSO reads &
2631 // writes act as aquire & release, so:
2632 // (2) A read cannot let unrelated NON-volatile memory refs that
2633 // happen after the read float up to before the read. It's OK for
2634 // non-volatile memory refs that happen before the volatile read to
2635 // float down below it.
2636 // (3) Similar a volatile write cannot let unrelated NON-volatile
2637 // memory refs that happen BEFORE the write float down to after the
2638 // write. It's OK for non-volatile memory refs that happen after the
2639 // volatile write to float up before it.
2640 //
2641 // We only put in barriers around volatile refs (they are expensive),
2642 // not _between_ memory refs (that would require us to track the
2643 // flavor of the previous memory refs). Requirements (2) and (3)
2644 // require some barriers before volatile stores and after volatile
2645 // loads. These nearly cover requirement (1) but miss the
2646 // volatile-store-volatile-load case. This final case is placed after
2647 // volatile-stores although it could just as well go before
2648 // volatile-loads.
2649
2650 void TemplateTable::volatile_barrier(Assembler::Membar_mask_bits order_constraint ) {
2651 // Helper function to insert a is-volatile test and memory barrier
2652 if(!os::is_MP()) return; // Not needed on single CPU
2653 __ membar(order_constraint);
2654 }
2655
2656 void TemplateTable::resolve_cache_and_index(int byte_no,
2657 Register Rcache,
2658 Register index,
2659 size_t index_size) {
2660 const Register temp = rbx;
2661 assert_different_registers(Rcache, index, temp);
2662
2663 Label resolved;
2664
2665 Bytecodes::Code code = bytecode();
2666 switch (code) {
2667 case Bytecodes::_nofast_getfield: code = Bytecodes::_getfield; break;
2668 case Bytecodes::_nofast_putfield: code = Bytecodes::_putfield; break;
2669 }
2670
2671 assert(byte_no == f1_byte || byte_no == f2_byte, "byte_no out of range");
2672 __ get_cache_and_index_and_bytecode_at_bcp(Rcache, index, temp, byte_no, 1, index_size);
2673 __ cmpl(temp, code); // have we resolved this bytecode?
2674 __ jcc(Assembler::equal, resolved);
2675
2676 // resolve first time through
2677 address entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_from_cache);
2678 __ movl(temp, code);
2679 __ call_VM(noreg, entry, temp);
2680 // Update registers with resolved info
2681 __ get_cache_and_index_at_bcp(Rcache, index, 1, index_size);
2682 __ bind(resolved);
2683 }
2684
2685 // The cache and index registers must be set before call
2686 void TemplateTable::load_field_cp_cache_entry(Register obj,
2687 Register cache,
2688 Register index,
2689 Register off,
2690 Register flags,
2691 bool is_static = false) {
2692 assert_different_registers(cache, index, flags, off);
2693
2694 ByteSize cp_base_offset = ConstantPoolCache::base_offset();
2695 // Field offset
2696 __ movptr(off, Address(cache, index, Address::times_ptr,
2697 in_bytes(cp_base_offset +
2698 ConstantPoolCacheEntry::f2_offset())));
2699 // Flags
2700 __ movl(flags, Address(cache, index, Address::times_ptr,
2701 in_bytes(cp_base_offset +
2702 ConstantPoolCacheEntry::flags_offset())));
2703
2704 // klass overwrite register
2705 if (is_static) {
2706 __ movptr(obj, Address(cache, index, Address::times_ptr,
2707 in_bytes(cp_base_offset +
2708 ConstantPoolCacheEntry::f1_offset())));
2709 const int mirror_offset = in_bytes(Klass::java_mirror_offset());
2710 __ movptr(obj, Address(obj, mirror_offset));
2711 }
2712 }
2713
2714 void TemplateTable::load_invoke_cp_cache_entry(int byte_no,
2715 Register method,
2716 Register itable_index,
2717 Register flags,
2718 bool is_invokevirtual,
2719 bool is_invokevfinal, /*unused*/
2720 bool is_invokedynamic) {
2721 // setup registers
2722 const Register cache = rcx;
2723 const Register index = rdx;
2724 assert_different_registers(method, flags);
2725 assert_different_registers(method, cache, index);
2726 assert_different_registers(itable_index, flags);
2727 assert_different_registers(itable_index, cache, index);
2728 // determine constant pool cache field offsets
2729 assert(is_invokevirtual == (byte_no == f2_byte), "is_invokevirtual flag redundant");
2730 const int method_offset = in_bytes(
2731 ConstantPoolCache::base_offset() +
2732 ((byte_no == f2_byte)
2733 ? ConstantPoolCacheEntry::f2_offset()
2734 : ConstantPoolCacheEntry::f1_offset()));
2735 const int flags_offset = in_bytes(ConstantPoolCache::base_offset() +
2736 ConstantPoolCacheEntry::flags_offset());
2737 // access constant pool cache fields
2738 const int index_offset = in_bytes(ConstantPoolCache::base_offset() +
2739 ConstantPoolCacheEntry::f2_offset());
2740
2741 size_t index_size = (is_invokedynamic ? sizeof(u4) : sizeof(u2));
2742 resolve_cache_and_index(byte_no, cache, index, index_size);
2743 __ movptr(method, Address(cache, index, Address::times_ptr, method_offset));
2744
2745 if (itable_index != noreg) {
2746 // pick up itable or appendix index from f2 also:
2747 __ movptr(itable_index, Address(cache, index, Address::times_ptr, index_offset));
2748 }
2749 __ movl(flags, Address(cache, index, Address::times_ptr, flags_offset));
2750 }
2751
2752 // The registers cache and index expected to be set before call.
2753 // Correct values of the cache and index registers are preserved.
2754 void TemplateTable::jvmti_post_field_access(Register cache,
2755 Register index,
2756 bool is_static,
2757 bool has_tos) {
2758 if (JvmtiExport::can_post_field_access()) {
2759 // Check to see if a field access watch has been set before we take
2760 // the time to call into the VM.
2761 Label L1;
2762 assert_different_registers(cache, index, rax);
2763 __ mov32(rax, ExternalAddress((address) JvmtiExport::get_field_access_count_addr()));
2764 __ testl(rax,rax);
2765 __ jcc(Assembler::zero, L1);
2766
2767 // cache entry pointer
2768 __ addptr(cache, in_bytes(ConstantPoolCache::base_offset()));
2769 __ shll(index, LogBytesPerWord);
2770 __ addptr(cache, index);
2771 if (is_static) {
2772 __ xorptr(rax, rax); // NULL object reference
2773 } else {
2774 __ pop(atos); // Get the object
2775 __ verify_oop(rax);
2776 __ push(atos); // Restore stack state
2777 }
2778 // rax,: object pointer or NULL
2779 // cache: cache entry pointer
2780 __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::post_field_access),
2781 rax, cache);
2782 __ get_cache_and_index_at_bcp(cache, index, 1);
2783 __ bind(L1);
2784 }
2785 }
2786
2787 void TemplateTable::pop_and_check_object(Register r) {
2788 __ pop_ptr(r);
2789 __ null_check(r); // for field access must check obj.
2790 __ verify_oop(r);
2791 }
2792
2793 void TemplateTable::getfield_or_static(int byte_no, bool is_static, RewriteControl rc, bool is_vgetfield) {
2794 transition(vtos, vtos);
2795
2796 const Register cache = rcx;
2797 const Register index = rdx;
2798 const Register obj = LP64_ONLY(c_rarg3) NOT_LP64(rcx);
2799 const Register off = rbx;
2800 const Register flags = rax;
2801 const Register bc = LP64_ONLY(c_rarg3) NOT_LP64(rcx); // uses same reg as obj, so don't mix them
2802
2803 resolve_cache_and_index(byte_no, cache, index, sizeof(u2));
2804 jvmti_post_field_access(cache, index, is_static, false);
2805 load_field_cp_cache_entry(obj, cache, index, off, flags, is_static);
2806
2807 Label Done;
2808
2809 const Address field(obj, off, Address::times_1, 0*wordSize);
2810 NOT_LP64(const Address hi(obj, off, Address::times_1, 1*wordSize));
2811
2812 Label notByte, notInt, notShort, notChar, notLong, notFloat, notObj, notValueType, notDouble;
2813
2814 __ shrl(flags, ConstantPoolCacheEntry::tos_state_shift);
2815 // Make sure we don't need to mask edx after the above shift
2816 assert(btos == 0, "change code, btos != 0");
2817
2818 __ andl(flags, ConstantPoolCacheEntry::tos_state_mask);
2819
2820 __ jcc(Assembler::notZero, notByte);
2821 // btos
2822 if (!is_static) pop_and_check_object(obj);
2823 __ load_signed_byte(rax, field);
2824 __ push(btos);
2825 // Rewrite bytecode to be faster
2826 if (!is_static && rc == may_rewrite) {
2827 patch_bytecode(Bytecodes::_fast_bgetfield, bc, rbx);
2828 }
2829 __ jmp(Done);
2830
2831 __ bind(notByte);
2832
2833 __ cmpl(flags, qtos);
2834 __ jcc(Assembler::notEqual, notValueType);
2835 // qtos
2836 if (is_static) {
2837 __ load_heap_oop(rax, field);
2838 __ push(qtos);
2839 // if (!is_static && !is_vgetfield) {
2840 // patch_bytecode(Bytecodes::_fast_qgetfield, bc, rbx);
2841 // }
2842 } else {
2843
2844 // cp cache entry pointer
2845 __ addptr(cache, in_bytes(ConstantPoolCache::base_offset()));
2846 __ shll(index, LogBytesPerWord);
2847 __ addptr(cache, index);
2848
2849 pop_and_check_object(rax); // not using obj because it contains cp cache entry
2850
2851 // rax,: object pointer or NULL
2852 // cache: cp cache entry pointer
2853 call_VM(rax, CAST_FROM_FN_PTR(address, InterpreterRuntime::qgetfield),
2854 rax, cache);
2855 __ verify_oop(rax);
2856 __ push(qtos);
2857 // Bytecode rewrite?
2858
2859 }
2860 __ jmp(Done);
2861
2862 __ bind(notValueType);
2863
2864 if (!is_static) pop_and_check_object(obj);
2865
2866 __ cmpl(flags, atos);
2867 __ jcc(Assembler::notEqual, notObj);
2868 // atos
2869 __ load_heap_oop(rax, field);
2870 __ push(atos);
2871 if (!is_static && rc == may_rewrite) {
2872 patch_bytecode(Bytecodes::_fast_agetfield, bc, rbx);
2873 }
2874 __ jmp(Done);
2875
2876 __ bind(notObj);
2877 __ cmpl(flags, itos);
2878 __ jcc(Assembler::notEqual, notInt);
2879 // itos
2880 __ movl(rax, field);
2881 __ push(itos);
2882 // Rewrite bytecode to be faster
2883 if (!is_static && rc == may_rewrite) {
2884 patch_bytecode(Bytecodes::_fast_igetfield, bc, rbx);
2885 }
2886 __ jmp(Done);
2887
2888 __ bind(notInt);
2889 __ cmpl(flags, ctos);
2890 __ jcc(Assembler::notEqual, notChar);
2891 // ctos
2892 __ load_unsigned_short(rax, field);
2893 __ push(ctos);
2894 // Rewrite bytecode to be faster
2895 if (!is_static && rc == may_rewrite) {
2896 patch_bytecode(Bytecodes::_fast_cgetfield, bc, rbx);
2897 }
2898 __ jmp(Done);
2899
2900 __ bind(notChar);
2901 __ cmpl(flags, stos);
2902 __ jcc(Assembler::notEqual, notShort);
2903 // stos
2904 __ load_signed_short(rax, field);
2905 __ push(stos);
2906 // Rewrite bytecode to be faster
2907 if (!is_static && rc == may_rewrite) {
2908 patch_bytecode(Bytecodes::_fast_sgetfield, bc, rbx);
2909 }
2910 __ jmp(Done);
2911
2912 __ bind(notShort);
2913 __ cmpl(flags, ltos);
2914 __ jcc(Assembler::notEqual, notLong);
2915 // ltos
2916
2917 #ifndef _LP64
2918 // Generate code as if volatile. There just aren't enough registers to
2919 // save that information and this code is faster than the test.
2920 __ fild_d(field); // Must load atomically
2921 __ subptr(rsp,2*wordSize); // Make space for store
2922 __ fistp_d(Address(rsp,0));
2923 __ pop(rax);
2924 __ pop(rdx);
2925 #else
2926 __ movq(rax, field);
2927 #endif
2928
2929 __ push(ltos);
2930 // Rewrite bytecode to be faster
2931 LP64_ONLY(if (!is_static && rc == may_rewrite) patch_bytecode(Bytecodes::_fast_lgetfield, bc, rbx));
2932 __ jmp(Done);
2933
2934 __ bind(notLong);
2935 __ cmpl(flags, ftos);
2936 __ jcc(Assembler::notEqual, notFloat);
2937 // ftos
2938
2939 __ load_float(field);
2940 __ push(ftos);
2941 // Rewrite bytecode to be faster
2942 if (!is_static && rc == may_rewrite) {
2943 patch_bytecode(Bytecodes::_fast_fgetfield, bc, rbx);
2944 }
2945 __ jmp(Done);
2946
2947 __ bind(notFloat);
2948 #ifdef ASSERT
2949 __ cmpl(flags, dtos);
2950 __ jcc(Assembler::notEqual, notDouble);
2951 #endif
2952 // dtos
2953 __ load_double(field);
2954 __ push(dtos);
2955 // Rewrite bytecode to be faster
2956 if (!is_static && rc == may_rewrite) {
2957 patch_bytecode(Bytecodes::_fast_dgetfield, bc, rbx);
2958 }
2959 #ifdef ASSERT
2960 __ jmp(Done);
2961
2962
2963 __ bind(notDouble);
2964 __ stop("Bad state");
2965 #endif
2966
2967 __ bind(Done);
2968 // [jk] not needed currently
2969 // volatile_barrier(Assembler::Membar_mask_bits(Assembler::LoadLoad |
2970 // Assembler::LoadStore));
2971 }
2972
2973 void TemplateTable::getfield(int byte_no) {
2974 getfield_or_static(byte_no, false);
2975 }
2976
2977 void TemplateTable::nofast_getfield(int byte_no) {
2978 getfield_or_static(byte_no, false, may_not_rewrite);
2979 }
2980
2981 void TemplateTable::getstatic(int byte_no) {
2982 getfield_or_static(byte_no, true);
2983 }
2984
2985 void TemplateTable::vgetfield(int byte_no) {
2986 // Value types are currently implemented as Java instances, let's try to
2987 // re-use the getfield code. This choice has to be revisited once value types
2988 // are implemented differently (either with immediate value or specific
2989 // storage. Note that the getfield() code rewrites the getfield bytecode
2990 // but current patching code cannot be applied to vgetfield.
2991 getfield_or_static(byte_no, false, may_not_rewrite, true);
2992 }
2993
2994 // The registers cache and index expected to be set before call.
2995 // The function may destroy various registers, just not the cache and index registers.
2996 void TemplateTable::jvmti_post_field_mod(Register cache, Register index, bool is_static) {
2997
2998 const Register robj = LP64_ONLY(c_rarg2) NOT_LP64(rax);
2999 const Register RBX = LP64_ONLY(c_rarg1) NOT_LP64(rbx);
3000 const Register RCX = LP64_ONLY(c_rarg3) NOT_LP64(rcx);
3001 const Register RDX = LP64_ONLY(rscratch1) NOT_LP64(rdx);
3002
3003 ByteSize cp_base_offset = ConstantPoolCache::base_offset();
3004
3005 if (JvmtiExport::can_post_field_modification()) {
3006 // Check to see if a field modification watch has been set before
3007 // we take the time to call into the VM.
3008 Label L1;
3009 assert_different_registers(cache, index, rax);
3010 __ mov32(rax, ExternalAddress((address)JvmtiExport::get_field_modification_count_addr()));
3011 __ testl(rax, rax);
3012 __ jcc(Assembler::zero, L1);
3013
3014 __ get_cache_and_index_at_bcp(robj, RDX, 1);
3015
3016
3017 if (is_static) {
3018 // Life is simple. Null out the object pointer.
3019 __ xorl(RBX, RBX);
3020
3021 } else {
3022 // Life is harder. The stack holds the value on top, followed by
3023 // the object. We don't know the size of the value, though; it
3024 // could be one or two words depending on its type. As a result,
3025 // we must find the type to determine where the object is.
3026 #ifndef _LP64
3027 Label two_word, valsize_known;
3028 #endif
3029 __ movl(RCX, Address(robj, RDX,
3030 Address::times_ptr,
3031 in_bytes(cp_base_offset +
3032 ConstantPoolCacheEntry::flags_offset())));
3033 NOT_LP64(__ mov(rbx, rsp));
3034 __ shrl(RCX, ConstantPoolCacheEntry::tos_state_shift);
3035
3036 // Make sure we don't need to mask rcx after the above shift
3037 ConstantPoolCacheEntry::verify_tos_state_shift();
3038 #ifdef _LP64
3039 __ movptr(c_rarg1, at_tos_p1()); // initially assume a one word jvalue
3040 __ cmpl(c_rarg3, ltos);
3041 __ cmovptr(Assembler::equal,
3042 c_rarg1, at_tos_p2()); // ltos (two word jvalue)
3043 __ cmpl(c_rarg3, dtos);
3044 __ cmovptr(Assembler::equal,
3045 c_rarg1, at_tos_p2()); // dtos (two word jvalue)
3046 #else
3047 __ cmpl(rcx, ltos);
3048 __ jccb(Assembler::equal, two_word);
3049 __ cmpl(rcx, dtos);
3050 __ jccb(Assembler::equal, two_word);
3051 __ addptr(rbx, Interpreter::expr_offset_in_bytes(1)); // one word jvalue (not ltos, dtos)
3052 __ jmpb(valsize_known);
3053
3054 __ bind(two_word);
3055 __ addptr(rbx, Interpreter::expr_offset_in_bytes(2)); // two words jvalue
3056
3057 __ bind(valsize_known);
3058 // setup object pointer
3059 __ movptr(rbx, Address(rbx, 0));
3060 #endif
3061 }
3062 // cache entry pointer
3063 __ addptr(robj, in_bytes(cp_base_offset));
3064 __ shll(RDX, LogBytesPerWord);
3065 __ addptr(robj, RDX);
3066 // object (tos)
3067 __ mov(RCX, rsp);
3068 // c_rarg1: object pointer set up above (NULL if static)
3069 // c_rarg2: cache entry pointer
3070 // c_rarg3: jvalue object on the stack
3071 __ call_VM(noreg,
3072 CAST_FROM_FN_PTR(address,
3073 InterpreterRuntime::post_field_modification),
3074 RBX, robj, RCX);
3075 __ get_cache_and_index_at_bcp(cache, index, 1);
3076 __ bind(L1);
3077 }
3078 }
3079
3080 void TemplateTable::putfield_or_static(int byte_no, bool is_static, RewriteControl rc) {
3081 transition(vtos, vtos);
3082
3083 const Register cache = rcx;
3084 const Register index = rdx;
3085 const Register obj = rcx;
3086 const Register off = rbx;
3087 const Register flags = rax;
3088 const Register bc = LP64_ONLY(c_rarg3) NOT_LP64(rcx);
3089
3090 resolve_cache_and_index(byte_no, cache, index, sizeof(u2));
3091 jvmti_post_field_mod(cache, index, is_static);
3092 load_field_cp_cache_entry(obj, cache, index, off, flags, is_static);
3093
3094 if (!is_static) {
3095 // cache entry pointer for qtos case
3096 __ addptr(cache, in_bytes(ConstantPoolCache::base_offset()));
3097 __ shll(index, LogBytesPerWord);
3098 __ addptr(cache, index);
3099 }
3100
3101 // [jk] not needed currently
3102 // volatile_barrier(Assembler::Membar_mask_bits(Assembler::LoadStore |
3103 // Assembler::StoreStore));
3104
3105 Label notVolatile, Done;
3106 __ movl(rdx, flags);
3107 __ shrl(rdx, ConstantPoolCacheEntry::is_volatile_shift);
3108 __ andl(rdx, 0x1);
3109
3110 // field addresses
3111 const Address field(obj, off, Address::times_1, 0*wordSize);
3112 NOT_LP64( const Address hi(obj, off, Address::times_1, 1*wordSize);)
3113
3114 Label notByte, notInt, notShort, notChar,
3115 notLong, notFloat, notObj, notValueType, notDouble;
3116
3117 __ shrl(flags, ConstantPoolCacheEntry::tos_state_shift);
3118
3119 assert(btos == 0, "change code, btos != 0");
3120 __ andl(flags, ConstantPoolCacheEntry::tos_state_mask);
3121 __ jcc(Assembler::notZero, notByte);
3122
3123 // btos
3124 {
3125 __ pop(btos);
3126 if (!is_static) pop_and_check_object(obj);
3127 __ movb(field, rax);
3128 if (!is_static && rc == may_rewrite) {
3129 patch_bytecode(Bytecodes::_fast_bputfield, bc, rbx, true, byte_no);
3130 }
3131 __ jmp(Done);
3132 }
3133
3134 __ bind(notByte);
3135 __ cmpl(flags, atos);
3136 __ jcc(Assembler::notEqual, notObj);
3137
3138 // atos
3139 {
3140 __ pop(atos);
3141 if (!is_static) pop_and_check_object(obj);
3142 // Store into the field
3143 do_oop_store(_masm, field, rax, _bs->kind(), false);
3144 if (!is_static && rc == may_rewrite) {
3145 patch_bytecode(Bytecodes::_fast_aputfield, bc, rbx, true, byte_no);
3146 }
3147 __ jmp(Done);
3148 }
3149
3150 __ bind(notObj);
3151 __ cmpl(flags, qtos);
3152 __ jcc(Assembler::notEqual, notValueType);
3153
3154 // qtos
3155 {
3156 __ pop(qtos); // => rax == value
3157 if (!is_static) {
3158 // value types in non-static fields are embedded
3159 // is cache still in rcx ?
3160 pop_and_check_object(rbx);
3161 call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::qputfield),
3162 rbx, rax, cache);
3163 __ jmp(notVolatile); // value types are never volatile
3164 } else {
3165 // Store into the static field
3166 // Value types in static fields are currently handled with oops
3167 do_oop_store(_masm, field, rax, _bs->kind(), false);
3168 }
3169 __ jmp(Done);
3170 }
3171
3172 __ bind(notValueType);
3173 __ cmpl(flags, itos);
3174 __ jcc(Assembler::notEqual, notInt);
3175
3176 // itos
3177 {
3178 __ pop(itos);
3179 if (!is_static) pop_and_check_object(obj);
3180 __ movl(field, rax);
3181 if (!is_static && rc == may_rewrite) {
3182 patch_bytecode(Bytecodes::_fast_iputfield, bc, rbx, true, byte_no);
3183 }
3184 __ jmp(Done);
3185 }
3186
3187 __ bind(notInt);
3188 __ cmpl(flags, ctos);
3189 __ jcc(Assembler::notEqual, notChar);
3190
3191 // ctos
3192 {
3193 __ pop(ctos);
3194 if (!is_static) pop_and_check_object(obj);
3195 __ movw(field, rax);
3196 if (!is_static && rc == may_rewrite) {
3197 patch_bytecode(Bytecodes::_fast_cputfield, bc, rbx, true, byte_no);
3198 }
3199 __ jmp(Done);
3200 }
3201
3202 __ bind(notChar);
3203 __ cmpl(flags, stos);
3204 __ jcc(Assembler::notEqual, notShort);
3205
3206 // stos
3207 {
3208 __ pop(stos);
3209 if (!is_static) pop_and_check_object(obj);
3210 __ movw(field, rax);
3211 if (!is_static && rc == may_rewrite) {
3212 patch_bytecode(Bytecodes::_fast_sputfield, bc, rbx, true, byte_no);
3213 }
3214 __ jmp(Done);
3215 }
3216
3217 __ bind(notShort);
3218 __ cmpl(flags, ltos);
3219 __ jcc(Assembler::notEqual, notLong);
3220
3221 // ltos
3222 #ifdef _LP64
3223 {
3224 __ pop(ltos);
3225 if (!is_static) pop_and_check_object(obj);
3226 __ movq(field, rax);
3227 if (!is_static && rc == may_rewrite) {
3228 patch_bytecode(Bytecodes::_fast_lputfield, bc, rbx, true, byte_no);
3229 }
3230 __ jmp(Done);
3231 }
3232 #else
3233 {
3234 Label notVolatileLong;
3235 __ testl(rdx, rdx);
3236 __ jcc(Assembler::zero, notVolatileLong);
3237
3238 __ pop(ltos); // overwrites rdx, do this after testing volatile.
3239 if (!is_static) pop_and_check_object(obj);
3240
3241 // Replace with real volatile test
3242 __ push(rdx);
3243 __ push(rax); // Must update atomically with FIST
3244 __ fild_d(Address(rsp,0)); // So load into FPU register
3245 __ fistp_d(field); // and put into memory atomically
3246 __ addptr(rsp, 2*wordSize);
3247 // volatile_barrier();
3248 volatile_barrier(Assembler::Membar_mask_bits(Assembler::StoreLoad |
3249 Assembler::StoreStore));
3250 // Don't rewrite volatile version
3251 __ jmp(notVolatile);
3252
3253 __ bind(notVolatileLong);
3254
3255 __ pop(ltos); // overwrites rdx
3256 if (!is_static) pop_and_check_object(obj);
3257 __ movptr(hi, rdx);
3258 __ movptr(field, rax);
3259 // Don't rewrite to _fast_lputfield for potential volatile case.
3260 __ jmp(notVolatile);
3261 }
3262 #endif // _LP64
3263
3264 __ bind(notLong);
3265 __ cmpl(flags, ftos);
3266 __ jcc(Assembler::notEqual, notFloat);
3267
3268 // ftos
3269 {
3270 __ pop(ftos);
3271 if (!is_static) pop_and_check_object(obj);
3272 __ store_float(field);
3273 if (!is_static && rc == may_rewrite) {
3274 patch_bytecode(Bytecodes::_fast_fputfield, bc, rbx, true, byte_no);
3275 }
3276 __ jmp(Done);
3277 }
3278
3279 __ bind(notFloat);
3280 #ifdef ASSERT
3281 __ cmpl(flags, dtos);
3282 __ jcc(Assembler::notEqual, notDouble);
3283 #endif
3284
3285 // dtos
3286 {
3287 __ pop(dtos);
3288 if (!is_static) pop_and_check_object(obj);
3289 __ store_double(field);
3290 if (!is_static && rc == may_rewrite) {
3291 patch_bytecode(Bytecodes::_fast_dputfield, bc, rbx, true, byte_no);
3292 }
3293 }
3294
3295 #ifdef ASSERT
3296 __ jmp(Done);
3297
3298 __ bind(notDouble);
3299 __ stop("Bad state");
3300 #endif
3301
3302 __ bind(Done);
3303
3304 // Check for volatile store
3305 __ testl(rdx, rdx);
3306 __ jcc(Assembler::zero, notVolatile);
3307 volatile_barrier(Assembler::Membar_mask_bits(Assembler::StoreLoad |
3308 Assembler::StoreStore));
3309 __ bind(notVolatile);
3310 }
3311
3312 void TemplateTable::putfield(int byte_no) {
3313 putfield_or_static(byte_no, false);
3314 }
3315
3316 void TemplateTable::nofast_putfield(int byte_no) {
3317 putfield_or_static(byte_no, false, may_not_rewrite);
3318 }
3319
3320 void TemplateTable::putstatic(int byte_no) {
3321 putfield_or_static(byte_no, true);
3322 }
3323
3324 void TemplateTable::jvmti_post_fast_field_mod() {
3325
3326 const Register scratch = LP64_ONLY(c_rarg3) NOT_LP64(rcx);
3327
3328 if (JvmtiExport::can_post_field_modification()) {
3329 // Check to see if a field modification watch has been set before
3330 // we take the time to call into the VM.
3331 Label L2;
3332 __ mov32(scratch, ExternalAddress((address)JvmtiExport::get_field_modification_count_addr()));
3333 __ testl(scratch, scratch);
3334 __ jcc(Assembler::zero, L2);
3335 __ pop_ptr(rbx); // copy the object pointer from tos
3336 __ verify_oop(rbx);
3337 __ push_ptr(rbx); // put the object pointer back on tos
3338 // Save tos values before call_VM() clobbers them. Since we have
3339 // to do it for every data type, we use the saved values as the
3340 // jvalue object.
3341 switch (bytecode()) { // load values into the jvalue object
3342 case Bytecodes::_fast_qputfield: // fall through (value types in object are currently oops)
3343 case Bytecodes::_fast_aputfield: __ push_ptr(rax); break;
3344 case Bytecodes::_fast_bputfield: // fall through
3345 case Bytecodes::_fast_sputfield: // fall through
3346 case Bytecodes::_fast_cputfield: // fall through
3347 case Bytecodes::_fast_iputfield: __ push_i(rax); break;
3348 case Bytecodes::_fast_dputfield: __ push(dtos); break;
3349 case Bytecodes::_fast_fputfield: __ push(ftos); break;
3350 case Bytecodes::_fast_lputfield: __ push_l(rax); break;
3351
3352 default:
3353 ShouldNotReachHere();
3354 }
3355 __ mov(scratch, rsp); // points to jvalue on the stack
3356 // access constant pool cache entry
3357 LP64_ONLY(__ get_cache_entry_pointer_at_bcp(c_rarg2, rax, 1));
3358 NOT_LP64(__ get_cache_entry_pointer_at_bcp(rax, rdx, 1));
3359 __ verify_oop(rbx);
3360 // rbx: object pointer copied above
3361 // c_rarg2: cache entry pointer
3362 // c_rarg3: jvalue object on the stack
3363 LP64_ONLY(__ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::post_field_modification), rbx, c_rarg2, c_rarg3));
3364 NOT_LP64(__ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::post_field_modification), rbx, rax, rcx));
3365
3366 switch (bytecode()) { // restore tos values
3367 case Bytecodes::_fast_aputfield: __ pop_ptr(rax); break;
3368 case Bytecodes::_fast_bputfield: // fall through
3369 case Bytecodes::_fast_sputfield: // fall through
3370 case Bytecodes::_fast_cputfield: // fall through
3371 case Bytecodes::_fast_iputfield: __ pop_i(rax); break;
3372 case Bytecodes::_fast_dputfield: __ pop(dtos); break;
3373 case Bytecodes::_fast_fputfield: __ pop(ftos); break;
3374 case Bytecodes::_fast_lputfield: __ pop_l(rax); break;
3375 }
3376 __ bind(L2);
3377 }
3378 }
3379
3380 void TemplateTable::fast_storefield(TosState state) {
3381 transition(state, vtos);
3382
3383 ByteSize base = ConstantPoolCache::base_offset();
3384
3385 jvmti_post_fast_field_mod();
3386
3387 // access constant pool cache
3388 __ get_cache_and_index_at_bcp(rcx, rbx, 1);
3389
3390 // test for volatile with rdx but rdx is tos register for lputfield.
3391 __ movl(rdx, Address(rcx, rbx, Address::times_ptr,
3392 in_bytes(base +
3393 ConstantPoolCacheEntry::flags_offset())));
3394
3395 // replace index with field offset from cache entry
3396 __ movptr(rbx, Address(rcx, rbx, Address::times_ptr,
3397 in_bytes(base + ConstantPoolCacheEntry::f2_offset())));
3398
3399 // [jk] not needed currently
3400 // volatile_barrier(Assembler::Membar_mask_bits(Assembler::LoadStore |
3401 // Assembler::StoreStore));
3402
3403 Label notVolatile;
3404 __ shrl(rdx, ConstantPoolCacheEntry::is_volatile_shift);
3405 __ andl(rdx, 0x1);
3406
3407 // Get object from stack
3408 pop_and_check_object(rcx);
3409
3410 // field address
3411 const Address field(rcx, rbx, Address::times_1);
3412
3413 // access field
3414 switch (bytecode()) {
3415 // Value types are currently handled with oops
3416 case Bytecodes::_fast_qputfield: // Fallthrough
3417 case Bytecodes::_fast_aputfield:
3418 do_oop_store(_masm, field, rax, _bs->kind(), false);
3419 break;
3420 case Bytecodes::_fast_lputfield:
3421 #ifdef _LP64
3422 __ movq(field, rax);
3423 #else
3424 __ stop("should not be rewritten");
3425 #endif
3426 break;
3427 case Bytecodes::_fast_iputfield:
3428 __ movl(field, rax);
3429 break;
3430 case Bytecodes::_fast_bputfield:
3431 __ movb(field, rax);
3432 break;
3433 case Bytecodes::_fast_sputfield:
3434 // fall through
3435 case Bytecodes::_fast_cputfield:
3436 __ movw(field, rax);
3437 break;
3438 case Bytecodes::_fast_fputfield:
3439 __ store_float(field);
3440 break;
3441 case Bytecodes::_fast_dputfield:
3442 __ store_double(field);
3443 break;
3444 default:
3445 ShouldNotReachHere();
3446 }
3447
3448 // Check for volatile store
3449 __ testl(rdx, rdx);
3450 __ jcc(Assembler::zero, notVolatile);
3451 volatile_barrier(Assembler::Membar_mask_bits(Assembler::StoreLoad |
3452 Assembler::StoreStore));
3453 __ bind(notVolatile);
3454 }
3455
3456 void TemplateTable::fast_accessfield(TosState state) {
3457 transition(atos, state);
3458
3459 // Do the JVMTI work here to avoid disturbing the register state below
3460 if (JvmtiExport::can_post_field_access()) {
3461 // Check to see if a field access watch has been set before we
3462 // take the time to call into the VM.
3463 Label L1;
3464 __ mov32(rcx, ExternalAddress((address) JvmtiExport::get_field_access_count_addr()));
3465 __ testl(rcx, rcx);
3466 __ jcc(Assembler::zero, L1);
3467 // access constant pool cache entry
3468 LP64_ONLY(__ get_cache_entry_pointer_at_bcp(c_rarg2, rcx, 1));
3469 NOT_LP64(__ get_cache_entry_pointer_at_bcp(rcx, rdx, 1));
3470 __ verify_oop(rax);
3471 __ push_ptr(rax); // save object pointer before call_VM() clobbers it
3472 LP64_ONLY(__ mov(c_rarg1, rax));
3473 // c_rarg1: object pointer copied above
3474 // c_rarg2: cache entry pointer
3475 LP64_ONLY(__ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::post_field_access), c_rarg1, c_rarg2));
3476 NOT_LP64(__ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::post_field_access), rax, rcx));
3477 __ pop_ptr(rax); // restore object pointer
3478 __ bind(L1);
3479 }
3480
3481 // access constant pool cache
3482 __ get_cache_and_index_at_bcp(rcx, rbx, 1);
3483 // replace index with field offset from cache entry
3484 // [jk] not needed currently
3485 // if (os::is_MP()) {
3486 // __ movl(rdx, Address(rcx, rbx, Address::times_8,
3487 // in_bytes(ConstantPoolCache::base_offset() +
3488 // ConstantPoolCacheEntry::flags_offset())));
3489 // __ shrl(rdx, ConstantPoolCacheEntry::is_volatile_shift);
3490 // __ andl(rdx, 0x1);
3491 // }
3492 __ movptr(rbx, Address(rcx, rbx, Address::times_ptr,
3493 in_bytes(ConstantPoolCache::base_offset() +
3494 ConstantPoolCacheEntry::f2_offset())));
3495
3496 // rax: object
3497 __ verify_oop(rax);
3498 __ null_check(rax);
3499 Address field(rax, rbx, Address::times_1);
3500
3501 // access field
3502 switch (bytecode()) {
3503 // Value types are currently implemented with oops
3504 case Bytecodes::_fast_qgetfield: // Fallthrough
3505 case Bytecodes::_fast_agetfield:
3506 __ load_heap_oop(rax, field);
3507 __ verify_oop(rax);
3508 break;
3509 case Bytecodes::_fast_lgetfield:
3510 #ifdef _LP64
3511 __ movq(rax, field);
3512 #else
3513 __ stop("should not be rewritten");
3514 #endif
3515 break;
3516 case Bytecodes::_fast_igetfield:
3517 __ movl(rax, field);
3518 break;
3519 case Bytecodes::_fast_bgetfield:
3520 __ movsbl(rax, field);
3521 break;
3522 case Bytecodes::_fast_sgetfield:
3523 __ load_signed_short(rax, field);
3524 break;
3525 case Bytecodes::_fast_cgetfield:
3526 __ load_unsigned_short(rax, field);
3527 break;
3528 case Bytecodes::_fast_fgetfield:
3529 __ load_float(field);
3530 break;
3531 case Bytecodes::_fast_dgetfield:
3532 __ load_double(field);
3533 break;
3534 default:
3535 ShouldNotReachHere();
3536 }
3537 // [jk] not needed currently
3538 // if (os::is_MP()) {
3539 // Label notVolatile;
3540 // __ testl(rdx, rdx);
3541 // __ jcc(Assembler::zero, notVolatile);
3542 // __ membar(Assembler::LoadLoad);
3543 // __ bind(notVolatile);
3544 //};
3545 }
3546
3547 void TemplateTable::fast_xaccess(TosState state) {
3548 transition(vtos, state);
3549
3550 // get receiver
3551 __ movptr(rax, aaddress(0));
3552 // access constant pool cache
3553 __ get_cache_and_index_at_bcp(rcx, rdx, 2);
3554 __ movptr(rbx,
3555 Address(rcx, rdx, Address::times_ptr,
3556 in_bytes(ConstantPoolCache::base_offset() +
3557 ConstantPoolCacheEntry::f2_offset())));
3558 // make sure exception is reported in correct bcp range (getfield is
3559 // next instruction)
3560 __ increment(rbcp);
3561 __ null_check(rax);
3562 const Address field = Address(rax, rbx, Address::times_1, 0*wordSize);
3563 switch (state) {
3564 case itos:
3565 __ movl(rax, field);
3566 break;
3567 case atos:
3568 __ load_heap_oop(rax, field);
3569 __ verify_oop(rax);
3570 break;
3571 case ftos:
3572 __ load_float(field);
3573 break;
3574 default:
3575 ShouldNotReachHere();
3576 }
3577
3578 // [jk] not needed currently
3579 // if (os::is_MP()) {
3580 // Label notVolatile;
3581 // __ movl(rdx, Address(rcx, rdx, Address::times_8,
3582 // in_bytes(ConstantPoolCache::base_offset() +
3583 // ConstantPoolCacheEntry::flags_offset())));
3584 // __ shrl(rdx, ConstantPoolCacheEntry::is_volatile_shift);
3585 // __ testl(rdx, 0x1);
3586 // __ jcc(Assembler::zero, notVolatile);
3587 // __ membar(Assembler::LoadLoad);
3588 // __ bind(notVolatile);
3589 // }
3590
3591 __ decrement(rbcp);
3592 }
3593
3594 //-----------------------------------------------------------------------------
3595 // Calls
3596
3597 void TemplateTable::count_calls(Register method, Register temp) {
3598 // implemented elsewhere
3599 ShouldNotReachHere();
3600 }
3601
3602 void TemplateTable::prepare_invoke(int byte_no,
3603 Register method, // linked method (or i-klass)
3604 Register index, // itable index, MethodType, etc.
3605 Register recv, // if caller wants to see it
3606 Register flags // if caller wants to test it
3607 ) {
3608 // determine flags
3609 const Bytecodes::Code code = bytecode();
3610 const bool is_invokeinterface = code == Bytecodes::_invokeinterface;
3611 const bool is_invokedynamic = code == Bytecodes::_invokedynamic;
3612 const bool is_invokehandle = code == Bytecodes::_invokehandle;
3613 const bool is_invokevirtual = code == Bytecodes::_invokevirtual;
3614 const bool is_invokedirect = code == Bytecodes::_invokedirect;
3615 const bool is_invokespecial = code == Bytecodes::_invokespecial;
3616 const bool load_receiver = (recv != noreg);
3617 const bool save_flags = (flags != noreg);
3618 assert(load_receiver == (code != Bytecodes::_invokestatic && code != Bytecodes::_invokedynamic), "");
3619 assert(save_flags == (is_invokeinterface || is_invokevirtual || is_invokedirect), "need flags for vfinal");
3620 assert(flags == noreg || flags == rdx, "");
3621 assert(recv == noreg || recv == rcx, "");
3622
3623 // setup registers & access constant pool cache
3624 if (recv == noreg) recv = rcx;
3625 if (flags == noreg) flags = rdx;
3626 assert_different_registers(method, index, recv, flags);
3627
3628 // save 'interpreter return address'
3629 __ save_bcp();
3630
3631 load_invoke_cp_cache_entry(byte_no, method, index, flags,
3632 is_invokevirtual || is_invokedirect,
3633 false, is_invokedynamic);
3634
3635 // maybe push appendix to arguments (just before return address)
3636 if (is_invokedynamic || is_invokehandle) {
3637 Label L_no_push;
3638 __ testl(flags, (1 << ConstantPoolCacheEntry::has_appendix_shift));
3639 __ jcc(Assembler::zero, L_no_push);
3640 // Push the appendix as a trailing parameter.
3641 // This must be done before we get the receiver,
3642 // since the parameter_size includes it.
3643 __ push(rbx);
3644 __ mov(rbx, index);
3645 assert(ConstantPoolCacheEntry::_indy_resolved_references_appendix_offset == 0, "appendix expected at index+0");
3646 __ load_resolved_reference_at_index(index, rbx);
3647 __ pop(rbx);
3648 __ push(index); // push appendix (MethodType, CallSite, etc.)
3649 __ bind(L_no_push);
3650 }
3651
3652 // load receiver if needed (after appendix is pushed so parameter size is correct)
3653 // Note: no return address pushed yet
3654 if (load_receiver) {
3655 __ movl(recv, flags);
3656 __ andl(recv, ConstantPoolCacheEntry::parameter_size_mask);
3657 const int no_return_pc_pushed_yet = -1; // argument slot correction before we push return address
3658 const int receiver_is_at_end = -1; // back off one slot to get receiver
3659 Address recv_addr = __ argument_address(recv, no_return_pc_pushed_yet + receiver_is_at_end);
3660 __ movptr(recv, recv_addr);
3661 __ verify_oop(recv);
3662 }
3663
3664 if (save_flags) {
3665 __ movl(rbcp, flags);
3666 }
3667
3668 // compute return type
3669 __ shrl(flags, ConstantPoolCacheEntry::tos_state_shift);
3670 // Make sure we don't need to mask flags after the above shift
3671 ConstantPoolCacheEntry::verify_tos_state_shift();
3672 // load return address
3673 {
3674 const address table_addr = (address) Interpreter::invoke_return_entry_table_for(code);
3675 ExternalAddress table(table_addr);
3676 LP64_ONLY(__ lea(rscratch1, table));
3677 LP64_ONLY(__ movptr(flags, Address(rscratch1, flags, Address::times_ptr)));
3678 NOT_LP64(__ movptr(flags, ArrayAddress(table, Address(noreg, flags, Address::times_ptr))));
3679 }
3680
3681 // push return address
3682 __ push(flags);
3683
3684 // Restore flags value from the constant pool cache, and restore rsi
3685 // for later null checks. r13 is the bytecode pointer
3686 if (save_flags) {
3687 __ movl(flags, rbcp);
3688 __ restore_bcp();
3689 }
3690 }
3691
3692 void TemplateTable::invokevirtual_helper(Register index,
3693 Register recv,
3694 Register flags) {
3695 // Uses temporary registers rax, rdx
3696 assert_different_registers(index, recv, rax, rdx);
3697 assert(index == rbx, "");
3698 assert(recv == rcx, "");
3699
3700 // Test for an invoke of a final method
3701 Label notFinal;
3702 __ movl(rax, flags);
3703 __ andl(rax, (1 << ConstantPoolCacheEntry::is_vfinal_shift));
3704 __ jcc(Assembler::zero, notFinal);
3705
3706 const Register method = index; // method must be rbx
3707 assert(method == rbx,
3708 "Method* must be rbx for interpreter calling convention");
3709
3710 // do the call - the index is actually the method to call
3711 // that is, f2 is a vtable index if !is_vfinal, else f2 is a Method*
3712
3713 // It's final, need a null check here!
3714 __ null_check(recv);
3715
3716 // profile this call
3717 __ profile_final_call(rax);
3718 __ profile_arguments_type(rax, method, rbcp, true);
3719
3720 __ jump_from_interpreted(method, rax);
3721
3722 __ bind(notFinal);
3723
3724 // get receiver klass
3725 __ null_check(recv, oopDesc::klass_offset_in_bytes());
3726 __ load_klass(rax, recv);
3727
3728 // profile this call
3729 __ profile_virtual_call(rax, rlocals, rdx);
3730 // get target Method* & entry point
3731 __ lookup_virtual_method(rax, index, method);
3732 __ profile_called_method(method, rdx, rbcp);
3733
3734 __ profile_arguments_type(rdx, method, rbcp, true);
3735 __ jump_from_interpreted(method, rdx);
3736 }
3737
3738 void TemplateTable::invokevirtual(int byte_no) {
3739 transition(vtos, vtos);
3740 assert(byte_no == f2_byte, "use this argument");
3741 prepare_invoke(byte_no,
3742 rbx, // method or vtable index
3743 noreg, // unused itable index
3744 rcx, rdx); // recv, flags
3745
3746 // rbx: index
3747 // rcx: receiver
3748 // rdx: flags
3749
3750 invokevirtual_helper(rbx, rcx, rdx);
3751 }
3752
3753 /*
3754 * The invokedirect bytecode is implemented as an invokevirtual bytecode:
3755 * The implementation blindly uses resolve_invokevirtual() and assumes
3756 * that a final call will be picked at the end. (Currently unsure about interfaces,
3757 * default methods, and about where "Value.equals(QValue;)Z" should live.)
3758 */
3759 void TemplateTable::invokedirect(int byte_no) {
3760 transition(vtos, vtos);
3761 assert(byte_no == f2_byte, "use this argument");
3762 prepare_invoke(byte_no,
3763 rbx, // method (and not vtable index, as the method to be invoked should be final)
3764 noreg,
3765 rcx, rdx); // recv, flags
3766
3767 // Check if the method is final
3768 Label notFinal;
3769 __ movl(rax, rdx);
3770 __ andl(rax, (1 << ConstantPoolCacheEntry::is_vfinal_shift));
3771 __ jcc(Assembler::zero, notFinal);
3772
3773 __ verify_oop(rcx);
3774 __ null_check(rcx);
3775 __ profile_final_call(rax);
3776 __ profile_arguments_type(rax, rbx, rbcp, true);
3777 __ jump_from_interpreted(rbx, rax);
3778
3779 __ bind(notFinal);
3780 __ stop("Interpreter observed a non-final method as a target of an invokedirect instruction");
3781 }
3782
3783 void TemplateTable::invokespecial(int byte_no) {
3784 transition(vtos, vtos);
3785 assert(byte_no == f1_byte, "use this argument");
3786 prepare_invoke(byte_no, rbx, noreg, // get f1 Method*
3787 rcx); // get receiver also for null check
3788 __ verify_oop(rcx);
3789 __ null_check(rcx);
3790 // do the call
3791 __ profile_call(rax);
3792 __ profile_arguments_type(rax, rbx, rbcp, false);
3793 __ jump_from_interpreted(rbx, rax);
3794 }
3795
3796 void TemplateTable::invokestatic(int byte_no) {
3797 transition(vtos, vtos);
3798 assert(byte_no == f1_byte, "use this argument");
3799 prepare_invoke(byte_no, rbx); // get f1 Method*
3800 // do the call
3801 __ profile_call(rax);
3802 __ profile_arguments_type(rax, rbx, rbcp, false);
3803 __ jump_from_interpreted(rbx, rax);
3804 }
3805
3806
3807 void TemplateTable::fast_invokevfinal(int byte_no) {
3808 transition(vtos, vtos);
3809 assert(byte_no == f2_byte, "use this argument");
3810 __ stop("fast_invokevfinal not used on x86");
3811 }
3812
3813
3814 void TemplateTable::invokeinterface(int byte_no) {
3815 transition(vtos, vtos);
3816 assert(byte_no == f1_byte, "use this argument");
3817 prepare_invoke(byte_no, rax, rbx, // get f1 Klass*, f2 itable index
3818 rcx, rdx); // recv, flags
3819
3820 // rax: interface klass (from f1)
3821 // rbx: itable index (from f2)
3822 // rcx: receiver
3823 // rdx: flags
3824
3825 // Special case of invokeinterface called for virtual method of
3826 // java.lang.Object. See cpCacheOop.cpp for details.
3827 // This code isn't produced by javac, but could be produced by
3828 // another compliant java compiler.
3829 Label notMethod;
3830 __ movl(rlocals, rdx);
3831 __ andl(rlocals, (1 << ConstantPoolCacheEntry::is_forced_virtual_shift));
3832
3833 __ jcc(Assembler::zero, notMethod);
3834
3835 invokevirtual_helper(rbx, rcx, rdx);
3836 __ bind(notMethod);
3837
3838 // Get receiver klass into rdx - also a null check
3839 __ restore_locals(); // restore r14
3840 __ null_check(rcx, oopDesc::klass_offset_in_bytes());
3841 __ load_klass(rdx, rcx);
3842
3843 // profile this call
3844 __ profile_virtual_call(rdx, rbcp, rlocals);
3845
3846 Label no_such_interface, no_such_method;
3847
3848 __ lookup_interface_method(// inputs: rec. class, interface, itable index
3849 rdx, rax, rbx,
3850 // outputs: method, scan temp. reg
3851 rbx, rbcp,
3852 no_such_interface);
3853
3854 // rbx: Method* to call
3855 // rcx: receiver
3856 // Check for abstract method error
3857 // Note: This should be done more efficiently via a throw_abstract_method_error
3858 // interpreter entry point and a conditional jump to it in case of a null
3859 // method.
3860 __ testptr(rbx, rbx);
3861 __ jcc(Assembler::zero, no_such_method);
3862
3863 __ profile_called_method(rbx, rbcp, rdx);
3864 __ profile_arguments_type(rdx, rbx, rbcp, true);
3865
3866 // do the call
3867 // rcx: receiver
3868 // rbx,: Method*
3869 __ jump_from_interpreted(rbx, rdx);
3870 __ should_not_reach_here();
3871
3872 // exception handling code follows...
3873 // note: must restore interpreter registers to canonical
3874 // state for exception handling to work correctly!
3875
3876 __ bind(no_such_method);
3877 // throw exception
3878 __ pop(rbx); // pop return address (pushed by prepare_invoke)
3879 __ restore_bcp(); // rbcp must be correct for exception handler (was destroyed)
3880 __ restore_locals(); // make sure locals pointer is correct as well (was destroyed)
3881 __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_AbstractMethodError));
3882 // the call_VM checks for exception, so we should never return here.
3883 __ should_not_reach_here();
3884
3885 __ bind(no_such_interface);
3886 // throw exception
3887 __ pop(rbx); // pop return address (pushed by prepare_invoke)
3888 __ restore_bcp(); // rbcp must be correct for exception handler (was destroyed)
3889 __ restore_locals(); // make sure locals pointer is correct as well (was destroyed)
3890 __ call_VM(noreg, CAST_FROM_FN_PTR(address,
3891 InterpreterRuntime::throw_IncompatibleClassChangeError));
3892 // the call_VM checks for exception, so we should never return here.
3893 __ should_not_reach_here();
3894 }
3895
3896 void TemplateTable::invokehandle(int byte_no) {
3897 transition(vtos, vtos);
3898 assert(byte_no == f1_byte, "use this argument");
3899 const Register rbx_method = rbx;
3900 const Register rax_mtype = rax;
3901 const Register rcx_recv = rcx;
3902 const Register rdx_flags = rdx;
3903
3904 prepare_invoke(byte_no, rbx_method, rax_mtype, rcx_recv);
3905 __ verify_method_ptr(rbx_method);
3906 __ verify_oop(rcx_recv);
3907 __ null_check(rcx_recv);
3908
3909 // rax: MethodType object (from cpool->resolved_references[f1], if necessary)
3910 // rbx: MH.invokeExact_MT method (from f2)
3911
3912 // Note: rax_mtype is already pushed (if necessary) by prepare_invoke
3913
3914 // FIXME: profile the LambdaForm also
3915 __ profile_final_call(rax);
3916 __ profile_arguments_type(rdx, rbx_method, rbcp, true);
3917
3918 __ jump_from_interpreted(rbx_method, rdx);
3919 }
3920
3921 void TemplateTable::invokedynamic(int byte_no) {
3922 transition(vtos, vtos);
3923 assert(byte_no == f1_byte, "use this argument");
3924
3925 const Register rbx_method = rbx;
3926 const Register rax_callsite = rax;
3927
3928 prepare_invoke(byte_no, rbx_method, rax_callsite);
3929
3930 // rax: CallSite object (from cpool->resolved_references[f1])
3931 // rbx: MH.linkToCallSite method (from f2)
3932
3933 // Note: rax_callsite is already pushed by prepare_invoke
3934
3935 // %%% should make a type profile for any invokedynamic that takes a ref argument
3936 // profile this call
3937 __ profile_call(rbcp);
3938 __ profile_arguments_type(rdx, rbx_method, rbcp, false);
3939
3940 __ verify_oop(rax_callsite);
3941
3942 __ jump_from_interpreted(rbx_method, rdx);
3943 }
3944
3945 //-----------------------------------------------------------------------------
3946 // Allocation
3947
3948 void TemplateTable::_new() {
3949 transition(vtos, atos);
3950 __ get_unsigned_2_byte_index_at_bcp(rdx, 1);
3951 Label slow_case;
3952 Label slow_case_no_pop;
3953 Label done;
3954 Label initialize_header;
3955 Label initialize_object; // including clearing the fields
3956 Label allocate_shared;
3957
3958 __ get_cpool_and_tags(rcx, rax);
3959
3960 // Make sure the class we're about to instantiate has been resolved.
3961 // This is done before loading InstanceKlass to be consistent with the order
3962 // how Constant Pool is updated (see ConstantPool::klass_at_put)
3963 const int tags_offset = Array<u1>::base_offset_in_bytes();
3964 __ cmpb(Address(rax, rdx, Address::times_1, tags_offset), JVM_CONSTANT_Class);
3965 __ jcc(Assembler::notEqual, slow_case_no_pop);
3966
3967 // get InstanceKlass
3968 __ movptr(rcx, Address(rcx, rdx, Address::times_ptr, sizeof(ConstantPool)));
3969 __ push(rcx); // save the contexts of klass for initializing the header
3970
3971 // make sure klass is initialized & doesn't have finalizer
3972 // make sure klass is fully initialized
3973 __ cmpb(Address(rcx, InstanceKlass::init_state_offset()), InstanceKlass::fully_initialized);
3974 __ jcc(Assembler::notEqual, slow_case);
3975
3976 // get instance_size in InstanceKlass (scaled to a count of bytes)
3977 __ movl(rdx, Address(rcx, Klass::layout_helper_offset()));
3978 // test to see if it has a finalizer or is malformed in some way
3979 __ testl(rdx, Klass::_lh_instance_slow_path_bit);
3980 __ jcc(Assembler::notZero, slow_case);
3981
3982 //
3983 // Allocate the instance
3984 // 1) Try to allocate in the TLAB
3985 // 2) if fail and the object is large allocate in the shared Eden
3986 // 3) if the above fails (or is not applicable), go to a slow case
3987 // (creates a new TLAB, etc.)
3988
3989 const bool allow_shared_alloc =
3990 Universe::heap()->supports_inline_contig_alloc();
3991
3992 const Register thread = LP64_ONLY(r15_thread) NOT_LP64(rcx);
3993 #ifndef _LP64
3994 if (UseTLAB || allow_shared_alloc) {
3995 __ get_thread(thread);
3996 }
3997 #endif // _LP64
3998
3999 if (UseTLAB) {
4000 __ movptr(rax, Address(thread, in_bytes(JavaThread::tlab_top_offset())));
4001 __ lea(rbx, Address(rax, rdx, Address::times_1));
4002 __ cmpptr(rbx, Address(thread, in_bytes(JavaThread::tlab_end_offset())));
4003 __ jcc(Assembler::above, allow_shared_alloc ? allocate_shared : slow_case);
4004 __ movptr(Address(thread, in_bytes(JavaThread::tlab_top_offset())), rbx);
4005 if (ZeroTLAB) {
4006 // the fields have been already cleared
4007 __ jmp(initialize_header);
4008 } else {
4009 // initialize both the header and fields
4010 __ jmp(initialize_object);
4011 }
4012 }
4013
4014 // Allocation in the shared Eden, if allowed.
4015 //
4016 // rdx: instance size in bytes
4017 if (allow_shared_alloc) {
4018 __ bind(allocate_shared);
4019
4020 ExternalAddress heap_top((address)Universe::heap()->top_addr());
4021 ExternalAddress heap_end((address)Universe::heap()->end_addr());
4022
4023 Label retry;
4024 __ bind(retry);
4025 __ movptr(rax, heap_top);
4026 __ lea(rbx, Address(rax, rdx, Address::times_1));
4027 __ cmpptr(rbx, heap_end);
4028 __ jcc(Assembler::above, slow_case);
4029
4030 // Compare rax, with the top addr, and if still equal, store the new
4031 // top addr in rbx, at the address of the top addr pointer. Sets ZF if was
4032 // equal, and clears it otherwise. Use lock prefix for atomicity on MPs.
4033 //
4034 // rax,: object begin
4035 // rbx,: object end
4036 // rdx: instance size in bytes
4037 __ locked_cmpxchgptr(rbx, heap_top);
4038
4039 // if someone beat us on the allocation, try again, otherwise continue
4040 __ jcc(Assembler::notEqual, retry);
4041
4042 __ incr_allocated_bytes(thread, rdx, 0);
4043 }
4044
4045 if (UseTLAB || Universe::heap()->supports_inline_contig_alloc()) {
4046 // The object is initialized before the header. If the object size is
4047 // zero, go directly to the header initialization.
4048 __ bind(initialize_object);
4049 __ decrement(rdx, sizeof(oopDesc));
4050 __ jcc(Assembler::zero, initialize_header);
4051
4052 // Initialize topmost object field, divide rdx by 8, check if odd and
4053 // test if zero.
4054 __ xorl(rcx, rcx); // use zero reg to clear memory (shorter code)
4055 __ shrl(rdx, LogBytesPerLong); // divide by 2*oopSize and set carry flag if odd
4056
4057 // rdx must have been multiple of 8
4058 #ifdef ASSERT
4059 // make sure rdx was multiple of 8
4060 Label L;
4061 // Ignore partial flag stall after shrl() since it is debug VM
4062 __ jccb(Assembler::carryClear, L);
4063 __ stop("object size is not multiple of 2 - adjust this code");
4064 __ bind(L);
4065 // rdx must be > 0, no extra check needed here
4066 #endif
4067
4068 // initialize remaining object fields: rdx was a multiple of 8
4069 { Label loop;
4070 __ bind(loop);
4071 __ movptr(Address(rax, rdx, Address::times_8, sizeof(oopDesc) - 1*oopSize), rcx);
4072 NOT_LP64(__ movptr(Address(rax, rdx, Address::times_8, sizeof(oopDesc) - 2*oopSize), rcx));
4073 __ decrement(rdx);
4074 __ jcc(Assembler::notZero, loop);
4075 }
4076
4077 // initialize object header only.
4078 __ bind(initialize_header);
4079 if (UseBiasedLocking) {
4080 __ pop(rcx); // get saved klass back in the register.
4081 __ movptr(rbx, Address(rcx, Klass::prototype_header_offset()));
4082 __ movptr(Address(rax, oopDesc::mark_offset_in_bytes ()), rbx);
4083 } else {
4084 __ movptr(Address(rax, oopDesc::mark_offset_in_bytes ()),
4085 (intptr_t)markOopDesc::prototype()); // header
4086 __ pop(rcx); // get saved klass back in the register.
4087 }
4088 #ifdef _LP64
4089 __ xorl(rsi, rsi); // use zero reg to clear memory (shorter code)
4090 __ store_klass_gap(rax, rsi); // zero klass gap for compressed oops
4091 #endif
4092 __ store_klass(rax, rcx); // klass
4093
4094 {
4095 SkipIfEqual skip_if(_masm, &DTraceAllocProbes, 0);
4096 // Trigger dtrace event for fastpath
4097 __ push(atos);
4098 __ call_VM_leaf(
4099 CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_object_alloc), rax);
4100 __ pop(atos);
4101 }
4102
4103 __ jmp(done);
4104 }
4105
4106 // slow case
4107 __ bind(slow_case);
4108 __ pop(rcx); // restore stack pointer to what it was when we came in.
4109 __ bind(slow_case_no_pop);
4110
4111 Register rarg1 = LP64_ONLY(c_rarg1) NOT_LP64(rax);
4112 Register rarg2 = LP64_ONLY(c_rarg2) NOT_LP64(rdx);
4113
4114 __ get_constant_pool(rarg1);
4115 __ get_unsigned_2_byte_index_at_bcp(rarg2, 1);
4116 call_VM(rax, CAST_FROM_FN_PTR(address, InterpreterRuntime::_new), rarg1, rarg2);
4117 __ verify_oop(rax);
4118
4119 // continue
4120 __ bind(done);
4121 }
4122
4123 void TemplateTable::_vnew() {
4124 transition(vtos, qtos);
4125
4126 Register rarg1 = LP64_ONLY(c_rarg1) NOT_LP64(rcx);
4127 Register rarg2 = LP64_ONLY(c_rarg2) NOT_LP64(rdx);
4128 Register rarg3 = LP64_ONLY(c_rarg3) NOT_LP64(rbx);
4129
4130 __ get_unsigned_2_byte_index_at_bcp(rarg2, 1);
4131 __ get_constant_pool(rarg1);
4132 __ movptr(rarg3, rsp);
4133
4134 call_VM(rbx, CAST_FROM_FN_PTR(address, InterpreterRuntime::_vnew),
4135 rarg1, rarg2, rarg3);
4136 // new value type is returned in rbx
4137 // value factory argument size is returned in rax
4138 __ verify_oop(rbx);
4139 __ addptr(rsp, rax);
4140 __ movptr(rax, rbx);
4141
4142 }
4143
4144 void TemplateTable::newarray() {
4145 transition(itos, atos);
4146 Register rarg1 = LP64_ONLY(c_rarg1) NOT_LP64(rdx);
4147 __ load_unsigned_byte(rarg1, at_bcp(1));
4148 call_VM(rax, CAST_FROM_FN_PTR(address, InterpreterRuntime::newarray),
4149 rarg1, rax);
4150 }
4151
4152 void TemplateTable::anewarray() {
4153 transition(itos, atos);
4154
4155 Register rarg1 = LP64_ONLY(c_rarg1) NOT_LP64(rcx);
4156 Register rarg2 = LP64_ONLY(c_rarg2) NOT_LP64(rdx);
4157
4158 __ get_unsigned_2_byte_index_at_bcp(rarg2, 1);
4159 __ get_constant_pool(rarg1);
4160 call_VM(rax, CAST_FROM_FN_PTR(address, InterpreterRuntime::anewarray),
4161 rarg1, rarg2, rax);
4162 }
4163
4164 void TemplateTable::vnewarray() {
4165 transition(itos, atos);
4166
4167 Register rarg1 = LP64_ONLY(c_rarg1) NOT_LP64(rcx);
4168 Register rarg2 = LP64_ONLY(c_rarg2) NOT_LP64(rdx);
4169
4170 __ get_unsigned_2_byte_index_at_bcp(rarg2, 1);
4171 __ get_constant_pool(rarg1);
4172 call_VM(rax, CAST_FROM_FN_PTR(address, InterpreterRuntime::vnewarray),
4173 rarg1, rarg2, rax);
4174 }
4175
4176 void TemplateTable::arraylength() {
4177 transition(atos, itos);
4178 __ null_check(rax, arrayOopDesc::length_offset_in_bytes());
4179 __ movl(rax, Address(rax, arrayOopDesc::length_offset_in_bytes()));
4180 }
4181
4182 void TemplateTable::checkcast() {
4183 transition(atos, atos);
4184 Label done, is_null, ok_is_subtype, quicked, resolved;
4185 __ testptr(rax, rax); // object is in rax
4186 __ jcc(Assembler::zero, is_null);
4187
4188 // Get cpool & tags index
4189 __ get_cpool_and_tags(rcx, rdx); // rcx=cpool, rdx=tags array
4190 __ get_unsigned_2_byte_index_at_bcp(rbx, 1); // rbx=index
4191 // See if bytecode has already been quicked
4192 __ cmpb(Address(rdx, rbx,
4193 Address::times_1,
4194 Array<u1>::base_offset_in_bytes()),
4195 JVM_CONSTANT_Class);
4196 __ jcc(Assembler::equal, quicked);
4197 __ push(atos); // save receiver for result, and for GC
4198 call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::quicken_io_cc));
4199
4200 // vm_result_2 has metadata result
4201 #ifndef _LP64
4202 // borrow rdi from locals
4203 __ get_thread(rdi);
4204 __ get_vm_result_2(rax, rdi);
4205 __ restore_locals();
4206 #else
4207 __ get_vm_result_2(rax, r15_thread);
4208 #endif
4209
4210 __ pop_ptr(rdx); // restore receiver
4211 __ jmpb(resolved);
4212
4213 // Get superklass in rax and subklass in rbx
4214 __ bind(quicked);
4215 __ mov(rdx, rax); // Save object in rdx; rax needed for subtype check
4216 __ movptr(rax, Address(rcx, rbx,
4217 Address::times_ptr, sizeof(ConstantPool)));
4218
4219 __ bind(resolved);
4220 __ load_klass(rbx, rdx);
4221
4222 // Generate subtype check. Blows rcx, rdi. Object in rdx.
4223 // Superklass in rax. Subklass in rbx.
4224 __ gen_subtype_check(rbx, ok_is_subtype);
4225
4226 // Come here on failure
4227 __ push_ptr(rdx);
4228 // object is at TOS
4229 __ jump(ExternalAddress(Interpreter::_throw_ClassCastException_entry));
4230
4231 // Come here on success
4232 __ bind(ok_is_subtype);
4233 __ mov(rax, rdx); // Restore object in rdx
4234
4235 // Collect counts on whether this check-cast sees NULLs a lot or not.
4236 if (ProfileInterpreter) {
4237 __ jmp(done);
4238 __ bind(is_null);
4239 __ profile_null_seen(rcx);
4240 } else {
4241 __ bind(is_null); // same as 'done'
4242 }
4243 __ bind(done);
4244 }
4245
4246 void TemplateTable::instanceof() {
4247 transition(atos, itos);
4248 Label done, is_null, ok_is_subtype, quicked, resolved;
4249 __ testptr(rax, rax);
4250 __ jcc(Assembler::zero, is_null);
4251
4252 // Get cpool & tags index
4253 __ get_cpool_and_tags(rcx, rdx); // rcx=cpool, rdx=tags array
4254 __ get_unsigned_2_byte_index_at_bcp(rbx, 1); // rbx=index
4255 // See if bytecode has already been quicked
4256 __ cmpb(Address(rdx, rbx,
4257 Address::times_1,
4258 Array<u1>::base_offset_in_bytes()),
4259 JVM_CONSTANT_Class);
4260 __ jcc(Assembler::equal, quicked);
4261
4262 __ push(atos); // save receiver for result, and for GC
4263 call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::quicken_io_cc));
4264 // vm_result_2 has metadata result
4265
4266 #ifndef _LP64
4267 // borrow rdi from locals
4268 __ get_thread(rdi);
4269 __ get_vm_result_2(rax, rdi);
4270 __ restore_locals();
4271 #else
4272 __ get_vm_result_2(rax, r15_thread);
4273 #endif
4274
4275 __ pop_ptr(rdx); // restore receiver
4276 __ verify_oop(rdx);
4277 __ load_klass(rdx, rdx);
4278 __ jmpb(resolved);
4279
4280 // Get superklass in rax and subklass in rdx
4281 __ bind(quicked);
4282 __ load_klass(rdx, rax);
4283 __ movptr(rax, Address(rcx, rbx,
4284 Address::times_ptr, sizeof(ConstantPool)));
4285
4286 __ bind(resolved);
4287
4288 // Generate subtype check. Blows rcx, rdi
4289 // Superklass in rax. Subklass in rdx.
4290 __ gen_subtype_check(rdx, ok_is_subtype);
4291
4292 // Come here on failure
4293 __ xorl(rax, rax);
4294 __ jmpb(done);
4295 // Come here on success
4296 __ bind(ok_is_subtype);
4297 __ movl(rax, 1);
4298
4299 // Collect counts on whether this test sees NULLs a lot or not.
4300 if (ProfileInterpreter) {
4301 __ jmp(done);
4302 __ bind(is_null);
4303 __ profile_null_seen(rcx);
4304 } else {
4305 __ bind(is_null); // same as 'done'
4306 }
4307 __ bind(done);
4308 // rax = 0: obj == NULL or obj is not an instanceof the specified klass
4309 // rax = 1: obj != NULL and obj is an instanceof the specified klass
4310 }
4311
4312
4313 //----------------------------------------------------------------------------------------------------
4314 // Breakpoints
4315 void TemplateTable::_breakpoint() {
4316 // Note: We get here even if we are single stepping..
4317 // jbug insists on setting breakpoints at every bytecode
4318 // even if we are in single step mode.
4319
4320 transition(vtos, vtos);
4321
4322 Register rarg = LP64_ONLY(c_rarg1) NOT_LP64(rcx);
4323
4324 // get the unpatched byte code
4325 __ get_method(rarg);
4326 __ call_VM(noreg,
4327 CAST_FROM_FN_PTR(address,
4328 InterpreterRuntime::get_original_bytecode_at),
4329 rarg, rbcp);
4330 __ mov(rbx, rax); // why?
4331
4332 // post the breakpoint event
4333 __ get_method(rarg);
4334 __ call_VM(noreg,
4335 CAST_FROM_FN_PTR(address, InterpreterRuntime::_breakpoint),
4336 rarg, rbcp);
4337
4338 // complete the execution of original bytecode
4339 __ dispatch_only_normal(vtos);
4340 }
4341
4342 //-----------------------------------------------------------------------------
4343 // Exceptions
4344
4345 void TemplateTable::athrow() {
4346 transition(atos, vtos);
4347 __ null_check(rax);
4348 __ jump(ExternalAddress(Interpreter::throw_exception_entry()));
4349 }
4350
4351 //-----------------------------------------------------------------------------
4352 // Synchronization
4353 //
4354 // Note: monitorenter & exit are symmetric routines; which is reflected
4355 // in the assembly code structure as well
4356 //
4357 // Stack layout:
4358 //
4359 // [expressions ] <--- rsp = expression stack top
4360 // ..
4361 // [expressions ]
4362 // [monitor entry] <--- monitor block top = expression stack bot
4363 // ..
4364 // [monitor entry]
4365 // [frame data ] <--- monitor block bot
4366 // ...
4367 // [saved rbp ] <--- rbp
4368 void TemplateTable::monitorenter() {
4369 transition(atos, vtos);
4370
4371 // check for NULL object
4372 __ null_check(rax);
4373
4374 const Address monitor_block_top(
4375 rbp, frame::interpreter_frame_monitor_block_top_offset * wordSize);
4376 const Address monitor_block_bot(
4377 rbp, frame::interpreter_frame_initial_sp_offset * wordSize);
4378 const int entry_size = frame::interpreter_frame_monitor_size() * wordSize;
4379
4380 Label allocated;
4381
4382 Register rtop = LP64_ONLY(c_rarg3) NOT_LP64(rcx);
4383 Register rbot = LP64_ONLY(c_rarg2) NOT_LP64(rbx);
4384 Register rmon = LP64_ONLY(c_rarg1) NOT_LP64(rdx);
4385
4386 // initialize entry pointer
4387 __ xorl(rmon, rmon); // points to free slot or NULL
4388
4389 // find a free slot in the monitor block (result in rmon)
4390 {
4391 Label entry, loop, exit;
4392 __ movptr(rtop, monitor_block_top); // points to current entry,
4393 // starting with top-most entry
4394 __ lea(rbot, monitor_block_bot); // points to word before bottom
4395 // of monitor block
4396 __ jmpb(entry);
4397
4398 __ bind(loop);
4399 // check if current entry is used
4400 __ cmpptr(Address(rtop, BasicObjectLock::obj_offset_in_bytes()), (int32_t) NULL_WORD);
4401 // if not used then remember entry in rmon
4402 __ cmovptr(Assembler::equal, rmon, rtop); // cmov => cmovptr
4403 // check if current entry is for same object
4404 __ cmpptr(rax, Address(rtop, BasicObjectLock::obj_offset_in_bytes()));
4405 // if same object then stop searching
4406 __ jccb(Assembler::equal, exit);
4407 // otherwise advance to next entry
4408 __ addptr(rtop, entry_size);
4409 __ bind(entry);
4410 // check if bottom reached
4411 __ cmpptr(rtop, rbot);
4412 // if not at bottom then check this entry
4413 __ jcc(Assembler::notEqual, loop);
4414 __ bind(exit);
4415 }
4416
4417 __ testptr(rmon, rmon); // check if a slot has been found
4418 __ jcc(Assembler::notZero, allocated); // if found, continue with that one
4419
4420 // allocate one if there's no free slot
4421 {
4422 Label entry, loop;
4423 // 1. compute new pointers // rsp: old expression stack top
4424 __ movptr(rmon, monitor_block_bot); // rmon: old expression stack bottom
4425 __ subptr(rsp, entry_size); // move expression stack top
4426 __ subptr(rmon, entry_size); // move expression stack bottom
4427 __ mov(rtop, rsp); // set start value for copy loop
4428 __ movptr(monitor_block_bot, rmon); // set new monitor block bottom
4429 __ jmp(entry);
4430 // 2. move expression stack contents
4431 __ bind(loop);
4432 __ movptr(rbot, Address(rtop, entry_size)); // load expression stack
4433 // word from old location
4434 __ movptr(Address(rtop, 0), rbot); // and store it at new location
4435 __ addptr(rtop, wordSize); // advance to next word
4436 __ bind(entry);
4437 __ cmpptr(rtop, rmon); // check if bottom reached
4438 __ jcc(Assembler::notEqual, loop); // if not at bottom then
4439 // copy next word
4440 }
4441
4442 // call run-time routine
4443 // rmon: points to monitor entry
4444 __ bind(allocated);
4445
4446 // Increment bcp to point to the next bytecode, so exception
4447 // handling for async. exceptions work correctly.
4448 // The object has already been poped from the stack, so the
4449 // expression stack looks correct.
4450 __ increment(rbcp);
4451
4452 // store object
4453 __ movptr(Address(rmon, BasicObjectLock::obj_offset_in_bytes()), rax);
4454 __ lock_object(rmon);
4455
4456 // check to make sure this monitor doesn't cause stack overflow after locking
4457 __ save_bcp(); // in case of exception
4458 __ generate_stack_overflow_check(0);
4459
4460 // The bcp has already been incremented. Just need to dispatch to
4461 // next instruction.
4462 __ dispatch_next(vtos);
4463 }
4464
4465 void TemplateTable::monitorexit() {
4466 transition(atos, vtos);
4467
4468 // check for NULL object
4469 __ null_check(rax);
4470
4471 const Address monitor_block_top(
4472 rbp, frame::interpreter_frame_monitor_block_top_offset * wordSize);
4473 const Address monitor_block_bot(
4474 rbp, frame::interpreter_frame_initial_sp_offset * wordSize);
4475 const int entry_size = frame::interpreter_frame_monitor_size() * wordSize;
4476
4477 Register rtop = LP64_ONLY(c_rarg1) NOT_LP64(rdx);
4478 Register rbot = LP64_ONLY(c_rarg2) NOT_LP64(rbx);
4479
4480 Label found;
4481
4482 // find matching slot
4483 {
4484 Label entry, loop;
4485 __ movptr(rtop, monitor_block_top); // points to current entry,
4486 // starting with top-most entry
4487 __ lea(rbot, monitor_block_bot); // points to word before bottom
4488 // of monitor block
4489 __ jmpb(entry);
4490
4491 __ bind(loop);
4492 // check if current entry is for same object
4493 __ cmpptr(rax, Address(rtop, BasicObjectLock::obj_offset_in_bytes()));
4494 // if same object then stop searching
4495 __ jcc(Assembler::equal, found);
4496 // otherwise advance to next entry
4497 __ addptr(rtop, entry_size);
4498 __ bind(entry);
4499 // check if bottom reached
4500 __ cmpptr(rtop, rbot);
4501 // if not at bottom then check this entry
4502 __ jcc(Assembler::notEqual, loop);
4503 }
4504
4505 // error handling. Unlocking was not block-structured
4506 __ call_VM(noreg, CAST_FROM_FN_PTR(address,
4507 InterpreterRuntime::throw_illegal_monitor_state_exception));
4508 __ should_not_reach_here();
4509
4510 // call run-time routine
4511 __ bind(found);
4512 __ push_ptr(rax); // make sure object is on stack (contract with oopMaps)
4513 __ unlock_object(rtop);
4514 __ pop_ptr(rax); // discard object
4515 }
4516
4517 // Wide instructions
4518 void TemplateTable::wide() {
4519 transition(vtos, vtos);
4520 __ load_unsigned_byte(rbx, at_bcp(1));
4521 ExternalAddress wtable((address)Interpreter::_wentry_point);
4522 __ jump(ArrayAddress(wtable, Address(noreg, rbx, Address::times_ptr)));
4523 // Note: the rbcp increment step is part of the individual wide bytecode implementations
4524 }
4525
4526 // Multi arrays
4527 void TemplateTable::multianewarray() {
4528 transition(vtos, atos);
4529
4530 Register rarg = LP64_ONLY(c_rarg1) NOT_LP64(rax);
4531 __ load_unsigned_byte(rax, at_bcp(3)); // get number of dimensions
4532 // last dim is on top of stack; we want address of first one:
4533 // first_addr = last_addr + (ndims - 1) * stackElementSize - 1*wordsize
4534 // the latter wordSize to point to the beginning of the array.
4535 __ lea(rarg, Address(rsp, rax, Interpreter::stackElementScale(), -wordSize));
4536 call_VM(rax, CAST_FROM_FN_PTR(address, InterpreterRuntime::multianewarray), rarg);
4537 __ load_unsigned_byte(rbx, at_bcp(3));
4538 __ lea(rsp, Address(rsp, rbx, Interpreter::stackElementScale())); // get rid of counts
4539 }