/*
 * Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "asm/macroAssembler.hpp"
#include "interpreter/interpreter.hpp"
#include "interpreter/interpreterRuntime.hpp"
#include "interpreter/interp_masm.hpp"
#include "interpreter/templateTable.hpp"
#include "memory/universe.inline.hpp"
#include "oops/methodData.hpp"
#include "oops/objArrayKlass.hpp"
#include "oops/oop.inline.hpp"
#include "prims/methodHandles.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubRoutines.hpp"
#include "runtime/synchronizer.hpp"
#include "utilities/macros.hpp"

#ifndef CC_INTERP
#define __ _masm->

//----------------------------------------------------------------------------------------------------
// Platform-dependent initialization

void TemplateTable::pd_initialize() {
  // No i486 specific initialization
}

//----------------------------------------------------------------------------------------------------
// Address computation

// local variables
static inline Address iaddress(int n) {
  return Address(rdi, Interpreter::local_offset_in_bytes(n));
}

static inline Address laddress(int n) { return iaddress(n + 1); }
static inline Address haddress(int n) { return iaddress(n + 0); }
static inline Address faddress(int n) { return iaddress(n); }
static inline Address daddress(int n) { return laddress(n); }
static inline Address aaddress(int n) { return iaddress(n); }

static inline Address iaddress(Register r) {
  return Address(rdi, r, Interpreter::stackElementScale());
}
static inline Address laddress(Register r) {
  return Address(rdi, r, Interpreter::stackElementScale(), Interpreter::local_offset_in_bytes(1));
}
static inline Address haddress(Register r) {
  return Address(rdi, r, Interpreter::stackElementScale(), Interpreter::local_offset_in_bytes(0));
}

static inline Address faddress(Register r) { return iaddress(r); }
static inline Address daddress(Register r) { return laddress(r); }
static inline Address aaddress(Register r) { return iaddress(r); }
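
// An informal example of the layout assumed above: rdi points at local 0
// and locals grow toward lower addresses, so local n lives at (roughly)
// rdi - n*wordSize.  A two-word long at index n keeps its low half in
// slot n + 1 (the lower address) and its high half in slot n, which is
// why laddress(n) is iaddress(n + 1) and haddress(n) is iaddress(n + 0).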

// expression stack
// (Note: Must not use symmetric equivalents at_rsp_m1/2 since they store
// data beyond the rsp which is potentially unsafe in an MT environment;
// an interrupt may overwrite that data.)
static inline Address at_rsp() {
  return Address(rsp, 0);
}

// At the top of the Java expression stack, which may differ from rsp();
// it does not for category 1 values.
static inline Address at_tos() {
  Address tos = Address(rsp, Interpreter::expr_offset_in_bytes(0));
  return tos;
}

static inline Address at_tos_p1() {
  return Address(rsp, Interpreter::expr_offset_in_bytes(1));
}

static inline Address at_tos_p2() {
  return Address(rsp, Interpreter::expr_offset_in_bytes(2));
}

// Condition conversion
static Assembler::Condition j_not(TemplateTable::Condition cc) {
  switch (cc) {
    case TemplateTable::equal        : return Assembler::notEqual;
    case TemplateTable::not_equal    : return Assembler::equal;
    case TemplateTable::less         : return Assembler::greaterEqual;
    case TemplateTable::less_equal   : return Assembler::greater;
    case TemplateTable::greater      : return Assembler::lessEqual;
    case TemplateTable::greater_equal: return Assembler::less;
  }
  ShouldNotReachHere();
  return Assembler::zero;
}


//----------------------------------------------------------------------------------------------------
// Miscellaneous helper routines

// Store an oop (or NULL) at the address described by obj.
// If val == noreg, this means store a NULL.

static void do_oop_store(InterpreterMacroAssembler* _masm,
                         Address obj,
                         Register val,
                         BarrierSet::Name barrier,
                         bool precise) {
  assert(val == noreg || val == rax, "parameter is just for looks");
  switch (barrier) {
#if INCLUDE_ALL_GCS
    case BarrierSet::G1SATBCT:
    case BarrierSet::G1SATBCTLogging:
      {
        // flatten object address if needed
        // We do it regardless of precise because we need the registers
        if (obj.index() == noreg && obj.disp() == 0) {
          if (obj.base() != rdx) {
            __ movl(rdx, obj.base());
          }
        } else {
          __ leal(rdx, obj);
        }
        __ get_thread(rcx);
        __ save_bcp();
        __ g1_write_barrier_pre(rdx /* obj */,
                                rbx /* pre_val */,
                                rcx /* thread */,
                                rsi /* tmp */,
                                val != noreg /* tosca_live */,
                                false /* expand_call */);

        // Do the actual store
        // noreg means NULL
        if (val == noreg) {
          __ movptr(Address(rdx, 0), NULL_WORD);
          // No post barrier for NULL
        } else {
          __ movl(Address(rdx, 0), val);
          __ g1_write_barrier_post(rdx /* store_adr */,
                                   val /* new_val */,
                                   rcx /* thread */,
                                   rbx /* tmp */,
                                   rsi /* tmp2 */);
        }
        __ restore_bcp();
      }
      break;
#endif // INCLUDE_ALL_GCS
    case BarrierSet::CardTableModRef:
    case BarrierSet::CardTableExtension:
      {
        if (val == noreg) {
          __ movptr(obj, NULL_WORD);
        } else {
          __ movl(obj, val);
          // flatten object address if needed
          if (!precise || (obj.index() == noreg && obj.disp() == 0)) {
            __ store_check(obj.base());
          } else {
            __ leal(rdx, obj);
            __ store_check(rdx);
          }
        }
      }
      break;
    case BarrierSet::ModRef:
    case BarrierSet::Other:
      if (val == noreg) {
        __ movptr(obj, NULL_WORD);
      } else {
        __ movl(obj, val);
      }
      break;
    default:
      ShouldNotReachHere();
  }
}
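
// A representative call site (see aastore below) flattens the element
// address itself and lets this helper pick the write-barrier flavor:
//   do_oop_store(_masm, Address(rdx, 0), rax, _bs->kind(), true);
// a null store instead passes noreg for val.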

Address TemplateTable::at_bcp(int offset) {
  assert(_desc->uses_bcp(), "inconsistent uses_bcp information");
  return Address(rsi, offset);
}


void TemplateTable::patch_bytecode(Bytecodes::Code bc, Register bc_reg,
                                   Register temp_reg, bool load_bc_into_bc_reg/*=true*/,
                                   int byte_no) {
  if (!RewriteBytecodes)  return;
  Label L_patch_done;

  switch (bc) {
    case Bytecodes::_fast_aputfield:
    case Bytecodes::_fast_bputfield:
    case Bytecodes::_fast_cputfield:
    case Bytecodes::_fast_dputfield:
    case Bytecodes::_fast_fputfield:
    case Bytecodes::_fast_iputfield:
    case Bytecodes::_fast_lputfield:
    case Bytecodes::_fast_sputfield:
      {
        // We skip bytecode quickening for putfield instructions when
        // the put_code written to the constant pool cache is zero.
        // This is required so that every execution of this instruction
        // calls out to InterpreterRuntime::resolve_get_put to do
        // additional, required work.
        assert(byte_no == f1_byte || byte_no == f2_byte, "byte_no out of range");
        assert(load_bc_into_bc_reg, "we use bc_reg as temp");
        __ get_cache_and_index_and_bytecode_at_bcp(bc_reg, temp_reg, temp_reg, byte_no, 1);
        __ movl(bc_reg, bc);
        __ cmpl(temp_reg, (int) 0);
        __ jcc(Assembler::zero, L_patch_done);  // don't patch
      }
      break;
    default:
      assert(byte_no == -1, "sanity");
      // the pair bytecodes have already done the load.
      if (load_bc_into_bc_reg) {
        __ movl(bc_reg, bc);
      }
  }

  if (JvmtiExport::can_post_breakpoint()) {
    Label L_fast_patch;
    // if a breakpoint is present we can't rewrite the stream directly
    __ movzbl(temp_reg, at_bcp(0));
    __ cmpl(temp_reg, Bytecodes::_breakpoint);
    __ jcc(Assembler::notEqual, L_fast_patch);
    __ get_method(temp_reg);
    // Let breakpoint table handling rewrite to quicker bytecode
    __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::set_original_bytecode_at), temp_reg, rsi, bc_reg);
#ifndef ASSERT
    __ jmpb(L_patch_done);
#else
    __ jmp(L_patch_done);
#endif
    __ bind(L_fast_patch);
  }

#ifdef ASSERT
  Label L_okay;
  __ load_unsigned_byte(temp_reg, at_bcp(0));
  __ cmpl(temp_reg, (int)Bytecodes::java_code(bc));
  __ jccb(Assembler::equal, L_okay);
  __ cmpl(temp_reg, bc_reg);
  __ jcc(Assembler::equal, L_okay);
  __ stop("patching the wrong bytecode");
  __ bind(L_okay);
#endif

  // patch bytecode
  __ movb(at_bcp(0), bc_reg);
  __ bind(L_patch_done);
}
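
// Informally, the patch overwrites the opcode byte in the method's
// bytecode stream in place; for example, iload() below rewrites a plain
// iload to _fast_iload, so later executions dispatch straight to the
// quickened template without re-checking.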

//----------------------------------------------------------------------------------------------------
// Individual instructions

void TemplateTable::nop() {
  transition(vtos, vtos);
  // nothing to do
}

void TemplateTable::shouldnotreachhere() {
  transition(vtos, vtos);
  __ stop("shouldnotreachhere bytecode");
}


void TemplateTable::aconst_null() {
  transition(vtos, atos);
  __ xorptr(rax, rax);
}


void TemplateTable::iconst(int value) {
  transition(vtos, itos);
  if (value == 0) {
    __ xorptr(rax, rax);
  } else {
    __ movptr(rax, value);
  }
}


void TemplateTable::lconst(int value) {
  transition(vtos, ltos);
  if (value == 0) {
    __ xorptr(rax, rax);
  } else {
    __ movptr(rax, value);
  }
  assert(value >= 0, "check this code");
  __ xorptr(rdx, rdx);
}

void TemplateTable::fconst(int value) {
  transition(vtos, ftos);
         if (value == 0) { __ fldz();
  } else if (value == 1) { __ fld1();
  } else if (value == 2) { __ fld1(); __ fld1(); __ faddp(); // should find a better solution here
  } else                 { ShouldNotReachHere();
  }
}


void TemplateTable::dconst(int value) {
  transition(vtos, dtos);
         if (value == 0) { __ fldz();
  } else if (value == 1) { __ fld1();
  } else                 { ShouldNotReachHere();
  }
}


void TemplateTable::bipush() {
  transition(vtos, itos);
  __ load_signed_byte(rax, at_bcp(1));
}


void TemplateTable::sipush() {
  transition(vtos, itos);
  __ load_unsigned_short(rax, at_bcp(1));
  __ bswapl(rax);
  __ sarl(rax, 16);
}
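
// A worked example of the byte-order dance above: sipush 0x1234 stores
// its operand big-endian, bytes 0x12 0x34 at bcp+1 and bcp+2.  The
// little-endian 16-bit load produces 0x3412, bswapl turns that into
// 0x12340000, and the arithmetic shift right by 16 leaves the
// sign-extended operand 0x00001234 in rax.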

void TemplateTable::ldc(bool wide) {
  transition(vtos, vtos);
  Label call_ldc, notFloat, notClass, Done;

  if (wide) {
    __ get_unsigned_2_byte_index_at_bcp(rbx, 1);
  } else {
    __ load_unsigned_byte(rbx, at_bcp(1));
  }
  __ get_cpool_and_tags(rcx, rax);
  const int base_offset = ConstantPool::header_size() * wordSize;
  const int tags_offset = Array<u1>::base_offset_in_bytes();

  // get type
  __ xorptr(rdx, rdx);
  __ movb(rdx, Address(rax, rbx, Address::times_1, tags_offset));

  // unresolved class - get the resolved class
  __ cmpl(rdx, JVM_CONSTANT_UnresolvedClass);
  __ jccb(Assembler::equal, call_ldc);

  // unresolved class in error (resolution failed) - call into runtime
  // so that the same error from first resolution attempt is thrown.
  __ cmpl(rdx, JVM_CONSTANT_UnresolvedClassInError);
  __ jccb(Assembler::equal, call_ldc);

  // resolved class - need to call vm to get java mirror of the class
  __ cmpl(rdx, JVM_CONSTANT_Class);
  __ jcc(Assembler::notEqual, notClass);

  __ bind(call_ldc);
  __ movl(rcx, wide);
  call_VM(rax, CAST_FROM_FN_PTR(address, InterpreterRuntime::ldc), rcx);
  __ push(atos);
  __ jmp(Done);

  __ bind(notClass);
  __ cmpl(rdx, JVM_CONSTANT_Float);
  __ jccb(Assembler::notEqual, notFloat);
  // ftos
  __ fld_s(Address(rcx, rbx, Address::times_ptr, base_offset));
  __ push(ftos);
  __ jmp(Done);

  __ bind(notFloat);
#ifdef ASSERT
  { Label L;
    __ cmpl(rdx, JVM_CONSTANT_Integer);
    __ jcc(Assembler::equal, L);
    // String and Object are rewritten to fast_aldc
    __ stop("unexpected tag type in ldc");
    __ bind(L);
  }
#endif
  // itos JVM_CONSTANT_Integer only
  __ movl(rax, Address(rcx, rbx, Address::times_ptr, base_offset));
  __ push(itos);
  __ bind(Done);
}

// Fast path for caching oop constants.
void TemplateTable::fast_aldc(bool wide) {
  transition(vtos, atos);

  Register result = rax;
  Register tmp = rdx;
  int index_size = wide ? sizeof(u2) : sizeof(u1);

  Label resolved;

  // We are resolved if the resolved reference cache entry contains a
  // non-null object (String, MethodType, etc.)
  assert_different_registers(result, tmp);
  __ get_cache_index_at_bcp(tmp, 1, index_size);
  __ load_resolved_reference_at_index(result, tmp);
  __ testl(result, result);
  __ jcc(Assembler::notZero, resolved);

  address entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_ldc);

  // first time invocation - must resolve first
  __ movl(tmp, (int)bytecode());
  __ call_VM(result, entry, tmp);

  __ bind(resolved);

  if (VerifyOops) {
    __ verify_oop(result);
  }
}

void TemplateTable::ldc2_w() {
  transition(vtos, vtos);
  Label Long, Done;
  __ get_unsigned_2_byte_index_at_bcp(rbx, 1);

  __ get_cpool_and_tags(rcx, rax);
  const int base_offset = ConstantPool::header_size() * wordSize;
  const int tags_offset = Array<u1>::base_offset_in_bytes();

  // get type
  __ cmpb(Address(rax, rbx, Address::times_1, tags_offset), JVM_CONSTANT_Double);
  __ jccb(Assembler::notEqual, Long);
  // dtos
  __ fld_d(Address(rcx, rbx, Address::times_ptr, base_offset));
  __ push(dtos);
  __ jmpb(Done);

  __ bind(Long);
  // ltos
  __ movptr(rax, Address(rcx, rbx, Address::times_ptr, base_offset + 0 * wordSize));
  NOT_LP64(__ movptr(rdx, Address(rcx, rbx, Address::times_ptr, base_offset + 1 * wordSize)));

  __ push(ltos);

  __ bind(Done);
}


void TemplateTable::locals_index(Register reg, int offset) {
  __ load_unsigned_byte(reg, at_bcp(offset));
  __ negptr(reg);
}
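
// The negation above pairs with iaddress(Register) and friends: locals
// sit at rdi and grow toward lower addresses, so scaling the negated
// index by stackElementScale() reaches local n at (roughly)
// rdi - n * wordSize.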


void TemplateTable::iload() {
  transition(vtos, itos);
  if (RewriteFrequentPairs) {
    Label rewrite, done;

    // get next byte
    __ load_unsigned_byte(rbx, at_bcp(Bytecodes::length_for(Bytecodes::_iload)));
    // if _iload, wait to rewrite to iload2.  We only want to rewrite the
    // last two iloads in a pair.  Seeing _fast_iload next means the
    // following bytecode was itself an iload (already rewritten), so the
    // current iload and the next one form an iload pair.
    __ cmpl(rbx, Bytecodes::_iload);
    __ jcc(Assembler::equal, done);

    __ cmpl(rbx, Bytecodes::_fast_iload);
    __ movl(rcx, Bytecodes::_fast_iload2);
    __ jccb(Assembler::equal, rewrite);

    // if _caload, rewrite to fast_icaload
    __ cmpl(rbx, Bytecodes::_caload);
    __ movl(rcx, Bytecodes::_fast_icaload);
    __ jccb(Assembler::equal, rewrite);

    // rewrite so iload doesn't check again.
    __ movl(rcx, Bytecodes::_fast_iload);

    // rewrite
    // rcx: fast bytecode
    __ bind(rewrite);
    patch_bytecode(Bytecodes::_iload, rcx, rbx, false);
    __ bind(done);
  }

  // Get the local value into tos
  locals_index(rbx);
  __ movl(rax, iaddress(rbx));
}
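
// Rewrite summary for an iload at bcp, driven by the next bytecode:
//   _iload        -> leave as is (the later iload completes the pair)
//   _fast_iload   -> _fast_iload2  (loads both locals at once)
//   _caload       -> _fast_icaload (fused iload + caload)
//   anything else -> _fast_iload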


void TemplateTable::fast_iload2() {
  transition(vtos, itos);
  locals_index(rbx);
  __ movl(rax, iaddress(rbx));
  __ push(itos);
  locals_index(rbx, 3);
  __ movl(rax, iaddress(rbx));
}

void TemplateTable::fast_iload() {
  transition(vtos, itos);
  locals_index(rbx);
  __ movl(rax, iaddress(rbx));
}


void TemplateTable::lload() {
  transition(vtos, ltos);
  locals_index(rbx);
  __ movptr(rax, laddress(rbx));
  NOT_LP64(__ movl(rdx, haddress(rbx)));
}


void TemplateTable::fload() {
  transition(vtos, ftos);
  locals_index(rbx);
  __ fld_s(faddress(rbx));
}


void TemplateTable::dload() {
  transition(vtos, dtos);
  locals_index(rbx);
  __ fld_d(daddress(rbx));
}


void TemplateTable::aload() {
  transition(vtos, atos);
  locals_index(rbx);
  __ movptr(rax, aaddress(rbx));
}


void TemplateTable::locals_index_wide(Register reg) {
  __ load_unsigned_short(reg, at_bcp(2));
  __ bswapl(reg);
  __ shrl(reg, 16);
  __ negptr(reg);
}


void TemplateTable::wide_iload() {
  transition(vtos, itos);
  locals_index_wide(rbx);
  __ movl(rax, iaddress(rbx));
}


void TemplateTable::wide_lload() {
  transition(vtos, ltos);
  locals_index_wide(rbx);
  __ movptr(rax, laddress(rbx));
  NOT_LP64(__ movl(rdx, haddress(rbx)));
}


void TemplateTable::wide_fload() {
  transition(vtos, ftos);
  locals_index_wide(rbx);
  __ fld_s(faddress(rbx));
}


void TemplateTable::wide_dload() {
  transition(vtos, dtos);
  locals_index_wide(rbx);
  __ fld_d(daddress(rbx));
}


void TemplateTable::wide_aload() {
  transition(vtos, atos);
  locals_index_wide(rbx);
  __ movptr(rax, aaddress(rbx));
}

void TemplateTable::index_check(Register array, Register index) {
  // Pop ptr into array
  __ pop_ptr(array);
  index_check_without_pop(array, index);
}

void TemplateTable::index_check_without_pop(Register array, Register index) {
  // destroys rbx
  // check array
  __ null_check(array, arrayOopDesc::length_offset_in_bytes());
  LP64_ONLY(__ movslq(index, index));
  // check index
  __ cmpl(index, Address(array, arrayOopDesc::length_offset_in_bytes()));
  if (index != rbx) {
    // convention: move the aberrant index into rbx for the exception message
    assert(rbx != array, "different registers");
    __ mov(rbx, index);
  }
  __ jump_cc(Assembler::aboveEqual,
             ExternalAddress(Interpreter::_throw_ArrayIndexOutOfBoundsException_entry));
}
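
// Note the unsigned branch above: after the cmpl, aboveEqual treats a
// negative index as a very large unsigned value, so the single branch
// catches both index < 0 and index >= length.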


void TemplateTable::iaload() {
  transition(itos, itos);
  // rdx: array
  index_check(rdx, rax);  // kills rbx
  // rax: index
  __ movl(rax, Address(rdx, rax, Address::times_4, arrayOopDesc::base_offset_in_bytes(T_INT)));
}


void TemplateTable::laload() {
  transition(itos, ltos);
  // rax: index
  // rdx: array
  index_check(rdx, rax);
  __ mov(rbx, rax);
  // rbx: index
  __ movptr(rax, Address(rdx, rbx, Address::times_8, arrayOopDesc::base_offset_in_bytes(T_LONG) + 0 * wordSize));
  NOT_LP64(__ movl(rdx, Address(rdx, rbx, Address::times_8, arrayOopDesc::base_offset_in_bytes(T_LONG) + 1 * wordSize)));
}


void TemplateTable::faload() {
  transition(itos, ftos);
  // rdx: array
  index_check(rdx, rax);  // kills rbx
  // rax: index
  __ fld_s(Address(rdx, rax, Address::times_4, arrayOopDesc::base_offset_in_bytes(T_FLOAT)));
}


void TemplateTable::daload() {
  transition(itos, dtos);
  // rdx: array
  index_check(rdx, rax);  // kills rbx
  // rax: index
  __ fld_d(Address(rdx, rax, Address::times_8, arrayOopDesc::base_offset_in_bytes(T_DOUBLE)));
}


void TemplateTable::aaload() {
  transition(itos, atos);
  // rdx: array
  index_check(rdx, rax);  // kills rbx
  // rax: index
  __ movptr(rax, Address(rdx, rax, Address::times_ptr, arrayOopDesc::base_offset_in_bytes(T_OBJECT)));
}


void TemplateTable::baload() {
  transition(itos, itos);
  // rdx: array
  index_check(rdx, rax);  // kills rbx
  // rax: index
  // could generate better code for P5 - fix this at some point
  __ load_signed_byte(rbx, Address(rdx, rax, Address::times_1, arrayOopDesc::base_offset_in_bytes(T_BYTE)));
  __ mov(rax, rbx);
}


void TemplateTable::caload() {
  transition(itos, itos);
  // rdx: array
  index_check(rdx, rax);  // kills rbx
  // rax: index
  // could generate better code for P5 - may want to improve this at some point
  __ load_unsigned_short(rbx, Address(rdx, rax, Address::times_2, arrayOopDesc::base_offset_in_bytes(T_CHAR)));
  __ mov(rax, rbx);
}

// iload followed by caload frequent pair
void TemplateTable::fast_icaload() {
  transition(vtos, itos);
  // load index out of locals
  locals_index(rbx);
  __ movl(rax, iaddress(rbx));

  // rdx: array
  index_check(rdx, rax);
  // rax: index
  __ load_unsigned_short(rbx, Address(rdx, rax, Address::times_2, arrayOopDesc::base_offset_in_bytes(T_CHAR)));
  __ mov(rax, rbx);
}

void TemplateTable::saload() {
  transition(itos, itos);
  // rdx: array
  index_check(rdx, rax);  // kills rbx
  // rax: index
  // could generate better code for P5 - may want to improve this at some point
  __ load_signed_short(rbx, Address(rdx, rax, Address::times_2, arrayOopDesc::base_offset_in_bytes(T_SHORT)));
  __ mov(rax, rbx);
}


void TemplateTable::iload(int n) {
  transition(vtos, itos);
  __ movl(rax, iaddress(n));
}


void TemplateTable::lload(int n) {
  transition(vtos, ltos);
  __ movptr(rax, laddress(n));
  NOT_LP64(__ movptr(rdx, haddress(n)));
}


void TemplateTable::fload(int n) {
  transition(vtos, ftos);
  __ fld_s(faddress(n));
}


void TemplateTable::dload(int n) {
  transition(vtos, dtos);
  __ fld_d(daddress(n));
}


void TemplateTable::aload(int n) {
  transition(vtos, atos);
  __ movptr(rax, aaddress(n));
}

void TemplateTable::aload_0() {
  transition(vtos, atos);
  // According to bytecode histograms, the pairs:
  //
  // _aload_0, _fast_igetfield
  // _aload_0, _fast_agetfield
  // _aload_0, _fast_fgetfield
  //
  // occur frequently. If RewriteFrequentPairs is set, the (slow) _aload_0
  // bytecode checks if the next bytecode is either _fast_igetfield,
  // _fast_agetfield or _fast_fgetfield and then rewrites the
  // current bytecode into a pair bytecode; otherwise it rewrites the current
  // bytecode into _fast_aload_0 that doesn't do the pair check anymore.
  //
  // Note: If the next bytecode is _getfield, the rewrite must be delayed,
  //       otherwise we may miss an opportunity for a pair.
  //
  // Also rewrite frequent pairs
  //   aload_0, aload_1
  //   aload_0, iload_1
  // These pairs are short and occur often, which makes them the most
  // profitable bytecodes to rewrite.
  if (RewriteFrequentPairs) {
    Label rewrite, done;
    // get next byte
    __ load_unsigned_byte(rbx, at_bcp(Bytecodes::length_for(Bytecodes::_aload_0)));

    // do actual aload_0
    aload(0);

    // if _getfield then wait with rewrite
    __ cmpl(rbx, Bytecodes::_getfield);
    __ jcc(Assembler::equal, done);

    // if _igetfield then rewrite to _fast_iaccess_0
    assert(Bytecodes::java_code(Bytecodes::_fast_iaccess_0) == Bytecodes::_aload_0, "fix bytecode definition");
    __ cmpl(rbx, Bytecodes::_fast_igetfield);
    __ movl(rcx, Bytecodes::_fast_iaccess_0);
    __ jccb(Assembler::equal, rewrite);

    // if _agetfield then rewrite to _fast_aaccess_0
    assert(Bytecodes::java_code(Bytecodes::_fast_aaccess_0) == Bytecodes::_aload_0, "fix bytecode definition");
    __ cmpl(rbx, Bytecodes::_fast_agetfield);
    __ movl(rcx, Bytecodes::_fast_aaccess_0);
    __ jccb(Assembler::equal, rewrite);

    // if _fgetfield then rewrite to _fast_faccess_0
    assert(Bytecodes::java_code(Bytecodes::_fast_faccess_0) == Bytecodes::_aload_0, "fix bytecode definition");
    __ cmpl(rbx, Bytecodes::_fast_fgetfield);
    __ movl(rcx, Bytecodes::_fast_faccess_0);
    __ jccb(Assembler::equal, rewrite);

    // else rewrite to _fast_aload0
    assert(Bytecodes::java_code(Bytecodes::_fast_aload_0) == Bytecodes::_aload_0, "fix bytecode definition");
    __ movl(rcx, Bytecodes::_fast_aload_0);

    // rewrite
    // rcx: fast bytecode
    __ bind(rewrite);
    patch_bytecode(Bytecodes::_aload_0, rcx, rbx, false);

    __ bind(done);
  } else {
    aload(0);
  }
}

void TemplateTable::istore() {
  transition(itos, vtos);
  locals_index(rbx);
  __ movl(iaddress(rbx), rax);
}


void TemplateTable::lstore() {
  transition(ltos, vtos);
  locals_index(rbx);
  __ movptr(laddress(rbx), rax);
  NOT_LP64(__ movptr(haddress(rbx), rdx));
}


void TemplateTable::fstore() {
  transition(ftos, vtos);
  locals_index(rbx);
  __ fstp_s(faddress(rbx));
}


void TemplateTable::dstore() {
  transition(dtos, vtos);
  locals_index(rbx);
  __ fstp_d(daddress(rbx));
}


void TemplateTable::astore() {
  transition(vtos, vtos);
  __ pop_ptr(rax);
  locals_index(rbx);
  __ movptr(aaddress(rbx), rax);
}


void TemplateTable::wide_istore() {
  transition(vtos, vtos);
  __ pop_i(rax);
  locals_index_wide(rbx);
  __ movl(iaddress(rbx), rax);
}


void TemplateTable::wide_lstore() {
  transition(vtos, vtos);
  __ pop_l(rax, rdx);
  locals_index_wide(rbx);
  __ movptr(laddress(rbx), rax);
  NOT_LP64(__ movl(haddress(rbx), rdx));
}


void TemplateTable::wide_fstore() {
  wide_istore();
}


void TemplateTable::wide_dstore() {
  wide_lstore();
}


void TemplateTable::wide_astore() {
  transition(vtos, vtos);
  __ pop_ptr(rax);
  locals_index_wide(rbx);
  __ movptr(aaddress(rbx), rax);
}

void TemplateTable::iastore() {
  transition(itos, vtos);
  __ pop_i(rbx);
  // rax: value
  // rdx: array
  index_check(rdx, rbx);  // prefer index in rbx
  // rbx: index
  __ movl(Address(rdx, rbx, Address::times_4, arrayOopDesc::base_offset_in_bytes(T_INT)), rax);
}


void TemplateTable::lastore() {
  transition(ltos, vtos);
  __ pop_i(rbx);
  // rax: low(value)
  // rcx: array
  // rdx: high(value)
  index_check(rcx, rbx);  // prefer index in rbx
  // rbx: index
  __ movptr(Address(rcx, rbx, Address::times_8, arrayOopDesc::base_offset_in_bytes(T_LONG) + 0 * wordSize), rax);
  NOT_LP64(__ movl(Address(rcx, rbx, Address::times_8, arrayOopDesc::base_offset_in_bytes(T_LONG) + 1 * wordSize), rdx));
}


void TemplateTable::fastore() {
  transition(ftos, vtos);
  __ pop_i(rbx);
  // rdx: array
  // st0: value
  index_check(rdx, rbx);  // prefer index in rbx
  // rbx: index
  __ fstp_s(Address(rdx, rbx, Address::times_4, arrayOopDesc::base_offset_in_bytes(T_FLOAT)));
}


void TemplateTable::dastore() {
  transition(dtos, vtos);
  __ pop_i(rbx);
  // rdx: array
  // st0: value
  index_check(rdx, rbx);  // prefer index in rbx
  // rbx: index
  __ fstp_d(Address(rdx, rbx, Address::times_8, arrayOopDesc::base_offset_in_bytes(T_DOUBLE)));
}


void TemplateTable::aastore() {
  Label is_null, ok_is_subtype, done;
  transition(vtos, vtos);
  // stack: ..., array, index, value
  __ movptr(rax, at_tos());     // Value
  __ movl(rcx, at_tos_p1());    // Index
  __ movptr(rdx, at_tos_p2());  // Array

  Address element_address(rdx, rcx, Address::times_4, arrayOopDesc::base_offset_in_bytes(T_OBJECT));
  index_check_without_pop(rdx, rcx);  // kills rbx
  // do array store check - check for NULL value first
  __ testptr(rax, rax);
  __ jcc(Assembler::zero, is_null);

  // Move subklass into EBX
  __ load_klass(rbx, rax);
  // Move superklass into EAX
  __ load_klass(rax, rdx);
  __ movptr(rax, Address(rax, ObjArrayKlass::element_klass_offset()));
  // Compress array+index*wordSize+12 into a single register.  Frees ECX.
  __ lea(rdx, element_address);

  // Generate subtype check.  Blows ECX.  Resets EDI to locals.
  // Superklass in EAX.  Subklass in EBX.
  __ gen_subtype_check(rbx, ok_is_subtype);

  // Come here on failure
  // object is at TOS
  __ jump(ExternalAddress(Interpreter::_throw_ArrayStoreException_entry));

  // Come here on success
  __ bind(ok_is_subtype);

  // Get the value to store
  __ movptr(rax, at_rsp());
  // and store it with appropriate barrier
  do_oop_store(_masm, Address(rdx, 0), rax, _bs->kind(), true);

  __ jmp(done);

  // Have a NULL in EAX, EDX=array, ECX=index.  Store NULL at ary[idx]
  __ bind(is_null);
  __ profile_null_seen(rbx);

  // Store NULL (noreg means NULL to do_oop_store)
  do_oop_store(_masm, element_address, noreg, _bs->kind(), true);

  // Pop stack arguments
  __ bind(done);
  __ addptr(rsp, 3 * Interpreter::stackElementSize);
}
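
// aastore has three exits: a null value is stored without a subtype
// check (after profiling the null), a successful check stores through
// do_oop_store with the full write barrier, and a failed check throws
// ArrayStoreException with the offending object still at TOS.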


void TemplateTable::bastore() {
  transition(itos, vtos);
  __ pop_i(rbx);
  // rax: value
  // rdx: array
  index_check(rdx, rbx);  // prefer index in rbx
  // rbx: index
  __ movb(Address(rdx, rbx, Address::times_1, arrayOopDesc::base_offset_in_bytes(T_BYTE)), rax);
}


void TemplateTable::castore() {
  transition(itos, vtos);
  __ pop_i(rbx);
  // rax: value
  // rdx: array
  index_check(rdx, rbx);  // prefer index in rbx
  // rbx: index
  __ movw(Address(rdx, rbx, Address::times_2, arrayOopDesc::base_offset_in_bytes(T_CHAR)), rax);
}


void TemplateTable::sastore() {
  castore();
}


void TemplateTable::istore(int n) {
  transition(itos, vtos);
  __ movl(iaddress(n), rax);
}


void TemplateTable::lstore(int n) {
  transition(ltos, vtos);
  __ movptr(laddress(n), rax);
  NOT_LP64(__ movptr(haddress(n), rdx));
}


void TemplateTable::fstore(int n) {
  transition(ftos, vtos);
  __ fstp_s(faddress(n));
}


void TemplateTable::dstore(int n) {
  transition(dtos, vtos);
  __ fstp_d(daddress(n));
}


void TemplateTable::astore(int n) {
  transition(vtos, vtos);
  __ pop_ptr(rax);
  __ movptr(aaddress(n), rax);
}


void TemplateTable::pop() {
  transition(vtos, vtos);
  __ addptr(rsp, Interpreter::stackElementSize);
}


void TemplateTable::pop2() {
  transition(vtos, vtos);
  __ addptr(rsp, 2 * Interpreter::stackElementSize);
}


void TemplateTable::dup() {
  transition(vtos, vtos);
  // stack: ..., a
  __ load_ptr(0, rax);
  __ push_ptr(rax);
  // stack: ..., a, a
}


void TemplateTable::dup_x1() {
  transition(vtos, vtos);
  // stack: ..., a, b
  __ load_ptr( 0, rax);  // load b
  __ load_ptr( 1, rcx);  // load a
  __ store_ptr(1, rax);  // store b
  __ store_ptr(0, rcx);  // store a
  __ push_ptr(rax);      // push b
  // stack: ..., b, a, b
}


void TemplateTable::dup_x2() {
  transition(vtos, vtos);
  // stack: ..., a, b, c
  __ load_ptr( 0, rax);  // load c
  __ load_ptr( 2, rcx);  // load a
  __ store_ptr(2, rax);  // store c in a
  __ push_ptr(rax);      // push c
  // stack: ..., c, b, c, c
  __ load_ptr( 2, rax);  // load b
  __ store_ptr(2, rcx);  // store a in b
  // stack: ..., c, a, c, c
  __ store_ptr(1, rax);  // store b in c
  // stack: ..., c, a, b, c
}


void TemplateTable::dup2() {
  transition(vtos, vtos);
  // stack: ..., a, b
  __ load_ptr(1, rax);  // load a
  __ push_ptr(rax);     // push a
  __ load_ptr(1, rax);  // load b
  __ push_ptr(rax);     // push b
  // stack: ..., a, b, a, b
}


void TemplateTable::dup2_x1() {
  transition(vtos, vtos);
  // stack: ..., a, b, c
  __ load_ptr( 0, rcx);  // load c
  __ load_ptr( 1, rax);  // load b
  __ push_ptr(rax);      // push b
  __ push_ptr(rcx);      // push c
  // stack: ..., a, b, c, b, c
  __ store_ptr(3, rcx);  // store c in b
  // stack: ..., a, c, c, b, c
  __ load_ptr( 4, rcx);  // load a
  __ store_ptr(2, rcx);  // store a in 2nd c
  // stack: ..., a, c, a, b, c
  __ store_ptr(4, rax);  // store b in a
  // stack: ..., b, c, a, b, c
}


void TemplateTable::dup2_x2() {
  transition(vtos, vtos);
  // stack: ..., a, b, c, d
  __ load_ptr( 0, rcx);  // load d
  __ load_ptr( 1, rax);  // load c
  __ push_ptr(rax);      // push c
  __ push_ptr(rcx);      // push d
  // stack: ..., a, b, c, d, c, d
  __ load_ptr( 4, rax);  // load b
  __ store_ptr(2, rax);  // store b in d
  __ store_ptr(4, rcx);  // store d in b
  // stack: ..., a, d, c, b, c, d
  __ load_ptr( 5, rcx);  // load a
  __ load_ptr( 3, rax);  // load c
  __ store_ptr(3, rcx);  // store a in c
  __ store_ptr(5, rax);  // store c in a
  // stack: ..., c, d, a, b, c, d
}


void TemplateTable::swap() {
  transition(vtos, vtos);
  // stack: ..., a, b
  __ load_ptr( 1, rcx);  // load a
  __ load_ptr( 0, rax);  // load b
  __ store_ptr(0, rcx);  // store a in b
  __ store_ptr(1, rax);  // store b in a
  // stack: ..., b, a
}


void TemplateTable::iop2(Operation op) {
  transition(itos, itos);
  switch (op) {
    case add  : __ pop_i(rdx); __ addl (rax, rdx); break;
    case sub  : __ mov(rdx, rax); __ pop_i(rax); __ subl (rax, rdx); break;
    case mul  : __ pop_i(rdx); __ imull(rax, rdx); break;
    case _and : __ pop_i(rdx); __ andl (rax, rdx); break;
    case _or  : __ pop_i(rdx); __ orl  (rax, rdx); break;
    case _xor : __ pop_i(rdx); __ xorl (rax, rdx); break;
    case shl  : __ mov(rcx, rax); __ pop_i(rax); __ shll (rax); break; // implicit masking of lower 5 bits by Intel shift instr.
    case shr  : __ mov(rcx, rax); __ pop_i(rax); __ sarl (rax); break; // implicit masking of lower 5 bits by Intel shift instr.
    case ushr : __ mov(rcx, rax); __ pop_i(rax); __ shrl (rax); break; // implicit masking of lower 5 bits by Intel shift instr.
    default   : ShouldNotReachHere();
  }
}


void TemplateTable::lop2(Operation op) {
  transition(ltos, ltos);
  __ pop_l(rbx, rcx);
  switch (op) {
    case add  : __ addl(rax, rbx); __ adcl(rdx, rcx); break;
    case sub  : __ subl(rbx, rax); __ sbbl(rcx, rdx);
                __ mov (rax, rbx); __ mov (rdx, rcx); break;
    case _and : __ andl(rax, rbx); __ andl(rdx, rcx); break;
    case _or  : __ orl (rax, rbx); __ orl (rdx, rcx); break;
    case _xor : __ xorl(rax, rbx); __ xorl(rdx, rcx); break;
    default   : ShouldNotReachHere();
  }
}


void TemplateTable::idiv() {
  transition(itos, itos);
  __ mov(rcx, rax);
  __ pop_i(rax);
  // Note: could xor rax and rcx and compare with (-1 ^ min_int). If
  // they are not equal, one could do a normal division (no correction
  // needed), which may speed up this implementation for the common case.
  // (see also JVM spec., p.243 & p.271)
  __ corrected_idivl(rcx);
}


void TemplateTable::irem() {
  transition(itos, itos);
  __ mov(rcx, rax);
  __ pop_i(rax);
  // Note: could xor rax and rcx and compare with (-1 ^ min_int). If
  // they are not equal, one could do a normal division (no correction
  // needed), which may speed up this implementation for the common case.
  // (see also JVM spec., p.243 & p.271)
  __ corrected_idivl(rcx);
  __ mov(rax, rdx);
}
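
// corrected_idivl exists for the one input idiv cannot handle: min_int
// divided by -1 overflows the quotient.  Informally, the macro
// special-cases that pair to produce quotient min_int and remainder 0,
// as the JVM spec requires, and runs a plain cdq/idiv otherwise.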


void TemplateTable::lmul() {
  transition(ltos, ltos);
  __ pop_l(rbx, rcx);
  __ push(rcx); __ push(rbx);
  __ push(rdx); __ push(rax);
  __ lmul(2 * wordSize, 0);
  __ addptr(rsp, 4 * wordSize);  // take off temporaries
}


void TemplateTable::ldiv() {
  transition(ltos, ltos);
  __ pop_l(rbx, rcx);
  __ push(rcx); __ push(rbx);
  __ push(rdx); __ push(rax);
  // check if y = 0
  __ orl(rax, rdx);
  __ jump_cc(Assembler::zero,
             ExternalAddress(Interpreter::_throw_ArithmeticException_entry));
  __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::ldiv));
  __ addptr(rsp, 4 * wordSize);  // take off temporaries
}


void TemplateTable::lrem() {
  transition(ltos, ltos);
  __ pop_l(rbx, rcx);
  __ push(rcx); __ push(rbx);
  __ push(rdx); __ push(rax);
  // check if y = 0
  __ orl(rax, rdx);
  __ jump_cc(Assembler::zero,
             ExternalAddress(Interpreter::_throw_ArithmeticException_entry));
  __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::lrem));
  __ addptr(rsp, 4 * wordSize);
}


void TemplateTable::lshl() {
  transition(itos, ltos);
  __ movl(rcx, rax);   // get shift count
  __ pop_l(rax, rdx);  // get shift value
  __ lshl(rdx, rax);
}


void TemplateTable::lshr() {
  transition(itos, ltos);
  __ mov(rcx, rax);    // get shift count
  __ pop_l(rax, rdx);  // get shift value
  __ lshr(rdx, rax, true);
}


void TemplateTable::lushr() {
  transition(itos, ltos);
  __ mov(rcx, rax);    // get shift count
  __ pop_l(rax, rdx);  // get shift value
  __ lshr(rdx, rax);
}


void TemplateTable::fop2(Operation op) {
  transition(ftos, ftos);
  switch (op) {
    case add: __ fadd_s (at_rsp());                break;
    case sub: __ fsubr_s(at_rsp());                break;
    case mul: __ fmul_s (at_rsp());                break;
    case div: __ fdivr_s(at_rsp());                break;
    case rem: __ fld_s  (at_rsp()); __ fremr(rax); break;
    default : ShouldNotReachHere();
  }
  __ f2ieee();
  __ pop(rax);  // pop the float operand off the stack
}
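
// The reversed forms (fsubr_s, fdivr_s) are deliberate: the right-hand
// operand arrived in ST0 (ftos) while the left-hand operand is still on
// the expression stack at rsp, so e.g. fsubr_s computes
// mem - ST0 = value1 - value2, exactly what the bytecode requires.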


void TemplateTable::dop2(Operation op) {
  transition(dtos, dtos);

  switch (op) {
    case add: __ fadd_d (at_rsp()); break;
    case sub: __ fsubr_d(at_rsp()); break;
    case mul: {
      Label L_strict;
      Label L_join;
      const Address access_flags(rcx, Method::access_flags_offset());
      __ get_method(rcx);
      __ movl(rcx, access_flags);
      __ testl(rcx, JVM_ACC_STRICT);
      __ jccb(Assembler::notZero, L_strict);
      __ fmul_d (at_rsp());
      __ jmpb(L_join);
      __ bind(L_strict);
      __ fld_x(ExternalAddress(StubRoutines::addr_fpu_subnormal_bias1()));
      __ fmulp();
      __ fmul_d (at_rsp());
      __ fld_x(ExternalAddress(StubRoutines::addr_fpu_subnormal_bias2()));
      __ fmulp();
      __ bind(L_join);
      break;
    }
    case div: {
      Label L_strict;
      Label L_join;
      const Address access_flags(rcx, Method::access_flags_offset());
      __ get_method(rcx);
      __ movl(rcx, access_flags);
      __ testl(rcx, JVM_ACC_STRICT);
      __ jccb(Assembler::notZero, L_strict);
      __ fdivr_d(at_rsp());
      __ jmp(L_join);
      __ bind(L_strict);
      __ fld_x(ExternalAddress(StubRoutines::addr_fpu_subnormal_bias1()));
      __ fmul_d (at_rsp());
      __ fdivrp();
      __ fld_x(ExternalAddress(StubRoutines::addr_fpu_subnormal_bias2()));
      __ fmulp();
      __ bind(L_join);
      break;
    }
    case rem: __ fld_d (at_rsp()); __ fremr(rax); break;
    default : ShouldNotReachHere();
  }
  __ d2ieee();
  // Pop double precision number from rsp.
  __ pop(rax);
  __ pop(rdx);
}
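
// A sketch of why the strict path multiplies by the subnormal biases:
// the x87 stack computes in 80-bit extended precision, so a subnormal
// double can be rounded twice on the way back to 64 bits.  Scaling by
// fpu_subnormal_bias1 before the operation and by fpu_subnormal_bias2
// afterwards keeps results in the strict double range that
// JVM_ACC_STRICT (strictfp) mandates.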


void TemplateTable::ineg() {
  transition(itos, itos);
  __ negl(rax);
}


void TemplateTable::lneg() {
  transition(ltos, ltos);
  __ lneg(rdx, rax);
}


void TemplateTable::fneg() {
  transition(ftos, ftos);
  __ fchs();
}


void TemplateTable::dneg() {
  transition(dtos, dtos);
  __ fchs();
}


void TemplateTable::iinc() {
  transition(vtos, vtos);
  __ load_signed_byte(rdx, at_bcp(2));  // get constant
  locals_index(rbx);
  __ addl(iaddress(rbx), rdx);
}


void TemplateTable::wide_iinc() {
  transition(vtos, vtos);
  __ movl(rdx, at_bcp(4));  // get constant
  locals_index_wide(rbx);
  __ bswapl(rdx);  // swap bytes & sign-extend constant
  __ sarl(rdx, 16);
  __ addl(iaddress(rbx), rdx);
  // Note: should probably use only one movl to get both
  //       the index and the constant -> fix this
}


void TemplateTable::convert() {
  // Checking
#ifdef ASSERT
  { TosState tos_in  = ilgl;
    TosState tos_out = ilgl;
    switch (bytecode()) {
      case Bytecodes::_i2l: // fall through
      case Bytecodes::_i2f: // fall through
      case Bytecodes::_i2d: // fall through
      case Bytecodes::_i2b: // fall through
      case Bytecodes::_i2c: // fall through
      case Bytecodes::_i2s: tos_in = itos; break;
      case Bytecodes::_l2i: // fall through
      case Bytecodes::_l2f: // fall through
      case Bytecodes::_l2d: tos_in = ltos; break;
      case Bytecodes::_f2i: // fall through
      case Bytecodes::_f2l: // fall through
      case Bytecodes::_f2d: tos_in = ftos; break;
      case Bytecodes::_d2i: // fall through
      case Bytecodes::_d2l: // fall through
      case Bytecodes::_d2f: tos_in = dtos; break;
      default             : ShouldNotReachHere();
    }
    switch (bytecode()) {
      case Bytecodes::_l2i: // fall through
      case Bytecodes::_f2i: // fall through
      case Bytecodes::_d2i: // fall through
      case Bytecodes::_i2b: // fall through
      case Bytecodes::_i2c: // fall through
      case Bytecodes::_i2s: tos_out = itos; break;
      case Bytecodes::_i2l: // fall through
      case Bytecodes::_f2l: // fall through
      case Bytecodes::_d2l: tos_out = ltos; break;
      case Bytecodes::_i2f: // fall through
      case Bytecodes::_l2f: // fall through
      case Bytecodes::_d2f: tos_out = ftos; break;
      case Bytecodes::_i2d: // fall through
      case Bytecodes::_l2d: // fall through
      case Bytecodes::_f2d: tos_out = dtos; break;
      default             : ShouldNotReachHere();
    }
    transition(tos_in, tos_out);
  }
#endif // ASSERT

  // Conversion
  // (Note: use push(rcx)/pop(rcx) for 1/2-word stack-ptr manipulation)
  switch (bytecode()) {
    case Bytecodes::_i2l:
      __ extend_sign(rdx, rax);
      break;
    case Bytecodes::_i2f:
      __ push(rax);         // store int on tos
      __ fild_s(at_rsp());  // load int to ST0
      __ f2ieee();          // truncate to float size
      __ pop(rcx);          // adjust rsp
      break;
    case Bytecodes::_i2d:
      __ push(rax);         // add one slot for d2ieee()
      __ push(rax);         // store int on tos
      __ fild_s(at_rsp());  // load int to ST0
      __ d2ieee();          // truncate to double size
      __ pop(rcx);          // adjust rsp
      __ pop(rcx);
      break;
    case Bytecodes::_i2b:
      __ shll(rax, 24);     // truncate upper 24 bits
      __ sarl(rax, 24);     // and sign-extend byte
      LP64_ONLY(__ movsbl(rax, rax));
      break;
    case Bytecodes::_i2c:
      __ andl(rax, 0xFFFF); // truncate upper 16 bits
      LP64_ONLY(__ movzwl(rax, rax));
      break;
    case Bytecodes::_i2s:
      __ shll(rax, 16);     // truncate upper 16 bits
      __ sarl(rax, 16);     // and sign-extend short
      LP64_ONLY(__ movswl(rax, rax));
      break;
    case Bytecodes::_l2i:
      /* nothing to do */
      break;
    case Bytecodes::_l2f:
      __ push(rdx);         // store long on tos
      __ push(rax);
      __ fild_d(at_rsp());  // load long to ST0
      __ f2ieee();          // truncate to float size
      __ pop(rcx);          // adjust rsp
      __ pop(rcx);
      break;
    case Bytecodes::_l2d:
      __ push(rdx);         // store long on tos
      __ push(rax);
      __ fild_d(at_rsp());  // load long to ST0
      __ d2ieee();          // truncate to double size
      __ pop(rcx);          // adjust rsp
      __ pop(rcx);
      break;
    case Bytecodes::_f2i:
      __ push(rcx);         // reserve space for argument
      __ fstp_s(at_rsp());  // pass float argument on stack
      __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::f2i), 1);
      break;
    case Bytecodes::_f2l:
      __ push(rcx);         // reserve space for argument
      __ fstp_s(at_rsp());  // pass float argument on stack
      __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::f2l), 1);
      break;
    case Bytecodes::_f2d:
      /* nothing to do */
      break;
    case Bytecodes::_d2i:
      __ push(rcx);         // reserve space for argument
      __ push(rcx);
      __ fstp_d(at_rsp());  // pass double argument on stack
      __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::d2i), 2);
      break;
    case Bytecodes::_d2l:
      __ push(rcx);         // reserve space for argument
      __ push(rcx);
      __ fstp_d(at_rsp());  // pass double argument on stack
      __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::d2l), 2);
      break;
    case Bytecodes::_d2f:
      __ push(rcx);         // reserve space for f2ieee()
      __ f2ieee();          // truncate to float size
      __ pop(rcx);          // adjust rsp
      break;
    default :
      ShouldNotReachHere();
  }
}


void TemplateTable::lcmp() {
  transition(ltos, itos);
  // y = rdx:rax
  __ pop_l(rbx, rcx);              // get x = rcx:rbx
  __ lcmp2int(rcx, rbx, rdx, rax); // rcx := cmp(x, y)
  __ mov(rax, rcx);
}


void TemplateTable::float_cmp(bool is_float, int unordered_result) {
  if (is_float) {
    __ fld_s(at_rsp());
  } else {
    __ fld_d(at_rsp());
    __ pop(rdx);
  }
  __ pop(rcx);
  __ fcmp2int(rax, unordered_result < 0);
}
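
// unordered_result encodes the fcmpl/fcmpg (and dcmpl/dcmpg) split: a
// negative value makes a NaN comparison yield -1, a positive one yields
// +1, and fcmp2int folds that choice into the -1/0/+1 result in rax.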


void TemplateTable::branch(bool is_jsr, bool is_wide) {
  __ get_method(rcx);                // ECX holds method
  __ profile_taken_branch(rax, rbx); // EAX holds updated MDP, EBX holds bumped taken count

  const ByteSize be_offset = MethodCounters::backedge_counter_offset() +
                             InvocationCounter::counter_offset();
  const ByteSize inv_offset = MethodCounters::invocation_counter_offset() +
                              InvocationCounter::counter_offset();

  // Load up EDX with the branch displacement
  if (is_wide) {
    __ movl(rdx, at_bcp(1));
  } else {
    __ load_signed_short(rdx, at_bcp(1));
  }
  __ bswapl(rdx);
  if (!is_wide) __ sarl(rdx, 16);
  LP64_ONLY(__ movslq(rdx, rdx));


  // Handle all the JSR stuff here, then exit.
  // It's much shorter and cleaner than intermingling with the
  // non-JSR normal-branch stuff occurring below.
  if (is_jsr) {
    // Pre-load the next target bytecode into EBX
    __ load_unsigned_byte(rbx, Address(rsi, rdx, Address::times_1, 0));

    // compute return address as bci in rax
    __ lea(rax, at_bcp((is_wide ? 5 : 3) - in_bytes(ConstMethod::codes_offset())));
    __ subptr(rax, Address(rcx, Method::const_offset()));
    // Adjust the bcp in RSI by the displacement in EDX
    __ addptr(rsi, rdx);
    // Push return address
    __ push_i(rax);
    // jsr returns vtos
    __ dispatch_only_noverify(vtos);
    return;
  }

  // Normal (non-jsr) branch handling

  // Adjust the bcp in RSI by the displacement in EDX
  __ addptr(rsi, rdx);

  assert(UseLoopCounter || !UseOnStackReplacement, "on-stack-replacement requires loop counters");
  Label backedge_counter_overflow;
  Label profile_method;
  Label dispatch;
  if (UseLoopCounter) {
    // increment backedge counter for backward branches
    // rax: MDO
    // rbx: MDO bumped taken-count
    // rcx: method
    // rdx: target offset
    // rsi: target bcp
    // rdi: locals pointer
    __ testl(rdx, rdx);                    // check if forward or backward branch
    __ jcc(Assembler::positive, dispatch); // count only if backward branch

    // check if MethodCounters exists
    Label has_counters;
    __ movptr(rax, Address(rcx, Method::method_counters_offset()));
    __ testptr(rax, rax);
    __ jcc(Assembler::notZero, has_counters);
    __ push(rdx);
    __ push(rcx);
    __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::build_method_counters),
               rcx);
    __ pop(rcx);
    __ pop(rdx);
    __ movptr(rax, Address(rcx, Method::method_counters_offset()));
    __ testptr(rax, rax);
    __ jcc(Assembler::zero, dispatch);
    __ bind(has_counters);

    if (TieredCompilation) {
      Label no_mdo;
      int increment = InvocationCounter::count_increment;
      if (ProfileInterpreter) {
        // Are we profiling?
        __ movptr(rbx, Address(rcx, in_bytes(Method::method_data_offset())));
        __ testptr(rbx, rbx);
        __ jccb(Assembler::zero, no_mdo);
        // Increment the MDO backedge counter
        const Address mdo_backedge_counter(rbx, in_bytes(MethodData::backedge_counter_offset()) +
                                                in_bytes(InvocationCounter::counter_offset()));
        const Address mask(rbx, in_bytes(MethodData::backedge_mask_offset()));
        __ increment_mask_and_jump(mdo_backedge_counter, increment, mask,
                                   rax, false, Assembler::zero, &backedge_counter_overflow);
        __ jmp(dispatch);
      }
      __ bind(no_mdo);
      // Increment backedge counter in MethodCounters*
      __ movptr(rcx, Address(rcx, Method::method_counters_offset()));
      const Address mask(rcx, in_bytes(MethodCounters::backedge_mask_offset()));
      __ increment_mask_and_jump(Address(rcx, be_offset), increment, mask,
                                 rax, false, Assembler::zero, &backedge_counter_overflow);
    } else { // not TieredCompilation
      // increment counter
      __ movptr(rcx, Address(rcx, Method::method_counters_offset()));
      __ movl(rax, Address(rcx, be_offset));                  // load backedge counter
      __ incrementl(rax, InvocationCounter::count_increment); // increment counter
      __ movl(Address(rcx, be_offset), rax);                  // store counter

      __ movl(rax, Address(rcx, inv_offset));                 // load invocation counter

      __ andl(rax, InvocationCounter::count_mask_value);      // and the status bits
      __ addl(rax, Address(rcx, be_offset));                  // add both counters

      if (ProfileInterpreter) {
        // Test to see if we should create a method data oop
        __ cmp32(rax, Address(rcx, in_bytes(MethodCounters::interpreter_profile_limit_offset())));
        __ jcc(Assembler::less, dispatch);

        // if no method data exists, go to profile method
        __ test_method_data_pointer(rax, profile_method);

        if (UseOnStackReplacement) {
          // check for overflow against rbx, which is the MDO taken count
          __ cmp32(rbx, Address(rcx, in_bytes(MethodCounters::interpreter_backward_branch_limit_offset())));
          __ jcc(Assembler::below, dispatch);

          // When ProfileInterpreter is on, the backedge_count comes from the
          // MethodData*, which value does not get reset on the call to
          // frequency_counter_overflow().  To avoid excessive calls to the overflow
          // routine while the method is being compiled, add a second test to make
          // sure the overflow function is called only once every overflow_frequency.
          const int overflow_frequency = 1024;
          __ andptr(rbx, overflow_frequency - 1);
          __ jcc(Assembler::zero, backedge_counter_overflow);
        }
      } else {
        if (UseOnStackReplacement) {
          // check for overflow against rax, which is the sum of the counters
          __ cmp32(rax, Address(rcx, in_bytes(MethodCounters::interpreter_backward_branch_limit_offset())));
          __ jcc(Assembler::aboveEqual, backedge_counter_overflow);
        }
      }
    }
    __ bind(dispatch);
  }

  // Pre-load the next target bytecode into EBX
  __ load_unsigned_byte(rbx, Address(rsi, 0));

  // continue with the bytecode @ target
  // rax: return bci for jsr's, unused otherwise
  // rbx: target bytecode
  // rsi: target bcp
  __ dispatch_only(vtos);

  if (UseLoopCounter) {
    if (ProfileInterpreter) {
      // Out-of-line code to allocate method data oop.
      __ bind(profile_method);
      __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::profile_method));
      __ load_unsigned_byte(rbx, Address(rsi, 0));  // restore target bytecode
      __ set_method_data_pointer_for_bcp();
      __ jmp(dispatch);
    }

    if (UseOnStackReplacement) {

      // invocation counter overflow
      __ bind(backedge_counter_overflow);
      __ negptr(rdx);
      __ addptr(rdx, rsi);  // branch bcp
      call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::frequency_counter_overflow), rdx);
      __ load_unsigned_byte(rbx, Address(rsi, 0));  // restore target bytecode

      // rax: osr nmethod (osr ok) or NULL (osr not possible)
      // rbx: target bytecode
      // rdx: scratch
      // rdi: locals pointer
      // rsi: bcp
      __ testptr(rax, rax);               // test result
      __ jcc(Assembler::zero, dispatch);  // no osr if null
      // nmethod may have been invalidated (VM may block upon call_VM return)
      __ cmpb(Address(rax, nmethod::state_offset()), nmethod::in_use);
      __ jcc(Assembler::notEqual, dispatch);

      // We have the address of an on stack replacement routine in rax.
      // We need to prepare to execute the OSR method. First we must
      // migrate the locals and monitors off of the stack.

      __ mov(rbx, rax);  // save the nmethod

      __ get_thread(rcx);
      call_VM(noreg, CAST_FROM_FN_PTR(address, SharedRuntime::OSR_migration_begin));
      // rax is the OSR buffer, move it to the expected parameter location
      __ mov(rcx, rax);

      // pop the interpreter frame
      __ movptr(rdx, Address(rbp, frame::interpreter_frame_sender_sp_offset * wordSize)); // get sender sp
      __ leave();        // remove frame anchor
      __ pop(rdi);       // get return address
      __ mov(rsp, rdx);  // set sp to sender sp

      // Align stack pointer for compiled code (note that caller is
      // responsible for undoing this fixup by remembering the old SP
      // in an rbp-relative location)
      __ andptr(rsp, -(StackAlignmentInBytes));

      // push the (possibly adjusted) return address
      __ push(rdi);

      // and begin the OSR nmethod
      __ jmp(Address(rbx, nmethod::osr_entry_point_offset()));
    }
  }
}


void TemplateTable::if_0cmp(Condition cc) {
  transition(itos, vtos);
  // assume branch is more often taken than not (loops use backward branches)
  Label not_taken;
  __ testl(rax, rax);
  __ jcc(j_not(cc), not_taken);
  branch(false, false);
  __ bind(not_taken);
  __ profile_not_taken_branch(rax);
}


void TemplateTable::if_icmp(Condition cc) {
  transition(itos, vtos);
  // assume branch is more often taken than not (loops use backward branches)
  Label not_taken;
  __ pop_i(rdx);
  __ cmpl(rdx, rax);
  __ jcc(j_not(cc), not_taken);
  branch(false, false);
  __ bind(not_taken);
  __ profile_not_taken_branch(rax);
}


void TemplateTable::if_nullcmp(Condition cc) {
  transition(atos, vtos);
  // assume branch is more often taken than not (loops use backward branches)
  Label not_taken;
  __ testptr(rax, rax);
  __ jcc(j_not(cc), not_taken);
  branch(false, false);
  __ bind(not_taken);
  __ profile_not_taken_branch(rax);
}


void TemplateTable::if_acmp(Condition cc) {
  transition(atos, vtos);
  // assume branch is more often taken than not (loops use backward branches)
  Label not_taken;
  __ pop_ptr(rdx);
  __ cmpptr(rdx, rax);
  __ jcc(j_not(cc), not_taken);
  branch(false, false);
  __ bind(not_taken);
  __ profile_not_taken_branch(rax);
}


void TemplateTable::ret() {
  transition(vtos, vtos);
  locals_index(rbx);
  __ movptr(rbx, iaddress(rbx));  // get return bci, compute return bcp
  __ profile_ret(rbx, rcx);
  __ get_method(rax);
  __ movptr(rsi, Address(rax, Method::const_offset()));
  __ lea(rsi, Address(rsi, rbx, Address::times_1,
                      ConstMethod::codes_offset()));
  __ dispatch_next(vtos);
}


void TemplateTable::wide_ret() {
  transition(vtos, vtos);
  locals_index_wide(rbx);
  __ movptr(rbx, iaddress(rbx));  // get return bci, compute return bcp
  __ profile_ret(rbx, rcx);
  __ get_method(rax);
  __ movptr(rsi, Address(rax, Method::const_offset()));
  __ lea(rsi, Address(rsi, rbx, Address::times_1, ConstMethod::codes_offset()));
  __ dispatch_next(vtos);
}


void TemplateTable::tableswitch() {
  Label default_case, continue_execution;
  transition(itos, vtos);
  // align rsi
  __ lea(rbx, at_bcp(wordSize));
  __ andptr(rbx, -wordSize);
  // load lo & hi
  __ movl(rcx, Address(rbx, 1 * wordSize));
  __ movl(rdx, Address(rbx, 2 * wordSize));
  __ bswapl(rcx);
  __ bswapl(rdx);
  // check against lo & hi
  __ cmpl(rax, rcx);
  __ jccb(Assembler::less, default_case);
  __ cmpl(rax, rdx);
  __ jccb(Assembler::greater, default_case);
  // lookup dispatch offset
  __ subl(rax, rcx);
  __ movl(rdx, Address(rbx, rax, Address::times_4, 3 * BytesPerInt));
  __ profile_switch_case(rax, rbx, rcx);
  // continue execution
  __ bind(continue_execution);
  __ bswapl(rdx);
  __ load_unsigned_byte(rbx, Address(rsi, rdx, Address::times_1));
  __ addptr(rsi, rdx);
  __ dispatch_only(vtos);
  // handle default
  __ bind(default_case);
  __ profile_switch_default(rax);
  __ movl(rdx, Address(rbx, 0));
  __ jmp(continue_execution);
}
1867
1868
1869 void TemplateTable::lookupswitch() {
1870 transition(itos, itos);
1871 __ stop("lookupswitch bytecode should have been rewritten");
1872 }
1873
1874
1875 void TemplateTable::fast_linearswitch() {
1876 transition(itos, vtos);
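  // lookupswitch operand layout, per the JVM spec (32-bit operands are
  // big-endian; the key in rax is bswapped once instead of each entry):
  //
  //   [bcp]  lookupswitch opcode, then 0-3 padding bytes to a 4-byte boundary
  //   [rbx]  default offset  -> Address(rbx, 0)
  //          npairs          -> Address(rbx, wordSize)
  //          npairs (match, offset) pairs, 8 bytes each
  //                          -> Address(rbx, rcx, times_8, 2 resp. 3 * wordSize)
  //
  // The loop below scans the pairs linearly from the last one down to index 0.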
1877 Label loop_entry, loop, found, continue_execution;
1878 // bswapl rax, so we can avoid bswapping the table entries
1879 __ bswapl(rax);
1880 // align rsi
1881 __ lea(rbx, at_bcp(wordSize)); // btw: should be able to get rid of this instruction (change offsets below)
1882 __ andptr(rbx, -wordSize);
1883 // set counter
1884 __ movl(rcx, Address(rbx, wordSize));
1885 __ bswapl(rcx);
1886 __ jmpb(loop_entry);
1887 // table search
1888 __ bind(loop);
1889 __ cmpl(rax, Address(rbx, rcx, Address::times_8, 2 * wordSize));
1890 __ jccb(Assembler::equal, found);
1891 __ bind(loop_entry);
1892 __ decrementl(rcx);
1893 __ jcc(Assembler::greaterEqual, loop);
1894 // default case
1895 __ profile_switch_default(rax);
1896 __ movl(rdx, Address(rbx, 0));
1897 __ jmpb(continue_execution);
1898 // entry found -> get offset
1899 __ bind(found);
1900 __ movl(rdx, Address(rbx, rcx, Address::times_8, 3 * wordSize));
1901 __ profile_switch_case(rcx, rax, rbx);
1902 // continue execution
1903 __ bind(continue_execution);
1904 __ bswapl(rdx);
1905 __ load_unsigned_byte(rbx, Address(rsi, rdx, Address::times_1));
1906 __ addptr(rsi, rdx);
1907 __ dispatch_only(vtos);
1908 }
1909
1910
1911 void TemplateTable::fast_binaryswitch() {
1912 transition(itos, vtos);
1913 // Implementation using the following core algorithm:
1914 //
1915 // int binary_search(int key, LookupswitchPair* array, int n) {
1916 // // Binary search according to "Methodik des Programmierens" by
1917 // // Edsger W. Dijkstra and W.H.J. Feijen, Addison Wesley Germany 1985.
1918 // int i = 0;
1919 // int j = n;
1920 // while (i+1 < j) {
1921 // // invariant P: 0 <= i < j <= n and (a[i] <= key < a[j] or Q)
1922 // // with Q: for all i: 0 <= i < n: key < a[i]
1923 // // where a stands for the array and assuming that the (nonexistent)
1924 // // element a[n] is infinitely big.
1925 // int h = (i + j) >> 1;
1926 // // i < h < j
1927 // if (key < array[h].fast_match()) {
1928 // j = h;
1929 // } else {
1930 // i = h;
1931 // }
1932 // }
1933 // // R: a[i] <= key < a[i+1] or Q
1934 // // (i.e., if key is within array, i is the correct index)
1935 // return i;
1936 // }
1937
1938 // register allocation
1939 const Register key = rax; // already set (tosca)
1940 const Register array = rbx;
1941 const Register i = rcx;
1942 const Register j = rdx;
1943 const Register h = rdi; // needs to be restored
1944 const Register temp = rsi;
1945 // setup array
1946 __ save_bcp();
1947
1948 __ lea(array, at_bcp(3*wordSize)); // btw: should be able to get rid of this instruction (change offsets below)
1949 __ andptr(array, -wordSize);
1950 // initialize i & j
1951 __ xorl(i, i); // i = 0;
1952 __ movl(j, Address(array, -wordSize)); // j = length(array);
1953 // Convert j into native byte-ordering
1954 __ bswapl(j);
1955 // and start
1956 Label entry;
1957 __ jmp(entry);
1958
1959 // binary search loop
1960 { Label loop;
1961 __ bind(loop);
1962 // int h = (i + j) >> 1;
1963 __ leal(h, Address(i, j, Address::times_1)); // h = i + j;
1964 __ sarl(h, 1); // h = (i + j) >> 1;
1965 // if (key < array[h].fast_match()) {
1966 // j = h;
1967 // } else {
1968 // i = h;
1969 // }
1970 // Convert array[h].match to native byte-ordering before compare
1971 __ movl(temp, Address(array, h, Address::times_8, 0*wordSize));
1972 __ bswapl(temp);
1973 __ cmpl(key, temp);
1974 // j = h if (key < array[h].fast_match())
1975 __ cmov32(Assembler::less , j, h);
1976 // i = h if (key >= array[h].fast_match())
1977 __ cmov32(Assembler::greaterEqual, i, h);
1978 // while (i+1 < j)
1979 __ bind(entry);
1980 __ leal(h, Address(i, 1)); // i+1
1981 __ cmpl(h, j); // i+1 < j
1982 __ jcc(Assembler::less, loop);
1983 }
1984
1985 // end of binary search, result index is i (must check again!)
1986 Label default_case;
1987 // Convert array[i].match to native byte-ordering before compare
1988 __ movl(temp, Address(array, i, Address::times_8, 0*wordSize));
1989 __ bswapl(temp);
1990 __ cmpl(key, temp);
1991 __ jcc(Assembler::notEqual, default_case);
1992
1993 // entry found -> j = offset
1994 __ movl(j , Address(array, i, Address::times_8, 1*wordSize));
1995 __ profile_switch_case(i, key, array);
1996 __ bswapl(j);
1997 LP64_ONLY(__ movslq(j, j));
1998 __ restore_bcp();
1999 __ restore_locals(); // restore rdi
2000 __ load_unsigned_byte(rbx, Address(rsi, j, Address::times_1));
2001
2002 __ addptr(rsi, j);
2003 __ dispatch_only(vtos);
2004
2005 // default case -> j = default offset
2006 __ bind(default_case);
2007 __ profile_switch_default(i);
2008 __ movl(j, Address(array, -2*wordSize));
2009 __ bswapl(j);
2010 LP64_ONLY(__ movslq(j, j));
2011 __ restore_bcp();
2012 __ restore_locals(); // restore rdi
2013 __ load_unsigned_byte(rbx, Address(rsi, j, Address::times_1));
2014 __ addptr(rsi, j);
2015 __ dispatch_only(vtos);
2016 }
2017
2018
2019 void TemplateTable::_return(TosState state) {
2020 transition(state, state);
2021 assert(_desc->calls_vm(), "inconsistent calls_vm information"); // call in remove_activation
2022
2023 if (_desc->bytecode() == Bytecodes::_return_register_finalizer) {
2024 assert(state == vtos, "only valid state");
2025 __ movptr(rax, aaddress(0));
2026 __ load_klass(rdi, rax);
2027 __ movl(rdi, Address(rdi, Klass::access_flags_offset()));
2028 __ testl(rdi, JVM_ACC_HAS_FINALIZER);
2029 Label skip_register_finalizer;
2030 __ jcc(Assembler::zero, skip_register_finalizer);
2031
2032 __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::register_finalizer), rax);
2033
2034 __ bind(skip_register_finalizer);
2035 }
2036
2037 __ remove_activation(state, rsi);
2038 __ jmp(rsi);
2039 }
2040
2041
2042 // ----------------------------------------------------------------------------
2043 // Volatile variables demand their effects be made known to all CPUs in
2044 // order. Store buffers on most chips allow reads & writes to reorder; the
2045 // JMM's ReadAfterWrite.java test fails in -Xint mode without some kind of
2046 // memory barrier (i.e., it's not sufficient that the interpreter does not
2047 // reorder volatile references, the hardware also must not reorder them).
2048 //
2049 // According to the new Java Memory Model (JMM):
2050 // (1) All volatiles are serialized wrt to each other.
2051 // ALSO reads & writes act as acquire & release, so:
2052 // (2) A read cannot let unrelated NON-volatile memory refs that happen after
2053 // the read float up to before the read. It's OK for non-volatile memory refs
2054 // that happen before the volatile read to float down below it.
2055 // (3) Similarly, a volatile write cannot let unrelated NON-volatile memory refs
2056 // that happen BEFORE the write float down to after the write. It's OK for
2057 // non-volatile memory refs that happen after the volatile write to float up
2058 // before it.
2059 //
2060 // We only put in barriers around volatile refs (they are expensive), not
2061 // _between_ memory refs (that would require us to track the flavor of the
2062 // previous memory refs). Requirements (2) and (3) require some barriers
2063 // before volatile stores and after volatile loads. These nearly cover
2064 // requirement (1) but miss the volatile-store-volatile-load case. This final
2065 // case is placed after volatile-stores although it could just as well go
2066 // before volatile-loads.
2067 void TemplateTable::volatile_barrier(Assembler::Membar_mask_bits order_constraint ) {
2068 // Helper function to insert a memory barrier; callers perform the is-volatile test
2069 if( !os::is_MP() ) return; // Not needed on single CPU
2070 __ membar(order_constraint);
2071 }
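// On x86 this scheme boils down to the following usage by the templates
// below (a sketch, not generated code):
//
//   volatile load:   load value;
//                    // acquire is implicit in the x86 memory model, no barrier
//   volatile store:  store value;
//                    volatile_barrier(Assembler::StoreLoad | Assembler::StoreStore);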
2072
2073 void TemplateTable::resolve_cache_and_index(int byte_no,
2074 Register Rcache,
2075 Register index,
2076 size_t index_size) {
2077 const Register temp = rbx;
2078 assert_different_registers(Rcache, index, temp);
2079
2080 Label resolved;
2081 assert(byte_no == f1_byte || byte_no == f2_byte, "byte_no out of range");
2082 __ get_cache_and_index_and_bytecode_at_bcp(Rcache, index, temp, byte_no, 1, index_size);
2083 __ cmpl(temp, (int) bytecode()); // have we resolved this bytecode?
2084 __ jcc(Assembler::equal, resolved);
2085
2086 // resolve first time through
2087 address entry;
2088 switch (bytecode()) {
2089 case Bytecodes::_getstatic : // fall through
2090 case Bytecodes::_putstatic : // fall through
2091 case Bytecodes::_getfield : // fall through
2092 case Bytecodes::_putfield : entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_get_put); break;
2093 case Bytecodes::_invokevirtual : // fall through
2094 case Bytecodes::_invokespecial : // fall through
2095 case Bytecodes::_invokestatic : // fall through
2096 case Bytecodes::_invokeinterface: entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_invoke); break;
2097 case Bytecodes::_invokehandle : entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_invokehandle); break;
2098 case Bytecodes::_invokedynamic : entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_invokedynamic); break;
2099 default:
2100 fatal(err_msg("unexpected bytecode: %s", Bytecodes::name(bytecode())));
2101 break;
2102 }
2103 __ movl(temp, (int)bytecode());
2104 __ call_VM(noreg, entry, temp);
2105 // Update registers with resolved info
2106 __ get_cache_and_index_at_bcp(Rcache, index, 1, index_size);
2107 __ bind(resolved);
2108 }
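// Roughly, resolve_cache_and_index implements (illustrative pseudocode):
//
//   if (cache_entry->bytecode(byte_no) != bytecode()) {
//     call_VM(InterpreterRuntime::resolve_<kind>, bytecode());  // resolve once
//     reload Rcache/index;                                      // entry now filled in
//   }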
2109
2110
2111 // The cache and index registers must be set before the call
2112 void TemplateTable::load_field_cp_cache_entry(Register obj,
2113 Register cache,
2114 Register index,
2115 Register off,
2116 Register flags,
2117 bool is_static = false) {
2118 assert_different_registers(cache, index, flags, off);
2119
2120 ByteSize cp_base_offset = ConstantPoolCache::base_offset();
2121 // Field offset
2122 __ movptr(off, Address(cache, index, Address::times_ptr,
2123 in_bytes(cp_base_offset + ConstantPoolCacheEntry::f2_offset())));
2124 // Flags
2125 __ movl(flags, Address(cache, index, Address::times_ptr,
2126 in_bytes(cp_base_offset + ConstantPoolCacheEntry::flags_offset())));
2127
2128 // for static fields, obj is overwritten with the klass' java mirror
2129 if (is_static) {
2130 __ movptr(obj, Address(cache, index, Address::times_ptr,
2131 in_bytes(cp_base_offset + ConstantPoolCacheEntry::f1_offset())));
2132 const int mirror_offset = in_bytes(Klass::java_mirror_offset());
2133 __ movptr(obj, Address(obj, mirror_offset));
2134 }
2135 }
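// For field bytecodes the cache entry is consumed as follows (sketch):
//
//   off   = entry->f2;       // field offset within the holder
//   flags = entry->flags;    // tos state, is-volatile bit, etc.
//   if (is_static)
//     obj = ((Klass*)entry->f1)->java_mirror();  // static fields live in the mirror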
2136
2137 void TemplateTable::load_invoke_cp_cache_entry(int byte_no,
2138 Register method,
2139 Register itable_index,
2140 Register flags,
2141 bool is_invokevirtual,
2142 bool is_invokevfinal, /*unused*/
2143 bool is_invokedynamic) {
2144 // setup registers
2145 const Register cache = rcx;
2146 const Register index = rdx;
2147 assert_different_registers(method, flags);
2148 assert_different_registers(method, cache, index);
2149 assert_different_registers(itable_index, flags);
2150 assert_different_registers(itable_index, cache, index);
2151 // determine constant pool cache field offsets
2152 assert(is_invokevirtual == (byte_no == f2_byte), "is_invokevirtual flag redundant");
2153 const int method_offset = in_bytes(
2154 ConstantPoolCache::base_offset() +
2155 ((byte_no == f2_byte)
2156 ? ConstantPoolCacheEntry::f2_offset()
2157 : ConstantPoolCacheEntry::f1_offset()));
2158 const int flags_offset = in_bytes(ConstantPoolCache::base_offset() +
2159 ConstantPoolCacheEntry::flags_offset());
2160 // access constant pool cache fields
2161 const int index_offset = in_bytes(ConstantPoolCache::base_offset() +
2162 ConstantPoolCacheEntry::f2_offset());
2163
2164 size_t index_size = (is_invokedynamic ? sizeof(u4) : sizeof(u2));
2165 resolve_cache_and_index(byte_no, cache, index, index_size);
2166 __ movptr(method, Address(cache, index, Address::times_ptr, method_offset));
2167
2168 if (itable_index != noreg) {
2169 __ movptr(itable_index, Address(cache, index, Address::times_ptr, index_offset));
2170 }
2171 __ movl(flags, Address(cache, index, Address::times_ptr, flags_offset));
2172 }
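// For invoke bytecodes the entry is consumed roughly as (sketch):
//
//   method       = (byte_no == f2_byte) ? entry->f2 : entry->f1;  // Method* (f2 may also be a vtable index)
//   itable_index = entry->f2;       // only when the caller asks for it
//   flags        = entry->flags;    // tos state, parameter size, vfinal/appendix bits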
2173
2174
2175 // The cache and index registers are expected to be set before the call.
2176 // Their values are preserved across the call.
2177 void TemplateTable::jvmti_post_field_access(Register cache,
2178 Register index,
2179 bool is_static,
2180 bool has_tos) {
2181 if (JvmtiExport::can_post_field_access()) {
2182 // Check to see if a field access watch has been set before we take
2183 // the time to call into the VM.
2184 Label L1;
2185 assert_different_registers(cache, index, rax);
2186 __ mov32(rax, ExternalAddress((address) JvmtiExport::get_field_access_count_addr()));
2187 __ testl(rax,rax);
2188 __ jcc(Assembler::zero, L1);
2189
2190 // cache entry pointer
2191 __ addptr(cache, in_bytes(ConstantPoolCache::base_offset()));
2192 __ shll(index, LogBytesPerWord);
2193 __ addptr(cache, index);
2194 if (is_static) {
2195 __ xorptr(rax, rax); // NULL object reference
2196 } else {
2197 __ pop(atos); // Get the object
2198 __ verify_oop(rax);
2199 __ push(atos); // Restore stack state
2200 }
2201 // rax,: object pointer or NULL
2202 // cache: cache entry pointer
2203 __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::post_field_access),
2204 rax, cache);
2205 __ get_cache_and_index_at_bcp(cache, index, 1);
2206 __ bind(L1);
2207 }
2208 }
2209
2210 void TemplateTable::pop_and_check_object(Register r) {
2211 __ pop_ptr(r);
2212 __ null_check(r); // for field access must check obj.
2213 __ verify_oop(r);
2214 }
2215
2216 void TemplateTable::getfield_or_static(int byte_no, bool is_static) {
2217 transition(vtos, vtos);
2218
2219 const Register cache = rcx;
2220 const Register index = rdx;
2221 const Register obj = rcx;
2222 const Register off = rbx;
2223 const Register flags = rax;
2224
2225 resolve_cache_and_index(byte_no, cache, index, sizeof(u2));
2226 jvmti_post_field_access(cache, index, is_static, false);
2227 load_field_cp_cache_entry(obj, cache, index, off, flags, is_static);
2228
2229 if (!is_static) pop_and_check_object(obj);
2230
2231 const Address lo(obj, off, Address::times_1, 0*wordSize);
2232 const Address hi(obj, off, Address::times_1, 1*wordSize);
2233
2234 Label Done, notByte, notInt, notShort, notChar, notLong, notFloat, notObj, notDouble;
2235
2236 __ shrl(flags, ConstantPoolCacheEntry::tos_state_shift);
2237 assert(btos == 0, "change code, btos != 0");
2238 // btos
2239 __ andptr(flags, ConstantPoolCacheEntry::tos_state_mask);
2240 __ jcc(Assembler::notZero, notByte);
2241
2242 __ load_signed_byte(rax, lo );
2243 __ push(btos);
2244 // Rewrite bytecode to be faster
2245 if (!is_static) {
2246 patch_bytecode(Bytecodes::_fast_bgetfield, rcx, rbx);
2247 }
2248 __ jmp(Done);
2249
2250 __ bind(notByte);
2251 // itos
2252 __ cmpl(flags, itos );
2253 __ jcc(Assembler::notEqual, notInt);
2254
2255 __ movl(rax, lo );
2256 __ push(itos);
2257 // Rewrite bytecode to be faster
2258 if (!is_static) {
2259 patch_bytecode(Bytecodes::_fast_igetfield, rcx, rbx);
2260 }
2261 __ jmp(Done);
2262
2263 __ bind(notInt);
2264 // atos
2265 __ cmpl(flags, atos );
2266 __ jcc(Assembler::notEqual, notObj);
2267
2268 __ movl(rax, lo );
2269 __ push(atos);
2270 if (!is_static) {
2271 patch_bytecode(Bytecodes::_fast_agetfield, rcx, rbx);
2272 }
2273 __ jmp(Done);
2274
2275 __ bind(notObj);
2276 // ctos
2277 __ cmpl(flags, ctos );
2278 __ jcc(Assembler::notEqual, notChar);
2279
2280 __ load_unsigned_short(rax, lo );
2281 __ push(ctos);
2282 if (!is_static) {
2283 patch_bytecode(Bytecodes::_fast_cgetfield, rcx, rbx);
2284 }
2285 __ jmp(Done);
2286
2287 __ bind(notChar);
2288 // stos
2289 __ cmpl(flags, stos );
2290 __ jcc(Assembler::notEqual, notShort);
2291
2292 __ load_signed_short(rax, lo );
2293 __ push(stos);
2294 if (!is_static) {
2295 patch_bytecode(Bytecodes::_fast_sgetfield, rcx, rbx);
2296 }
2297 __ jmp(Done);
2298
2299 __ bind(notShort);
2300 // ltos
2301 __ cmpl(flags, ltos );
2302 __ jcc(Assembler::notEqual, notLong);
2303
2304 // Generate code as if the field were volatile. There just aren't enough
2305 // registers to save that information, and this code is faster than the test.
2306 __ fild_d(lo); // Must load atomically
2307 __ subptr(rsp,2*wordSize); // Make space for store
2308 __ fistp_d(Address(rsp,0));
2309 __ pop(rax);
2310 __ pop(rdx);
2311
2312 __ push(ltos);
2313 // Don't rewrite to _fast_lgetfield for potential volatile case.
2314 __ jmp(Done);
2315
2316 __ bind(notLong);
2317 // ftos
2318 __ cmpl(flags, ftos );
2319 __ jcc(Assembler::notEqual, notFloat);
2320
2321 __ fld_s(lo);
2322 __ push(ftos);
2323 if (!is_static) {
2324 patch_bytecode(Bytecodes::_fast_fgetfield, rcx, rbx);
2325 }
2326 __ jmp(Done);
2327
2328 __ bind(notFloat);
2329 // dtos
2330 __ cmpl(flags, dtos );
2331 __ jcc(Assembler::notEqual, notDouble);
2332
2333 __ fld_d(lo);
2334 __ push(dtos);
2335 if (!is_static) {
2336 patch_bytecode(Bytecodes::_fast_dgetfield, rcx, rbx);
2337 }
2338 __ jmpb(Done);
2339
2340 __ bind(notDouble);
2341
2342 __ stop("Bad state");
2343
2344 __ bind(Done);
2345 // Doug Lea believes this is not needed with current Sparcs (TSO) and Intel (PSO).
2346 // volatile_barrier( );
2347 }
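// In outline, the template above generates (illustrative pseudocode):
//
//   switch (tos_state(flags)) {
//     case btos: push((jbyte)*(obj + off));  rewrite to _fast_bgetfield; break;
//     case itos: push((jint) *(obj + off));  rewrite to _fast_igetfield; break;
//     ...                                    // atos/ctos/stos/ftos/dtos alike
//     case ltos: push 64 bits atomically via fild/fistp; no rewrite (may be volatile);
//   }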
2348
2349
2350 void TemplateTable::getfield(int byte_no) {
2351 getfield_or_static(byte_no, false);
2352 }
2353
2354
2355 void TemplateTable::getstatic(int byte_no) {
2356 getfield_or_static(byte_no, true);
2357 }
2358
2359 // The cache and index registers are expected to be set before the call.
2360 // The function may destroy various other registers, but preserves the cache and index registers.
2361 void TemplateTable::jvmti_post_field_mod(Register cache, Register index, bool is_static) {
2362
2363 ByteSize cp_base_offset = ConstantPoolCache::base_offset();
2364
2365 if (JvmtiExport::can_post_field_modification()) {
2366 // Check to see if a field modification watch has been set before we take
2367 // the time to call into the VM.
2368 Label L1;
2369 assert_different_registers(cache, index, rax);
2370 __ mov32(rax, ExternalAddress((address)JvmtiExport::get_field_modification_count_addr()));
2371 __ testl(rax, rax);
2372 __ jcc(Assembler::zero, L1);
2373
2374 // The cache and index registers have already been set.
2375 // This would allow the call below to be eliminated, but then the cache
2376 // and index registers would have to be used consistently after this line.
2377 __ get_cache_and_index_at_bcp(rax, rdx, 1);
2378
2379 if (is_static) {
2380 // Life is simple. Null out the object pointer.
2381 __ xorptr(rbx, rbx);
2382 } else {
2383 // Life is harder. The stack holds the value on top, followed by the object.
2384 // We don't know the size of the value, though; it could be one or two words
2385 // depending on its type. As a result, we must find the type to determine where
2386 // the object is.
2387 Label two_word, valsize_known;
2388 __ movl(rcx, Address(rax, rdx, Address::times_ptr, in_bytes(cp_base_offset +
2389 ConstantPoolCacheEntry::flags_offset())));
2390 __ mov(rbx, rsp);
2391 __ shrl(rcx, ConstantPoolCacheEntry::tos_state_shift);
2392 // Make sure we don't need to mask rcx after the above shift
2393 ConstantPoolCacheEntry::verify_tos_state_shift();
2394 __ cmpl(rcx, ltos);
2395 __ jccb(Assembler::equal, two_word);
2396 __ cmpl(rcx, dtos);
2397 __ jccb(Assembler::equal, two_word);
2398 __ addptr(rbx, Interpreter::expr_offset_in_bytes(1)); // one word jvalue (not ltos, dtos)
2399 __ jmpb(valsize_known);
2400
2401 __ bind(two_word);
2402 __ addptr(rbx, Interpreter::expr_offset_in_bytes(2)); // two words jvalue
2403
2404 __ bind(valsize_known);
2405 // setup object pointer
2406 __ movptr(rbx, Address(rbx, 0));
2407 }
2408 // cache entry pointer
2409 __ addptr(rax, in_bytes(cp_base_offset));
2410 __ shll(rdx, LogBytesPerWord);
2411 __ addptr(rax, rdx);
2412 // object (tos)
2413 __ mov(rcx, rsp);
2414 // rbx,: object pointer set up above (NULL if static)
2415 // rax,: cache entry pointer
2416 // rcx: jvalue object on the stack
2417 __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::post_field_modification),
2418 rbx, rax, rcx);
2419 __ get_cache_and_index_at_bcp(cache, index, 1);
2420 __ bind(L1);
2421 }
2422 }
2423
2424
2425 void TemplateTable::putfield_or_static(int byte_no, bool is_static) {
2426 transition(vtos, vtos);
2427
2428 const Register cache = rcx;
2429 const Register index = rdx;
2430 const Register obj = rcx;
2431 const Register off = rbx;
2432 const Register flags = rax;
2433
2434 resolve_cache_and_index(byte_no, cache, index, sizeof(u2));
2435 jvmti_post_field_mod(cache, index, is_static);
2436 load_field_cp_cache_entry(obj, cache, index, off, flags, is_static);
2437
2438 // Doug Lea believes this is not needed with current Sparcs (TSO) and Intel (PSO).
2439 // volatile_barrier( );
2440
2441 Label notVolatile, Done;
2442 __ movl(rdx, flags);
2443 __ shrl(rdx, ConstantPoolCacheEntry::is_volatile_shift);
2444 __ andl(rdx, 0x1);
2445
2446 // field addresses
2447 const Address lo(obj, off, Address::times_1, 0*wordSize);
2448 const Address hi(obj, off, Address::times_1, 1*wordSize);
2449
2450 Label notByte, notInt, notShort, notChar, notLong, notFloat, notObj, notDouble;
2451
2452 __ shrl(flags, ConstantPoolCacheEntry::tos_state_shift);
2453 assert(btos == 0, "change code, btos != 0");
2454 __ andl(flags, ConstantPoolCacheEntry::tos_state_mask);
2455 __ jcc(Assembler::notZero, notByte);
2456
2457 // btos
2458 {
2459 __ pop(btos);
2460 if (!is_static) pop_and_check_object(obj);
2461 __ movb(lo, rax);
2462 if (!is_static) {
2463 patch_bytecode(Bytecodes::_fast_bputfield, rcx, rbx, true, byte_no);
2464 }
2465 __ jmp(Done);
2466 }
2467
2468 __ bind(notByte);
2469 __ cmpl(flags, itos);
2470 __ jcc(Assembler::notEqual, notInt);
2471
2472 // itos
2473 {
2474 __ pop(itos);
2475 if (!is_static) pop_and_check_object(obj);
2476 __ movl(lo, rax);
2477 if (!is_static) {
2478 patch_bytecode(Bytecodes::_fast_iputfield, rcx, rbx, true, byte_no);
2479 }
2480 __ jmp(Done);
2481 }
2482
2483 __ bind(notInt);
2484 __ cmpl(flags, atos);
2485 __ jcc(Assembler::notEqual, notObj);
2486
2487 // atos
2488 {
2489 __ pop(atos);
2490 if (!is_static) pop_and_check_object(obj);
2491 do_oop_store(_masm, lo, rax, _bs->kind(), false);
2492 if (!is_static) {
2493 patch_bytecode(Bytecodes::_fast_aputfield, rcx, rbx, true, byte_no);
2494 }
2495 __ jmp(Done);
2496 }
2497
2498 __ bind(notObj);
2499 __ cmpl(flags, ctos);
2500 __ jcc(Assembler::notEqual, notChar);
2501
2502 // ctos
2503 {
2504 __ pop(ctos);
2505 if (!is_static) pop_and_check_object(obj);
2506 __ movw(lo, rax);
2507 if (!is_static) {
2508 patch_bytecode(Bytecodes::_fast_cputfield, rcx, rbx, true, byte_no);
2509 }
2510 __ jmp(Done);
2511 }
2512
2513 __ bind(notChar);
2514 __ cmpl(flags, stos);
2515 __ jcc(Assembler::notEqual, notShort);
2516
2517 // stos
2518 {
2519 __ pop(stos);
2520 if (!is_static) pop_and_check_object(obj);
2521 __ movw(lo, rax);
2522 if (!is_static) {
2523 patch_bytecode(Bytecodes::_fast_sputfield, rcx, rbx, true, byte_no);
2524 }
2525 __ jmp(Done);
2526 }
2527
2528 __ bind(notShort);
2529 __ cmpl(flags, ltos);
2530 __ jcc(Assembler::notEqual, notLong);
2531
2532 // ltos
2533 {
2534 Label notVolatileLong;
2535 __ testl(rdx, rdx);
2536 __ jcc(Assembler::zero, notVolatileLong);
2537
2538 __ pop(ltos); // overwrites rdx, do this after testing volatile.
2539 if (!is_static) pop_and_check_object(obj);
2540
2541 // Replace with real volatile test
2542 __ push(rdx);
2543 __ push(rax); // Must update atomically with FIST
2544 __ fild_d(Address(rsp,0)); // So load into FPU register
2545 __ fistp_d(lo); // and put into memory atomically
2546 __ addptr(rsp, 2*wordSize);
2547 // volatile_barrier();
2548 volatile_barrier(Assembler::Membar_mask_bits(Assembler::StoreLoad |
2549 Assembler::StoreStore));
2550 // Don't rewrite volatile version
2551 __ jmp(notVolatile);
2552
2553 __ bind(notVolatileLong);
2554
2555 __ pop(ltos); // overwrites rdx
2556 if (!is_static) pop_and_check_object(obj);
2557 NOT_LP64(__ movptr(hi, rdx));
2558 __ movptr(lo, rax);
2559 if (!is_static) {
2560 patch_bytecode(Bytecodes::_fast_lputfield, rcx, rbx, true, byte_no);
2561 }
2562 __ jmp(notVolatile);
2563 }
2564
2565 __ bind(notLong);
2566 __ cmpl(flags, ftos);
2567 __ jcc(Assembler::notEqual, notFloat);
2568
2569 // ftos
2570 {
2571 __ pop(ftos);
2572 if (!is_static) pop_and_check_object(obj);
2573 __ fstp_s(lo);
2574 if (!is_static) {
2575 patch_bytecode(Bytecodes::_fast_fputfield, rcx, rbx, true, byte_no);
2576 }
2577 __ jmp(Done);
2578 }
2579
2580 __ bind(notFloat);
2581 #ifdef ASSERT
2582 __ cmpl(flags, dtos);
2583 __ jcc(Assembler::notEqual, notDouble);
2584 #endif
2585
2586 // dtos
2587 {
2588 __ pop(dtos);
2589 if (!is_static) pop_and_check_object(obj);
2590 __ fstp_d(lo);
2591 if (!is_static) {
2592 patch_bytecode(Bytecodes::_fast_dputfield, rcx, rbx, true, byte_no);
2593 }
2594 __ jmp(Done);
2595 }
2596
2597 #ifdef ASSERT
2598 __ bind(notDouble);
2599 __ stop("Bad state");
2600 #endif
2601
2602 __ bind(Done);
2603
2604 // Check for volatile store
2605 __ testl(rdx, rdx);
2606 __ jcc(Assembler::zero, notVolatile);
2607 volatile_barrier(Assembler::Membar_mask_bits(Assembler::StoreLoad |
2608 Assembler::StoreStore));
2609 __ bind(notVolatile);
2610 }
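// The volatile handling above amounts to (sketch):
//
//   store value into field;
//   if (is_volatile)
//     volatile_barrier(Assembler::StoreLoad | Assembler::StoreStore);
//
// Longs are special-cased: a volatile 64-bit store must be atomic on a
// 32-bit VM, so it goes through an FPU fild/fistp pair instead of two
// 32-bit moves, and the bytecode is not rewritten to the fast version.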
2611
2612
2613 void TemplateTable::putfield(int byte_no) {
2614 putfield_or_static(byte_no, false);
2615 }
2616
2617
2618 void TemplateTable::putstatic(int byte_no) {
2619 putfield_or_static(byte_no, true);
2620 }
2621
2622 void TemplateTable::jvmti_post_fast_field_mod() {
2623 if (JvmtiExport::can_post_field_modification()) {
2624 // Check to see if a field modification watch has been set before we take
2625 // the time to call into the VM.
2626 Label L2;
2627 __ mov32(rcx, ExternalAddress((address)JvmtiExport::get_field_modification_count_addr()));
2628 __ testl(rcx,rcx);
2629 __ jcc(Assembler::zero, L2);
2630 __ pop_ptr(rbx); // copy the object pointer from tos
2631 __ verify_oop(rbx);
2632 __ push_ptr(rbx); // put the object pointer back on tos
2633
2634 // Save tos values before call_VM() clobbers them. Since we have
2635 // to do it for every data type, we use the saved values as the
2636 // jvalue object.
2637 switch (bytecode()) { // load values into the jvalue object
2638 case Bytecodes::_fast_aputfield: __ push_ptr(rax); break;
2639 case Bytecodes::_fast_bputfield: // fall through
2640 case Bytecodes::_fast_sputfield: // fall through
2641 case Bytecodes::_fast_cputfield: // fall through
2642 case Bytecodes::_fast_iputfield: __ push_i(rax); break;
2643 case Bytecodes::_fast_dputfield: __ push_d(); break;
2644 case Bytecodes::_fast_fputfield: __ push_f(); break;
2645 case Bytecodes::_fast_lputfield: __ push_l(rax); break;
2646
2647 default:
2648 ShouldNotReachHere();
2649 }
2650 __ mov(rcx, rsp); // points to jvalue on the stack
2651 // access constant pool cache entry
2652 __ get_cache_entry_pointer_at_bcp(rax, rdx, 1);
2653 __ verify_oop(rbx);
2654 // rbx,: object pointer copied above
2655 // rax,: cache entry pointer
2656 // rcx: jvalue object on the stack
2657 __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::post_field_modification), rbx, rax, rcx);
2658
2659 switch (bytecode()) { // restore tos values
2660 case Bytecodes::_fast_aputfield: __ pop_ptr(rax); break;
2661 case Bytecodes::_fast_bputfield: // fall through
2662 case Bytecodes::_fast_sputfield: // fall through
2663 case Bytecodes::_fast_cputfield: // fall through
2664 case Bytecodes::_fast_iputfield: __ pop_i(rax); break;
2665 case Bytecodes::_fast_dputfield: __ pop_d(); break;
2666 case Bytecodes::_fast_fputfield: __ pop_f(); break;
2667 case Bytecodes::_fast_lputfield: __ pop_l(rax); break;
2668 }
2669 __ bind(L2);
2670 }
2671 }
2672
2673 void TemplateTable::fast_storefield(TosState state) {
2674 transition(state, vtos);
2675
2676 ByteSize base = ConstantPoolCache::base_offset();
2677
2678 jvmti_post_fast_field_mod();
2679
2680 // access constant pool cache
2681 __ get_cache_and_index_at_bcp(rcx, rbx, 1);
2682
2683 // Test for volatile with rdx, but rdx is the tos register for lputfield.
2684 if (bytecode() == Bytecodes::_fast_lputfield) __ push(rdx);
2685 __ movl(rdx, Address(rcx, rbx, Address::times_ptr, in_bytes(base +
2686 ConstantPoolCacheEntry::flags_offset())));
2687
2688 // replace index with field offset from cache entry
2689 __ movptr(rbx, Address(rcx, rbx, Address::times_ptr, in_bytes(base + ConstantPoolCacheEntry::f2_offset())));
2690
2691 // Doug Lea believes this is not needed with current Sparcs (TSO) and Intel (PSO).
2692 // volatile_barrier( );
2693
2694 Label notVolatile, Done;
2695 __ shrl(rdx, ConstantPoolCacheEntry::is_volatile_shift);
2696 __ andl(rdx, 0x1);
2697 // Check for volatile store
2698 __ testl(rdx, rdx);
2699 __ jcc(Assembler::zero, notVolatile);
2700
2701 if (bytecode() == Bytecodes::_fast_lputfield) __ pop(rdx);
2702
2703 // Get object from stack
2704 pop_and_check_object(rcx);
2705
2706 // field addresses
2707 const Address lo(rcx, rbx, Address::times_1, 0*wordSize);
2708 const Address hi(rcx, rbx, Address::times_1, 1*wordSize);
2709
2710 // access field
2711 switch (bytecode()) {
2712 case Bytecodes::_fast_bputfield: __ movb(lo, rax); break;
2713 case Bytecodes::_fast_sputfield: // fall through
2714 case Bytecodes::_fast_cputfield: __ movw(lo, rax); break;
2715 case Bytecodes::_fast_iputfield: __ movl(lo, rax); break;
2716 case Bytecodes::_fast_lputfield:
2717 NOT_LP64(__ movptr(hi, rdx));
2718 __ movptr(lo, rax);
2719 break;
2720 case Bytecodes::_fast_fputfield: __ fstp_s(lo); break;
2721 case Bytecodes::_fast_dputfield: __ fstp_d(lo); break;
2722 case Bytecodes::_fast_aputfield: {
2723 do_oop_store(_masm, lo, rax, _bs->kind(), false);
2724 break;
2725 }
2726 default:
2727 ShouldNotReachHere();
2728 }
2729
2730 Label done;
2731 volatile_barrier(Assembler::Membar_mask_bits(Assembler::StoreLoad |
2732 Assembler::StoreStore));
2733 // Barriers are so large that a short branch doesn't reach!
2734 __ jmp(done);
2735
2736 // Same code as above, but rdx is not needed to test for volatile.
2737 __ bind(notVolatile);
2738
2739 if (bytecode() == Bytecodes::_fast_lputfield) __ pop(rdx);
2740
2741 // Get object from stack
2742 pop_and_check_object(rcx);
2743
2744 // access field
2745 switch (bytecode()) {
2746 case Bytecodes::_fast_bputfield: __ movb(lo, rax); break;
2747 case Bytecodes::_fast_sputfield: // fall through
2748 case Bytecodes::_fast_cputfield: __ movw(lo, rax); break;
2749 case Bytecodes::_fast_iputfield: __ movl(lo, rax); break;
2750 case Bytecodes::_fast_lputfield:
2751 NOT_LP64(__ movptr(hi, rdx));
2752 __ movptr(lo, rax);
2753 break;
2754 case Bytecodes::_fast_fputfield: __ fstp_s(lo); break;
2755 case Bytecodes::_fast_dputfield: __ fstp_d(lo); break;
2756 case Bytecodes::_fast_aputfield: {
2757 do_oop_store(_masm, lo, rax, _bs->kind(), false);
2758 break;
2759 }
2760 default:
2761 ShouldNotReachHere();
2762 }
2763 __ bind(done);
2764 }
2765
2766
2767 void TemplateTable::fast_accessfield(TosState state) {
2768 transition(atos, state);
2769
2770 // do the JVMTI work here to avoid disturbing the register state below
2771 if (JvmtiExport::can_post_field_access()) {
2772 // Check to see if a field access watch has been set before we take
2773 // the time to call into the VM.
2774 Label L1;
2775 __ mov32(rcx, ExternalAddress((address) JvmtiExport::get_field_access_count_addr()));
2776 __ testl(rcx,rcx);
2777 __ jcc(Assembler::zero, L1);
2778 // access constant pool cache entry
2779 __ get_cache_entry_pointer_at_bcp(rcx, rdx, 1);
2780 __ push_ptr(rax); // save object pointer before call_VM() clobbers it
2781 __ verify_oop(rax);
2782 // rax,: object pointer copied above
2783 // rcx: cache entry pointer
2784 __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::post_field_access), rax, rcx);
2785 __ pop_ptr(rax); // restore object pointer
2786 __ bind(L1);
2787 }
2788
2789 // access constant pool cache
2790 __ get_cache_and_index_at_bcp(rcx, rbx, 1);
2791 // replace index with field offset from cache entry
2792 __ movptr(rbx, Address(rcx,
2793 rbx,
2794 Address::times_ptr,
2795 in_bytes(ConstantPoolCache::base_offset() + ConstantPoolCacheEntry::f2_offset())));
2796
2797
2798 // rax,: object
2799 __ verify_oop(rax);
2800 __ null_check(rax);
2801 // field addresses
2802 const Address lo = Address(rax, rbx, Address::times_1, 0*wordSize);
2803 const Address hi = Address(rax, rbx, Address::times_1, 1*wordSize);
2804
2805 // access field
2806 switch (bytecode()) {
2807 case Bytecodes::_fast_bgetfield: __ movsbl(rax, lo ); break;
2808 case Bytecodes::_fast_sgetfield: __ load_signed_short(rax, lo ); break;
2809 case Bytecodes::_fast_cgetfield: __ load_unsigned_short(rax, lo ); break;
2810 case Bytecodes::_fast_igetfield: __ movl(rax, lo); break;
2811 case Bytecodes::_fast_lgetfield: __ stop("should not be rewritten"); break;
2812 case Bytecodes::_fast_fgetfield: __ fld_s(lo); break;
2813 case Bytecodes::_fast_dgetfield: __ fld_d(lo); break;
2814 case Bytecodes::_fast_agetfield: __ movptr(rax, lo); __ verify_oop(rax); break;
2815 default:
2816 ShouldNotReachHere();
2817 }
2818
2819 // Doug Lea believes this is not needed with current Sparcs(TSO) and Intel(PSO)
2820 // volatile_barrier( );
2821 }
2822
2823 void TemplateTable::fast_xaccess(TosState state) {
2824 transition(vtos, state);
2825 // get receiver
2826 __ movptr(rax, aaddress(0));
2827 // access constant pool cache
2828 __ get_cache_and_index_at_bcp(rcx, rdx, 2);
2829 __ movptr(rbx, Address(rcx,
2830 rdx,
2831 Address::times_ptr,
2832 in_bytes(ConstantPoolCache::base_offset() + ConstantPoolCacheEntry::f2_offset())));
2833 // make sure exception is reported in correct bcp range (getfield is next instruction)
2834 __ increment(rsi);
2835 __ null_check(rax);
2836 const Address lo = Address(rax, rbx, Address::times_1, 0*wordSize);
2837 if (state == itos) {
2838 __ movl(rax, lo);
2839 } else if (state == atos) {
2840 __ movptr(rax, lo);
2841 __ verify_oop(rax);
2842 } else if (state == ftos) {
2843 __ fld_s(lo);
2844 } else {
2845 ShouldNotReachHere();
2846 }
2847 __ decrement(rsi);
2848 }
2849
2850
2851
2852 //----------------------------------------------------------------------------------------------------
2853 // Calls
2854
2855 void TemplateTable::count_calls(Register method, Register temp) {
2856 // implemented elsewhere
2857 ShouldNotReachHere();
2858 }
2859
2860
2861 void TemplateTable::prepare_invoke(int byte_no,
2862 Register method, // linked method (or i-klass)
2863 Register index, // itable index, MethodType, etc.
2864 Register recv, // if caller wants to see it
2865 Register flags // if caller wants to test it
2866 ) {
2867 // determine flags
2868 const Bytecodes::Code code = bytecode();
2869 const bool is_invokeinterface = code == Bytecodes::_invokeinterface;
2870 const bool is_invokedynamic = code == Bytecodes::_invokedynamic;
2871 const bool is_invokehandle = code == Bytecodes::_invokehandle;
2872 const bool is_invokevirtual = code == Bytecodes::_invokevirtual;
2873 const bool is_invokespecial = code == Bytecodes::_invokespecial;
2874 const bool load_receiver = (recv != noreg);
2875 const bool save_flags = (flags != noreg);
2876 assert(load_receiver == (code != Bytecodes::_invokestatic && code != Bytecodes::_invokedynamic), "");
2877 assert(save_flags == (is_invokeinterface || is_invokevirtual), "need flags for vfinal");
2878 assert(flags == noreg || flags == rdx, "");
2879 assert(recv == noreg || recv == rcx, "");
2880
2881 // setup registers & access constant pool cache
2882 if (recv == noreg) recv = rcx;
2883 if (flags == noreg) flags = rdx;
2884 assert_different_registers(method, index, recv, flags);
2885
2886 // save 'interpreter return address'
2887 __ save_bcp();
2888
2889 load_invoke_cp_cache_entry(byte_no, method, index, flags, is_invokevirtual, false, is_invokedynamic);
2890
2891 // maybe push appendix to arguments (just before return address)
2892 if (is_invokedynamic || is_invokehandle) {
2893 Label L_no_push;
2894 __ testl(flags, (1 << ConstantPoolCacheEntry::has_appendix_shift));
2895 __ jccb(Assembler::zero, L_no_push);
2896 // Push the appendix as a trailing parameter.
2897 // This must be done before we get the receiver,
2898 // since the parameter_size includes it.
2899 __ push(rbx);
2900 __ mov(rbx, index);
2901 assert(ConstantPoolCacheEntry::_indy_resolved_references_appendix_offset == 0, "appendix expected at index+0");
2902 __ load_resolved_reference_at_index(index, rbx);
2903 __ pop(rbx);
2904 __ push(index); // push appendix (MethodType, CallSite, etc.)
2905 __ bind(L_no_push);
2906 }
2907
2908 // load receiver if needed (note: no return address pushed yet)
2909 if (load_receiver) {
2910 __ movl(recv, flags);
2911 __ andl(recv, ConstantPoolCacheEntry::parameter_size_mask);
2912 const int no_return_pc_pushed_yet = -1; // argument slot correction before we push return address
2913 const int receiver_is_at_end = -1; // back off one slot to get receiver
2914 Address recv_addr = __ argument_address(recv, no_return_pc_pushed_yet + receiver_is_at_end);
2915 __ movptr(recv, recv_addr);
2916 __ verify_oop(recv);
2917 }
2918
2919 if (save_flags) {
2920 __ mov(rsi, flags);
2921 }
2922
2923 // compute return type
2924 __ shrl(flags, ConstantPoolCacheEntry::tos_state_shift);
2925 // Make sure we don't need to mask flags after the above shift
2926 ConstantPoolCacheEntry::verify_tos_state_shift();
2927 // load return address
2928 {
2929 const address table_addr = (address) Interpreter::invoke_return_entry_table_for(code);
2930 ExternalAddress table(table_addr);
2931 __ movptr(flags, ArrayAddress(table, Address(noreg, flags, Address::times_ptr)));
2932 }
2933
2934 // push return address
2935 __ push(flags);
2936
2937 // Restore the flags value from rsi (where it was saved above), and
2938 // restore rsi itself, the bytecode pointer, for later null checks.
2939 if (save_flags) {
2940 __ mov(flags, rsi);
2941 __ restore_bcp();
2942 }
2943 }
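// On exit from prepare_invoke the expression stack looks like this
// (a sketch; the appendix is present only when has_appendix is set):
//
//   [ return address  ]  <-- rsp
//   [ appendix        ]  (invokehandle/invokedynamic only, when present)
//   [ arg_n ... arg_1 ]  (arg_1, the receiver if any, is deepest)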
2944
2945
2946 void TemplateTable::invokevirtual_helper(Register index,
2947 Register recv,
2948 Register flags) {
2949 // Uses temporary registers rax, rdx
2950 assert_different_registers(index, recv, rax, rdx);
2951 assert(index == rbx, "");
2952 assert(recv == rcx, "");
2953
2954 // Test for an invoke of a final method
2955 Label notFinal;
2956 __ movl(rax, flags);
2957 __ andl(rax, (1 << ConstantPoolCacheEntry::is_vfinal_shift));
2958 __ jcc(Assembler::zero, notFinal);
2959
2960 const Register method = index; // method must be rbx
2961 assert(method == rbx,
2962 "Method* must be rbx for interpreter calling convention");
2963
2964 // do the call - the index is actually the method to call
2965 // that is, f2 is a vtable index if !is_vfinal, else f2 is a Method*
2966
2967 // It's final, need a null check here!
2968 __ null_check(recv);
2969
2970 // profile this call
2971 __ profile_final_call(rax);
2972 __ profile_arguments_type(rax, method, rsi, true);
2973
2974 __ jump_from_interpreted(method, rax);
2975
2976 __ bind(notFinal);
2977
2978 // get receiver klass
2979 __ null_check(recv, oopDesc::klass_offset_in_bytes());
2980 __ load_klass(rax, recv);
2981
2982 // profile this call
2983 __ profile_virtual_call(rax, rdi, rdx);
2984
2985 // get target Method* & entry point
2986 __ lookup_virtual_method(rax, index, method);
2987 __ profile_arguments_type(rdx, method, rsi, true);
2988 __ jump_from_interpreted(method, rdx);
2989 }
2990
2991
2992 void TemplateTable::invokevirtual(int byte_no) {
2993 transition(vtos, vtos);
2994 assert(byte_no == f2_byte, "use this argument");
2995 prepare_invoke(byte_no,
2996 rbx, // method or vtable index
2997 noreg, // unused itable index
2998 rcx, rdx); // recv, flags
2999
3000 // rbx: index
3001 // rcx: receiver
3002 // rdx: flags
3003
3004 invokevirtual_helper(rbx, rcx, rdx);
3005 }
3006
3007
3008 void TemplateTable::invokespecial(int byte_no) {
3009 transition(vtos, vtos);
3010 assert(byte_no == f1_byte, "use this argument");
3011 prepare_invoke(byte_no, rbx, noreg, // get f1 Method*
3012 rcx); // get receiver also for null check
3013 __ verify_oop(rcx);
3014 __ null_check(rcx);
3015 // do the call
3016 __ profile_call(rax);
3017 __ profile_arguments_type(rax, rbx, rsi, false);
3018 __ jump_from_interpreted(rbx, rax);
3019 }
3020
3021
3022 void TemplateTable::invokestatic(int byte_no) {
3023 transition(vtos, vtos);
3024 assert(byte_no == f1_byte, "use this argument");
3025 prepare_invoke(byte_no, rbx); // get f1 Method*
3026 // do the call
3027 __ profile_call(rax);
3028 __ profile_arguments_type(rax, rbx, rsi, false);
3029 __ jump_from_interpreted(rbx, rax);
3030 }
3031
3032
3033 void TemplateTable::fast_invokevfinal(int byte_no) {
3034 transition(vtos, vtos);
3035 assert(byte_no == f2_byte, "use this argument");
3036 __ stop("fast_invokevfinal not used on x86");
3037 }
3038
3039
3040 void TemplateTable::invokeinterface(int byte_no) {
3041 transition(vtos, vtos);
3042 assert(byte_no == f1_byte, "use this argument");
3043 prepare_invoke(byte_no, rax, rbx, // get f1 Klass*, f2 itable index
3044 rcx, rdx); // recv, flags
3045
3046 // rax: interface klass (from f1)
3047 // rbx: itable index (from f2)
3048 // rcx: receiver
3049 // rdx: flags
3050
3051 // Special case of invokeinterface called for virtual method of
3052 // java.lang.Object. See cpCacheOop.cpp for details.
3053 // This code isn't produced by javac, but could be produced by
3054 // another compliant Java compiler.
3055 Label notMethod;
3056 __ movl(rdi, rdx);
3057 __ andl(rdi, (1 << ConstantPoolCacheEntry::is_forced_virtual_shift));
3058 __ jcc(Assembler::zero, notMethod);
3059
3060 invokevirtual_helper(rbx, rcx, rdx);
3061 __ bind(notMethod);
3062
3063 // Get receiver klass into rdx - also a null check
3064 __ restore_locals(); // restore rdi
3065 __ null_check(rcx, oopDesc::klass_offset_in_bytes());
3066 __ load_klass(rdx, rcx);
3067
3068 // profile this call
3069 __ profile_virtual_call(rdx, rsi, rdi);
3070
3071 Label no_such_interface, no_such_method;
3072
3073 __ lookup_interface_method(// inputs: rec. class, interface, itable index
3074 rdx, rax, rbx,
3075 // outputs: method, scan temp. reg
3076 rbx, rsi,
3077 no_such_interface);
3078
3079 // rbx: Method* to call
3080 // rcx: receiver
3081 // Check for abstract method error
3082 // Note: This should be done more efficiently via a throw_abstract_method_error
3083 // interpreter entry point and a conditional jump to it in case of a null
3084 // method.
3085 __ testptr(rbx, rbx);
3086 __ jcc(Assembler::zero, no_such_method);
3087
3088 __ profile_arguments_type(rdx, rbx, rsi, true);
3089
3090 // do the call
3091 // rcx: receiver
3092 // rbx,: Method*
3093 __ jump_from_interpreted(rbx, rdx);
3094 __ should_not_reach_here();
3095
3096 // exception handling code follows...
3097 // note: must restore interpreter registers to canonical
3098 // state for exception handling to work correctly!
3099
3100 __ bind(no_such_method);
3101 // throw exception
3102 __ pop(rbx); // pop return address (pushed by prepare_invoke)
3103 __ restore_bcp(); // rsi must be correct for exception handler (was destroyed)
3104 __ restore_locals(); // make sure locals pointer is correct as well (was destroyed)
3105 __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_AbstractMethodError));
3106 // the call_VM checks for exception, so we should never return here.
3107 __ should_not_reach_here();
3108
3109 __ bind(no_such_interface);
3110 // throw exception
3111 __ pop(rbx); // pop return address (pushed by prepare_invoke)
3112 __ restore_bcp(); // rsi must be correct for exception handler (was destroyed)
3113 __ restore_locals(); // make sure locals pointer is correct as well (was destroyed)
3114 __ call_VM(noreg, CAST_FROM_FN_PTR(address,
3115 InterpreterRuntime::throw_IncompatibleClassChangeError));
3116 // the call_VM checks for exception, so we should never return here.
3117 __ should_not_reach_here();
3118 }
3119
3120 void TemplateTable::invokehandle(int byte_no) {
3121 transition(vtos, vtos);
3122 assert(byte_no == f1_byte, "use this argument");
3123 const Register rbx_method = rbx;
3124 const Register rax_mtype = rax;
3125 const Register rcx_recv = rcx;
3126 const Register rdx_flags = rdx;
3127
3128 prepare_invoke(byte_no, rbx_method, rax_mtype, rcx_recv);
3129 __ verify_method_ptr(rbx_method);
3130 __ verify_oop(rcx_recv);
3131 __ null_check(rcx_recv);
3132
3133 // rax: MethodType object (from cpool->resolved_references[f1], if necessary)
3134 // rbx: MH.invokeExact_MT method (from f2)
3135
3136 // Note: rax_mtype is already pushed (if necessary) by prepare_invoke
3137
3138 // FIXME: profile the LambdaForm also
3139 __ profile_final_call(rax);
3140 __ profile_arguments_type(rdx, rbx_method, rsi, true);
3141
3142 __ jump_from_interpreted(rbx_method, rdx);
3143 }
3144
3145
3146 void TemplateTable::invokedynamic(int byte_no) {
3147 transition(vtos, vtos);
3148 assert(byte_no == f1_byte, "use this argument");
3149
3150 const Register rbx_method = rbx;
3151 const Register rax_callsite = rax;
3152
3153 prepare_invoke(byte_no, rbx_method, rax_callsite);
3154
3155 // rax: CallSite object (from cpool->resolved_references[f1])
3156 // rbx: MH.linkToCallSite method (from f2)
3157
3158 // Note: rax_callsite is already pushed by prepare_invoke
3159
3160 // %%% should make a type profile for any invokedynamic that takes a ref argument
3161 // profile this call
3162 __ profile_call(rsi);
3163 __ profile_arguments_type(rdx, rbx, rsi, false);
3164
3165 __ verify_oop(rax_callsite);
3166
3167 __ jump_from_interpreted(rbx_method, rdx);
3168 }
3169
3170 //----------------------------------------------------------------------------------------------------
3171 // Allocation
3172
3173 void TemplateTable::_new() {
3174 transition(vtos, atos);
3175 __ get_unsigned_2_byte_index_at_bcp(rdx, 1);
3176 Label slow_case;
3177 Label slow_case_no_pop;
3178 Label done;
3179 Label initialize_header;
3180 Label initialize_object; // including clearing the fields
3181 Label allocate_shared;
3182
3183 __ get_cpool_and_tags(rcx, rax);
3184
3185 // Make sure the class we're about to instantiate has been resolved.
3186 // This is done before loading the InstanceKlass to be consistent with the
3187 // order in which the constant pool is updated (see ConstantPool::klass_at_put).
3188 const int tags_offset = Array<u1>::base_offset_in_bytes();
3189 __ cmpb(Address(rax, rdx, Address::times_1, tags_offset), JVM_CONSTANT_Class);
3190 __ jcc(Assembler::notEqual, slow_case_no_pop);
3191
3192 // get InstanceKlass
3193 __ movptr(rcx, Address(rcx, rdx, Address::times_ptr, sizeof(ConstantPool)));
3194 __ push(rcx); // save the klass for initializing the object header later
3195
3196 // make sure klass is initialized & doesn't have finalizer
3197 // make sure klass is fully initialized
3198 __ cmpb(Address(rcx, InstanceKlass::init_state_offset()), InstanceKlass::fully_initialized);
3199 __ jcc(Assembler::notEqual, slow_case);
3200
3201 // get instance_size in InstanceKlass (scaled to a count of bytes)
3202 __ movl(rdx, Address(rcx, Klass::layout_helper_offset()));
3203 // test to see if it has a finalizer or is malformed in some way
3204 __ testl(rdx, Klass::_lh_instance_slow_path_bit);
3205 __ jcc(Assembler::notZero, slow_case);
3206
3207 //
3208 // Allocate the instance
3209 // 1) Try to allocate in the TLAB
3210 // 2) if fail and the object is large allocate in the shared Eden
3211 // 3) if the above fails (or is not applicable), go to a slow case
3212 // (creates a new TLAB, etc.)
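  // The TLAB fast path below is plain bump-pointer allocation; as a sketch
  // (illustrative pseudocode only):
  //
  //   obj     = thread->tlab_top();
  //   new_top = obj + instance_size;
  //   if (new_top > thread->tlab_end()) goto allocate_shared or slow_case;
  //   thread->set_tlab_top(new_top);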
3213
3214 const bool allow_shared_alloc =
3215 Universe::heap()->supports_inline_contig_alloc();
3216
3217 const Register thread = rcx;
3218 if (UseTLAB || allow_shared_alloc) {
3219 __ get_thread(thread);
3220 }
3221
3222 if (UseTLAB) {
3223 __ movptr(rax, Address(thread, in_bytes(JavaThread::tlab_top_offset())));
3224 __ lea(rbx, Address(rax, rdx, Address::times_1));
3225 __ cmpptr(rbx, Address(thread, in_bytes(JavaThread::tlab_end_offset())));
3226 __ jcc(Assembler::above, allow_shared_alloc ? allocate_shared : slow_case);
3227 __ movptr(Address(thread, in_bytes(JavaThread::tlab_top_offset())), rbx);
3228 if (ZeroTLAB) {
3229 // the fields have been already cleared
3230 __ jmp(initialize_header);
3231 } else {
3232 // initialize both the header and fields
3233 __ jmp(initialize_object);
3234 }
3235 }
3236
3237 // Allocation in the shared Eden, if allowed.
3238 //
3239 // rdx: instance size in bytes
3240 if (allow_shared_alloc) {
3241 __ bind(allocate_shared);
3242
3243 ExternalAddress heap_top((address)Universe::heap()->top_addr());
3244
3245 Label retry;
3246 __ bind(retry);
3247 __ movptr(rax, heap_top);
3248 __ lea(rbx, Address(rax, rdx, Address::times_1));
3249 __ cmpptr(rbx, ExternalAddress((address)Universe::heap()->end_addr()));
3250 __ jcc(Assembler::above, slow_case);
3251
3252 // Compare rax with the current top addr and, if still equal, store the
3253 // new top addr from rbx at the top addr pointer. Sets ZF if they were
3254 // equal, and clears it otherwise. Use lock prefix for atomicity on MPs.
3255 //
3256 // rax,: object begin
3257 // rbx,: object end
3258 // rdx: instance size in bytes
3259 __ locked_cmpxchgptr(rbx, heap_top);
3260
3261 // if someone beat us on the allocation, try again, otherwise continue
3262 __ jcc(Assembler::notEqual, retry);
3263
3264 __ incr_allocated_bytes(thread, rdx, 0);
3265 }
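  // i.e. the classic CAS retry loop (sketch):
  //
  //   do {
  //     obj     = *heap_top;
  //     new_top = obj + instance_size;
  //     if (new_top > heap_end) goto slow_case;
  //   } while (!CAS(heap_top, obj, new_top));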
3266
3267 if (UseTLAB || Universe::heap()->supports_inline_contig_alloc()) {
3268 // The object fields are initialized before the header. If the remaining
3269 // object size is zero, go directly to the header initialization.
3270 __ bind(initialize_object);
3271 __ decrement(rdx, sizeof(oopDesc));
3272 __ jcc(Assembler::zero, initialize_header);
3273
3274 // Initialize topmost object field, divide rdx by 8, check if odd and
3275 // test if zero.
3276 __ xorl(rcx, rcx); // use zero reg to clear memory (shorter code)
3277 __ shrl(rdx, LogBytesPerLong); // divide by 2*oopSize and set carry flag if odd
3278
3280 #ifdef ASSERT
3281 // make sure rdx was a multiple of 8
3282 Label L;
3283 // Ignore the partial flag stall after shrl() since this is a debug VM
3284 __ jccb(Assembler::carryClear, L);
3285 __ stop("object size is not multiple of 2 - adjust this code");
3286 __ bind(L);
3287 // rdx must be > 0, no extra check needed here
3288 #endif
3289
3290 // initialize remaining object fields: rdx was a multiple of 8
3291 { Label loop;
3292 __ bind(loop);
3293 __ movptr(Address(rax, rdx, Address::times_8, sizeof(oopDesc) - 1*oopSize), rcx);
3294 NOT_LP64(__ movptr(Address(rax, rdx, Address::times_8, sizeof(oopDesc) - 2*oopSize), rcx));
3295 __ decrement(rdx);
3296 __ jcc(Assembler::notZero, loop);
3297 }
3298
3299 // initialize object header only.
3300 __ bind(initialize_header);
3301 if (UseBiasedLocking) {
3302 __ pop(rcx); // get saved klass back in the register.
3303 __ movptr(rbx, Address(rcx, Klass::prototype_header_offset()));
3304 __ movptr(Address(rax, oopDesc::mark_offset_in_bytes ()), rbx);
3305 } else {
3306 __ movptr(Address(rax, oopDesc::mark_offset_in_bytes ()),
3307 (int32_t)markOopDesc::prototype()); // header
3308 __ pop(rcx); // get saved klass back in the register.
3309 }
3310 __ store_klass(rax, rcx); // klass
3311
3312 {
3313 SkipIfEqual skip_if(_masm, &DTraceAllocProbes, 0);
3314 // Trigger dtrace event for fastpath
3315 __ push(atos);
3316 __ call_VM_leaf(
3317 CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_object_alloc), rax);
3318 __ pop(atos);
3319 }
3320
3321 __ jmp(done);
3322 }
3323
3324 // slow case
3325 __ bind(slow_case);
3326 __ pop(rcx); // restore stack pointer to what it was when we came in.
3327 __ bind(slow_case_no_pop);
3328 __ get_constant_pool(rax);
3329 __ get_unsigned_2_byte_index_at_bcp(rdx, 1);
3330 call_VM(rax, CAST_FROM_FN_PTR(address, InterpreterRuntime::_new), rax, rdx);
3331
3332 // continue
3333 __ bind(done);
3334 }
3335
3336
3337 void TemplateTable::newarray() {
3338 transition(itos, atos);
3339 __ push_i(rax); // make sure everything is on the stack
3340 __ load_unsigned_byte(rdx, at_bcp(1));
3341 call_VM(rax, CAST_FROM_FN_PTR(address, InterpreterRuntime::newarray), rdx, rax);
3342 __ pop_i(rdx); // discard size
3343 }
3344
3345
3346 void TemplateTable::anewarray() {
3347 transition(itos, atos);
3348 __ get_unsigned_2_byte_index_at_bcp(rdx, 1);
3349 __ get_constant_pool(rcx);
3350 call_VM(rax, CAST_FROM_FN_PTR(address, InterpreterRuntime::anewarray), rcx, rdx, rax);
3351 }
3352
3353
3354 void TemplateTable::arraylength() {
3355 transition(atos, itos);
3356 __ null_check(rax, arrayOopDesc::length_offset_in_bytes());
3357 __ movl(rax, Address(rax, arrayOopDesc::length_offset_in_bytes()));
3358 }
3359
3360
3361 void TemplateTable::checkcast() {
3362 transition(atos, atos);
3363 Label done, is_null, ok_is_subtype, quicked, resolved;
3364 __ testptr(rax, rax); // Object is in EAX
3365 __ jcc(Assembler::zero, is_null);
3366
3367 // Get cpool & tags index
3368 __ get_cpool_and_tags(rcx, rdx); // ECX=cpool, EDX=tags array
3369 __ get_unsigned_2_byte_index_at_bcp(rbx, 1); // EBX=index
3370 // See if bytecode has already been quicked
3371 __ cmpb(Address(rdx, rbx, Address::times_1, Array<u1>::base_offset_in_bytes()), JVM_CONSTANT_Class);
3372 __ jcc(Assembler::equal, quicked);
3373
3374 __ push(atos);
3375 call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::quicken_io_cc) );
3376 // vm_result_2 has metadata result
3377 // borrow rdi from locals
3378 __ get_thread(rdi);
3379 __ get_vm_result_2(rax, rdi);
3380 __ restore_locals();
3381 __ pop_ptr(rdx);
3382 __ jmpb(resolved);
3383
3384 // Get superklass in EAX and subklass in EBX
3385 __ bind(quicked);
3386 __ mov(rdx, rax); // Save object in EDX; EAX needed for subtype check
3387 __ movptr(rax, Address(rcx, rbx, Address::times_ptr, sizeof(ConstantPool)));
3388
3389 __ bind(resolved);
3390 __ load_klass(rbx, rdx);
3391
3392 // Generate subtype check. Blows ECX. Resets EDI. Object in EDX.
3393 // Superklass in EAX. Subklass in EBX.
3394 __ gen_subtype_check( rbx, ok_is_subtype );
3395
3396 // Come here on failure
3397 __ push(rdx);
3398 // object is at TOS
3399 __ jump(ExternalAddress(Interpreter::_throw_ClassCastException_entry));
3400
3401 // Come here on success
3402 __ bind(ok_is_subtype);
3403 __ mov(rax,rdx); // Restore object in EDX
3404
3405 // Collect counts on whether this check-cast sees NULLs a lot or not.
3406 if (ProfileInterpreter) {
3407 __ jmp(done);
3408 __ bind(is_null);
3409 __ profile_null_seen(rcx);
3410 } else {
3411 __ bind(is_null); // same as 'done'
3412 }
3413 __ bind(done);
3414 }
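// Overall, checkcast generates (illustrative pseudocode):
//
//   if (obj != NULL && !obj->klass()->is_subtype_of(resolved_klass))
//     throw ClassCastException;
//   // on success the object stays on tos unchanged (atos -> atos)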
3415
3416
3417 void TemplateTable::instanceof() {
3418 transition(atos, itos);
3419 Label done, is_null, ok_is_subtype, quicked, resolved;
3420 __ testptr(rax, rax);
3421 __ jcc(Assembler::zero, is_null);
3422
3423 // Get cpool & tags index
3424 __ get_cpool_and_tags(rcx, rdx); // ECX=cpool, EDX=tags array
3425 __ get_unsigned_2_byte_index_at_bcp(rbx, 1); // EBX=index
3426 // See if bytecode has already been quicked
3427 __ cmpb(Address(rdx, rbx, Address::times_1, Array<u1>::base_offset_in_bytes()), JVM_CONSTANT_Class);
3428 __ jcc(Assembler::equal, quicked);
3429
3430 __ push(atos);
3431 call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::quicken_io_cc) );
3432 // vm_result_2 has metadata result
3433 // borrow rdi from locals
3434 __ get_thread(rdi);
3435 __ get_vm_result_2(rax, rdi);
3436 __ restore_locals();
3437 __ pop_ptr(rdx);
3438 __ load_klass(rdx, rdx);
3439 __ jmp(resolved);
3440
3441 // Get superklass in EAX and subklass in EDX
3442 __ bind(quicked);
3443 __ load_klass(rdx, rax);
3444 __ movptr(rax, Address(rcx, rbx, Address::times_ptr, sizeof(ConstantPool)));
3445
3446 __ bind(resolved);
3447
3448 // Generate subtype check. Blows ECX. Resets EDI.
3449 // Superklass in EAX. Subklass in EDX.
3450 __ gen_subtype_check( rdx, ok_is_subtype );
3451
3452 // Come here on failure
3453 __ xorl(rax,rax);
3454 __ jmpb(done);
3455 // Come here on success
3456 __ bind(ok_is_subtype);
3457 __ movl(rax, 1);
3458
3459 // Collect counts on whether this test sees NULLs a lot or not.
3460 if (ProfileInterpreter) {
3461 __ jmp(done);
3462 __ bind(is_null);
3463 __ profile_null_seen(rcx);
3464 } else {
3465 __ bind(is_null); // same as 'done'
3466 }
3467 __ bind(done);
3468 // rax, = 0: obj == NULL or obj is not an instanceof the specified klass
3469 // rax, = 1: obj != NULL and obj is an instanceof the specified klass
3470 }


//----------------------------------------------------------------------------------------------------
// Breakpoints
void TemplateTable::_breakpoint() {

  // Note: We get here even if we are single stepping.
  // jbug insists on setting breakpoints at every bytecode
  // even if we are in single step mode.

  transition(vtos, vtos);

  // get the unpatched byte code
  __ get_method(rcx);
  __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::get_original_bytecode_at), rcx, rsi);
  __ mov(rbx, rax);

  // post the breakpoint event
  __ get_method(rcx);
  __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::_breakpoint), rcx, rsi);

  // complete the execution of original bytecode
  __ dispatch_only_normal(vtos);
}


//----------------------------------------------------------------------------------------------------
// Exceptions

void TemplateTable::athrow() {
  transition(atos, vtos);
  __ null_check(rax);
  __ jump(ExternalAddress(Interpreter::throw_exception_entry()));
}
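
// Note (hedged): the Java statement 'throw ex;' compiles to athrow; the
// null_check above covers 'throw null', which must raise a
// NullPointerException rather than propagate a null exception oop.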


//----------------------------------------------------------------------------------------------------
// Synchronization
//
// Note: monitorenter & exit are symmetric routines, which is reflected
// in the assembly code structure as well
//
// Stack layout:
//
// [expressions  ] <--- rsp               = expression stack top
// ..
// [expressions  ]
// [monitor entry] <--- monitor block top = expression stack bot
// ..
// [monitor entry]
// [frame data   ] <--- monitor block bot
// ...
// [saved rbp    ] <--- rbp
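//
// Each monitor entry is a BasicObjectLock (runtime/basicLock.hpp); roughly
// (a hedged sketch of the layout, not a verbatim copy of the class):
//
//   class BasicObjectLock {
//     BasicLock _lock;   // displaced object header word
//     oop       _obj;    // the object being locked
//   };
//
// which is why entry_size below is
// frame::interpreter_frame_monitor_size() * wordSize.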


void TemplateTable::monitorenter() {
  transition(atos, vtos);

  // check for NULL object
  __ null_check(rax);

  const Address monitor_block_top(rbp, frame::interpreter_frame_monitor_block_top_offset * wordSize);
  const Address monitor_block_bot(rbp, frame::interpreter_frame_initial_sp_offset * wordSize);
  const int entry_size = (frame::interpreter_frame_monitor_size() * wordSize);
  Label allocated;

  // initialize entry pointer
  __ xorl(rdx, rdx);                             // points to free slot or NULL

  // find a free slot in the monitor block (result in rdx)
  { Label entry, loop, exit;
    __ movptr(rcx, monitor_block_top);           // points to current entry, starting with top-most entry

    __ lea(rbx, monitor_block_bot);              // points to word before bottom of monitor block
    __ jmpb(entry);

    __ bind(loop);
    __ cmpptr(Address(rcx, BasicObjectLock::obj_offset_in_bytes()), (int32_t)NULL_WORD);  // check if current entry is used
    __ cmovptr(Assembler::equal, rdx, rcx);      // if not used then remember entry in rdx
    __ cmpptr(rax, Address(rcx, BasicObjectLock::obj_offset_in_bytes()));   // check if current entry is for same object
    __ jccb(Assembler::equal, exit);             // if same object then stop searching
    __ addptr(rcx, entry_size);                  // otherwise advance to next entry
    __ bind(entry);
    __ cmpptr(rcx, rbx);                         // check if bottom reached
    __ jcc(Assembler::notEqual, loop);           // if not at bottom then check this entry
    __ bind(exit);
  }
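
  // Hedged C++-style sketch of the search above (variable names are
  // illustrative). Note the subtlety that monitor_block_top and
  // monitor_block_bot name the same frame slot: its contents are the
  // block top, while its address bounds the block bottom.
  //
  //   BasicObjectLock* free = NULL;
  //   for (BasicObjectLock* cur = block_top; cur != block_bot; cur++) {
  //     if (cur->obj() == NULL) free = cur;  // remember a free slot
  //     if (cur->obj() == obj)  break;       // stop at an entry for this object
  //   }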

  __ testptr(rdx, rdx);                          // check if a slot has been found
  __ jccb(Assembler::notZero, allocated);        // if found, continue with that one

  // allocate one if there's no free slot
  { Label entry, loop;
    // 1. compute new pointers                   // rsp: old expression stack top
    __ movptr(rdx, monitor_block_bot);           // rdx: old expression stack bottom
    __ subptr(rsp, entry_size);                  // move expression stack top
    __ subptr(rdx, entry_size);                  // move expression stack bottom
    __ mov(rcx, rsp);                            // set start value for copy loop
    __ movptr(monitor_block_bot, rdx);           // set new monitor block top
    __ jmp(entry);
    // 2. move expression stack contents
    __ bind(loop);
    __ movptr(rbx, Address(rcx, entry_size));    // load expression stack word from old location
    __ movptr(Address(rcx, 0), rbx);             // and store it at new location
    __ addptr(rcx, wordSize);                    // advance to next word
    __ bind(entry);
    __ cmpptr(rcx, rdx);                         // check if bottom reached
    __ jcc(Assembler::notEqual, loop);           // if not at bottom then copy next word
  }
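
  // In effect (hedged sketch, names illustrative): slide the whole
  // expression stack down by one monitor entry, which opens a new entry at
  // the new monitor block top, left in rdx:
  //
  //   for (intptr_t* p = new_rsp; p != new_stack_bottom; p++)
  //     *p = *(p + entry_size / wordSize);      // copy each stack word down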

  // call run-time routine
  // rdx: points to monitor entry
  __ bind(allocated);

  // Increment bcp to point to the next bytecode, so exception handling for async. exceptions works correctly.
  // The object has already been popped from the stack, so the expression stack looks correct.
  __ increment(rsi);

  __ movptr(Address(rdx, BasicObjectLock::obj_offset_in_bytes()), rax);     // store object
  __ lock_object(rdx);

  // check to make sure this monitor doesn't cause stack overflow after locking
  __ save_bcp();                                 // in case of exception
  __ generate_stack_overflow_check(0);

  // The bcp has already been incremented. Just need to dispatch to next instruction.
  __ dispatch_next(vtos);
}


void TemplateTable::monitorexit() {
  transition(atos, vtos);

  // check for NULL object
  __ null_check(rax);

  const Address monitor_block_top(rbp, frame::interpreter_frame_monitor_block_top_offset * wordSize);
  const Address monitor_block_bot(rbp, frame::interpreter_frame_initial_sp_offset * wordSize);
  const int entry_size = (frame::interpreter_frame_monitor_size() * wordSize);
  Label found;

  // find matching slot
  { Label entry, loop;
    __ movptr(rdx, monitor_block_top);           // points to current entry, starting with top-most entry
    __ lea(rbx, monitor_block_bot);              // points to word before bottom of monitor block
    __ jmpb(entry);

    __ bind(loop);
    __ cmpptr(rax, Address(rdx, BasicObjectLock::obj_offset_in_bytes()));   // check if current entry is for same object
    __ jcc(Assembler::equal, found);             // if same object then stop searching
    __ addptr(rdx, entry_size);                  // otherwise advance to next entry
    __ bind(entry);
    __ cmpptr(rdx, rbx);                         // check if bottom reached
    __ jcc(Assembler::notEqual, loop);           // if not at bottom then check this entry
  }
  // Error handling: unlocking was not block-structured
  Label end;
  __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_illegal_monitor_state_exception));
  __ should_not_reach_here();

  // call run-time routine
  // rdx: points to monitor entry
  __ bind(found);
  __ push_ptr(rax);                              // make sure object is on stack (contract with oopMaps)
  __ unlock_object(rdx);
  __ pop_ptr(rax);                               // discard object
  __ bind(end);
}
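
// Note (hedged, compiler-dependent): javac compiles 'synchronized (obj) { ... }'
// to a monitorenter with a matching monitorexit on every exit path, so the
// search above normally finds the entry that monitorenter allocated; the
// IllegalMonitorStateException path fires only for unbalanced locking, e.g.
// from hand-crafted bytecode.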


//----------------------------------------------------------------------------------------------------
// Wide instructions

void TemplateTable::wide() {
  transition(vtos, vtos);
  __ load_unsigned_byte(rbx, at_bcp(1));
  ExternalAddress wtable((address)Interpreter::_wentry_point);
  __ jump(ArrayAddress(wtable, Address(noreg, rbx, Address::times_ptr)));
  // Note: the rsi increment step is part of the individual wide bytecode implementations
}
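
// Encoding reminder (per the JVM spec): a wide-modified instruction looks like
//
//   wide <opcode> indexbyte1 indexbyte2   // e.g. 'wide iload' with a 16-bit local index
//
// so the byte loaded from bcp+1 above is the real opcode, used to index the
// wide entry-point table.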


//----------------------------------------------------------------------------------------------------
// Multi arrays

void TemplateTable::multianewarray() {
  transition(vtos, atos);
  __ load_unsigned_byte(rax, at_bcp(3));         // get number of dimensions
  // last dim is on top of stack; we want address of first one:
  //   first_addr = last_addr + (ndims - 1) * stackElementSize
  // which the lea below computes as rsp + ndims*stackElementSize - wordSize
  // (stackElementSize == wordSize on this platform).
  __ lea(rax, Address(rsp, rax, Interpreter::stackElementScale(), -wordSize));
  call_VM(rax, CAST_FROM_FN_PTR(address, InterpreterRuntime::multianewarray), rax);   // pass in rax
  __ load_unsigned_byte(rbx, at_bcp(3));
  __ lea(rsp, Address(rsp, rbx, Interpreter::stackElementScale()));  // get rid of counts
}
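
// Worked example (hedged): for 'new int[2][3]' javac pushes the counts 2 and
// then 3 and emits 'multianewarray #k, 2'. Here ndims == 2, the first lea
// leaves rax pointing at the deeper count (2, the first dimension), and the
// final lea pops both counts off the expression stack.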

#endif /* !CC_INTERP */