rev 56556 : 8232151: Minimal VM build broken after JDK-8232050
Reviewed-by: dholmes, clanger, redestad
1 /*
2 * Copyright (c) 2014, 2018, Oracle and/or its affiliates. All rights reserved.
3 * Copyright (c) 2013, 2017 SAP SE. All rights reserved.
4 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
5 *
6 * This code is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License version 2 only, as
8 * published by the Free Software Foundation.
9 *
10 * This code is distributed in the hope that it will be useful, but WITHOUT
11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
13 * version 2 for more details (a copy is included in the LICENSE file that
14 * accompanied this code).
15 *
16 * You should have received a copy of the GNU General Public License version
17 * 2 along with this work; if not, write to the Free Software Foundation,
18 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
19 *
20 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
21 * or visit www.oracle.com if you need additional information or have any
22 * questions.
23 *
24 */
25
26 #include "precompiled.hpp"
27 #include "asm/macroAssembler.inline.hpp"
28 #include "gc/shared/barrierSetAssembler.hpp"
29 #include "interpreter/interpreter.hpp"
30 #include "interpreter/interpreterRuntime.hpp"
31 #include "interpreter/interp_masm.hpp"
32 #include "interpreter/templateInterpreter.hpp"
33 #include "interpreter/templateTable.hpp"
34 #include "memory/universe.hpp"
35 #include "oops/klass.inline.hpp"
36 #include "oops/objArrayKlass.hpp"
37 #include "oops/oop.inline.hpp"
38 #include "prims/methodHandles.hpp"
39 #include "runtime/frame.inline.hpp"
40 #include "runtime/safepointMechanism.hpp"
41 #include "runtime/sharedRuntime.hpp"
42 #include "runtime/stubRoutines.hpp"
43 #include "runtime/synchronizer.hpp"
44 #include "utilities/macros.hpp"
45
46 #undef __
47 #define __ _masm->
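// The '__' shorthand routes every pseudo-assembler statement below through
// the current InterpreterMacroAssembler instance (_masm).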
48
49 // ============================================================================
50 // Misc helpers
51
52 // Do an oop store like *(base + index) = val OR *(base + offset) = val
53 // (only one of the two variants is possible at a time).
54 // Index can be noreg.
55 // Kills:
56 // base and the tmp registers.
57 static void do_oop_store(InterpreterMacroAssembler* _masm,
58 Register base,
59 RegisterOrConstant offset,
60 Register val, // Noreg means always null.
61 Register tmp1,
62 Register tmp2,
63 Register tmp3,
64 DecoratorSet decorators) {
65 assert_different_registers(tmp1, tmp2, tmp3, val, base);
66 __ store_heap_oop(val, offset, base, tmp1, tmp2, tmp3, false, decorators);
67 }
68
69 static void do_oop_load(InterpreterMacroAssembler* _masm,
70 Register base,
71 RegisterOrConstant offset,
72 Register dst,
73 Register tmp1,
74 Register tmp2,
75 DecoratorSet decorators) {
76 assert_different_registers(base, tmp1, tmp2);
77 assert_different_registers(dst, tmp1, tmp2);
78 __ load_heap_oop(dst, offset, base, tmp1, tmp2, false, decorators);
79 }
80
81 // ============================================================================
82 // Platform-dependent initialization
83
84 void TemplateTable::pd_initialize() {
85 // No ppc64 specific initialization.
86 }
87
88 Address TemplateTable::at_bcp(int offset) {
89 // Not used on ppc.
90 ShouldNotReachHere();
91 return Address();
92 }
93
94 // Patches the current bytecode (R14_bcp points to it) in the
95 // bytecode stream with a new one.
96 void TemplateTable::patch_bytecode(Bytecodes::Code new_bc, Register Rnew_bc, Register Rtemp, bool load_bc_into_bc_reg /*=true*/, int byte_no) {
97 // With sharing on, may need to test method flag.
98 if (!RewriteBytecodes) return;
99 Label L_patch_done;
100
101 switch (new_bc) {
102 case Bytecodes::_fast_aputfield:
103 case Bytecodes::_fast_bputfield:
104 case Bytecodes::_fast_zputfield:
105 case Bytecodes::_fast_cputfield:
106 case Bytecodes::_fast_dputfield:
107 case Bytecodes::_fast_fputfield:
108 case Bytecodes::_fast_iputfield:
109 case Bytecodes::_fast_lputfield:
110 case Bytecodes::_fast_sputfield:
111 {
112 // We skip bytecode quickening for putfield instructions when
113 // the put_code written to the constant pool cache is zero.
114 // This is required so that every execution of this instruction
115 // calls out to InterpreterRuntime::resolve_get_put to do
116 // additional, required work.
117 assert(byte_no == f1_byte || byte_no == f2_byte, "byte_no out of range");
118 assert(load_bc_into_bc_reg, "we use bc_reg as temp");
119 __ get_cache_and_index_at_bcp(Rtemp /* dst = cache */, 1);
120 // ((*(cache+indices))>>((1+byte_no)*8))&0xFF:
121 #if defined(VM_LITTLE_ENDIAN)
122 __ lbz(Rnew_bc, in_bytes(ConstantPoolCache::base_offset() + ConstantPoolCacheEntry::indices_offset()) + 1 + byte_no, Rtemp);
123 #else
124 __ lbz(Rnew_bc, in_bytes(ConstantPoolCache::base_offset() + ConstantPoolCacheEntry::indices_offset()) + 7 - (1 + byte_no), Rtemp);
125 #endif
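// Both variants read the same logical byte of the 8-byte _indices field:
// byte 1 + byte_no counted from the least significant end. Little-endian
// uses that offset directly; big-endian mirrors it within the 8-byte word.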
126 __ cmpwi(CCR0, Rnew_bc, 0);
127 __ li(Rnew_bc, (unsigned int)(unsigned char)new_bc);
128 __ beq(CCR0, L_patch_done);
129 // __ isync(); // acquire not needed
130 break;
131 }
132
133 default:
134 assert(byte_no == -1, "sanity");
135 if (load_bc_into_bc_reg) {
136 __ li(Rnew_bc, (unsigned int)(unsigned char)new_bc);
137 }
138 }
139
140 if (JvmtiExport::can_post_breakpoint()) {
141 Label L_fast_patch;
142 __ lbz(Rtemp, 0, R14_bcp);
143 __ cmpwi(CCR0, Rtemp, (unsigned int)(unsigned char)Bytecodes::_breakpoint);
144 __ bne(CCR0, L_fast_patch);
145 // Perform the quickening, slowly, in the bowels of the breakpoint table.
146 __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::set_original_bytecode_at), R19_method, R14_bcp, Rnew_bc);
147 __ b(L_patch_done);
148 __ bind(L_fast_patch);
149 }
150
151 // Patch bytecode.
152 __ stb(Rnew_bc, 0, R14_bcp);
153
154 __ bind(L_patch_done);
155 }
156
157 // ============================================================================
158 // Individual instructions
159
160 void TemplateTable::nop() {
161 transition(vtos, vtos);
162 // Nothing to do.
163 }
164
165 void TemplateTable::shouldnotreachhere() {
166 transition(vtos, vtos);
167 __ stop("shouldnotreachhere bytecode");
168 }
169
170 void TemplateTable::aconst_null() {
171 transition(vtos, atos);
172 __ li(R17_tos, 0);
173 }
174
175 void TemplateTable::iconst(int value) {
176 transition(vtos, itos);
177 assert(value >= -1 && value <= 5, "");
178 __ li(R17_tos, value);
179 }
180
181 void TemplateTable::lconst(int value) {
182 transition(vtos, ltos);
183 assert(value >= -1 && value <= 5, "");
184 __ li(R17_tos, value);
185 }
186
187 void TemplateTable::fconst(int value) {
188 transition(vtos, ftos);
189 static float zero = 0.0;
190 static float one = 1.0;
191 static float two = 2.0;
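// load_const_optimized with last argument 'true' materializes the constant's
// high address bits in the scratch register and returns the remaining low
// 16-bit displacement, which is folded into the following lfs.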
192 switch (value) {
193 default: ShouldNotReachHere();
194 case 0: {
195 int simm16_offset = __ load_const_optimized(R11_scratch1, (address*)&zero, R0, true);
196 __ lfs(F15_ftos, simm16_offset, R11_scratch1);
197 break;
198 }
199 case 1: {
200 int simm16_offset = __ load_const_optimized(R11_scratch1, (address*)&one, R0, true);
201 __ lfs(F15_ftos, simm16_offset, R11_scratch1);
202 break;
203 }
204 case 2: {
205 int simm16_offset = __ load_const_optimized(R11_scratch1, (address*)&two, R0, true);
206 __ lfs(F15_ftos, simm16_offset, R11_scratch1);
207 break;
208 }
209 }
210 }
211
212 void TemplateTable::dconst(int value) {
213 transition(vtos, dtos);
214 static double zero = 0.0;
215 static double one = 1.0;
216 switch (value) {
217 case 0: {
218 int simm16_offset = __ load_const_optimized(R11_scratch1, (address*)&zero, R0, true);
219 __ lfd(F15_ftos, simm16_offset, R11_scratch1);
220 break;
221 }
222 case 1: {
223 int simm16_offset = __ load_const_optimized(R11_scratch1, (address*)&one, R0, true);
224 __ lfd(F15_ftos, simm16_offset, R11_scratch1);
225 break;
226 }
227 default: ShouldNotReachHere();
228 }
229 }
230
231 void TemplateTable::bipush() {
232 transition(vtos, itos);
233 __ lbz(R17_tos, 1, R14_bcp);
234 __ extsb(R17_tos, R17_tos);
235 }
236
237 void TemplateTable::sipush() {
238 transition(vtos, itos);
239 __ get_2_byte_integer_at_bcp(1, R17_tos, InterpreterMacroAssembler::Signed);
240 }
241
242 void TemplateTable::ldc(bool wide) {
243 Register Rscratch1 = R11_scratch1,
244 Rscratch2 = R12_scratch2,
245 Rcpool = R3_ARG1;
246
247 transition(vtos, vtos);
248 Label notInt, notFloat, notClass, exit;
249
250 __ get_cpool_and_tags(Rcpool, Rscratch2); // Set Rscratch2 = &tags.
251 if (wide) { // Read index.
252 __ get_2_byte_integer_at_bcp(1, Rscratch1, InterpreterMacroAssembler::Unsigned);
253 } else {
254 __ lbz(Rscratch1, 1, R14_bcp);
255 }
256
257 const int base_offset = ConstantPool::header_size() * wordSize;
258 const int tags_offset = Array<u1>::base_offset_in_bytes();
259
260 // Get type from tags.
261 __ addi(Rscratch2, Rscratch2, tags_offset);
262 __ lbzx(Rscratch2, Rscratch2, Rscratch1);
263
264 __ cmpwi(CCR0, Rscratch2, JVM_CONSTANT_UnresolvedClass); // Unresolved class?
265 __ cmpwi(CCR1, Rscratch2, JVM_CONSTANT_UnresolvedClassInError); // Unresolved class in error state?
266 __ cror(CCR0, Assembler::equal, CCR1, Assembler::equal);
267
268 // Resolved class - need to call vm to get java mirror of the class.
269 __ cmpwi(CCR1, Rscratch2, JVM_CONSTANT_Class);
270 __ crnor(CCR0, Assembler::equal, CCR1, Assembler::equal); // Neither resolved class nor unresolved case from above?
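// After the crnor, CCR0.eq is set iff the tag is none of {Class,
// UnresolvedClass, UnresolvedClassInError}, so beq skips the VM call below.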
271 __ beq(CCR0, notClass);
272
273 __ li(R4, wide ? 1 : 0);
274 call_VM(R17_tos, CAST_FROM_FN_PTR(address, InterpreterRuntime::ldc), R4);
275 __ push(atos);
276 __ b(exit);
277
278 __ align(32, 12);
279 __ bind(notClass);
280 __ addi(Rcpool, Rcpool, base_offset);
281 __ sldi(Rscratch1, Rscratch1, LogBytesPerWord);
282 __ cmpdi(CCR0, Rscratch2, JVM_CONSTANT_Integer);
283 __ bne(CCR0, notInt);
284 __ lwax(R17_tos, Rcpool, Rscratch1);
285 __ push(itos);
286 __ b(exit);
287
288 __ align(32, 12);
289 __ bind(notInt);
290 __ cmpdi(CCR0, Rscratch2, JVM_CONSTANT_Float);
291 __ bne(CCR0, notFloat);
292 __ lfsx(F15_ftos, Rcpool, Rscratch1);
293 __ push(ftos);
294 __ b(exit);
295
296 __ align(32, 12);
297 // assume the tag is for condy; if not, the VM runtime will tell us
298 __ bind(notFloat);
299 condy_helper(exit);
300
301 __ align(32, 12);
302 __ bind(exit);
303 }
304
305 // Fast path for caching oop constants.
306 void TemplateTable::fast_aldc(bool wide) {
307 transition(vtos, atos);
308
309 int index_size = wide ? sizeof(u2) : sizeof(u1);
310 const Register Rscratch = R11_scratch1;
311 Label is_null;
312
313 // We are resolved if the resolved reference cache entry contains a
314 // non-null object (CallSite, etc.)
315 __ get_cache_index_at_bcp(Rscratch, 1, index_size); // Load index.
316 __ load_resolved_reference_at_index(R17_tos, Rscratch, &is_null);
317
318 // Convert null sentinel to NULL.
319 int simm16_rest = __ load_const_optimized(Rscratch, Universe::the_null_sentinel_addr(), R0, true);
320 __ ld(Rscratch, simm16_rest, Rscratch);
321 __ cmpld(CCR0, R17_tos, Rscratch);
322 if (VM_Version::has_isel()) {
323 __ isel_0(R17_tos, CCR0, Assembler::equal);
324 } else {
325 Label not_sentinel;
326 __ bne(CCR0, not_sentinel);
327 __ li(R17_tos, 0);
328 __ bind(not_sentinel);
329 }
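// Where available (VM_Version::has_isel()), isel_0 replaces the sentinel by 0
// without a branch; the fallback above does the same with compare-and-branch.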
330 __ verify_oop(R17_tos);
331 __ dispatch_epilog(atos, Bytecodes::length_for(bytecode()));
332
333 __ bind(is_null);
334 __ load_const_optimized(R3_ARG1, (int)bytecode());
335
336 address entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_ldc);
337
338 // First time invocation - must resolve first.
339 __ call_VM(R17_tos, entry, R3_ARG1);
340 __ verify_oop(R17_tos);
341 }
342
343 void TemplateTable::ldc2_w() {
344 transition(vtos, vtos);
345 Label not_double, not_long, exit;
346
347 Register Rindex = R11_scratch1,
348 Rcpool = R12_scratch2,
349 Rtag = R3_ARG1;
350 __ get_cpool_and_tags(Rcpool, Rtag);
351 __ get_2_byte_integer_at_bcp(1, Rindex, InterpreterMacroAssembler::Unsigned);
352
353 const int base_offset = ConstantPool::header_size() * wordSize;
354 const int tags_offset = Array<u1>::base_offset_in_bytes();
355 // Get type from tags.
356 __ addi(Rcpool, Rcpool, base_offset);
357 __ addi(Rtag, Rtag, tags_offset);
358
359 __ lbzx(Rtag, Rtag, Rindex);
360 __ sldi(Rindex, Rindex, LogBytesPerWord);
361
362 __ cmpdi(CCR0, Rtag, JVM_CONSTANT_Double);
363 __ bne(CCR0, not_double);
364 __ lfdx(F15_ftos, Rcpool, Rindex);
365 __ push(dtos);
366 __ b(exit);
367
368 __ bind(not_double);
369 __ cmpdi(CCR0, Rtag, JVM_CONSTANT_Long);
370 __ bne(CCR0, not_long);
371 __ ldx(R17_tos, Rcpool, Rindex);
372 __ push(ltos);
373 __ b(exit);
374
375 __ bind(not_long);
376 condy_helper(exit);
377
378 __ align(32, 12);
379 __ bind(exit);
380 }
381
382 void TemplateTable::condy_helper(Label& Done) {
383 const Register obj = R31;
384 const Register off = R11_scratch1;
385 const Register flags = R12_scratch2;
386 const Register rarg = R4_ARG2;
387 __ li(rarg, (int)bytecode());
388 call_VM(obj, CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_ldc), rarg);
389 __ get_vm_result_2(flags);
390
391 // VMr = obj = base address to find primitive value to push
392 // VMr2 = flags = (tos, off) using format of CPCE::_flags
393 __ andi(off, flags, ConstantPoolCacheEntry::field_index_mask);
394
395 // What sort of thing are we loading?
396 __ rldicl(flags, flags, 64-ConstantPoolCacheEntry::tos_state_shift, 64-ConstantPoolCacheEntry::tos_state_bits);
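// The rldicl rotates the tos_state field down to the low bits and clears all
// higher bits, leaving just the TosState value in 'flags'.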
397
398 switch (bytecode()) {
399 case Bytecodes::_ldc:
400 case Bytecodes::_ldc_w:
401 {
402 // tos in (itos, ftos, stos, btos, ctos, ztos)
403 Label notInt, notFloat, notShort, notByte, notChar, notBool;
404 __ cmplwi(CCR0, flags, itos);
405 __ bne(CCR0, notInt);
406 // itos
407 __ lwax(R17_tos, obj, off);
408 __ push(itos);
409 __ b(Done);
410
411 __ bind(notInt);
412 __ cmplwi(CCR0, flags, ftos);
413 __ bne(CCR0, notFloat);
414 // ftos
415 __ lfsx(F15_ftos, obj, off);
416 __ push(ftos);
417 __ b(Done);
418
419 __ bind(notFloat);
420 __ cmplwi(CCR0, flags, stos);
421 __ bne(CCR0, notShort);
422 // stos
423 __ lhax(R17_tos, obj, off);
424 __ push(stos);
425 __ b(Done);
426
427 __ bind(notShort);
428 __ cmplwi(CCR0, flags, btos);
429 __ bne(CCR0, notByte);
430 // btos
431 __ lbzx(R17_tos, obj, off);
432 __ extsb(R17_tos, R17_tos);
433 __ push(btos);
434 __ b(Done);
435
436 __ bind(notByte);
437 __ cmplwi(CCR0, flags, ctos);
438 __ bne(CCR0, notChar);
439 // ctos
440 __ lhzx(R17_tos, obj, off);
441 __ push(ctos);
442 __ b(Done);
443
444 __ bind(notChar);
445 __ cmplwi(CCR0, flags, ztos);
446 __ bne(CCR0, notBool);
447 // ztos
448 __ lbzx(R17_tos, obj, off);
449 __ push(ztos);
450 __ b(Done);
451
452 __ bind(notBool);
453 break;
454 }
455
456 case Bytecodes::_ldc2_w:
457 {
458 Label notLong, notDouble;
459 __ cmplwi(CCR0, flags, ltos);
460 __ bne(CCR0, notLong);
461 // ltos
462 __ ldx(R17_tos, obj, off);
463 __ push(ltos);
464 __ b(Done);
465
466 __ bind(notLong);
467 __ cmplwi(CCR0, flags, dtos);
468 __ bne(CCR0, notDouble);
469 // dtos
470 __ lfdx(F15_ftos, obj, off);
471 __ push(dtos);
472 __ b(Done);
473
474 __ bind(notDouble);
475 break;
476 }
477
478 default:
479 ShouldNotReachHere();
480 }
481
482 __ stop("bad ldc/condy");
483 }
484
485 // Get the locals index located in the bytecode stream at bcp + offset.
486 void TemplateTable::locals_index(Register Rdst, int offset) {
487 __ lbz(Rdst, offset, R14_bcp);
488 }
489
490 void TemplateTable::iload() {
491 iload_internal();
492 }
493
494 void TemplateTable::nofast_iload() {
495 iload_internal(may_not_rewrite);
496 }
497
498 void TemplateTable::iload_internal(RewriteControl rc) {
499 transition(vtos, itos);
500
501 // Get the local value into tos
502 const Register Rindex = R22_tmp2;
503 locals_index(Rindex);
504
505 // Rewrite iload,iload pair into fast_iload2
506 // iload,caload pair into fast_icaload
507 if (RewriteFrequentPairs && rc == may_rewrite) {
508 Label Lrewrite, Ldone;
509 Register Rnext_byte = R3_ARG1,
510 Rrewrite_to = R6_ARG4,
511 Rscratch = R11_scratch1;
512
513 // get next byte
514 __ lbz(Rnext_byte, Bytecodes::length_for(Bytecodes::_iload), R14_bcp);
515
516 // If _iload, wait to rewrite to fast_iload2. We only want to rewrite the
517 // last two iloads in a pair. Comparing against fast_iload means that
518 // the next bytecode is neither an iload nor a caload, and that we
519 // therefore have an iload pair.
520 __ cmpwi(CCR0, Rnext_byte, (unsigned int)(unsigned char)Bytecodes::_iload);
521 __ beq(CCR0, Ldone);
522
523 __ cmpwi(CCR1, Rnext_byte, (unsigned int)(unsigned char)Bytecodes::_fast_iload);
524 __ li(Rrewrite_to, (unsigned int)(unsigned char)Bytecodes::_fast_iload2);
525 __ beq(CCR1, Lrewrite);
526
527 __ cmpwi(CCR0, Rnext_byte, (unsigned int)(unsigned char)Bytecodes::_caload);
528 __ li(Rrewrite_to, (unsigned int)(unsigned char)Bytecodes::_fast_icaload);
529 __ beq(CCR0, Lrewrite);
530
531 __ li(Rrewrite_to, (unsigned int)(unsigned char)Bytecodes::_fast_iload);
532
533 __ bind(Lrewrite);
534 patch_bytecode(Bytecodes::_iload, Rrewrite_to, Rscratch, false);
535 __ bind(Ldone);
536 }
537
538 __ load_local_int(R17_tos, Rindex, Rindex);
539 }
540
541 // Load 2 integers in a row without dispatching
542 void TemplateTable::fast_iload2() {
543 transition(vtos, itos);
544
545 __ lbz(R3_ARG1, 1, R14_bcp);
546 __ lbz(R17_tos, Bytecodes::length_for(Bytecodes::_iload) + 1, R14_bcp);
547
548 __ load_local_int(R3_ARG1, R11_scratch1, R3_ARG1);
549 __ load_local_int(R17_tos, R12_scratch2, R17_tos);
550 __ push_i(R3_ARG1);
551 }
552
553 void TemplateTable::fast_iload() {
554 transition(vtos, itos);
555 // Get the local value into tos
556
557 const Register Rindex = R11_scratch1;
558 locals_index(Rindex);
559 __ load_local_int(R17_tos, Rindex, Rindex);
560 }
561
562 // Load a long local variable from the locals area into the TOS cache register.
563 // The local index resides in the bytecode stream.
564 void TemplateTable::lload() {
565 transition(vtos, ltos);
566
567 const Register Rindex = R11_scratch1;
568 locals_index(Rindex);
569 __ load_local_long(R17_tos, Rindex, Rindex);
570 }
571
572 void TemplateTable::fload() {
573 transition(vtos, ftos);
574
575 const Register Rindex = R11_scratch1;
576 locals_index(Rindex);
577 __ load_local_float(F15_ftos, Rindex, Rindex);
578 }
579
580 void TemplateTable::dload() {
581 transition(vtos, dtos);
582
583 const Register Rindex = R11_scratch1;
584 locals_index(Rindex);
585 __ load_local_double(F15_ftos, Rindex, Rindex);
586 }
587
588 void TemplateTable::aload() {
589 transition(vtos, atos);
590
591 const Register Rindex = R11_scratch1;
592 locals_index(Rindex);
593 __ load_local_ptr(R17_tos, Rindex, Rindex);
594 }
595
596 void TemplateTable::locals_index_wide(Register Rdst) {
597 // Offset is 2, not 1, because R14_bcp points to the wide prefix code.
598 __ get_2_byte_integer_at_bcp(2, Rdst, InterpreterMacroAssembler::Unsigned);
599 }
600
601 void TemplateTable::wide_iload() {
602 // Get the local value into tos.
603
604 const Register Rindex = R11_scratch1;
605 locals_index_wide(Rindex);
606 __ load_local_int(R17_tos, Rindex, Rindex);
607 }
608
609 void TemplateTable::wide_lload() {
610 transition(vtos, ltos);
611
612 const Register Rindex = R11_scratch1;
613 locals_index_wide(Rindex);
614 __ load_local_long(R17_tos, Rindex, Rindex);
615 }
616
617 void TemplateTable::wide_fload() {
618 transition(vtos, ftos);
619
620 const Register Rindex = R11_scratch1;
621 locals_index_wide(Rindex);
622 __ load_local_float(F15_ftos, Rindex, Rindex);
623 }
624
625 void TemplateTable::wide_dload() {
626 transition(vtos, dtos);
627
628 const Register Rindex = R11_scratch1;
629 locals_index_wide(Rindex);
630 __ load_local_double(F15_ftos, Rindex, Rindex);
631 }
632
633 void TemplateTable::wide_aload() {
634 transition(vtos, atos);
635
636 const Register Rindex = R11_scratch1;
637 locals_index_wide(Rindex);
638 __ load_local_ptr(R17_tos, Rindex, Rindex);
639 }
640
641 void TemplateTable::iaload() {
642 transition(itos, itos);
643
644 const Register Rload_addr = R3_ARG1,
645 Rarray = R4_ARG2,
646 Rtemp = R5_ARG3;
647 __ index_check(Rarray, R17_tos /* index */, LogBytesPerInt, Rtemp, Rload_addr);
648 __ lwa(R17_tos, arrayOopDesc::base_offset_in_bytes(T_INT), Rload_addr);
649 }
650
651 void TemplateTable::laload() {
652 transition(itos, ltos);
653
654 const Register Rload_addr = R3_ARG1,
655 Rarray = R4_ARG2,
656 Rtemp = R5_ARG3;
657 __ index_check(Rarray, R17_tos /* index */, LogBytesPerLong, Rtemp, Rload_addr);
658 __ ld(R17_tos, arrayOopDesc::base_offset_in_bytes(T_LONG), Rload_addr);
659 }
660
661 void TemplateTable::faload() {
662 transition(itos, ftos);
663
664 const Register Rload_addr = R3_ARG1,
665 Rarray = R4_ARG2,
666 Rtemp = R5_ARG3;
667 __ index_check(Rarray, R17_tos /* index */, LogBytesPerInt, Rtemp, Rload_addr);
668 __ lfs(F15_ftos, arrayOopDesc::base_offset_in_bytes(T_FLOAT), Rload_addr);
669 }
670
671 void TemplateTable::daload() {
672 transition(itos, dtos);
673
674 const Register Rload_addr = R3_ARG1,
675 Rarray = R4_ARG2,
676 Rtemp = R5_ARG3;
677 __ index_check(Rarray, R17_tos /* index */, LogBytesPerLong, Rtemp, Rload_addr);
678 __ lfd(F15_ftos, arrayOopDesc::base_offset_in_bytes(T_DOUBLE), Rload_addr);
679 }
680
681 void TemplateTable::aaload() {
682 transition(itos, atos);
683
684 // tos: index
685 // result tos: array element
686 const Register Rload_addr = R3_ARG1,
687 Rarray = R4_ARG2,
688 Rtemp = R5_ARG3,
689 Rtemp2 = R31;
690 __ index_check(Rarray, R17_tos /* index */, UseCompressedOops ? 2 : LogBytesPerWord, Rtemp, Rload_addr);
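// Element shift: 2 (4-byte narrow oops) when compressed oops are used,
// LogBytesPerWord (8-byte oops) otherwise.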
691 do_oop_load(_masm, Rload_addr, arrayOopDesc::base_offset_in_bytes(T_OBJECT), R17_tos, Rtemp, Rtemp2,
692 IS_ARRAY);
693 __ verify_oop(R17_tos);
694 //__ dcbt(R17_tos); // prefetch
695 }
696
697 void TemplateTable::baload() {
698 transition(itos, itos);
699
700 const Register Rload_addr = R3_ARG1,
701 Rarray = R4_ARG2,
702 Rtemp = R5_ARG3;
703 __ index_check(Rarray, R17_tos /* index */, 0, Rtemp, Rload_addr);
704 __ lbz(R17_tos, arrayOopDesc::base_offset_in_bytes(T_BYTE), Rload_addr);
705 __ extsb(R17_tos, R17_tos);
706 }
707
708 void TemplateTable::caload() {
709 transition(itos, itos);
710
711 const Register Rload_addr = R3_ARG1,
712 Rarray = R4_ARG2,
713 Rtemp = R5_ARG3;
714 __ index_check(Rarray, R17_tos /* index */, LogBytesPerShort, Rtemp, Rload_addr);
715 __ lhz(R17_tos, arrayOopDesc::base_offset_in_bytes(T_CHAR), Rload_addr);
716 }
717
718 // An iload followed by a caload, a frequent pair.
719 void TemplateTable::fast_icaload() {
720 transition(vtos, itos);
721
722 const Register Rload_addr = R3_ARG1,
723 Rarray = R4_ARG2,
724 Rtemp = R11_scratch1;
725
726 locals_index(R17_tos);
727 __ load_local_int(R17_tos, Rtemp, R17_tos);
728 __ index_check(Rarray, R17_tos /* index */, LogBytesPerShort, Rtemp, Rload_addr);
729 __ lhz(R17_tos, arrayOopDesc::base_offset_in_bytes(T_CHAR), Rload_addr);
730 }
731
732 void TemplateTable::saload() {
733 transition(itos, itos);
734
735 const Register Rload_addr = R11_scratch1,
736 Rarray = R12_scratch2,
737 Rtemp = R3_ARG1;
738 __ index_check(Rarray, R17_tos /* index */, LogBytesPerShort, Rtemp, Rload_addr);
739 __ lha(R17_tos, arrayOopDesc::base_offset_in_bytes(T_SHORT), Rload_addr);
740 }
741
742 void TemplateTable::iload(int n) {
743 transition(vtos, itos);
744
745 __ lwz(R17_tos, Interpreter::local_offset_in_bytes(n), R18_locals);
746 }
747
748 void TemplateTable::lload(int n) {
749 transition(vtos, ltos);
750
751 __ ld(R17_tos, Interpreter::local_offset_in_bytes(n + 1), R18_locals);
752 }
753
754 void TemplateTable::fload(int n) {
755 transition(vtos, ftos);
756
757 __ lfs(F15_ftos, Interpreter::local_offset_in_bytes(n), R18_locals);
758 }
759
760 void TemplateTable::dload(int n) {
761 transition(vtos, dtos);
762
763 __ lfd(F15_ftos, Interpreter::local_offset_in_bytes(n + 1), R18_locals);
764 }
765
766 void TemplateTable::aload(int n) {
767 transition(vtos, atos);
768
769 __ ld(R17_tos, Interpreter::local_offset_in_bytes(n), R18_locals);
770 }
771
772 void TemplateTable::aload_0() {
773 aload_0_internal();
774 }
775
776 void TemplateTable::nofast_aload_0() {
777 aload_0_internal(may_not_rewrite);
778 }
779
780 void TemplateTable::aload_0_internal(RewriteControl rc) {
781 transition(vtos, atos);
782 // According to bytecode histograms, the pairs:
783 //
784 // _aload_0, _fast_igetfield
785 // _aload_0, _fast_agetfield
786 // _aload_0, _fast_fgetfield
787 //
788 // occur frequently. If RewriteFrequentPairs is set, the (slow)
789 // _aload_0 bytecode checks if the next bytecode is either
790 // _fast_igetfield, _fast_agetfield or _fast_fgetfield and then
791 // rewrites the current bytecode into a pair bytecode; otherwise it
792 // rewrites the current bytecode into _fast_aload_0 that doesn't do
793 // the pair check anymore.
794 //
795 // Note: If the next bytecode is _getfield, the rewrite must be
796 // delayed, otherwise we may miss an opportunity for a pair.
797 //
798 // Also rewrite frequent pairs
799 // aload_0, aload_1
800 // aload_0, iload_1
801 // These bytecodes, having small code footprints, are the most
802 // profitable to rewrite.
803
804 if (RewriteFrequentPairs && rc == may_rewrite) {
805
806 Label Lrewrite, Ldont_rewrite;
807 Register Rnext_byte = R3_ARG1,
808 Rrewrite_to = R6_ARG4,
809 Rscratch = R11_scratch1;
810
811 // Get next byte.
812 __ lbz(Rnext_byte, Bytecodes::length_for(Bytecodes::_aload_0), R14_bcp);
813
814 // If _getfield, wait to rewrite. We only want to rewrite the last two bytecodes in a pair.
815 __ cmpwi(CCR0, Rnext_byte, (unsigned int)(unsigned char)Bytecodes::_getfield);
816 __ beq(CCR0, Ldont_rewrite);
817
818 __ cmpwi(CCR1, Rnext_byte, (unsigned int)(unsigned char)Bytecodes::_fast_igetfield);
819 __ li(Rrewrite_to, (unsigned int)(unsigned char)Bytecodes::_fast_iaccess_0);
820 __ beq(CCR1, Lrewrite);
821
822 __ cmpwi(CCR0, Rnext_byte, (unsigned int)(unsigned char)Bytecodes::_fast_agetfield);
823 __ li(Rrewrite_to, (unsigned int)(unsigned char)Bytecodes::_fast_aaccess_0);
824 __ beq(CCR0, Lrewrite);
825
826 __ cmpwi(CCR1, Rnext_byte, (unsigned int)(unsigned char)Bytecodes::_fast_fgetfield);
827 __ li(Rrewrite_to, (unsigned int)(unsigned char)Bytecodes::_fast_faccess_0);
828 __ beq(CCR1, Lrewrite);
829
830 __ li(Rrewrite_to, (unsigned int)(unsigned char)Bytecodes::_fast_aload_0);
831
832 __ bind(Lrewrite);
833 patch_bytecode(Bytecodes::_aload_0, Rrewrite_to, Rscratch, false);
834 __ bind(Ldont_rewrite);
835 }
836
837 // Do the actual aload_0 (must happen after patch_bytecode, which might call the VM where a GC could move oops).
838 aload(0);
839 }
840
841 void TemplateTable::istore() {
842 transition(itos, vtos);
843
844 const Register Rindex = R11_scratch1;
845 locals_index(Rindex);
846 __ store_local_int(R17_tos, Rindex);
847 }
848
849 void TemplateTable::lstore() {
850 transition(ltos, vtos);
851 const Register Rindex = R11_scratch1;
852 locals_index(Rindex);
853 __ store_local_long(R17_tos, Rindex);
854 }
855
856 void TemplateTable::fstore() {
857 transition(ftos, vtos);
858
859 const Register Rindex = R11_scratch1;
860 locals_index(Rindex);
861 __ store_local_float(F15_ftos, Rindex);
862 }
863
864 void TemplateTable::dstore() {
865 transition(dtos, vtos);
866
867 const Register Rindex = R11_scratch1;
868 locals_index(Rindex);
869 __ store_local_double(F15_ftos, Rindex);
870 }
871
872 void TemplateTable::astore() {
873 transition(vtos, vtos);
874
875 const Register Rindex = R11_scratch1;
876 __ pop_ptr();
877 __ verify_oop_or_return_address(R17_tos, Rindex);
878 locals_index(Rindex);
879 __ store_local_ptr(R17_tos, Rindex);
880 }
881
882 void TemplateTable::wide_istore() {
883 transition(vtos, vtos);
884
885 const Register Rindex = R11_scratch1;
886 __ pop_i();
887 locals_index_wide(Rindex);
888 __ store_local_int(R17_tos, Rindex);
889 }
890
891 void TemplateTable::wide_lstore() {
892 transition(vtos, vtos);
893
894 const Register Rindex = R11_scratch1;
895 __ pop_l();
896 locals_index_wide(Rindex);
897 __ store_local_long(R17_tos, Rindex);
898 }
899
900 void TemplateTable::wide_fstore() {
901 transition(vtos, vtos);
902
903 const Register Rindex = R11_scratch1;
904 __ pop_f();
905 locals_index_wide(Rindex);
906 __ store_local_float(F15_ftos, Rindex);
907 }
908
909 void TemplateTable::wide_dstore() {
910 transition(vtos, vtos);
911
912 const Register Rindex = R11_scratch1;
913 __ pop_d();
914 locals_index_wide(Rindex);
915 __ store_local_double(F15_ftos, Rindex);
916 }
917
918 void TemplateTable::wide_astore() {
919 transition(vtos, vtos);
920
921 const Register Rindex = R11_scratch1;
922 __ pop_ptr();
923 __ verify_oop_or_return_address(R17_tos, Rindex);
924 locals_index_wide(Rindex);
925 __ store_local_ptr(R17_tos, Rindex);
926 }
927
928 void TemplateTable::iastore() {
929 transition(itos, vtos);
930
931 const Register Rindex = R3_ARG1,
932 Rstore_addr = R4_ARG2,
933 Rarray = R5_ARG3,
934 Rtemp = R6_ARG4;
935 __ pop_i(Rindex);
936 __ index_check(Rarray, Rindex, LogBytesPerInt, Rtemp, Rstore_addr);
937 __ stw(R17_tos, arrayOopDesc::base_offset_in_bytes(T_INT), Rstore_addr);
938 }
939
940 void TemplateTable::lastore() {
941 transition(ltos, vtos);
942
943 const Register Rindex = R3_ARG1,
944 Rstore_addr = R4_ARG2,
945 Rarray = R5_ARG3,
946 Rtemp = R6_ARG4;
947 __ pop_i(Rindex);
948 __ index_check(Rarray, Rindex, LogBytesPerLong, Rtemp, Rstore_addr);
949 __ std(R17_tos, arrayOopDesc::base_offset_in_bytes(T_LONG), Rstore_addr);
950 }
951
952 void TemplateTable::fastore() {
953 transition(ftos, vtos);
954
955 const Register Rindex = R3_ARG1,
956 Rstore_addr = R4_ARG2,
957 Rarray = R5_ARG3,
958 Rtemp = R6_ARG4;
959 __ pop_i(Rindex);
960 __ index_check(Rarray, Rindex, LogBytesPerInt, Rtemp, Rstore_addr);
961 __ stfs(F15_ftos, arrayOopDesc::base_offset_in_bytes(T_FLOAT), Rstore_addr);
962 }
963
964 void TemplateTable::dastore() {
965 transition(dtos, vtos);
966
967 const Register Rindex = R3_ARG1,
968 Rstore_addr = R4_ARG2,
969 Rarray = R5_ARG3,
970 Rtemp = R6_ARG4;
971 __ pop_i(Rindex);
972 __ index_check(Rarray, Rindex, LogBytesPerLong, Rtemp, Rstore_addr);
973 __ stfd(F15_ftos, arrayOopDesc::base_offset_in_bytes(T_DOUBLE), Rstore_addr);
974 }
975
976 // Pop 3 values (value, index, array) from the stack and perform the checked array store.
977 void TemplateTable::aastore() {
978 transition(vtos, vtos);
979
980 Label Lstore_ok, Lis_null, Ldone;
981 const Register Rindex = R3_ARG1,
982 Rarray = R4_ARG2,
983 Rscratch = R11_scratch1,
984 Rscratch2 = R12_scratch2,
985 Rarray_klass = R5_ARG3,
986 Rarray_element_klass = Rarray_klass,
987 Rvalue_klass = R6_ARG4,
988 Rstore_addr = R31; // Use register which survives VM call.
989
990 __ ld(R17_tos, Interpreter::expr_offset_in_bytes(0), R15_esp); // Get value to store.
991 __ lwz(Rindex, Interpreter::expr_offset_in_bytes(1), R15_esp); // Get index.
992 __ ld(Rarray, Interpreter::expr_offset_in_bytes(2), R15_esp); // Get array.
993
994 __ verify_oop(R17_tos);
995 __ index_check_without_pop(Rarray, Rindex, UseCompressedOops ? 2 : LogBytesPerWord, Rscratch, Rstore_addr);
996 // Rindex is dead!
997 Register Rscratch3 = Rindex;
998
999 // Do array store check - check for NULL value first.
1000 __ cmpdi(CCR0, R17_tos, 0);
1001 __ beq(CCR0, Lis_null);
1002
1003 __ load_klass(Rarray_klass, Rarray);
1004 __ load_klass(Rvalue_klass, R17_tos);
1005
1006 // Do fast instanceof cache test.
1007 __ ld(Rarray_element_klass, in_bytes(ObjArrayKlass::element_klass_offset()), Rarray_klass);
1008
1009 // Generate a fast subtype check. Branch to store_ok if no failure. Throw if failure.
1010 __ gen_subtype_check(Rvalue_klass /*subklass*/, Rarray_element_klass /*superklass*/, Rscratch, Rscratch2, Rscratch3, Lstore_ok);
1011
1012 // Fell through: subtype check failed => throw an exception.
1013 __ load_dispatch_table(R11_scratch1, (address*)Interpreter::_throw_ArrayStoreException_entry);
1014 __ mtctr(R11_scratch1);
1015 __ bctr();
1016
1017 __ bind(Lis_null);
1018 do_oop_store(_masm, Rstore_addr, arrayOopDesc::base_offset_in_bytes(T_OBJECT), noreg /* 0 */,
1019 Rscratch, Rscratch2, Rscratch3, IS_ARRAY);
1020 __ profile_null_seen(Rscratch, Rscratch2);
1021 __ b(Ldone);
1022
1023 // Store is OK.
1024 __ bind(Lstore_ok);
1025 do_oop_store(_masm, Rstore_addr, arrayOopDesc::base_offset_in_bytes(T_OBJECT), R17_tos /* value */,
1026 Rscratch, Rscratch2, Rscratch3, IS_ARRAY | IS_NOT_NULL);
1027
1028 __ bind(Ldone);
1029 // Adjust sp (pops array, index and value).
1030 __ addi(R15_esp, R15_esp, 3 * Interpreter::stackElementSize);
1031 }
1032
1033 void TemplateTable::bastore() {
1034 transition(itos, vtos);
1035
1036 const Register Rindex = R11_scratch1,
1037 Rarray = R12_scratch2,
1038 Rscratch = R3_ARG1;
1039 __ pop_i(Rindex);
1040 __ pop_ptr(Rarray);
1041 // tos: val
1042
1043 // Need to check whether array is boolean or byte
1044 // since both types share the bastore bytecode.
1045 __ load_klass(Rscratch, Rarray);
1046 __ lwz(Rscratch, in_bytes(Klass::layout_helper_offset()), Rscratch);
1047 int diffbit = exact_log2(Klass::layout_helper_boolean_diffbit());
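// The layout helpers of T_BOOLEAN and T_BYTE arrays differ in exactly this
// one bit; testbitdi checks it to decide whether the 0/1 masking is needed.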
1048 __ testbitdi(CCR0, R0, Rscratch, diffbit);
1049 Label L_skip;
1050 __ bfalse(CCR0, L_skip);
1051 __ andi(R17_tos, R17_tos, 1); // if it is a T_BOOLEAN array, mask the stored value to 0/1
1052 __ bind(L_skip);
1053
1054 __ index_check_without_pop(Rarray, Rindex, 0, Rscratch, Rarray);
1055 __ stb(R17_tos, arrayOopDesc::base_offset_in_bytes(T_BYTE), Rarray);
1056 }
1057
1058 void TemplateTable::castore() {
1059 transition(itos, vtos);
1060
1061 const Register Rindex = R11_scratch1,
1062 Rarray = R12_scratch2,
1063 Rscratch = R3_ARG1;
1064 __ pop_i(Rindex);
1065 // tos: val
1066 // Rarray: array ptr (popped by index_check)
1067 __ index_check(Rarray, Rindex, LogBytesPerShort, Rscratch, Rarray);
1068 __ sth(R17_tos, arrayOopDesc::base_offset_in_bytes(T_CHAR), Rarray);
1069 }
1070
1071 void TemplateTable::sastore() {
1072 castore();
1073 }
1074
1075 void TemplateTable::istore(int n) {
1076 transition(itos, vtos);
1077 __ stw(R17_tos, Interpreter::local_offset_in_bytes(n), R18_locals);
1078 }
1079
1080 void TemplateTable::lstore(int n) {
1081 transition(ltos, vtos);
1082 __ std(R17_tos, Interpreter::local_offset_in_bytes(n + 1), R18_locals);
1083 }
1084
1085 void TemplateTable::fstore(int n) {
1086 transition(ftos, vtos);
1087 __ stfs(F15_ftos, Interpreter::local_offset_in_bytes(n), R18_locals);
1088 }
1089
1090 void TemplateTable::dstore(int n) {
1091 transition(dtos, vtos);
1092 __ stfd(F15_ftos, Interpreter::local_offset_in_bytes(n + 1), R18_locals);
1093 }
1094
1095 void TemplateTable::astore(int n) {
1096 transition(vtos, vtos);
1097
1098 __ pop_ptr();
1099 __ verify_oop_or_return_address(R17_tos, R11_scratch1);
1100 __ std(R17_tos, Interpreter::local_offset_in_bytes(n), R18_locals);
1101 }
1102
1103 void TemplateTable::pop() {
1104 transition(vtos, vtos);
1105
1106 __ addi(R15_esp, R15_esp, Interpreter::stackElementSize);
1107 }
1108
1109 void TemplateTable::pop2() {
1110 transition(vtos, vtos);
1111
1112 __ addi(R15_esp, R15_esp, Interpreter::stackElementSize * 2);
1113 }
1114
1115 void TemplateTable::dup() {
1116 transition(vtos, vtos);
1117
1118 __ ld(R11_scratch1, Interpreter::stackElementSize, R15_esp);
1119 __ push_ptr(R11_scratch1);
1120 }
1121
1122 void TemplateTable::dup_x1() {
1123 transition(vtos, vtos);
1124
1125 Register Ra = R11_scratch1,
1126 Rb = R12_scratch2;
1127 // stack: ..., a, b
1128 __ ld(Rb, Interpreter::stackElementSize, R15_esp);
1129 __ ld(Ra, Interpreter::stackElementSize * 2, R15_esp);
1130 __ std(Rb, Interpreter::stackElementSize * 2, R15_esp);
1131 __ std(Ra, Interpreter::stackElementSize, R15_esp);
1132 __ push_ptr(Rb);
1133 // stack: ..., b, a, b
1134 }
1135
1136 void TemplateTable::dup_x2() {
1137 transition(vtos, vtos);
1138
1139 Register Ra = R11_scratch1,
1140 Rb = R12_scratch2,
1141 Rc = R3_ARG1;
1142
1143 // stack: ..., a, b, c
1144 __ ld(Rc, Interpreter::stackElementSize, R15_esp); // load c
1145 __ ld(Ra, Interpreter::stackElementSize * 3, R15_esp); // load a
1146 __ std(Rc, Interpreter::stackElementSize * 3, R15_esp); // store c in a
1147 __ ld(Rb, Interpreter::stackElementSize * 2, R15_esp); // load b
1148 // stack: ..., c, b, c
1149 __ std(Ra, Interpreter::stackElementSize * 2, R15_esp); // store a in b
1150 // stack: ..., c, a, c
1151 __ std(Rb, Interpreter::stackElementSize, R15_esp); // store b in c
1152 __ push_ptr(Rc); // push c
1153 // stack: ..., c, a, b, c
1154 }
1155
1156 void TemplateTable::dup2() {
1157 transition(vtos, vtos);
1158
1159 Register Ra = R11_scratch1,
1160 Rb = R12_scratch2;
1161 // stack: ..., a, b
1162 __ ld(Rb, Interpreter::stackElementSize, R15_esp);
1163 __ ld(Ra, Interpreter::stackElementSize * 2, R15_esp);
1164 __ push_2ptrs(Ra, Rb);
1165 // stack: ..., a, b, a, b
1166 }
1167
1168 void TemplateTable::dup2_x1() {
1169 transition(vtos, vtos);
1170
1171 Register Ra = R11_scratch1,
1172 Rb = R12_scratch2,
1173 Rc = R3_ARG1;
1174 // stack: ..., a, b, c
1175 __ ld(Rc, Interpreter::stackElementSize, R15_esp);
1176 __ ld(Rb, Interpreter::stackElementSize * 2, R15_esp);
1177 __ std(Rc, Interpreter::stackElementSize * 2, R15_esp);
1178 __ ld(Ra, Interpreter::stackElementSize * 3, R15_esp);
1179 __ std(Ra, Interpreter::stackElementSize, R15_esp);
1180 __ std(Rb, Interpreter::stackElementSize * 3, R15_esp);
1181 // stack: ..., b, c, a
1182 __ push_2ptrs(Rb, Rc);
1183 // stack: ..., b, c, a, b, c
1184 }
1185
1186 void TemplateTable::dup2_x2() {
1187 transition(vtos, vtos);
1188
1189 Register Ra = R11_scratch1,
1190 Rb = R12_scratch2,
1191 Rc = R3_ARG1,
1192 Rd = R4_ARG2;
1193 // stack: ..., a, b, c, d
1194 __ ld(Rb, Interpreter::stackElementSize * 3, R15_esp);
1195 __ ld(Rd, Interpreter::stackElementSize, R15_esp);
1196 __ std(Rb, Interpreter::stackElementSize, R15_esp); // store b in d
1197 __ std(Rd, Interpreter::stackElementSize * 3, R15_esp); // store d in b
1198 __ ld(Ra, Interpreter::stackElementSize * 4, R15_esp);
1199 __ ld(Rc, Interpreter::stackElementSize * 2, R15_esp);
1200 __ std(Ra, Interpreter::stackElementSize * 2, R15_esp); // store a in c
1201 __ std(Rc, Interpreter::stackElementSize * 4, R15_esp); // store c in a
1202 // stack: ..., c, d, a, b
1203 __ push_2ptrs(Rc, Rd);
1204 // stack: ..., c, d, a, b, c, d
1205 }
1206
1207 void TemplateTable::swap() {
1208 transition(vtos, vtos);
1209 // stack: ..., a, b
1210
1211 Register Ra = R11_scratch1,
1212 Rb = R12_scratch2;
1213 // stack: ..., a, b
1214 __ ld(Rb, Interpreter::stackElementSize, R15_esp);
1215 __ ld(Ra, Interpreter::stackElementSize * 2, R15_esp);
1216 __ std(Rb, Interpreter::stackElementSize * 2, R15_esp);
1217 __ std(Ra, Interpreter::stackElementSize, R15_esp);
1218 // stack: ..., b, a
1219 }
1220
1221 void TemplateTable::iop2(Operation op) {
1222 transition(itos, itos);
1223
1224 Register Rscratch = R11_scratch1;
1225
1226 __ pop_i(Rscratch);
1227 // For the shift operations: tos = number of bits to shift,
1228 // Rscratch = value to shift.
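// For 32-bit shifts the JVM spec uses only the low 5 bits of the count;
// the rldicl(..., 0, 64-5) below performs exactly that masking.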
1229 switch (op) {
1230 case add: __ add(R17_tos, Rscratch, R17_tos); break;
1231 case sub: __ sub(R17_tos, Rscratch, R17_tos); break;
1232 case mul: __ mullw(R17_tos, Rscratch, R17_tos); break;
1233 case _and: __ andr(R17_tos, Rscratch, R17_tos); break;
1234 case _or: __ orr(R17_tos, Rscratch, R17_tos); break;
1235 case _xor: __ xorr(R17_tos, Rscratch, R17_tos); break;
1236 case shl: __ rldicl(R17_tos, R17_tos, 0, 64-5); __ slw(R17_tos, Rscratch, R17_tos); break;
1237 case shr: __ rldicl(R17_tos, R17_tos, 0, 64-5); __ sraw(R17_tos, Rscratch, R17_tos); break;
1238 case ushr: __ rldicl(R17_tos, R17_tos, 0, 64-5); __ srw(R17_tos, Rscratch, R17_tos); break;
1239 default: ShouldNotReachHere();
1240 }
1241 }
1242
1243 void TemplateTable::lop2(Operation op) {
1244 transition(ltos, ltos);
1245
1246 Register Rscratch = R11_scratch1;
1247 __ pop_l(Rscratch);
1248 switch (op) {
1249 case add: __ add(R17_tos, Rscratch, R17_tos); break;
1250 case sub: __ sub(R17_tos, Rscratch, R17_tos); break;
1251 case _and: __ andr(R17_tos, Rscratch, R17_tos); break;
1252 case _or: __ orr(R17_tos, Rscratch, R17_tos); break;
1253 case _xor: __ xorr(R17_tos, Rscratch, R17_tos); break;
1254 default: ShouldNotReachHere();
1255 }
1256 }
1257
1258 void TemplateTable::idiv() {
1259 transition(itos, itos);
1260
1261 Label Lnormal, Lexception, Ldone;
1262 Register Rdividend = R11_scratch1; // Used by irem.
1263
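// Fast path for divisor in {-1, 0, 1}: divisor + 1 lies in {0, 1, 2}, so one
// unsigned compare against 2 sends every other divisor to Lnormal. Of the
// remaining cases, 0 throws ArithmeticException and +/-1 is handled by a
// multiply (x / +-1 == x * +-1), which also sidesteps the min_jint / -1 case.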
1264 __ addi(R0, R17_tos, 1);
1265 __ cmplwi(CCR0, R0, 2);
1266 __ bgt(CCR0, Lnormal); // divisor <-1 or >1
1267
1268 __ cmpwi(CCR1, R17_tos, 0);
1269 __ beq(CCR1, Lexception); // divisor == 0
1270
1271 __ pop_i(Rdividend);
1272 __ mullw(R17_tos, Rdividend, R17_tos); // div by +/-1
1273 __ b(Ldone);
1274
1275 __ bind(Lexception);
1276 __ load_dispatch_table(R11_scratch1, (address*)Interpreter::_throw_ArithmeticException_entry);
1277 __ mtctr(R11_scratch1);
1278 __ bctr();
1279
1280 __ align(32, 12);
1281 __ bind(Lnormal);
1282 __ pop_i(Rdividend);
1283 __ divw(R17_tos, Rdividend, R17_tos); // Can't divide minint/-1.
1284 __ bind(Ldone);
1285 }
1286
1287 void TemplateTable::irem() {
1288 transition(itos, itos);
1289
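// Remainder via the quotient from idiv(): rem = dividend - (dividend / divisor) * divisor.
// R12_scratch2 preserves the divisor across idiv(); idiv() leaves the dividend
// in R11_scratch1 and the quotient in R17_tos.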
1290 __ mr(R12_scratch2, R17_tos);
1291 idiv();
1292 __ mullw(R17_tos, R17_tos, R12_scratch2);
1293 __ subf(R17_tos, R17_tos, R11_scratch1); // Dividend set by idiv.
1294 }
1295
1296 void TemplateTable::lmul() {
1297 transition(ltos, ltos);
1298
1299 __ pop_l(R11_scratch1);
1300 __ mulld(R17_tos, R11_scratch1, R17_tos);
1301 }
1302
1303 void TemplateTable::ldiv() {
1304 transition(ltos, ltos);
1305
1306 Label Lnormal, Lexception, Ldone;
1307 Register Rdividend = R11_scratch1; // Used by lrem.
1308
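// Same divisor-in-{-1, 0, 1} fast path as in idiv() above, in 64-bit flavor.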
1309 __ addi(R0, R17_tos, 1);
1310 __ cmpldi(CCR0, R0, 2);
1311 __ bgt(CCR0, Lnormal); // divisor <-1 or >1
1312
1313 __ cmpdi(CCR1, R17_tos, 0);
1314 __ beq(CCR1, Lexception); // divisor == 0
1315
1316 __ pop_l(Rdividend);
1317 __ mulld(R17_tos, Rdividend, R17_tos); // div by +/-1
1318 __ b(Ldone);
1319
1320 __ bind(Lexception);
1321 __ load_dispatch_table(R11_scratch1, (address*)Interpreter::_throw_ArithmeticException_entry);
1322 __ mtctr(R11_scratch1);
1323 __ bctr();
1324
1325 __ align(32, 12);
1326 __ bind(Lnormal);
1327 __ pop_l(Rdividend);
1328 __ divd(R17_tos, Rdividend, R17_tos); // Can't divide minint/-1.
1329 __ bind(Ldone);
1330 }
1331
1332 void TemplateTable::lrem() {
1333 transition(ltos, ltos);
1334
1335 __ mr(R12_scratch2, R17_tos);
1336 ldiv();
1337 __ mulld(R17_tos, R17_tos, R12_scratch2);
1338 __ subf(R17_tos, R17_tos, R11_scratch1); // Dividend set by ldiv.
1339 }
1340
1341 void TemplateTable::lshl() {
1342 transition(itos, ltos);
1343
1344 __ rldicl(R17_tos, R17_tos, 0, 64-6); // Extract the 6 least significant bits (64-bit shifts use only 6 count bits).
1345 __ pop_l(R11_scratch1);
1346 __ sld(R17_tos, R11_scratch1, R17_tos);
1347 }
1348
1349 void TemplateTable::lshr() {
1350 transition(itos, ltos);
1351
1352 __ rldicl(R17_tos, R17_tos, 0, 64-6); // Extract the 6 least significant bits (64-bit shifts use only 6 count bits).
1353 __ pop_l(R11_scratch1);
1354 __ srad(R17_tos, R11_scratch1, R17_tos);
1355 }
1356
1357 void TemplateTable::lushr() {
1358 transition(itos, ltos);
1359
1360 __ rldicl(R17_tos, R17_tos, 0, 64-6); // Extract the 6 least significant bits (64-bit shifts use only 6 count bits).
1361 __ pop_l(R11_scratch1);
1362 __ srd(R17_tos, R11_scratch1, R17_tos);
1363 }
1364
1365 void TemplateTable::fop2(Operation op) {
1366 transition(ftos, ftos);
1367
1368 switch (op) {
1369 case add: __ pop_f(F0_SCRATCH); __ fadds(F15_ftos, F0_SCRATCH, F15_ftos); break;
1370 case sub: __ pop_f(F0_SCRATCH); __ fsubs(F15_ftos, F0_SCRATCH, F15_ftos); break;
1371 case mul: __ pop_f(F0_SCRATCH); __ fmuls(F15_ftos, F0_SCRATCH, F15_ftos); break;
1372 case div: __ pop_f(F0_SCRATCH); __ fdivs(F15_ftos, F0_SCRATCH, F15_ftos); break;
1373 case rem:
1374 __ pop_f(F1_ARG1);
1375 __ fmr(F2_ARG2, F15_ftos);
1376 __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::frem));
1377 __ fmr(F15_ftos, F1_RET);
1378 break;
1379
1380 default: ShouldNotReachHere();
1381 }
1382 }
1383
1384 void TemplateTable::dop2(Operation op) {
1385 transition(dtos, dtos);
1386
1387 switch (op) {
1388 case add: __ pop_d(F0_SCRATCH); __ fadd(F15_ftos, F0_SCRATCH, F15_ftos); break;
1389 case sub: __ pop_d(F0_SCRATCH); __ fsub(F15_ftos, F0_SCRATCH, F15_ftos); break;
1390 case mul: __ pop_d(F0_SCRATCH); __ fmul(F15_ftos, F0_SCRATCH, F15_ftos); break;
1391 case div: __ pop_d(F0_SCRATCH); __ fdiv(F15_ftos, F0_SCRATCH, F15_ftos); break;
1392 case rem:
1393 __ pop_d(F1_ARG1);
1394 __ fmr(F2_ARG2, F15_ftos);
1395 __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::drem));
1396 __ fmr(F15_ftos, F1_RET);
1397 break;
1398
1399 default: ShouldNotReachHere();
1400 }
1401 }
1402
1403 // Negate the value in the TOS cache.
1404 void TemplateTable::ineg() {
1405 transition(itos, itos);
1406
1407 __ neg(R17_tos, R17_tos);
1408 }
1409
1410 // Negate the value in the TOS cache.
1411 void TemplateTable::lneg() {
1412 transition(ltos, ltos);
1413
1414 __ neg(R17_tos, R17_tos);
1415 }
1416
1417 void TemplateTable::fneg() {
1418 transition(ftos, ftos);
1419
1420 __ fneg(F15_ftos, F15_ftos);
1421 }
1422
1423 void TemplateTable::dneg() {
1424 transition(dtos, dtos);
1425
1426 __ fneg(F15_ftos, F15_ftos);
1427 }
1428
1429 // Increments a local variable in place.
1430 void TemplateTable::iinc() {
1431 transition(vtos, vtos);
1432
1433 const Register Rindex = R11_scratch1,
1434 Rincrement = R0,
1435 Rvalue = R12_scratch2;
1436
1437 locals_index(Rindex); // Load locals index from bytecode stream.
1438 __ lbz(Rincrement, 2, R14_bcp); // Load increment from the bytecode stream.
1439 __ extsb(Rincrement, Rincrement);
1440
1441 __ load_local_int(Rvalue, Rindex, Rindex); // Puts address of local into Rindex.
1442
1443 __ add(Rvalue, Rincrement, Rvalue);
1444 __ stw(Rvalue, 0, Rindex);
1445 }
1446
1447 void TemplateTable::wide_iinc() {
1448 transition(vtos, vtos);
1449
1450 Register Rindex = R11_scratch1,
1451 Rlocals_addr = Rindex,
1452 Rincr = R12_scratch2;
1453 locals_index_wide(Rindex);
1454 __ get_2_byte_integer_at_bcp(4, Rincr, InterpreterMacroAssembler::Signed);
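// The increment is at bcp + 4: wide prefix (1) + iinc opcode (1) + 2-byte index.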
1455 __ load_local_int(R17_tos, Rlocals_addr, Rindex);
1456 __ add(R17_tos, Rincr, R17_tos);
1457 __ stw(R17_tos, 0, Rlocals_addr);
1458 }
1459
1460 void TemplateTable::convert() {
1461 // %%%%% Factor this first part across platforms
1462 #ifdef ASSERT
1463 TosState tos_in = ilgl;
1464 TosState tos_out = ilgl;
1465 switch (bytecode()) {
1466 case Bytecodes::_i2l: // fall through
1467 case Bytecodes::_i2f: // fall through
1468 case Bytecodes::_i2d: // fall through
1469 case Bytecodes::_i2b: // fall through
1470 case Bytecodes::_i2c: // fall through
1471 case Bytecodes::_i2s: tos_in = itos; break;
1472 case Bytecodes::_l2i: // fall through
1473 case Bytecodes::_l2f: // fall through
1474 case Bytecodes::_l2d: tos_in = ltos; break;
1475 case Bytecodes::_f2i: // fall through
1476 case Bytecodes::_f2l: // fall through
1477 case Bytecodes::_f2d: tos_in = ftos; break;
1478 case Bytecodes::_d2i: // fall through
1479 case Bytecodes::_d2l: // fall through
1480 case Bytecodes::_d2f: tos_in = dtos; break;
1481 default : ShouldNotReachHere();
1482 }
1483 switch (bytecode()) {
1484 case Bytecodes::_l2i: // fall through
1485 case Bytecodes::_f2i: // fall through
1486 case Bytecodes::_d2i: // fall through
1487 case Bytecodes::_i2b: // fall through
1488 case Bytecodes::_i2c: // fall through
1489 case Bytecodes::_i2s: tos_out = itos; break;
1490 case Bytecodes::_i2l: // fall through
1491 case Bytecodes::_f2l: // fall through
1492 case Bytecodes::_d2l: tos_out = ltos; break;
1493 case Bytecodes::_i2f: // fall through
1494 case Bytecodes::_l2f: // fall through
1495 case Bytecodes::_d2f: tos_out = ftos; break;
1496 case Bytecodes::_i2d: // fall through
1497 case Bytecodes::_l2d: // fall through
1498 case Bytecodes::_f2d: tos_out = dtos; break;
1499 default : ShouldNotReachHere();
1500 }
1501 transition(tos_in, tos_out);
1502 #endif
1503
1504 // Conversion
1505 Label done;
1506 switch (bytecode()) {
1507 case Bytecodes::_i2l:
1508 __ extsw(R17_tos, R17_tos);
1509 break;
1510
1511 case Bytecodes::_l2i:
1512 // Nothing to do, we'll continue to work with the lower bits.
1513 break;
1514
1515 case Bytecodes::_i2b:
1516 __ extsb(R17_tos, R17_tos);
1517 break;
1518
1519 case Bytecodes::_i2c:
1520 __ rldicl(R17_tos, R17_tos, 0, 64-2*8);
1521 break;
1522
1523 case Bytecodes::_i2s:
1524 __ extsh(R17_tos, R17_tos);
1525 break;
1526
1527 case Bytecodes::_i2d:
1528 __ extsw(R17_tos, R17_tos);
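// Fall through: after the sign extension, i2d shares the l2d code.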
1529 case Bytecodes::_l2d:
1530 __ move_l_to_d();
1531 __ fcfid(F15_ftos, F15_ftos);
1532 break;
1533
1534 case Bytecodes::_i2f:
1535 __ extsw(R17_tos, R17_tos);
1536 __ move_l_to_d();
1537 if (VM_Version::has_fcfids()) { // fcfids is >= Power7 only
1538 // Comment: alternatively, load with sign extend could be done by lfiwax.
1539 __ fcfids(F15_ftos, F15_ftos);
1540 } else {
1541 __ fcfid(F15_ftos, F15_ftos);
1542 __ frsp(F15_ftos, F15_ftos);
1543 }
1544 break;
1545
1546 case Bytecodes::_l2f:
1547 if (VM_Version::has_fcfids()) { // fcfids is >= Power7 only
1548 __ move_l_to_d();
1549 __ fcfids(F15_ftos, F15_ftos);
1550 } else {
1551 // Avoid rounding problem when result should be 0x3f800001: need fixup code before fcfid+frsp.
1552 __ mr(R3_ARG1, R17_tos);
1553 __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::l2f));
1554 __ fmr(F15_ftos, F1_RET);
1555 }
1556 break;
1557
1558 case Bytecodes::_f2d:
1559 // empty
1560 break;
1561
1562 case Bytecodes::_d2f:
1563 __ frsp(F15_ftos, F15_ftos);
1564 break;
1565
1566 case Bytecodes::_d2i:
1567 case Bytecodes::_f2i:
1568 __ fcmpu(CCR0, F15_ftos, F15_ftos);
1569 __ li(R17_tos, 0); // 0 in case of NAN
1570 __ bso(CCR0, done);
1571 __ fctiwz(F15_ftos, F15_ftos);
1572 __ move_d_to_l();
1573 break;
1574
1575 case Bytecodes::_d2l:
1576 case Bytecodes::_f2l:
1577 __ fcmpu(CCR0, F15_ftos, F15_ftos);
1578 __ li(R17_tos, 0); // 0 in case of NAN
1579 __ bso(CCR0, done);
1580 __ fctidz(F15_ftos, F15_ftos);
1581 __ move_d_to_l();
1582 break;
1583
1584 default: ShouldNotReachHere();
1585 }
1586 __ bind(done);
1587 }
1588
1589 // Long compare
1590 void TemplateTable::lcmp() {
1591 transition(ltos, itos);
1592
1593 const Register Rscratch = R11_scratch1;
1594 __ pop_l(Rscratch); // first operand, deeper in stack
1595
1596 __ cmpd(CCR0, Rscratch, R17_tos); // compare
1597 __ mfcr(R17_tos); // set bit 32..33 as follows: <: 0b10, =: 0b00, >: 0b01
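// Branch-free conversion of CR0 into -1 / 0 / 1 (a sketch of the arithmetic):
// after mfcr, CR0's LT flag is the sign bit of the low 32-bit word and GT the
// bit below it. srwi by 30 leaves (LT << 1) | GT in the low two bits, srawi
// by 31 replicates LT into -1 or 0, and or-ing both yields -1 (<), 0 (==), 1 (>).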
1598 __ srwi(Rscratch, R17_tos, 30);
1599 __ srawi(R17_tos, R17_tos, 31);
1600 __ orr(R17_tos, Rscratch, R17_tos); // set result as follows: <: -1, =: 0, >: 1
1601 }
1602
1603 // fcmpl/fcmpg and dcmpl/dcmpg bytecodes
1604 // unordered_result == -1 => fcmpl or dcmpl
1605 // unordered_result == 1 => fcmpg or dcmpg
1606 void TemplateTable::float_cmp(bool is_float, int unordered_result) {
1607 const FloatRegister Rfirst = F0_SCRATCH,
1608 Rsecond = F15_ftos;
1609 const Register Rscratch = R11_scratch1;
1610
1611 if (is_float) {
1612 __ pop_f(Rfirst);
1613 } else {
1614 __ pop_d(Rfirst);
1615 }
1616
1617 Label Lunordered, Ldone;
1618 __ fcmpu(CCR0, Rfirst, Rsecond); // compare
1619 if (unordered_result) {
1620 __ bso(CCR0, Lunordered);
1621 }
1622 __ mfcr(R17_tos); // set bit 32..33 as follows: <: 0b10, =: 0b00, >: 0b01
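// Same branch-free CR0 -> {-1, 0, 1} conversion as in lcmp() above.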
1623 __ srwi(Rscratch, R17_tos, 30);
1624 __ srawi(R17_tos, R17_tos, 31);
1625 __ orr(R17_tos, Rscratch, R17_tos); // set result as follows: <: -1, =: 0, >: 1
1626 if (unordered_result) {
1627 __ b(Ldone);
1628 __ bind(Lunordered);
1629 __ load_const_optimized(R17_tos, unordered_result);
1630 }
1631 __ bind(Ldone);
1632 }
1633
1634 // Branch_conditional which takes TemplateTable::Condition.
1635 void TemplateTable::branch_conditional(ConditionRegister crx, TemplateTable::Condition cc, Label& L, bool invert) {
1636 bool positive = false;
1637 Assembler::Condition cond = Assembler::equal;
1638 switch (cc) {
1639 case TemplateTable::equal: positive = true ; cond = Assembler::equal ; break;
1640 case TemplateTable::not_equal: positive = false; cond = Assembler::equal ; break;
1641 case TemplateTable::less: positive = true ; cond = Assembler::less ; break;
1642 case TemplateTable::less_equal: positive = false; cond = Assembler::greater; break;
1643 case TemplateTable::greater: positive = true ; cond = Assembler::greater; break;
1644 case TemplateTable::greater_equal: positive = false; cond = Assembler::less ; break;
1645 default: ShouldNotReachHere();
1646 }
1647 int bo = (positive != invert) ? Assembler::bcondCRbiIs1 : Assembler::bcondCRbiIs0;
1648 int bi = Assembler::bi0(crx, cond);
1649 __ bc(bo, bi, L);
1650 }
1651
1652 void TemplateTable::branch(bool is_jsr, bool is_wide) {
1653
1654 // Note: on SPARC, we use InterpreterMacroAssembler::if_cmp also.
1655 __ verify_thread();
1656
1657 const Register Rscratch1 = R11_scratch1,
1658 Rscratch2 = R12_scratch2,
1659 Rscratch3 = R3_ARG1,
1660 R4_counters = R4_ARG2,
1661 bumped_count = R31,
1662 Rdisp = R22_tmp2;
1663
1664 __ profile_taken_branch(Rscratch1, bumped_count);
1665
1666 // Get (wide) offset.
1667 if (is_wide) {
1668 __ get_4_byte_integer_at_bcp(1, Rdisp, InterpreterMacroAssembler::Signed);
1669 } else {
1670 __ get_2_byte_integer_at_bcp(1, Rdisp, InterpreterMacroAssembler::Signed);
1671 }
1672
1673 // --------------------------------------------------------------------------
1674 // Handle all the JSR stuff here, then exit.
1675 // It's much shorter and cleaner than intermingling with the
1676 // non-JSR normal-branch stuff occurring below.
1677 if (is_jsr) {
1678 // Compute return address as bci in Otos_i.
1679 __ ld(Rscratch1, in_bytes(Method::const_offset()), R19_method);
1680 __ addi(Rscratch2, R14_bcp, -in_bytes(ConstMethod::codes_offset()) + (is_wide ? 5 : 3));
1681 __ subf(R17_tos, Rscratch1, Rscratch2);
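// R17_tos now holds the bci of the bytecode following the jsr (jsr is 3
// bytes long, jsr_w 5), i.e. the return address in bci form.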
1682
1683 // Bump bcp to target of JSR.
1684 __ add(R14_bcp, Rdisp, R14_bcp);
1685 // Push returnAddress for "ret" on stack.
1686 __ push_ptr(R17_tos);
1687 // And away we go!
1688 __ dispatch_next(vtos, 0, true);
1689 return;
1690 }
1691
1692 // --------------------------------------------------------------------------
1693 // Normal (non-jsr) branch handling
1694
1695 // Bump bytecode pointer by displacement (take the branch).
1696 __ add(R14_bcp, Rdisp, R14_bcp); // Add to bc addr.
1697
1698 const bool increment_invocation_counter_for_backward_branches = UseCompiler && UseLoopCounter;
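// Only backward branches (loop edges) are counted: these counters drive
// on-stack replacement (OSR) compilation of hot loops.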
1699 if (increment_invocation_counter_for_backward_branches) {
1700 Label Lforward;
1701
1702 // Check branch direction.
1703 __ cmpdi(CCR0, Rdisp, 0);
1704 __ bgt(CCR0, Lforward);
1705
1706 __ get_method_counters(R19_method, R4_counters, Lforward);
1707
1708 if (TieredCompilation) {
1709 Label Lno_mdo, Loverflow;
1710 const int increment = InvocationCounter::count_increment;
1711 if (ProfileInterpreter) {
1712 Register Rmdo = Rscratch1;
1713
1714 // If no method data exists, go to profile_continue.
1715 __ ld(Rmdo, in_bytes(Method::method_data_offset()), R19_method);
1716 __ cmpdi(CCR0, Rmdo, 0);
1717 __ beq(CCR0, Lno_mdo);
1718
1719 // Increment backedge counter in the MDO.
1720 const int mdo_bc_offs = in_bytes(MethodData::backedge_counter_offset()) + in_bytes(InvocationCounter::counter_offset());
1721 __ lwz(Rscratch2, mdo_bc_offs, Rmdo);
1722 __ lwz(Rscratch3, in_bytes(MethodData::backedge_mask_offset()), Rmdo);
1723 __ addi(Rscratch2, Rscratch2, increment);
1724 __ stw(Rscratch2, mdo_bc_offs, Rmdo);
1725 if (UseOnStackReplacement) {
1726 __ and_(Rscratch3, Rscratch2, Rscratch3);
1727 __ bne(CCR0, Lforward);
1728 __ b(Loverflow);
1729 } else {
1730 __ b(Lforward);
1731 }
1732 }
1733
1734 // If there's no MDO, increment counter in method.
1735 const int mo_bc_offs = in_bytes(MethodCounters::backedge_counter_offset()) + in_bytes(InvocationCounter::counter_offset());
1736 __ bind(Lno_mdo);
1737 __ lwz(Rscratch2, mo_bc_offs, R4_counters);
1738 __ lwz(Rscratch3, in_bytes(MethodCounters::backedge_mask_offset()), R4_counters);
1739 __ addi(Rscratch2, Rscratch2, increment);
1740 __ stw(Rscratch2, mo_bc_offs, R4_counters);
1741 if (UseOnStackReplacement) {
1742 __ and_(Rscratch3, Rscratch2, Rscratch3);
1743 __ bne(CCR0, Lforward);
1744 } else {
1745 __ b(Lforward);
1746 }
1747 __ bind(Loverflow);
1748
1749 // Notify point for loop, pass branch bytecode.
1750 __ subf(R4_ARG2, Rdisp, R14_bcp); // Compute bcp of the branch bytecode (the previous bcp).
1751 __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::frequency_counter_overflow), R4_ARG2, true);
1752
1753 // Was an OSR adapter generated?
1754 __ cmpdi(CCR0, R3_RET, 0);
1755 __ beq(CCR0, Lforward);
1756
1757 // Has the nmethod been invalidated already?
1758 __ lbz(R0, nmethod::state_offset(), R3_RET);
1759 __ cmpwi(CCR0, R0, nmethod::in_use);
1760 __ bne(CCR0, Lforward);
1761
1762 // Migrate the interpreter frame off of the stack.
1763 // We can use all registers because we will not return to interpreter from this point.
1764
1765 // Save nmethod.
1766 const Register osr_nmethod = R31;
1767 __ mr(osr_nmethod, R3_RET);
1768 __ set_top_ijava_frame_at_SP_as_last_Java_frame(R1_SP, R11_scratch1);
1769 __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::OSR_migration_begin), R16_thread);
1770 __ reset_last_Java_frame();
1771 // OSR buffer is in ARG1.
1772
1773 // Remove the interpreter frame.
1774 __ merge_frames(/*top_frame_sp*/ R21_sender_SP, /*return_pc*/ R0, R11_scratch1, R12_scratch2);
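      // merge_frames leaves the caller's return pc in R0; it is moved into LR
      // below so that the OSR nmethod returns directly to the interpreter's caller.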
1775
1776 // Jump to the osr code.
1777 __ ld(R11_scratch1, nmethod::osr_entry_point_offset(), osr_nmethod);
1778 __ mtlr(R0);
1779 __ mtctr(R11_scratch1);
1780 __ bctr();
1781
1782 } else {
1783
1784 const Register invoke_ctr = Rscratch1;
1785 // Update Backedge branch separately from invocations.
1786 __ increment_backedge_counter(R4_counters, invoke_ctr, Rscratch2, Rscratch3);
1787
1788 if (ProfileInterpreter) {
1789 __ test_invocation_counter_for_mdp(invoke_ctr, R4_counters, Rscratch2, Lforward);
1790 if (UseOnStackReplacement) {
1791 __ test_backedge_count_for_osr(bumped_count, R4_counters, R14_bcp, Rdisp, Rscratch2);
1792 }
1793 } else {
1794 if (UseOnStackReplacement) {
1795 __ test_backedge_count_for_osr(invoke_ctr, R4_counters, R14_bcp, Rdisp, Rscratch2);
1796 }
1797 }
1798 }
1799
1800 __ bind(Lforward);
1801 }
1802 __ dispatch_next(vtos, 0, true);
1803 }
1804
1805 // Helper function for if_cmp* methods below.
1806 // Factored out common compare and branch code.
1807 void TemplateTable::if_cmp_common(Register Rfirst, Register Rsecond, Register Rscratch1, Register Rscratch2, Condition cc, bool is_jint, bool cmp0) {
1808 Label Lnot_taken;
1809   // Note: The condition code we get is the condition under which the
1810   // branch is *taken*! So we branch to Lnot_taken on the inverted CC here.
1811
1812 if (is_jint) {
1813 if (cmp0) {
1814 __ cmpwi(CCR0, Rfirst, 0);
1815 } else {
1816 __ cmpw(CCR0, Rfirst, Rsecond);
1817 }
1818 } else {
1819 if (cmp0) {
1820 __ cmpdi(CCR0, Rfirst, 0);
1821 } else {
1822 __ cmpd(CCR0, Rfirst, Rsecond);
1823 }
1824 }
1825 branch_conditional(CCR0, cc, Lnot_taken, /*invert*/ true);
1826
1827   // Condition holds => jump (take the bytecode branch).
1828 branch(false, false);
1829
1830   // Condition does not hold => continue with the next bytecode.
1831 __ align(32, 12);
1832 __ bind(Lnot_taken);
1833 __ profile_not_taken_branch(Rscratch1, Rscratch2);
1834 }
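// Example (a sketch): the shared template table registers if_icmpeq with
// cc == equal. The inverted conditional branch above then goes to Lnot_taken
// when the operands differ; equal operands fall through to branch(), which
// takes the bytecode branch.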
1835
1836 // Compare integer values with zero and fall through if CC holds, branch away otherwise.
1837 void TemplateTable::if_0cmp(Condition cc) {
1838 transition(itos, vtos);
1839
1840 if_cmp_common(R17_tos, noreg, R11_scratch1, R12_scratch2, cc, true, true);
1841 }
1842
1843 // Compare integer values and fall through if CC holds, branch away otherwise.
1844 //
1845 // Interface:
1846 // - Rfirst: First operand (older stack value)
1847 // - tos: Second operand (younger stack value)
1848 void TemplateTable::if_icmp(Condition cc) {
1849 transition(itos, vtos);
1850
1851 const Register Rfirst = R0,
1852 Rsecond = R17_tos;
1853
1854 __ pop_i(Rfirst);
1855 if_cmp_common(Rfirst, Rsecond, R11_scratch1, R12_scratch2, cc, true, false);
1856 }
1857
1858 void TemplateTable::if_nullcmp(Condition cc) {
1859 transition(atos, vtos);
1860
1861 if_cmp_common(R17_tos, noreg, R11_scratch1, R12_scratch2, cc, false, true);
1862 }
1863
1864 void TemplateTable::if_acmp(Condition cc) {
1865 transition(atos, vtos);
1866
1867 const Register Rfirst = R0,
1868 Rsecond = R17_tos;
1869
1870 __ pop_ptr(Rfirst);
1871 if_cmp_common(Rfirst, Rsecond, R11_scratch1, R12_scratch2, cc, false, false);
1872 }
1873
1874 void TemplateTable::ret() {
1875 locals_index(R11_scratch1);
1876 __ load_local_ptr(R17_tos, R11_scratch1, R11_scratch1);
1877
1878 __ profile_ret(vtos, R17_tos, R11_scratch1, R12_scratch2);
1879
1880 __ ld(R11_scratch1, in_bytes(Method::const_offset()), R19_method);
1881 __ add(R11_scratch1, R17_tos, R11_scratch1);
1882 __ addi(R14_bcp, R11_scratch1, in_bytes(ConstMethod::codes_offset()));
1883 __ dispatch_next(vtos, 0, true);
1884 }
1885
1886 void TemplateTable::wide_ret() {
1887 transition(vtos, vtos);
1888
1889 const Register Rindex = R3_ARG1,
1890 Rscratch1 = R11_scratch1,
1891 Rscratch2 = R12_scratch2;
1892
1893 locals_index_wide(Rindex);
1894 __ load_local_ptr(R17_tos, R17_tos, Rindex);
1895   __ profile_ret(vtos, R17_tos, Rscratch1, Rscratch2);
1896 // Tos now contains the bci, compute the bcp from that.
1897 __ ld(Rscratch1, in_bytes(Method::const_offset()), R19_method);
1898 __ addi(Rscratch2, R17_tos, in_bytes(ConstMethod::codes_offset()));
1899 __ add(R14_bcp, Rscratch1, Rscratch2);
1900 __ dispatch_next(vtos, 0, true);
1901 }
1902
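// Table switch using an indexed jump table.
// Bytecode stream format (per the JVM spec):
// Bytecode (1) | 4-byte padding | default offset (4) | low (4) | high (4) | jump offset 1 (4) | ...
// Note: Everything is big-endian format here.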
1903 void TemplateTable::tableswitch() {
1904 transition(itos, vtos);
1905
1906 Label Ldispatch, Ldefault_case;
1907 Register Rlow_byte = R3_ARG1,
1908 Rindex = Rlow_byte,
1909 Rhigh_byte = R4_ARG2,
1910 Rdef_offset_addr = R5_ARG3, // is going to contain address of default offset
1911 Rscratch1 = R11_scratch1,
1912 Rscratch2 = R12_scratch2,
1913 Roffset = R6_ARG4;
1914
1915 // Align bcp.
1916 __ addi(Rdef_offset_addr, R14_bcp, BytesPerInt);
1917 __ clrrdi(Rdef_offset_addr, Rdef_offset_addr, log2_long((jlong)BytesPerInt));
1918
1919 // Load lo & hi.
1920 __ get_u4(Rlow_byte, Rdef_offset_addr, BytesPerInt, InterpreterMacroAssembler::Unsigned);
1921   __ get_u4(Rhigh_byte, Rdef_offset_addr, 2 * BytesPerInt, InterpreterMacroAssembler::Unsigned);
1922
1923 // Check for default case (=index outside [low,high]).
1924 __ cmpw(CCR0, R17_tos, Rlow_byte);
1925 __ cmpw(CCR1, R17_tos, Rhigh_byte);
1926 __ blt(CCR0, Ldefault_case);
1927 __ bgt(CCR1, Ldefault_case);
1928
1929 // Lookup dispatch offset.
1930 __ sub(Rindex, R17_tos, Rlow_byte);
1931 __ extsw(Rindex, Rindex);
1932 __ profile_switch_case(Rindex, Rhigh_byte /* scratch */, Rscratch1, Rscratch2);
1933 __ sldi(Rindex, Rindex, LogBytesPerInt);
1934 __ addi(Rindex, Rindex, 3 * BytesPerInt);
1935 #if defined(VM_LITTLE_ENDIAN)
1936 __ lwbrx(Roffset, Rdef_offset_addr, Rindex);
1937 __ extsw(Roffset, Roffset);
1938 #else
1939 __ lwax(Roffset, Rdef_offset_addr, Rindex);
1940 #endif
1941 __ b(Ldispatch);
1942
1943 __ bind(Ldefault_case);
1944 __ profile_switch_default(Rhigh_byte, Rscratch1);
1945 __ get_u4(Roffset, Rdef_offset_addr, 0, InterpreterMacroAssembler::Signed);
1946
1947 __ bind(Ldispatch);
1948
1949 __ add(R14_bcp, Roffset, R14_bcp);
1950 __ dispatch_next(vtos, 0, true);
1951 }
1952
1953 void TemplateTable::lookupswitch() {
1954 transition(itos, itos);
1955 __ stop("lookupswitch bytecode should have been rewritten");
1956 }
1957
1958 // Table switch using linear search through cases.
1959 // Bytecode stream format:
1960 // Bytecode (1) | 4-byte padding | default offset (4) | count (4) | value/offset pair1 (8) | value/offset pair2 (8) | ...
1961 // Note: Everything is big-endian format here.
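// A sketch (in C) of the search below, with the pair layout described above:
//   for (u4 i = 0; i < count; i++) {
//     if (pair[i].value == key) { bcp += pair[i].offset; goto dispatch; }
//   }
//   bcp += default_offset;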
1962 void TemplateTable::fast_linearswitch() {
1963 transition(itos, vtos);
1964
1965 Label Lloop_entry, Lsearch_loop, Lcontinue_execution, Ldefault_case;
1966 Register Rcount = R3_ARG1,
1967 Rcurrent_pair = R4_ARG2,
1968 Rdef_offset_addr = R5_ARG3, // Is going to contain address of default offset.
1969 Roffset = R31, // Might need to survive C call.
1970 Rvalue = R12_scratch2,
1971 Rscratch = R11_scratch1,
1972 Rcmp_value = R17_tos;
1973
1974 // Align bcp.
1975 __ addi(Rdef_offset_addr, R14_bcp, BytesPerInt);
1976 __ clrrdi(Rdef_offset_addr, Rdef_offset_addr, log2_long((jlong)BytesPerInt));
1977
1978 // Setup loop counter and limit.
1979 __ get_u4(Rcount, Rdef_offset_addr, BytesPerInt, InterpreterMacroAssembler::Unsigned);
1980 __ addi(Rcurrent_pair, Rdef_offset_addr, 2 * BytesPerInt); // Rcurrent_pair now points to first pair.
1981
1982 __ mtctr(Rcount);
1983 __ cmpwi(CCR0, Rcount, 0);
1984 __ bne(CCR0, Lloop_entry);
1985
1986 // Default case
1987 __ bind(Ldefault_case);
1988 __ get_u4(Roffset, Rdef_offset_addr, 0, InterpreterMacroAssembler::Signed);
1989 if (ProfileInterpreter) {
1990 __ profile_switch_default(Rdef_offset_addr, Rcount/* scratch */);
1991 }
1992 __ b(Lcontinue_execution);
1993
1994 // Next iteration
1995 __ bind(Lsearch_loop);
1996 __ bdz(Ldefault_case);
1997 __ addi(Rcurrent_pair, Rcurrent_pair, 2 * BytesPerInt);
1998 __ bind(Lloop_entry);
1999 __ get_u4(Rvalue, Rcurrent_pair, 0, InterpreterMacroAssembler::Unsigned);
2000 __ cmpw(CCR0, Rvalue, Rcmp_value);
2001 __ bne(CCR0, Lsearch_loop);
2002
2003 // Found, load offset.
2004 __ get_u4(Roffset, Rcurrent_pair, BytesPerInt, InterpreterMacroAssembler::Signed);
2005 // Calculate case index and profile
2006 __ mfctr(Rcurrent_pair);
2007 if (ProfileInterpreter) {
2008 __ sub(Rcurrent_pair, Rcount, Rcurrent_pair);
2009 __ profile_switch_case(Rcurrent_pair, Rcount /*scratch*/, Rdef_offset_addr/*scratch*/, Rscratch);
2010 }
2011
2012 __ bind(Lcontinue_execution);
2013 __ add(R14_bcp, Roffset, R14_bcp);
2014 __ dispatch_next(vtos, 0, true);
2015 }
2016
2017 // Table switch using binary search (value/offset pairs are ordered).
2018 // Bytecode stream format:
2019 // Bytecode (1) | 4-byte padding | default offset (4) | count (4) | value/offset pair1 (8) | value/offset pair2 (8) | ...
2020 // Note: Everything is big-endian format here. So on little-endian machines, we have to byte-reverse the offset, the count, and the compared value.
2021 void TemplateTable::fast_binaryswitch() {
2022
2023 transition(itos, vtos);
2024 // Implementation using the following core algorithm: (copied from Intel)
2025 //
2026 // int binary_search(int key, LookupswitchPair* array, int n) {
2027 // // Binary search according to "Methodik des Programmierens" by
2028 // // Edsger W. Dijkstra and W.H.J. Feijen, Addison Wesley Germany 1985.
2029 // int i = 0;
2030 // int j = n;
2031 // while (i+1 < j) {
2032 // // invariant P: 0 <= i < j <= n and (a[i] <= key < a[j] or Q)
2033 // // with Q: for all i: 0 <= i < n: key < a[i]
2034 //     // where a stands for the array and assuming that the (nonexistent)
2035 // // element a[n] is infinitely big.
2036 // int h = (i + j) >> 1;
2037 // // i < h < j
2038 // if (key < array[h].fast_match()) {
2039 // j = h;
2040 // } else {
2041 // i = h;
2042 // }
2043 // }
2044 // // R: a[i] <= key < a[i+1] or Q
2045 // // (i.e., if key is within array, i is the correct index)
2046 // return i;
2047 // }
2048
2049 // register allocation
2050 const Register Rkey = R17_tos; // already set (tosca)
2051 const Register Rarray = R3_ARG1;
2052 const Register Ri = R4_ARG2;
2053 const Register Rj = R5_ARG3;
2054 const Register Rh = R6_ARG4;
2055 const Register Rscratch = R11_scratch1;
2056
2057 const int log_entry_size = 3;
2058 const int entry_size = 1 << log_entry_size;
2059
2060 Label found;
2061
2062   // Find array start.
2063 __ addi(Rarray, R14_bcp, 3 * BytesPerInt);
2064 __ clrrdi(Rarray, Rarray, log2_long((jlong)BytesPerInt));
2065
2066 // initialize i & j
2067   __ li(Ri, 0);
2068 __ get_u4(Rj, Rarray, -BytesPerInt, InterpreterMacroAssembler::Unsigned);
2069
2070 // and start.
2071 Label entry;
2072 __ b(entry);
2073
2074 // binary search loop
2075 { Label loop;
2076 __ bind(loop);
2077 // int h = (i + j) >> 1;
2078 __ srdi(Rh, Rh, 1);
2079 // if (key < array[h].fast_match()) {
2080 // j = h;
2081 // } else {
2082 // i = h;
2083 // }
2084 __ sldi(Rscratch, Rh, log_entry_size);
2085 #if defined(VM_LITTLE_ENDIAN)
2086 __ lwbrx(Rscratch, Rscratch, Rarray);
2087 #else
2088 __ lwzx(Rscratch, Rscratch, Rarray);
2089 #endif
2090
2091 // if (key < current value)
2092 // Rh = Rj
2093 // else
2094 // Rh = Ri
2095 Label Lgreater;
2096 __ cmpw(CCR0, Rkey, Rscratch);
2097 __ bge(CCR0, Lgreater);
2098 __ mr(Rj, Rh);
2099 __ b(entry);
2100 __ bind(Lgreater);
2101 __ mr(Ri, Rh);
2102
2103 // while (i+1 < j)
2104 __ bind(entry);
2105 __ addi(Rscratch, Ri, 1);
2106 __ cmpw(CCR0, Rscratch, Rj);
2107     __ add(Rh, Ri, Rj); // Precompute i + j; the srdi at the loop top makes it h = (i + j) >> 1.
2108
2109 __ blt(CCR0, loop);
2110 }
2111
2112 // End of binary search, result index is i (must check again!).
2113 Label default_case;
2114 Label continue_execution;
2115 if (ProfileInterpreter) {
2116 __ mr(Rh, Ri); // Save index in i for profiling.
2117 }
2118 // Ri = value offset
2119 __ sldi(Ri, Ri, log_entry_size);
2120 __ add(Ri, Ri, Rarray);
2121 __ get_u4(Rscratch, Ri, 0, InterpreterMacroAssembler::Unsigned);
2122
2123   // Compare the key against the value at index i ('found' was declared above).
2125   __ cmpw(CCR0, Rkey, Rscratch);
2126   __ beq(CCR0, found);
2127 // entry not found -> j = default offset
2128 __ get_u4(Rj, Rarray, -2 * BytesPerInt, InterpreterMacroAssembler::Unsigned);
2129 __ b(default_case);
2130
2131   __ bind(found);
2132 // entry found -> j = offset
2133 __ profile_switch_case(Rh, Rj, Rscratch, Rkey);
2134 __ get_u4(Rj, Ri, BytesPerInt, InterpreterMacroAssembler::Unsigned);
2135
2136 if (ProfileInterpreter) {
2137 __ b(continue_execution);
2138 }
2139
2140 __ bind(default_case); // fall through (if not profiling)
2141 __ profile_switch_default(Ri, Rscratch);
2142
2143 __ bind(continue_execution);
2144
2145 __ extsw(Rj, Rj);
2146 __ add(R14_bcp, Rj, R14_bcp);
2147   __ dispatch_next(vtos, 0, true);
2148 }
2149
2150 void TemplateTable::_return(TosState state) {
2151 transition(state, state);
2152 assert(_desc->calls_vm(),
2153 "inconsistent calls_vm information"); // call in remove_activation
2154
2155 if (_desc->bytecode() == Bytecodes::_return_register_finalizer) {
2156
2157 Register Rscratch = R11_scratch1,
2158 Rklass = R12_scratch2,
2159 Rklass_flags = Rklass;
2160 Label Lskip_register_finalizer;
2161
2162 // Check if the method has the FINALIZER flag set and call into the VM to finalize in this case.
2163 assert(state == vtos, "only valid state");
2164 __ ld(R17_tos, 0, R18_locals);
2165
2166 // Load klass of this obj.
2167 __ load_klass(Rklass, R17_tos);
2168 __ lwz(Rklass_flags, in_bytes(Klass::access_flags_offset()), Rklass);
2169 __ testbitdi(CCR0, R0, Rklass_flags, exact_log2(JVM_ACC_HAS_FINALIZER));
2170 __ bfalse(CCR0, Lskip_register_finalizer);
2171
2172 __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::register_finalizer), R17_tos /* obj */);
2173
2174 __ align(32, 12);
2175 __ bind(Lskip_register_finalizer);
2176 }
2177
2178 if (SafepointMechanism::uses_thread_local_poll() && _desc->bytecode() != Bytecodes::_return_register_finalizer) {
2179 Label no_safepoint;
2180 __ ld(R11_scratch1, in_bytes(Thread::polling_page_offset()), R16_thread);
2181 __ andi_(R11_scratch1, R11_scratch1, SafepointMechanism::poll_bit());
2182 __ beq(CCR0, no_safepoint);
2183 __ push(state);
2184 __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::at_safepoint));
2185 __ pop(state);
2186 __ bind(no_safepoint);
2187 }
2188
2189 // Move the result value into the correct register and remove memory stack frame.
2190 __ remove_activation(state, /* throw_monitor_exception */ true);
2191 // Restoration of lr done by remove_activation.
2192 switch (state) {
2193 // Narrow result if state is itos but result type is smaller.
2194 // Need to narrow in the return bytecode rather than in generate_return_entry
2195 // since compiled code callers expect the result to already be narrowed.
2196 case itos: __ narrow(R17_tos); /* fall through */
2197 case ltos:
2198 case atos: __ mr(R3_RET, R17_tos); break;
2199 case ftos:
2200 case dtos: __ fmr(F1_RET, F15_ftos); break;
2201 case vtos: // This might be a constructor. Final fields (and volatile fields on PPC64) need
2202 // to get visible before the reference to the object gets stored anywhere.
2203 __ membar(Assembler::StoreStore); break;
2204 default : ShouldNotReachHere();
2205 }
2206 __ blr();
2207 }
2208
2209 // ============================================================================
2210 // Constant pool cache access
2211 //
2212 // Memory ordering:
2213 //
2214 // As in the C++ interpreter, we load the fields
2215 //   - _indices
2216 //   - _f12_oop
2217 // with acquire semantics, because they are inspected to decide whether the cache
2218 // entry is already resolved. We must not let later loads float above this check.
2219 // See also comments in ConstantPoolCacheEntry::bytecode_1(),
2220 // ConstantPoolCacheEntry::bytecode_2() and ConstantPoolCacheEntry::f1();
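// The resulting pattern is (a sketch):
//   lbz     indices byte       // plain load
//   cmp/beq resolved           // conditional branch on the loaded value
//   isync                      // acquire: no later load may pass the check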
2221
2222 // Call into the VM if the call site is not yet resolved.
2223 //
2224 // Input regs:
2225 // - None, all passed regs are outputs.
2226 //
2227 // Returns:
2228 // - Rcache: The const pool cache entry that contains the resolved result.
2229 // - Rresult: Either noreg or output for f1/f2.
2230 //
2231 // Kills:
2232 // - Rscratch
2233 void TemplateTable::resolve_cache_and_index(int byte_no, Register Rcache, Register Rscratch, size_t index_size) {
2234
2235 __ get_cache_and_index_at_bcp(Rcache, 1, index_size);
2236 Label Lresolved, Ldone, L_clinit_barrier_slow;
2237
2238 Bytecodes::Code code = bytecode();
2239 switch (code) {
2240 case Bytecodes::_nofast_getfield: code = Bytecodes::_getfield; break;
2241 case Bytecodes::_nofast_putfield: code = Bytecodes::_putfield; break;
2242 default:
2243 break;
2244 }
2245
2246 assert(byte_no == f1_byte || byte_no == f2_byte, "byte_no out of range");
2247 // We are resolved if the indices offset contains the current bytecode.
2248 #if defined(VM_LITTLE_ENDIAN)
2249 __ lbz(Rscratch, in_bytes(ConstantPoolCache::base_offset() + ConstantPoolCacheEntry::indices_offset()) + byte_no + 1, Rcache);
2250 #else
2251 __ lbz(Rscratch, in_bytes(ConstantPoolCache::base_offset() + ConstantPoolCacheEntry::indices_offset()) + 7 - (byte_no + 1), Rcache);
2252 #endif
2253 // Acquire by cmp-br-isync (see below).
2254 __ cmpdi(CCR0, Rscratch, (int)code);
2255 __ beq(CCR0, Lresolved);
2256
2257 // Class initialization barrier slow path lands here as well.
2258 __ bind(L_clinit_barrier_slow);
2259
2260 address entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_from_cache);
2261 __ li(R4_ARG2, code);
2262 __ call_VM(noreg, entry, R4_ARG2, true);
2263
2264 // Update registers with resolved info.
2265 __ get_cache_and_index_at_bcp(Rcache, 1, index_size);
2266 __ b(Ldone);
2267
2268 __ bind(Lresolved);
2269 __ isync(); // Order load wrt. succeeding loads.
2270
2271 // Class initialization barrier for static methods
2272 if (VM_Version::supports_fast_class_init_checks() && bytecode() == Bytecodes::_invokestatic) {
2273 const Register method = Rscratch;
2274 const Register klass = Rscratch;
2275
2276 __ load_resolved_method_at_index(byte_no, Rcache, method);
2277 __ load_method_holder(klass, method);
2278 __ clinit_barrier(klass, R16_thread, NULL /*L_fast_path*/, &L_clinit_barrier_slow);
2279 }
2280
2281 __ bind(Ldone);
2282 }
2283
2284 // Load the constant pool cache entry at field accesses into registers.
2285 // The Rcache and Rindex registers must be set before the call.
2286 // Input:
2287 // - Rcache, Rindex
2288 // Output:
2289 // - Robj, Roffset, Rflags
2290 void TemplateTable::load_field_cp_cache_entry(Register Robj,
2291 Register Rcache,
2292 Register Rindex /* unused on PPC64 */,
2293 Register Roffset,
2294 Register Rflags,
2295 bool is_static = false) {
2296 assert_different_registers(Rcache, Rflags, Roffset);
2297 // assert(Rindex == noreg, "parameter not used on PPC64");
2298
2299 ByteSize cp_base_offset = ConstantPoolCache::base_offset();
2300 __ ld(Rflags, in_bytes(cp_base_offset) + in_bytes(ConstantPoolCacheEntry::flags_offset()), Rcache);
2301 __ ld(Roffset, in_bytes(cp_base_offset) + in_bytes(ConstantPoolCacheEntry::f2_offset()), Rcache);
2302 if (is_static) {
2303 __ ld(Robj, in_bytes(cp_base_offset) + in_bytes(ConstantPoolCacheEntry::f1_offset()), Rcache);
2304 __ ld(Robj, in_bytes(Klass::java_mirror_offset()), Robj);
2305 __ resolve_oop_handle(Robj);
2306 // Acquire not needed here. Following access has an address dependency on this value.
2307 }
2308 }
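// A sketch of the ConstantPoolCacheEntry fields consumed above:
//   f1    - Klass* of the field holder (static case: its java_mirror is loaded)
//   f2    - field offset within the object (or within the mirror for statics)
//   flags - tos state, volatile bit and friends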
2309
2310 // Load the constant pool cache entry at invokes into registers.
2311 // Resolve if necessary.
2312
2313 // Input Registers:
2314 // - None, bcp is used, though
2315 //
2316 // Return registers:
2317 // - Rmethod (f1 field or f2 if invokevirtual)
2318 // - Ritable_index (f2 field)
2319 // - Rflags (flags field)
2320 //
2321 // Kills:
2322 // - R21
2323 //
2324 void TemplateTable::load_invoke_cp_cache_entry(int byte_no,
2325 Register Rmethod,
2326 Register Ritable_index,
2327 Register Rflags,
2328 bool is_invokevirtual,
2329 bool is_invokevfinal,
2330 bool is_invokedynamic) {
2331
2332 ByteSize cp_base_offset = ConstantPoolCache::base_offset();
2333 // Determine constant pool cache field offsets.
2334 assert(is_invokevirtual == (byte_no == f2_byte), "is_invokevirtual flag redundant");
2335 const int method_offset = in_bytes(cp_base_offset + (is_invokevirtual ? ConstantPoolCacheEntry::f2_offset() : ConstantPoolCacheEntry::f1_offset()));
2336 const int flags_offset = in_bytes(cp_base_offset + ConstantPoolCacheEntry::flags_offset());
2337 // Access constant pool cache fields.
2338 const int index_offset = in_bytes(cp_base_offset + ConstantPoolCacheEntry::f2_offset());
2339
2340 Register Rcache = R21_tmp1; // Note: same register as R21_sender_SP.
2341
2342 if (is_invokevfinal) {
2343 assert(Ritable_index == noreg, "register not used");
2344 // Already resolved.
2345 __ get_cache_and_index_at_bcp(Rcache, 1);
2346 } else {
2347 resolve_cache_and_index(byte_no, Rcache, /* temp */ Rmethod, is_invokedynamic ? sizeof(u4) : sizeof(u2));
2348 }
2349
2350 __ ld(Rmethod, method_offset, Rcache);
2351 __ ld(Rflags, flags_offset, Rcache);
2352
2353 if (Ritable_index != noreg) {
2354 __ ld(Ritable_index, index_offset, Rcache);
2355 }
2356 }
2357
2358 // ============================================================================
2359 // Field access
2360
2361 // Volatile variables demand their effects be made known to all CPUs
2362 // in order. Store buffers on most chips allow reads & writes to
2363 // reorder; the JMM's ReadAfterWrite.java test fails in -Xint mode
2364 // without some kind of memory barrier (i.e., it's not sufficient that
2365 // the interpreter does not reorder volatile references, the hardware
2366 // also must not reorder them).
2367 //
2368 // According to the new Java Memory Model (JMM):
2369 // (1) All volatiles are serialized wrt to each other. ALSO reads &
2370 // writes act as acquire & release, so:
2371 // (2) A read cannot let unrelated NON-volatile memory refs that
2372 // happen after the read float up to before the read. It's OK for
2373 // non-volatile memory refs that happen before the volatile read to
2374 // float down below it.
2375 // (3) Similarly, a volatile write cannot let unrelated NON-volatile
2376 // memory refs that happen BEFORE the write float down to after the
2377 // write. It's OK for non-volatile memory refs that happen after the
2378 // volatile write to float up before it.
2379 //
2380 // We only put in barriers around volatile refs (they are expensive),
2381 // not _between_ memory refs (that would require us to track the
2382 // flavor of the previous memory refs). Requirements (2) and (3)
2383 // require some barriers before volatile stores and after volatile
2384 // loads. These nearly cover requirement (1) but miss the
2385 // volatile-store-volatile-load case. This final case is placed after
2386 // volatile-stores although it could just as well go before
2387 // volatile-loads.
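// On PPC64 this maps roughly to the following (a sketch; the exact placement
// depends on support_IRIW_for_not_multiple_copy_atomic_cpu, see below):
//   volatile load:  (sync;) load; twi; isync   (fence - load - acquire)
//   volatile store: lwsync; store (; sync)     (release - store)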
2388
2389 // The cache and index registers are expected to be set before the call.
2390 // Correct values of the cache and index registers are preserved.
2391 // Kills:
2392 // Rcache (if has_tos)
2393 // Rscratch
2394 void TemplateTable::jvmti_post_field_access(Register Rcache, Register Rscratch, bool is_static, bool has_tos) {
2395
2396 assert_different_registers(Rcache, Rscratch);
2397
2398 if (JvmtiExport::can_post_field_access()) {
2399 ByteSize cp_base_offset = ConstantPoolCache::base_offset();
2400 Label Lno_field_access_post;
2401
2402     // Check if post field access is enabled.
2403 int offs = __ load_const_optimized(Rscratch, JvmtiExport::get_field_access_count_addr(), R0, true);
2404 __ lwz(Rscratch, offs, Rscratch);
2405
2406 __ cmpwi(CCR0, Rscratch, 0);
2407 __ beq(CCR0, Lno_field_access_post);
2408
2409 // Post access enabled - do it!
2410 __ addi(Rcache, Rcache, in_bytes(cp_base_offset));
2411 if (is_static) {
2412 __ li(R17_tos, 0);
2413 } else {
2414 if (has_tos) {
2415 // The fast bytecode versions have obj ptr in register.
2416         // Thus, save the object pointer before call_VM() clobbers it:
2417         // put the object on tos where GC wants it.
2418 __ push_ptr(R17_tos);
2419 } else {
2420 // Load top of stack (do not pop the value off the stack).
2421 __ ld(R17_tos, Interpreter::expr_offset_in_bytes(0), R15_esp);
2422 }
2423 __ verify_oop(R17_tos);
2424 }
2425 // tos: object pointer or NULL if static
2426 // cache: cache entry pointer
2427 __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::post_field_access), R17_tos, Rcache);
2428 if (!is_static && has_tos) {
2429 // Restore object pointer.
2430 __ pop_ptr(R17_tos);
2431 __ verify_oop(R17_tos);
2432 } else {
2433 // Cache is still needed to get class or obj.
2434 __ get_cache_and_index_at_bcp(Rcache, 1);
2435 }
2436
2437 __ align(32, 12);
2438 __ bind(Lno_field_access_post);
2439 }
2440 }
2441
2442 // kills R11_scratch1
2443 void TemplateTable::pop_and_check_object(Register Roop) {
2444 Register Rtmp = R11_scratch1;
2445
2446 assert_different_registers(Rtmp, Roop);
2447 __ pop_ptr(Roop);
2448 // For field access must check obj.
2449 __ null_check_throw(Roop, -1, Rtmp);
2450 __ verify_oop(Roop);
2451 }
2452
2453 // PPC64: implement volatile loads as fence-load-acquire.
2454 void TemplateTable::getfield_or_static(int byte_no, bool is_static, RewriteControl rc) {
2455 transition(vtos, vtos);
2456
2457 Label Lacquire, Lisync;
2458
2459 const Register Rcache = R3_ARG1,
2460 Rclass_or_obj = R22_tmp2,
2461 Roffset = R23_tmp3,
2462 Rflags = R31,
2463 Rbtable = R5_ARG3,
2464 Rbc = R6_ARG4,
2465 Rscratch = R12_scratch2;
2466
2467 static address field_branch_table[number_of_states],
2468 static_branch_table[number_of_states];
2469
2470 address* branch_table = (is_static || rc == may_not_rewrite) ? static_branch_table : field_branch_table;
2471
2472 // Get field offset.
2473 resolve_cache_and_index(byte_no, Rcache, Rscratch, sizeof(u2));
2474
2475 // JVMTI support
2476 jvmti_post_field_access(Rcache, Rscratch, is_static, false);
2477
2478 // Load after possible GC.
2479 load_field_cp_cache_entry(Rclass_or_obj, Rcache, noreg, Roffset, Rflags, is_static);
2480
2481 // Load pointer to branch table.
2482 __ load_const_optimized(Rbtable, (address)branch_table, Rscratch);
2483
2484 // Get volatile flag.
2485 __ rldicl(Rscratch, Rflags, 64-ConstantPoolCacheEntry::is_volatile_shift, 63); // Extract volatile bit.
2486 // Note: sync is needed before volatile load on PPC64.
2487
2488 // Check field type.
2489 __ rldicl(Rflags, Rflags, 64-ConstantPoolCacheEntry::tos_state_shift, 64-ConstantPoolCacheEntry::tos_state_bits);
2490
2491 #ifdef ASSERT
2492 Label LFlagInvalid;
2493 __ cmpldi(CCR0, Rflags, number_of_states);
2494 __ bge(CCR0, LFlagInvalid);
2495 #endif
2496
2497 // Load from branch table and dispatch (volatile case: one instruction ahead).
2498 __ sldi(Rflags, Rflags, LogBytesPerWord);
2499 __ cmpwi(CCR6, Rscratch, 1); // Volatile?
2500 if (support_IRIW_for_not_multiple_copy_atomic_cpu) {
2501 __ sldi(Rscratch, Rscratch, exact_log2(BytesPerInstWord)); // Volatile ? size of 1 instruction : 0.
2502 }
2503 __ ldx(Rbtable, Rbtable, Rflags);
2504
2505 // Get the obj from stack.
2506 if (!is_static) {
2507 pop_and_check_object(Rclass_or_obj); // Kills R11_scratch1.
2508 } else {
2509 __ verify_oop(Rclass_or_obj);
2510 }
2511
2512 if (support_IRIW_for_not_multiple_copy_atomic_cpu) {
2513 __ subf(Rbtable, Rscratch, Rbtable); // Point to volatile/non-volatile entry point.
2514 }
2515 __ mtctr(Rbtable);
2516 __ bctr();
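  // Note: each branch_table[tos] entry points at the non-volatile entry point.
  // With support_IRIW_for_not_multiple_copy_atomic_cpu, a volatile access backs
  // up by one instruction to the fence emitted just before each entry point
  // (hence the subf above).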
2517
2518 #ifdef ASSERT
2519 __ bind(LFlagInvalid);
2520 __ stop("got invalid flag", 0x654);
2521 #endif
2522
2523 if (!is_static && rc == may_not_rewrite) {
2524 // We reuse the code from is_static. It's jumped to via the table above.
2525 return;
2526 }
2527
2528 #ifdef ASSERT
2529 // __ bind(Lvtos);
2530 address pc_before_fence = __ pc();
2531 __ fence(); // Volatile entry point (one instruction before non-volatile_entry point).
2532 assert(__ pc() - pc_before_fence == (ptrdiff_t)BytesPerInstWord, "must be single instruction");
2533 assert(branch_table[vtos] == 0, "can't compute twice");
2534 branch_table[vtos] = __ pc(); // non-volatile_entry point
2535 __ stop("vtos unexpected", 0x655);
2536 #endif
2537
2538 __ align(32, 28, 28); // Align load.
2539 // __ bind(Ldtos);
2540 __ fence(); // Volatile entry point (one instruction before non-volatile_entry point).
2541 assert(branch_table[dtos] == 0, "can't compute twice");
2542 branch_table[dtos] = __ pc(); // non-volatile_entry point
2543 __ lfdx(F15_ftos, Rclass_or_obj, Roffset);
2544 __ push(dtos);
2545 if (!is_static && rc == may_rewrite) {
2546 patch_bytecode(Bytecodes::_fast_dgetfield, Rbc, Rscratch);
2547 }
2548 {
2549 Label acquire_double;
2550 __ beq(CCR6, acquire_double); // Volatile?
2551 __ dispatch_epilog(vtos, Bytecodes::length_for(bytecode()));
2552
2553 __ bind(acquire_double);
2554 __ fcmpu(CCR0, F15_ftos, F15_ftos); // Acquire by cmp-br-isync.
2555 __ beq_predict_taken(CCR0, Lisync);
2556     __ b(Lisync); // In case of NaN.
2557 }
2558
2559 __ align(32, 28, 28); // Align load.
2560 // __ bind(Lftos);
2561 __ fence(); // Volatile entry point (one instruction before non-volatile_entry point).
2562 assert(branch_table[ftos] == 0, "can't compute twice");
2563 branch_table[ftos] = __ pc(); // non-volatile_entry point
2564 __ lfsx(F15_ftos, Rclass_or_obj, Roffset);
2565 __ push(ftos);
2566 if (!is_static && rc == may_rewrite) {
2567 patch_bytecode(Bytecodes::_fast_fgetfield, Rbc, Rscratch);
2568 }
2569 {
2570 Label acquire_float;
2571 __ beq(CCR6, acquire_float); // Volatile?
2572 __ dispatch_epilog(vtos, Bytecodes::length_for(bytecode()));
2573
2574 __ bind(acquire_float);
2575 __ fcmpu(CCR0, F15_ftos, F15_ftos); // Acquire by cmp-br-isync.
2576 __ beq_predict_taken(CCR0, Lisync);
2577     __ b(Lisync); // In case of NaN.
2578 }
2579
2580 __ align(32, 28, 28); // Align load.
2581 // __ bind(Litos);
2582 __ fence(); // Volatile entry point (one instruction before non-volatile_entry point).
2583 assert(branch_table[itos] == 0, "can't compute twice");
2584 branch_table[itos] = __ pc(); // non-volatile_entry point
2585 __ lwax(R17_tos, Rclass_or_obj, Roffset);
2586 __ push(itos);
2587 if (!is_static && rc == may_rewrite) {
2588 patch_bytecode(Bytecodes::_fast_igetfield, Rbc, Rscratch);
2589 }
2590 __ beq(CCR6, Lacquire); // Volatile?
2591 __ dispatch_epilog(vtos, Bytecodes::length_for(bytecode()));
2592
2593 __ align(32, 28, 28); // Align load.
2594 // __ bind(Lltos);
2595 __ fence(); // Volatile entry point (one instruction before non-volatile_entry point).
2596 assert(branch_table[ltos] == 0, "can't compute twice");
2597 branch_table[ltos] = __ pc(); // non-volatile_entry point
2598 __ ldx(R17_tos, Rclass_or_obj, Roffset);
2599 __ push(ltos);
2600 if (!is_static && rc == may_rewrite) {
2601 patch_bytecode(Bytecodes::_fast_lgetfield, Rbc, Rscratch);
2602 }
2603 __ beq(CCR6, Lacquire); // Volatile?
2604 __ dispatch_epilog(vtos, Bytecodes::length_for(bytecode()));
2605
2606 __ align(32, 28, 28); // Align load.
2607 // __ bind(Lbtos);
2608 __ fence(); // Volatile entry point (one instruction before non-volatile_entry point).
2609 assert(branch_table[btos] == 0, "can't compute twice");
2610 branch_table[btos] = __ pc(); // non-volatile_entry point
2611 __ lbzx(R17_tos, Rclass_or_obj, Roffset);
2612 __ extsb(R17_tos, R17_tos);
2613 __ push(btos);
2614 if (!is_static && rc == may_rewrite) {
2615 patch_bytecode(Bytecodes::_fast_bgetfield, Rbc, Rscratch);
2616 }
2617 __ beq(CCR6, Lacquire); // Volatile?
2618 __ dispatch_epilog(vtos, Bytecodes::length_for(bytecode()));
2619
2620 __ align(32, 28, 28); // Align load.
2621 // __ bind(Lztos); (same code as btos)
2622 __ fence(); // Volatile entry point (one instruction before non-volatile_entry point).
2623 assert(branch_table[ztos] == 0, "can't compute twice");
2624 branch_table[ztos] = __ pc(); // non-volatile_entry point
2625 __ lbzx(R17_tos, Rclass_or_obj, Roffset);
2626 __ push(ztos);
2627 if (!is_static && rc == may_rewrite) {
2628     // Use btos rewriting; no truncation to the t/f bit is needed for getfield.
2629 patch_bytecode(Bytecodes::_fast_bgetfield, Rbc, Rscratch);
2630 }
2631 __ beq(CCR6, Lacquire); // Volatile?
2632 __ dispatch_epilog(vtos, Bytecodes::length_for(bytecode()));
2633
2634 __ align(32, 28, 28); // Align load.
2635 // __ bind(Lctos);
2636 __ fence(); // Volatile entry point (one instruction before non-volatile_entry point).
2637 assert(branch_table[ctos] == 0, "can't compute twice");
2638 branch_table[ctos] = __ pc(); // non-volatile_entry point
2639 __ lhzx(R17_tos, Rclass_or_obj, Roffset);
2640 __ push(ctos);
2641 if (!is_static && rc == may_rewrite) {
2642 patch_bytecode(Bytecodes::_fast_cgetfield, Rbc, Rscratch);
2643 }
2644 __ beq(CCR6, Lacquire); // Volatile?
2645 __ dispatch_epilog(vtos, Bytecodes::length_for(bytecode()));
2646
2647 __ align(32, 28, 28); // Align load.
2648 // __ bind(Lstos);
2649 __ fence(); // Volatile entry point (one instruction before non-volatile_entry point).
2650 assert(branch_table[stos] == 0, "can't compute twice");
2651 branch_table[stos] = __ pc(); // non-volatile_entry point
2652 __ lhax(R17_tos, Rclass_or_obj, Roffset);
2653 __ push(stos);
2654 if (!is_static && rc == may_rewrite) {
2655 patch_bytecode(Bytecodes::_fast_sgetfield, Rbc, Rscratch);
2656 }
2657 __ beq(CCR6, Lacquire); // Volatile?
2658 __ dispatch_epilog(vtos, Bytecodes::length_for(bytecode()));
2659
2660 __ align(32, 28, 28); // Align load.
2661 // __ bind(Latos);
2662 __ fence(); // Volatile entry point (one instruction before non-volatile_entry point).
2663 assert(branch_table[atos] == 0, "can't compute twice");
2664 branch_table[atos] = __ pc(); // non-volatile_entry point
2665 do_oop_load(_masm, Rclass_or_obj, Roffset, R17_tos, Rscratch, /* nv temp */ Rflags, IN_HEAP);
2666 __ verify_oop(R17_tos);
2667 __ push(atos);
2668 //__ dcbt(R17_tos); // prefetch
2669 if (!is_static && rc == may_rewrite) {
2670 patch_bytecode(Bytecodes::_fast_agetfield, Rbc, Rscratch);
2671 }
2672 __ beq(CCR6, Lacquire); // Volatile?
2673 __ dispatch_epilog(vtos, Bytecodes::length_for(bytecode()));
2674
2675 __ align(32, 12);
2676 __ bind(Lacquire);
2677 __ twi_0(R17_tos);
2678 __ bind(Lisync);
2679 __ isync(); // acquire
2680
2681 #ifdef ASSERT
2682 for (int i = 0; i<number_of_states; ++i) {
2683 assert(branch_table[i], "get initialization");
2684 //tty->print_cr("get: %s_branch_table[%d] = 0x%llx (opcode 0x%llx)",
2685 // is_static ? "static" : "field", i, branch_table[i], *((unsigned int*)branch_table[i]));
2686 }
2687 #endif
2688 }
2689
2690 void TemplateTable::getfield(int byte_no) {
2691 getfield_or_static(byte_no, false);
2692 }
2693
2694 void TemplateTable::nofast_getfield(int byte_no) {
2695 getfield_or_static(byte_no, false, may_not_rewrite);
2696 }
2697
2698 void TemplateTable::getstatic(int byte_no) {
2699 getfield_or_static(byte_no, true);
2700 }
2701
2702 // The cache and index registers are expected to be set before the call.
2703 // The function may destroy various registers, just not the cache and index registers.
2704 void TemplateTable::jvmti_post_field_mod(Register Rcache, Register Rscratch, bool is_static) {
2705
2706 assert_different_registers(Rcache, Rscratch, R6_ARG4);
2707
2708 if (JvmtiExport::can_post_field_modification()) {
2709 Label Lno_field_mod_post;
2710
2711     // Check if post field modification is enabled.
2712 int offs = __ load_const_optimized(Rscratch, JvmtiExport::get_field_modification_count_addr(), R0, true);
2713 __ lwz(Rscratch, offs, Rscratch);
2714
2715 __ cmpwi(CCR0, Rscratch, 0);
2716 __ beq(CCR0, Lno_field_mod_post);
2717
2718 // Do the post
2719 ByteSize cp_base_offset = ConstantPoolCache::base_offset();
2720 const Register Robj = Rscratch;
2721
2722 __ addi(Rcache, Rcache, in_bytes(cp_base_offset));
2723 if (is_static) {
2724 // Life is simple. Null out the object pointer.
2725 __ li(Robj, 0);
2726 } else {
2727 // In case of the fast versions, value lives in registers => put it back on tos.
2728 int offs = Interpreter::expr_offset_in_bytes(0);
2729 Register base = R15_esp;
2730 switch(bytecode()) {
2731 case Bytecodes::_fast_aputfield: __ push_ptr(); offs+= Interpreter::stackElementSize; break;
2732 case Bytecodes::_fast_iputfield: // Fall through
2733 case Bytecodes::_fast_bputfield: // Fall through
2734 case Bytecodes::_fast_zputfield: // Fall through
2735 case Bytecodes::_fast_cputfield: // Fall through
2736 case Bytecodes::_fast_sputfield: __ push_i(); offs+= Interpreter::stackElementSize; break;
2737 case Bytecodes::_fast_lputfield: __ push_l(); offs+=2*Interpreter::stackElementSize; break;
2738 case Bytecodes::_fast_fputfield: __ push_f(); offs+= Interpreter::stackElementSize; break;
2739 case Bytecodes::_fast_dputfield: __ push_d(); offs+=2*Interpreter::stackElementSize; break;
2740 default: {
2741 offs = 0;
2742 base = Robj;
2743 const Register Rflags = Robj;
2744 Label is_one_slot;
2745 // Life is harder. The stack holds the value on top, followed by the
2746 // object. We don't know the size of the value, though; it could be
2747 // one or two words depending on its type. As a result, we must find
2748 // the type to determine where the object is.
2749 __ ld(Rflags, in_bytes(ConstantPoolCacheEntry::flags_offset()), Rcache); // Big Endian
2750 __ rldicl(Rflags, Rflags, 64-ConstantPoolCacheEntry::tos_state_shift, 64-ConstantPoolCacheEntry::tos_state_bits);
2751
2752 __ cmpwi(CCR0, Rflags, ltos);
2753 __ cmpwi(CCR1, Rflags, dtos);
2754 __ addi(base, R15_esp, Interpreter::expr_offset_in_bytes(1));
2755 __ crnor(CCR0, Assembler::equal, CCR1, Assembler::equal);
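          // CCR0.eq := !(flags == ltos || flags == dtos), so the branch below
          // is taken exactly for one-slot values.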
2756 __ beq(CCR0, is_one_slot);
2757 __ addi(base, R15_esp, Interpreter::expr_offset_in_bytes(2));
2758 __ bind(is_one_slot);
2759 break;
2760 }
2761 }
2762 __ ld(Robj, offs, base);
2763 __ verify_oop(Robj);
2764 }
2765
2766 __ addi(R6_ARG4, R15_esp, Interpreter::expr_offset_in_bytes(0));
2767 __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::post_field_modification), Robj, Rcache, R6_ARG4);
2768 __ get_cache_and_index_at_bcp(Rcache, 1);
2769
2770 // In case of the fast versions, value lives in registers => put it back on tos.
2771 switch(bytecode()) {
2772 case Bytecodes::_fast_aputfield: __ pop_ptr(); break;
2773 case Bytecodes::_fast_iputfield: // Fall through
2774 case Bytecodes::_fast_bputfield: // Fall through
2775 case Bytecodes::_fast_zputfield: // Fall through
2776 case Bytecodes::_fast_cputfield: // Fall through
2777 case Bytecodes::_fast_sputfield: __ pop_i(); break;
2778 case Bytecodes::_fast_lputfield: __ pop_l(); break;
2779 case Bytecodes::_fast_fputfield: __ pop_f(); break;
2780 case Bytecodes::_fast_dputfield: __ pop_d(); break;
2781       default: break; // Nothing to do.
2782 }
2783
2784 __ align(32, 12);
2785 __ bind(Lno_field_mod_post);
2786 }
2787 }
2788
2789 // PPC64: implement volatile stores as release-store (return bytecode contains an additional release).
2790 void TemplateTable::putfield_or_static(int byte_no, bool is_static, RewriteControl rc) {
2791 Label Lvolatile;
2792
2793 const Register Rcache = R5_ARG3, // Do not use ARG1/2 (causes trouble in jvmti_post_field_mod).
2794 Rclass_or_obj = R31, // Needs to survive C call.
2795 Roffset = R22_tmp2, // Needs to survive C call.
2796 Rflags = R3_ARG1,
2797 Rbtable = R4_ARG2,
2798 Rscratch = R11_scratch1,
2799 Rscratch2 = R12_scratch2,
2800 Rscratch3 = R6_ARG4,
2801 Rbc = Rscratch3;
2802 const ConditionRegister CR_is_vol = CCR2; // Non-volatile condition register (survives runtime call in do_oop_store).
2803
2804 static address field_rw_branch_table[number_of_states],
2805 field_norw_branch_table[number_of_states],
2806 static_branch_table[number_of_states];
2807
2808 address* branch_table = is_static ? static_branch_table :
2809 (rc == may_rewrite ? field_rw_branch_table : field_norw_branch_table);
2810
2811 // Stack (grows up):
2812 // value
2813 // obj
2814
2815 // Load the field offset.
2816 resolve_cache_and_index(byte_no, Rcache, Rscratch, sizeof(u2));
2817 jvmti_post_field_mod(Rcache, Rscratch, is_static);
2818 load_field_cp_cache_entry(Rclass_or_obj, Rcache, noreg, Roffset, Rflags, is_static);
2819
2820 // Load pointer to branch table.
2821 __ load_const_optimized(Rbtable, (address)branch_table, Rscratch);
2822
2823 // Get volatile flag.
2824 __ rldicl(Rscratch, Rflags, 64-ConstantPoolCacheEntry::is_volatile_shift, 63); // Extract volatile bit.
2825
2826 // Check the field type.
2827 __ rldicl(Rflags, Rflags, 64-ConstantPoolCacheEntry::tos_state_shift, 64-ConstantPoolCacheEntry::tos_state_bits);
2828
2829 #ifdef ASSERT
2830 Label LFlagInvalid;
2831 __ cmpldi(CCR0, Rflags, number_of_states);
2832 __ bge(CCR0, LFlagInvalid);
2833 #endif
2834
2835 // Load from branch table and dispatch (volatile case: one instruction ahead).
2836 __ sldi(Rflags, Rflags, LogBytesPerWord);
2837 if (!support_IRIW_for_not_multiple_copy_atomic_cpu) {
2838 __ cmpwi(CR_is_vol, Rscratch, 1); // Volatile?
2839 }
2840   __ sldi(Rscratch, Rscratch, exact_log2(BytesPerInstWord)); // Volatile ? size of 1 instruction : 0.
2841 __ ldx(Rbtable, Rbtable, Rflags);
2842
2843 __ subf(Rbtable, Rscratch, Rbtable); // Point to volatile/non-volatile entry point.
2844 __ mtctr(Rbtable);
2845 __ bctr();
2846
2847 #ifdef ASSERT
2848 __ bind(LFlagInvalid);
2849 __ stop("got invalid flag", 0x656);
2850
2851 // __ bind(Lvtos);
2852 address pc_before_release = __ pc();
2853 __ release(); // Volatile entry point (one instruction before non-volatile_entry point).
2854 assert(__ pc() - pc_before_release == (ptrdiff_t)BytesPerInstWord, "must be single instruction");
2855 assert(branch_table[vtos] == 0, "can't compute twice");
2856 branch_table[vtos] = __ pc(); // non-volatile_entry point
2857 __ stop("vtos unexpected", 0x657);
2858 #endif
2859
2860 __ align(32, 28, 28); // Align pop.
2861 // __ bind(Ldtos);
2862 __ release(); // Volatile entry point (one instruction before non-volatile_entry point).
2863 assert(branch_table[dtos] == 0, "can't compute twice");
2864 branch_table[dtos] = __ pc(); // non-volatile_entry point
2865 __ pop(dtos);
2866 if (!is_static) {
2867 pop_and_check_object(Rclass_or_obj); // Kills R11_scratch1.
2868 }
2869 __ stfdx(F15_ftos, Rclass_or_obj, Roffset);
2870 if (!is_static && rc == may_rewrite) {
2871 patch_bytecode(Bytecodes::_fast_dputfield, Rbc, Rscratch, true, byte_no);
2872 }
2873 if (!support_IRIW_for_not_multiple_copy_atomic_cpu) {
2874 __ beq(CR_is_vol, Lvolatile); // Volatile?
2875 }
2876 __ dispatch_epilog(vtos, Bytecodes::length_for(bytecode()));
2877
2878 __ align(32, 28, 28); // Align pop.
2879 // __ bind(Lftos);
2880 __ release(); // Volatile entry point (one instruction before non-volatile_entry point).
2881 assert(branch_table[ftos] == 0, "can't compute twice");
2882 branch_table[ftos] = __ pc(); // non-volatile_entry point
2883 __ pop(ftos);
2884 if (!is_static) { pop_and_check_object(Rclass_or_obj); } // Kills R11_scratch1.
2885 __ stfsx(F15_ftos, Rclass_or_obj, Roffset);
2886 if (!is_static && rc == may_rewrite) {
2887 patch_bytecode(Bytecodes::_fast_fputfield, Rbc, Rscratch, true, byte_no);
2888 }
2889 if (!support_IRIW_for_not_multiple_copy_atomic_cpu) {
2890 __ beq(CR_is_vol, Lvolatile); // Volatile?
2891 }
2892 __ dispatch_epilog(vtos, Bytecodes::length_for(bytecode()));
2893
2894 __ align(32, 28, 28); // Align pop.
2895 // __ bind(Litos);
2896 __ release(); // Volatile entry point (one instruction before non-volatile_entry point).
2897 assert(branch_table[itos] == 0, "can't compute twice");
2898 branch_table[itos] = __ pc(); // non-volatile_entry point
2899 __ pop(itos);
2900 if (!is_static) { pop_and_check_object(Rclass_or_obj); } // Kills R11_scratch1.
2901 __ stwx(R17_tos, Rclass_or_obj, Roffset);
2902 if (!is_static && rc == may_rewrite) {
2903 patch_bytecode(Bytecodes::_fast_iputfield, Rbc, Rscratch, true, byte_no);
2904 }
2905 if (!support_IRIW_for_not_multiple_copy_atomic_cpu) {
2906 __ beq(CR_is_vol, Lvolatile); // Volatile?
2907 }
2908 __ dispatch_epilog(vtos, Bytecodes::length_for(bytecode()));
2909
2910 __ align(32, 28, 28); // Align pop.
2911 // __ bind(Lltos);
2912 __ release(); // Volatile entry point (one instruction before non-volatile_entry point).
2913 assert(branch_table[ltos] == 0, "can't compute twice");
2914 branch_table[ltos] = __ pc(); // non-volatile_entry point
2915 __ pop(ltos);
2916 if (!is_static) { pop_and_check_object(Rclass_or_obj); } // Kills R11_scratch1.
2917 __ stdx(R17_tos, Rclass_or_obj, Roffset);
2918 if (!is_static && rc == may_rewrite) {
2919 patch_bytecode(Bytecodes::_fast_lputfield, Rbc, Rscratch, true, byte_no);
2920 }
2921 if (!support_IRIW_for_not_multiple_copy_atomic_cpu) {
2922 __ beq(CR_is_vol, Lvolatile); // Volatile?
2923 }
2924 __ dispatch_epilog(vtos, Bytecodes::length_for(bytecode()));
2925
2926 __ align(32, 28, 28); // Align pop.
2927 // __ bind(Lbtos);
2928 __ release(); // Volatile entry point (one instruction before non-volatile_entry point).
2929 assert(branch_table[btos] == 0, "can't compute twice");
2930 branch_table[btos] = __ pc(); // non-volatile_entry point
2931 __ pop(btos);
2932 if (!is_static) { pop_and_check_object(Rclass_or_obj); } // Kills R11_scratch1.
2933 __ stbx(R17_tos, Rclass_or_obj, Roffset);
2934 if (!is_static && rc == may_rewrite) {
2935 patch_bytecode(Bytecodes::_fast_bputfield, Rbc, Rscratch, true, byte_no);
2936 }
2937 if (!support_IRIW_for_not_multiple_copy_atomic_cpu) {
2938 __ beq(CR_is_vol, Lvolatile); // Volatile?
2939 }
2940 __ dispatch_epilog(vtos, Bytecodes::length_for(bytecode()));
2941
2942 __ align(32, 28, 28); // Align pop.
2943 // __ bind(Lztos);
2944 __ release(); // Volatile entry point (one instruction before non-volatile_entry point).
2945 assert(branch_table[ztos] == 0, "can't compute twice");
2946 branch_table[ztos] = __ pc(); // non-volatile_entry point
2947 __ pop(ztos);
2948 if (!is_static) { pop_and_check_object(Rclass_or_obj); } // Kills R11_scratch1.
2949 __ andi(R17_tos, R17_tos, 0x1);
2950 __ stbx(R17_tos, Rclass_or_obj, Roffset);
2951 if (!is_static && rc == may_rewrite) {
2952 patch_bytecode(Bytecodes::_fast_zputfield, Rbc, Rscratch, true, byte_no);
2953 }
2954 if (!support_IRIW_for_not_multiple_copy_atomic_cpu) {
2955 __ beq(CR_is_vol, Lvolatile); // Volatile?
2956 }
2957 __ dispatch_epilog(vtos, Bytecodes::length_for(bytecode()));
2958
2959 __ align(32, 28, 28); // Align pop.
2960 // __ bind(Lctos);
2961 __ release(); // Volatile entry point (one instruction before non-volatile_entry point).
2962 assert(branch_table[ctos] == 0, "can't compute twice");
2963 branch_table[ctos] = __ pc(); // non-volatile_entry point
2964 __ pop(ctos);
2965   if (!is_static) { pop_and_check_object(Rclass_or_obj); } // Kills R11_scratch1.
2966 __ sthx(R17_tos, Rclass_or_obj, Roffset);
2967 if (!is_static && rc == may_rewrite) {
2968 patch_bytecode(Bytecodes::_fast_cputfield, Rbc, Rscratch, true, byte_no);
2969 }
2970 if (!support_IRIW_for_not_multiple_copy_atomic_cpu) {
2971 __ beq(CR_is_vol, Lvolatile); // Volatile?
2972 }
2973 __ dispatch_epilog(vtos, Bytecodes::length_for(bytecode()));
2974
2975 __ align(32, 28, 28); // Align pop.
2976 // __ bind(Lstos);
2977 __ release(); // Volatile entry point (one instruction before non-volatile_entry point).
2978 assert(branch_table[stos] == 0, "can't compute twice");
2979 branch_table[stos] = __ pc(); // non-volatile_entry point
2980 __ pop(stos);
2981 if (!is_static) { pop_and_check_object(Rclass_or_obj); } // Kills R11_scratch1.
2982 __ sthx(R17_tos, Rclass_or_obj, Roffset);
2983 if (!is_static && rc == may_rewrite) {
2984 patch_bytecode(Bytecodes::_fast_sputfield, Rbc, Rscratch, true, byte_no);
2985 }
2986 if (!support_IRIW_for_not_multiple_copy_atomic_cpu) {
2987 __ beq(CR_is_vol, Lvolatile); // Volatile?
2988 }
2989 __ dispatch_epilog(vtos, Bytecodes::length_for(bytecode()));
2990
2991 __ align(32, 28, 28); // Align pop.
2992 // __ bind(Latos);
2993 __ release(); // Volatile entry point (one instruction before non-volatile_entry point).
2994 assert(branch_table[atos] == 0, "can't compute twice");
2995 branch_table[atos] = __ pc(); // non-volatile_entry point
2996 __ pop(atos);
2997 if (!is_static) { pop_and_check_object(Rclass_or_obj); } // kills R11_scratch1
2998 do_oop_store(_masm, Rclass_or_obj, Roffset, R17_tos, Rscratch, Rscratch2, Rscratch3, IN_HEAP);
2999 if (!is_static && rc == may_rewrite) {
3000 patch_bytecode(Bytecodes::_fast_aputfield, Rbc, Rscratch, true, byte_no);
3001 }
3002 if (!support_IRIW_for_not_multiple_copy_atomic_cpu) {
3003 __ beq(CR_is_vol, Lvolatile); // Volatile?
3004 __ dispatch_epilog(vtos, Bytecodes::length_for(bytecode()));
3005
3006 __ align(32, 12);
3007 __ bind(Lvolatile);
3008 __ fence();
3009 }
3010 // fallthru: __ b(Lexit);
3011
3012 #ifdef ASSERT
3013 for (int i = 0; i<number_of_states; ++i) {
3014 assert(branch_table[i], "put initialization");
3015 //tty->print_cr("put: %s_branch_table[%d] = 0x%llx (opcode 0x%llx)",
3016 // is_static ? "static" : "field", i, branch_table[i], *((unsigned int*)branch_table[i]));
3017 }
3018 #endif
3019 }
3020
3021 void TemplateTable::putfield(int byte_no) {
3022 putfield_or_static(byte_no, false);
3023 }
3024
3025 void TemplateTable::nofast_putfield(int byte_no) {
3026 putfield_or_static(byte_no, false, may_not_rewrite);
3027 }
3028
3029 void TemplateTable::putstatic(int byte_no) {
3030 putfield_or_static(byte_no, true);
3031 }
3032
3033 // See SPARC. On PPC64, we have a different jvmti_post_field_mod which does the job.
3034 void TemplateTable::jvmti_post_fast_field_mod() {
3035 __ should_not_reach_here();
3036 }
3037
3038 void TemplateTable::fast_storefield(TosState state) {
3039 transition(state, vtos);
3040
3041 const Register Rcache = R5_ARG3, // Do not use ARG1/2 (causes trouble in jvmti_post_field_mod).
3042 Rclass_or_obj = R31, // Needs to survive C call.
3043 Roffset = R22_tmp2, // Needs to survive C call.
3044 Rflags = R3_ARG1,
3045 Rscratch = R11_scratch1,
3046 Rscratch2 = R12_scratch2,
3047 Rscratch3 = R4_ARG2;
3048 const ConditionRegister CR_is_vol = CCR2; // Non-volatile condition register (survives runtime call in do_oop_store).
3049
3050 // Constant pool already resolved => Load flags and offset of field.
3051 __ get_cache_and_index_at_bcp(Rcache, 1);
3052 jvmti_post_field_mod(Rcache, Rscratch, false /* not static */);
3053 load_field_cp_cache_entry(noreg, Rcache, noreg, Roffset, Rflags, false);
3054
3055 // Get the obj and the final store addr.
3056 pop_and_check_object(Rclass_or_obj); // Kills R11_scratch1.
3057
3058 // Get volatile flag.
3059 __ rldicl_(Rscratch, Rflags, 64-ConstantPoolCacheEntry::is_volatile_shift, 63); // Extract volatile bit.
3060 if (!support_IRIW_for_not_multiple_copy_atomic_cpu) { __ cmpdi(CR_is_vol, Rscratch, 1); }
3061 {
3062 Label LnotVolatile;
3063 __ beq(CCR0, LnotVolatile);
3064 __ release();
3065 __ align(32, 12);
3066 __ bind(LnotVolatile);
3067 }
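  // For volatile fields the release() above (lwsync on PPC64) orders all
  // earlier accesses before the field store emitted below (release-store).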
3068
3069 // Do the store and fencing.
3070 switch(bytecode()) {
3071 case Bytecodes::_fast_aputfield:
3072 // Store into the field.
3073 do_oop_store(_masm, Rclass_or_obj, Roffset, R17_tos, Rscratch, Rscratch2, Rscratch3, IN_HEAP);
3074 break;
3075
3076 case Bytecodes::_fast_iputfield:
3077 __ stwx(R17_tos, Rclass_or_obj, Roffset);
3078 break;
3079
3080 case Bytecodes::_fast_lputfield:
3081 __ stdx(R17_tos, Rclass_or_obj, Roffset);
3082 break;
3083
3084 case Bytecodes::_fast_zputfield:
3085 __ andi(R17_tos, R17_tos, 0x1); // boolean is true if LSB is 1
3086 // fall through to bputfield
3087 case Bytecodes::_fast_bputfield:
3088 __ stbx(R17_tos, Rclass_or_obj, Roffset);
3089 break;
3090
3091 case Bytecodes::_fast_cputfield:
3092 case Bytecodes::_fast_sputfield:
3093 __ sthx(R17_tos, Rclass_or_obj, Roffset);
3094 break;
3095
3096 case Bytecodes::_fast_fputfield:
3097 __ stfsx(F15_ftos, Rclass_or_obj, Roffset);
3098 break;
3099
3100 case Bytecodes::_fast_dputfield:
3101 __ stfdx(F15_ftos, Rclass_or_obj, Roffset);
3102 break;
3103
3104 default: ShouldNotReachHere();
3105 }
3106
3107 if (!support_IRIW_for_not_multiple_copy_atomic_cpu) {
3108 Label LVolatile;
3109 __ beq(CR_is_vol, LVolatile);
3110 __ dispatch_epilog(vtos, Bytecodes::length_for(bytecode()));
3111
3112 __ align(32, 12);
3113 __ bind(LVolatile);
3114 __ fence();
3115 }
3116 }
3117
3118 void TemplateTable::fast_accessfield(TosState state) {
3119 transition(atos, state);
3120
3121 Label LisVolatile;
3122 ByteSize cp_base_offset = ConstantPoolCache::base_offset();
3123
3124 const Register Rcache = R3_ARG1,
3125 Rclass_or_obj = R17_tos,
3126 Roffset = R22_tmp2,
3127 Rflags = R23_tmp3,
3128 Rscratch = R12_scratch2;
3129
3130 // Constant pool already resolved. Get the field offset.
3131 __ get_cache_and_index_at_bcp(Rcache, 1);
3132 load_field_cp_cache_entry(noreg, Rcache, noreg, Roffset, Rflags, false);
3133
3134 // JVMTI support
3135 jvmti_post_field_access(Rcache, Rscratch, false, true);
3136
3137 // Get the load address.
3138 __ null_check_throw(Rclass_or_obj, -1, Rscratch);
3139
3140 // Get volatile flag.
3141 __ rldicl_(Rscratch, Rflags, 64-ConstantPoolCacheEntry::is_volatile_shift, 63); // Extract volatile bit.
3142 __ bne(CCR0, LisVolatile);
3143
3144 switch(bytecode()) {
3145 case Bytecodes::_fast_agetfield:
3146 {
3147 do_oop_load(_masm, Rclass_or_obj, Roffset, R17_tos, Rscratch, /* nv temp */ Rflags, IN_HEAP);
3148 __ verify_oop(R17_tos);
3149 __ dispatch_epilog(state, Bytecodes::length_for(bytecode()));
3150
3151 __ bind(LisVolatile);
3152 if (support_IRIW_for_not_multiple_copy_atomic_cpu) { __ fence(); }
3153 do_oop_load(_masm, Rclass_or_obj, Roffset, R17_tos, Rscratch, /* nv temp */ Rflags, IN_HEAP);
3154 __ verify_oop(R17_tos);
3155 __ twi_0(R17_tos);
3156 __ isync();
3157 break;
3158 }
3159 case Bytecodes::_fast_igetfield:
3160 {
3161 __ lwax(R17_tos, Rclass_or_obj, Roffset);
3162 __ dispatch_epilog(state, Bytecodes::length_for(bytecode()));
3163
3164 __ bind(LisVolatile);
3165 if (support_IRIW_for_not_multiple_copy_atomic_cpu) { __ fence(); }
3166 __ lwax(R17_tos, Rclass_or_obj, Roffset);
3167 __ twi_0(R17_tos);
3168 __ isync();
3169 break;
3170 }
3171 case Bytecodes::_fast_lgetfield:
3172 {
3173 __ ldx(R17_tos, Rclass_or_obj, Roffset);
3174 __ dispatch_epilog(state, Bytecodes::length_for(bytecode()));
3175
3176 __ bind(LisVolatile);
3177 if (support_IRIW_for_not_multiple_copy_atomic_cpu) { __ fence(); }
3178 __ ldx(R17_tos, Rclass_or_obj, Roffset);
3179 __ twi_0(R17_tos);
3180 __ isync();
3181 break;
3182 }
3183 case Bytecodes::_fast_bgetfield:
3184 {
3185 __ lbzx(R17_tos, Rclass_or_obj, Roffset);
3186 __ extsb(R17_tos, R17_tos);
3187 __ dispatch_epilog(state, Bytecodes::length_for(bytecode()));
3188
3189 __ bind(LisVolatile);
3190 if (support_IRIW_for_not_multiple_copy_atomic_cpu) { __ fence(); }
3191 __ lbzx(R17_tos, Rclass_or_obj, Roffset);
3192 __ twi_0(R17_tos);
3193 __ extsb(R17_tos, R17_tos);
3194 __ isync();
3195 break;
3196 }
3197 case Bytecodes::_fast_cgetfield:
3198 {
3199 __ lhzx(R17_tos, Rclass_or_obj, Roffset);
3200 __ dispatch_epilog(state, Bytecodes::length_for(bytecode()));
3201
3202 __ bind(LisVolatile);
3203 if (support_IRIW_for_not_multiple_copy_atomic_cpu) { __ fence(); }
3204 __ lhzx(R17_tos, Rclass_or_obj, Roffset);
3205 __ twi_0(R17_tos);
3206 __ isync();
3207 break;
3208 }
3209 case Bytecodes::_fast_sgetfield:
3210 {
3211 __ lhax(R17_tos, Rclass_or_obj, Roffset);
3212 __ dispatch_epilog(state, Bytecodes::length_for(bytecode()));
3213
3214 __ bind(LisVolatile);
3215 if (support_IRIW_for_not_multiple_copy_atomic_cpu) { __ fence(); }
3216 __ lhax(R17_tos, Rclass_or_obj, Roffset);
3217 __ twi_0(R17_tos);
3218 __ isync();
3219 break;
3220 }
3221 case Bytecodes::_fast_fgetfield:
3222 {
3223 __ lfsx(F15_ftos, Rclass_or_obj, Roffset);
3224 __ dispatch_epilog(state, Bytecodes::length_for(bytecode()));
3225
3226 __ bind(LisVolatile);
3227 Label Ldummy;
3228 if (support_IRIW_for_not_multiple_copy_atomic_cpu) { __ fence(); }
3229 __ lfsx(F15_ftos, Rclass_or_obj, Roffset);
3230 __ fcmpu(CCR0, F15_ftos, F15_ftos); // Acquire by cmp-br-isync.
3231 __ bne_predict_not_taken(CCR0, Ldummy);
3232 __ bind(Ldummy);
3233 __ isync();
3234 break;
3235 }
3236 case Bytecodes::_fast_dgetfield:
3237 {
3238 __ lfdx(F15_ftos, Rclass_or_obj, Roffset);
3239 __ dispatch_epilog(state, Bytecodes::length_for(bytecode()));
3240
3241 __ bind(LisVolatile);
3242 Label Ldummy;
3243 if (support_IRIW_for_not_multiple_copy_atomic_cpu) { __ fence(); }
3244 __ lfdx(F15_ftos, Rclass_or_obj, Roffset);
3245 __ fcmpu(CCR0, F15_ftos, F15_ftos); // Acquire by cmp-br-isync.
3246 __ bne_predict_not_taken(CCR0, Ldummy);
3247 __ bind(Ldummy);
3248 __ isync();
3249 break;
3250 }
3251 default: ShouldNotReachHere();
3252 }
3253 }
3254
3255 void TemplateTable::fast_xaccess(TosState state) {
3256 transition(vtos, state);
3257
3258 Label LisVolatile;
3259 ByteSize cp_base_offset = ConstantPoolCache::base_offset();
3260 const Register Rcache = R3_ARG1,
3261 Rclass_or_obj = R17_tos,
3262 Roffset = R22_tmp2,
3263 Rflags = R23_tmp3,
3264 Rscratch = R12_scratch2;
3265
3266 __ ld(Rclass_or_obj, 0, R18_locals);
3267
3268 // Constant pool already resolved. Get the field offset.
3269 __ get_cache_and_index_at_bcp(Rcache, 2);
3270 load_field_cp_cache_entry(noreg, Rcache, noreg, Roffset, Rflags, false);
3271
3272   // JVMTI support is not needed; we switch back to the individual bytecodes as soon as a debugger attaches.
3273
3274 // Needed to report exception at the correct bcp.
3275 __ addi(R14_bcp, R14_bcp, 1);
3276
3277 // Get the load address.
3278 __ null_check_throw(Rclass_or_obj, -1, Rscratch);
3279
3280 // Get volatile flag.
3281 __ rldicl_(Rscratch, Rflags, 64-ConstantPoolCacheEntry::is_volatile_shift, 63); // Extract volatile bit.
3282 __ bne(CCR0, LisVolatile);
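// (Sketch, not emitted code: the rldicl_ above rotates the volatile bit into
// bit position 0 and masks everything else, i.e. roughly
//   Rscratch = (Rflags >> ConstantPoolCacheEntry::is_volatile_shift) & 1;
// the record form ('_') also sets CCR0, which the bne consumes.)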
3283
3284 switch(state) {
3285 case atos:
3286 {
3287 do_oop_load(_masm, Rclass_or_obj, Roffset, R17_tos, Rscratch, /* nv temp */ Rflags, IN_HEAP);
3288 __ verify_oop(R17_tos);
3289 __ dispatch_epilog(state, Bytecodes::length_for(bytecode()) - 1); // Undo bcp increment.
3290
3291 __ bind(LisVolatile);
3292 if (support_IRIW_for_not_multiple_copy_atomic_cpu) { __ fence(); }
3293 do_oop_load(_masm, Rclass_or_obj, Roffset, R17_tos, Rscratch, /* nv temp */ Rflags, IN_HEAP);
3294 __ verify_oop(R17_tos);
3295 __ twi_0(R17_tos);
3296 __ isync();
3297 break;
3298 }
3299 case itos:
3300 {
3301 __ lwax(R17_tos, Rclass_or_obj, Roffset);
3302 __ dispatch_epilog(state, Bytecodes::length_for(bytecode()) - 1); // Undo bcp increment.
3303
3304 __ bind(LisVolatile);
3305 if (support_IRIW_for_not_multiple_copy_atomic_cpu) { __ fence(); }
3306 __ lwax(R17_tos, Rclass_or_obj, Roffset);
3307 __ twi_0(R17_tos);
3308 __ isync();
3309 break;
3310 }
3311 case ftos:
3312 {
3313 __ lfsx(F15_ftos, Rclass_or_obj, Roffset);
3314 __ dispatch_epilog(state, Bytecodes::length_for(bytecode()) - 1); // Undo bcp increment.
3315
3316 __ bind(LisVolatile);
3317 Label Ldummy;
3318 if (support_IRIW_for_not_multiple_copy_atomic_cpu) { __ fence(); }
3319 __ lfsx(F15_ftos, Rclass_or_obj, Roffset);
3320 __ fcmpu(CCR0, F15_ftos, F15_ftos); // Acquire by cmp-br-isync.
3321 __ bne_predict_not_taken(CCR0, Ldummy);
3322 __ bind(Ldummy);
3323 __ isync();
3324 break;
3325 }
3326 default: ShouldNotReachHere();
3327 }
3328 __ addi(R14_bcp, R14_bcp, -1);
3329 }
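
// Note: fast_xaccess handles the fused aload_0 + fast getfield bytecodes
// (_fast_iaccess_0 etc.), so bcp still points at the fused instruction. The
// addi above temporarily advances bcp by one so that exceptions report the
// getfield position; the "- 1" dispatch lengths and the trailing addi undo
// that adjustment.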
3330
3331 // ============================================================================
3332 // Calls
3333
3334 // Common code for invoke
3335 //
3336 // Input:
3337 // - byte_no
3338 //
3339 // Output:
3340 // - Rmethod: The method to invoke next or i-klass (invokeinterface).
3341 // - Rret_addr: The return address to return to.
3342 // - Rindex: MethodType (invokehandle), CallSite obj (invokedynamic) or Method (invokeinterface)
3343 // - Rrecv: Cache for "this" pointer, might be noreg if static call.
3344 // - Rflags: Method flags from const pool cache.
3345 //
3346 // Kills:
3347 // - Rscratch1
3348 //
3349 void TemplateTable::prepare_invoke(int byte_no,
3350 Register Rmethod, // linked method (or i-klass)
3351 Register Rret_addr,// return address
3352 Register Rindex, // itable index, MethodType, Method, etc.
3353 Register Rrecv, // If caller wants to see it.
3354 Register Rflags, // If caller wants to test it.
3355 Register Rscratch
3356 ) {
3357 // Determine flags.
3358 const Bytecodes::Code code = bytecode();
3359 const bool is_invokeinterface = code == Bytecodes::_invokeinterface;
3360 const bool is_invokedynamic = code == Bytecodes::_invokedynamic;
3361 const bool is_invokehandle = code == Bytecodes::_invokehandle;
3362 const bool is_invokevirtual = code == Bytecodes::_invokevirtual;
3363 const bool is_invokespecial = code == Bytecodes::_invokespecial;
3364 const bool load_receiver = (Rrecv != noreg);
3365 assert(load_receiver == (code != Bytecodes::_invokestatic && code != Bytecodes::_invokedynamic), "");
3366
3367 assert_different_registers(Rmethod, Rindex, Rflags, Rscratch);
3368 assert_different_registers(Rmethod, Rrecv, Rflags, Rscratch);
3369 assert_different_registers(Rret_addr, Rscratch);
3370
3371 load_invoke_cp_cache_entry(byte_no, Rmethod, Rindex, Rflags, is_invokevirtual, false, is_invokedynamic);
3372
3373 // Saving of SP done in call_from_interpreter.
3374
3375 // Maybe push "appendix" to arguments.
3376 if (is_invokedynamic || is_invokehandle) {
3377 Label Ldone;
3378 __ rldicl_(R0, Rflags, 64-ConstantPoolCacheEntry::has_appendix_shift, 63);
3379 __ beq(CCR0, Ldone);
3380 // Push "appendix" (MethodType, CallSite, etc.).
3381 // This must be done before we get the receiver,
3382 // since the parameter_size includes it.
3383 __ load_resolved_reference_at_index(Rscratch, Rindex);
3384 __ verify_oop(Rscratch);
3385 __ push_ptr(Rscratch);
3386 __ bind(Ldone);
3387 }
3388
3389 // Load receiver if needed (after appendix is pushed so parameter size is correct).
3390 if (load_receiver) {
3391 const Register Rparam_count = Rscratch;
3392 __ andi(Rparam_count, Rflags, ConstantPoolCacheEntry::parameter_size_mask);
3393 __ load_receiver(Rparam_count, Rrecv);
3394 __ verify_oop(Rrecv);
3395 }
3396
3397 // Get return address.
3398 {
3399 Register Rtable_addr = Rscratch;
3400 Register Rret_type = Rret_addr;
3401 address table_addr = (address) Interpreter::invoke_return_entry_table_for(code);
3402
3403 // Get return type. It's coded into the upper 4 bits of the lower half of the 64 bit value.
3404 __ rldicl(Rret_type, Rflags, 64-ConstantPoolCacheEntry::tos_state_shift, 64-ConstantPoolCacheEntry::tos_state_bits);
3405 __ load_dispatch_table(Rtable_addr, (address*)table_addr);
3406 __ sldi(Rret_type, Rret_type, LogBytesPerWord);
3407 // Get return address.
3408 __ ldx(Rret_addr, Rtable_addr, Rret_type);
3409 }
3410 }
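
// The return-address computation above amounts to the following C++ sketch
// (illustrative only; it relies on the flag layout described in the comments):
//
//   int tos_state = (flags >> ConstantPoolCacheEntry::tos_state_shift)
//                   & ((1 << ConstantPoolCacheEntry::tos_state_bits) - 1);
//   address ret   = Interpreter::invoke_return_entry_table_for(code)[tos_state];
//
// i.e. the TosState encoded in the cp cache flags indexes the per-bytecode
// return-entry table.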
3411
3412 // Helper for virtual calls. Load target out of vtable and jump off!
3413 // Kills all passed registers.
3414 void TemplateTable::generate_vtable_call(Register Rrecv_klass, Register Rindex, Register Rret, Register Rtemp) {
3415
3416 assert_different_registers(Rrecv_klass, Rtemp, Rret);
3417 const Register Rtarget_method = Rindex;
3418
3419 // Get target method & entry point.
3420 const int base = in_bytes(Klass::vtable_start_offset());
3421 // Calc vtable addr: scale the vtable index by 8.
3422 __ sldi(Rindex, Rindex, exact_log2(vtableEntry::size_in_bytes()));
3423 // Load target.
3424 __ addi(Rrecv_klass, Rrecv_klass, base + vtableEntry::method_offset_in_bytes());
3425 __ ldx(Rtarget_method, Rindex, Rrecv_klass);
3426 // Argument and return type profiling.
3427 __ profile_arguments_type(Rtarget_method, Rrecv_klass /* scratch1 */, Rtemp /* scratch2 */, true);
3428 __ call_from_interpreter(Rtarget_method, Rret, Rrecv_klass /* scratch1 */, Rtemp /* scratch2 */);
3429 }
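
// The target Method* computed above lives at (illustrative sketch):
//
//   recv_klass + Klass::vtable_start_offset()
//              + vtable_index * vtableEntry::size_in_bytes()
//              + vtableEntry::method_offset_in_bytes()
//
// The sldi scales the index and the addi folds both constant offsets into the
// klass pointer, so a single ldx yields the Method*.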
3430
3431 // Virtual or final call. Final calls are rewritten on the fly to run through "fast_invokevfinal" next time.
3432 void TemplateTable::invokevirtual(int byte_no) {
3433 transition(vtos, vtos);
3434
3435 Register Rtable_addr = R11_scratch1,
3436 Rret_type = R12_scratch2,
3437 Rret_addr = R5_ARG3,
3438 Rflags = R22_tmp2, // Should survive C call.
3439 Rrecv = R3_ARG1,
3440 Rrecv_klass = Rrecv,
3441 Rvtableindex_or_method = R31, // Should survive C call.
3442 Rnum_params = R4_ARG2,
3443 Rnew_bc = R6_ARG4;
3444
3445 Label LnotFinal;
3446
3447 load_invoke_cp_cache_entry(byte_no, Rvtableindex_or_method, noreg, Rflags, /*virtual*/ true, false, false);
3448
3449 __ testbitdi(CCR0, R0, Rflags, ConstantPoolCacheEntry::is_vfinal_shift);
3450 __ bfalse(CCR0, LnotFinal);
3451
3452 if (RewriteBytecodes && !UseSharedSpaces && !DumpSharedSpaces) {
3453 patch_bytecode(Bytecodes::_fast_invokevfinal, Rnew_bc, R12_scratch2);
3454 }
3455 invokevfinal_helper(Rvtableindex_or_method, Rflags, R11_scratch1, R12_scratch2);
3456
3457 __ align(32, 12);
3458 __ bind(LnotFinal);
3459 // Load "this" pointer (receiver).
3460 __ rldicl(Rnum_params, Rflags, 64, 48);
3461 __ load_receiver(Rnum_params, Rrecv);
3462 __ verify_oop(Rrecv);
3463
3464 // Get return type. It's coded into the upper 4 bits of the lower half of the 64 bit value.
3465 __ rldicl(Rret_type, Rflags, 64-ConstantPoolCacheEntry::tos_state_shift, 64-ConstantPoolCacheEntry::tos_state_bits);
3466 __ load_dispatch_table(Rtable_addr, Interpreter::invoke_return_entry_table());
3467 __ sldi(Rret_type, Rret_type, LogBytesPerWord);
3468 __ ldx(Rret_addr, Rret_type, Rtable_addr);
3469 __ null_check_throw(Rrecv, oopDesc::klass_offset_in_bytes(), R11_scratch1);
3470 __ load_klass(Rrecv_klass, Rrecv);
3471 __ verify_klass_ptr(Rrecv_klass);
3472 __ profile_virtual_call(Rrecv_klass, R11_scratch1, R12_scratch2, false);
3473
3474 generate_vtable_call(Rrecv_klass, Rvtableindex_or_method, Rret_addr, R11_scratch1);
3475 }
3476
3477 void TemplateTable::fast_invokevfinal(int byte_no) {
3478 transition(vtos, vtos);
3479
3480 assert(byte_no == f2_byte, "use this argument");
3481 Register Rflags = R22_tmp2,
3482 Rmethod = R31;
3483 load_invoke_cp_cache_entry(byte_no, Rmethod, noreg, Rflags, /*virtual*/ true, /*is_invokevfinal*/ true, false);
3484 invokevfinal_helper(Rmethod, Rflags, R11_scratch1, R12_scratch2);
3485 }
3486
3487 void TemplateTable::invokevfinal_helper(Register Rmethod, Register Rflags, Register Rscratch1, Register Rscratch2) {
3488
3489 assert_different_registers(Rmethod, Rflags, Rscratch1, Rscratch2);
3490
3491 // Load receiver from stack slot.
3492 Register Rrecv = Rscratch2;
3493 Register Rnum_params = Rrecv;
3494
3495 __ ld(Rnum_params, in_bytes(Method::const_offset()), Rmethod);
3496 __ lhz(Rnum_params /* number of params */, in_bytes(ConstMethod::size_of_parameters_offset()), Rnum_params);
3497
3498 // Get return address.
3499 Register Rtable_addr = Rscratch1,
3500 Rret_addr = Rflags,
3501 Rret_type = Rret_addr;
3502 // Get return type. It's coded into the upper 4 bits of the lower half of the 64 bit value.
3503 __ rldicl(Rret_type, Rflags, 64-ConstantPoolCacheEntry::tos_state_shift, 64-ConstantPoolCacheEntry::tos_state_bits);
3504 __ load_dispatch_table(Rtable_addr, Interpreter::invoke_return_entry_table());
3505 __ sldi(Rret_type, Rret_type, LogBytesPerWord);
3506 __ ldx(Rret_addr, Rret_type, Rtable_addr);
3507
3508 // Load receiver and receiver NULL check.
3509 __ load_receiver(Rnum_params, Rrecv);
3510 __ null_check_throw(Rrecv, -1, Rscratch1);
3511
3512 __ profile_final_call(Rrecv, Rscratch1);
3513 // Argument and return type profiling.
3514 __ profile_arguments_type(Rmethod, Rscratch1, Rscratch2, true);
3515
3516 // Do the call.
3517 __ call_from_interpreter(Rmethod, Rret_addr, Rscratch1, Rscratch2);
3518 }
3519
3520 void TemplateTable::invokespecial(int byte_no) {
3521 assert(byte_no == f1_byte, "use this argument");
3522 transition(vtos, vtos);
3523
3524 Register Rtable_addr = R3_ARG1,
3525 Rret_addr = R4_ARG2,
3526 Rflags = R5_ARG3,
3527 Rreceiver = R6_ARG4,
3528 Rmethod = R31;
3529
3530 prepare_invoke(byte_no, Rmethod, Rret_addr, noreg, Rreceiver, Rflags, R11_scratch1);
3531
3532 // Receiver NULL check.
3533 __ null_check_throw(Rreceiver, -1, R11_scratch1);
3534
3535 __ profile_call(R11_scratch1, R12_scratch2);
3536 // Argument and return type profiling.
3537 __ profile_arguments_type(Rmethod, R11_scratch1, R12_scratch2, false);
3538 __ call_from_interpreter(Rmethod, Rret_addr, R11_scratch1, R12_scratch2);
3539 }
3540
3541 void TemplateTable::invokestatic(int byte_no) {
3542 assert(byte_no == f1_byte, "use this argument");
3543 transition(vtos, vtos);
3544
3545 Register Rtable_addr = R3_ARG1,
3546 Rret_addr = R4_ARG2,
3547 Rflags = R5_ARG3;
3548
3549 prepare_invoke(byte_no, R19_method, Rret_addr, noreg, noreg, Rflags, R11_scratch1);
3550
3551 __ profile_call(R11_scratch1, R12_scratch2);
3552 // Argument and return type profiling.
3553 __ profile_arguments_type(R19_method, R11_scratch1, R12_scratch2, false);
3554 __ call_from_interpreter(R19_method, Rret_addr, R11_scratch1, R12_scratch2);
3555 }
3556
3557 void TemplateTable::invokeinterface_object_method(Register Rrecv_klass,
3558 Register Rret,
3559 Register Rflags,
3560 Register Rmethod,
3561 Register Rtemp1,
3562 Register Rtemp2) {
3563
3564 assert_different_registers(Rmethod, Rret, Rrecv_klass, Rflags, Rtemp1, Rtemp2);
3565 Label LnotFinal;
3566
3567 // Check for vfinal.
3568 __ testbitdi(CCR0, R0, Rflags, ConstantPoolCacheEntry::is_vfinal_shift);
3569 __ bfalse(CCR0, LnotFinal);
3570
3571 Register Rscratch = Rflags; // Rflags is dead now.
3572
3573 // Final call case.
3574 __ profile_final_call(Rtemp1, Rscratch);
3575 // Argument and return type profiling.
3576 __ profile_arguments_type(Rmethod, Rscratch, Rrecv_klass /* scratch */, true);
3577 // Do the final call - the index (f2) contains the method.
3578 __ call_from_interpreter(Rmethod, Rret, Rscratch, Rrecv_klass /* scratch */);
3579
3580 // Non-final call case.
3581 __ bind(LnotFinal);
3582 __ profile_virtual_call(Rrecv_klass, Rtemp1, Rscratch, false);
3583 generate_vtable_call(Rrecv_klass, Rmethod, Rret, Rscratch);
3584 }
3585
3586 void TemplateTable::invokeinterface(int byte_no) {
3587 assert(byte_no == f1_byte, "use this argument");
3588 transition(vtos, vtos);
3589
3590 const Register Rscratch1 = R11_scratch1,
3591 Rscratch2 = R12_scratch2,
3592 Rmethod = R6_ARG4,
3593 Rmethod2 = R9_ARG7,
3594 Rinterface_klass = R5_ARG3,
3595 Rret_addr = R8_ARG6,
3596 Rindex = R10_ARG8,
3597 Rreceiver = R3_ARG1,
3598 Rrecv_klass = R4_ARG2,
3599 Rflags = R7_ARG5;
3600
3601 prepare_invoke(byte_no, Rinterface_klass, Rret_addr, Rmethod, Rreceiver, Rflags, Rscratch1);
3602
3603 // First check for Object case, then private interface method,
3604 // then regular interface method.
3605
3606 // Get receiver klass - this is also a null check
3607 __ null_check_throw(Rreceiver, oopDesc::klass_offset_in_bytes(), Rscratch2);
3608 __ load_klass(Rrecv_klass, Rreceiver);
3609
3610 // Check corner case: object method.
3611 // Special case of invokeinterface called for a virtual method of
3612 // java.lang.Object. See ConstantPoolCacheEntry::set_method() for details:
3613 // the invokeinterface was rewritten to an invokevirtual, hence we have
3614 // to handle this corner case.
3615
3616 Label LnotObjectMethod, Lthrow_ame;
3617 __ testbitdi(CCR0, R0, Rflags, ConstantPoolCacheEntry::is_forced_virtual_shift);
3618 __ bfalse(CCR0, LnotObjectMethod);
3619 invokeinterface_object_method(Rrecv_klass, Rret_addr, Rflags, Rmethod, Rscratch1, Rscratch2);
3620 __ bind(LnotObjectMethod);
3621
3622 // Check for private method invocation - indicated by vfinal
3623 Label LnotVFinal, L_no_such_interface, L_subtype;
3624
3625 __ testbitdi(CCR0, R0, Rflags, ConstantPoolCacheEntry::is_vfinal_shift);
3626 __ bfalse(CCR0, LnotVFinal);
3627
3628 __ check_klass_subtype(Rrecv_klass, Rinterface_klass, Rscratch1, Rscratch2, L_subtype);
3629 // If we get here, the typecheck failed.
3630 __ b(L_no_such_interface);
3631 __ bind(L_subtype);
3632
3633 // Do the call.
3634
3635 Register Rscratch = Rflags; // Rflags is dead now.
3636
3637 __ profile_final_call(Rscratch1, Rscratch);
3638 __ profile_arguments_type(Rmethod, Rscratch, Rrecv_klass /* scratch */, true);
3639
3640 __ call_from_interpreter(Rmethod, Rret_addr, Rscratch, Rrecv_klass /* scratch */);
3641
3642 __ bind(LnotVFinal);
3643
3644 __ lookup_interface_method(Rrecv_klass, Rinterface_klass, noreg, noreg, Rscratch1, Rscratch2,
3645 L_no_such_interface, /*return_method=*/false);
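// The first lookup above (return_method=false) only verifies that the
// receiver's class implements the resolved interface, branching to
// L_no_such_interface (IncompatibleClassChangeError) if it does not; the
// second lookup below then fetches the actual target method from the itable.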
3646
3647 __ profile_virtual_call(Rrecv_klass, Rscratch1, Rscratch2, false);
3648
3649 // Find entry point to call.
3650
3651 // Get declaring interface class from method
3652 __ load_method_holder(Rinterface_klass, Rmethod);
3653
3654 // Get itable index from method
3655 __ lwa(Rindex, in_bytes(Method::itable_index_offset()), Rmethod);
3656 __ subfic(Rindex, Rindex, Method::itable_index_max);
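// (Sketch of the encoding assumed here: the itable_index field stores the
// index in complemented form, and subfic recovers it as
//   index = Method::itable_index_max - stored_value;
// see Method::itable_index() for the authoritative definition.)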
3657
3658 __ lookup_interface_method(Rrecv_klass, Rinterface_klass, Rindex, Rmethod2, Rscratch1, Rscratch2,
3659 L_no_such_interface);
3660
3661 __ cmpdi(CCR0, Rmethod2, 0);
3662 __ beq(CCR0, Lthrow_ame);
3663 // Found entry. Jump off!
3664 // Argument and return type profiling.
3665 __ profile_arguments_type(Rmethod2, Rscratch1, Rscratch2, true);
3666 //__ profile_called_method(Rindex, Rscratch1);
3667 __ call_from_interpreter(Rmethod2, Rret_addr, Rscratch1, Rscratch2);
3668
3669 // Itable entry was NULL => Throw abstract method error.
3670 __ bind(Lthrow_ame);
3671 // Pass arguments for generating a verbose error message.
3672 call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_AbstractMethodErrorVerbose),
3673 Rrecv_klass, Rmethod);
3674
3675 // Interface was not found => Throw incompatible class change error.
3676 __ bind(L_no_such_interface);
3677 // Pass arguments for generating a verbose error message.
3678 call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_IncompatibleClassChangeErrorVerbose),
3679 Rrecv_klass, Rinterface_klass);
3680 DEBUG_ONLY( __ should_not_reach_here(); )
3681 }
3682
3683 void TemplateTable::invokedynamic(int byte_no) {
3684 transition(vtos, vtos);
3685
3686 const Register Rret_addr = R3_ARG1,
3687 Rflags = R4_ARG2,
3688 Rmethod = R22_tmp2,
3689 Rscratch1 = R11_scratch1,
3690 Rscratch2 = R12_scratch2;
3691
3692 prepare_invoke(byte_no, Rmethod, Rret_addr, Rscratch1, noreg, Rflags, Rscratch2);
3693
3694 // Profile this call.
3695 __ profile_call(Rscratch1, Rscratch2);
3696
3697 // Off we go. With the new method handles, we don't jump to a method handle
3698 // entry any more. Instead, we pushed an "appendix" in prepare_invoke, which happens
3699 // to be the CallSite object the bootstrap method returned. This is passed to a
3700 // "link" method which does the dispatch (most likely it just grabs the MH stored
3701 // inside the CallSite and does an invokehandle).
3702 // Argument and return type profiling.
3703 __ profile_arguments_type(Rmethod, Rscratch1, Rscratch2, false);
3704 __ call_from_interpreter(Rmethod, Rret_addr, Rscratch1 /* scratch1 */, Rscratch2 /* scratch2 */);
3705 }
3706
3707 void TemplateTable::invokehandle(int byte_no) {
3708 transition(vtos, vtos);
3709
3710 const Register Rret_addr = R3_ARG1,
3711 Rflags = R4_ARG2,
3712 Rrecv = R5_ARG3,
3713 Rmethod = R22_tmp2,
3714 Rscratch1 = R11_scratch1,
3715 Rscratch2 = R12_scratch2;
3716
3717 prepare_invoke(byte_no, Rmethod, Rret_addr, Rscratch1, Rrecv, Rflags, Rscratch2);
3718 __ verify_method_ptr(Rmethod);
3719 __ null_check_throw(Rrecv, -1, Rscratch2);
3720
3721 __ profile_final_call(Rrecv, Rscratch1);
3722
3723 // Still no call from handle => we call the method handle interpreter here.
3724 // Argument and return type profiling.
3725 __ profile_arguments_type(Rmethod, Rscratch1, Rscratch2, true);
3726 __ call_from_interpreter(Rmethod, Rret_addr, Rscratch1 /* scratch1 */, Rscratch2 /* scratch2 */);
3727 }
3728
3729 // =============================================================================
3730 // Allocation
3731
3732 // Puts allocated obj ref onto the expression stack.
3733 void TemplateTable::_new() {
3734 transition(vtos, atos);
3735
3736 Label Lslow_case,
3737 Ldone;
3738
3739 const Register RallocatedObject = R17_tos,
3740 RinstanceKlass = R9_ARG7,
3741 Rscratch = R11_scratch1,
3742 Roffset = R8_ARG6,
3743 Rinstance_size = Roffset,
3744 Rcpool = R4_ARG2,
3745 Rtags = R3_ARG1,
3746 Rindex = R5_ARG3;
3747
3748 // --------------------------------------------------------------------------
3749 // Check if fast case is possible.
3750
3751 // Load pointers to const pool and const pool's tags array.
3752 __ get_cpool_and_tags(Rcpool, Rtags);
3753 // Load index of constant pool entry.
3754 __ get_2_byte_integer_at_bcp(1, Rindex, InterpreterMacroAssembler::Unsigned);
3755
3756 // Note: compared to other architectures, PPC's implementation always goes
3757 // to the slow path if the TLAB allocation fails (there is no inline eden allocation).
3758 if (UseTLAB) {
3759 // Make sure the class we're about to instantiate has been resolved.
3760 // This is done before loading the InstanceKlass to be consistent with the order
3761 // in which the constant pool is updated (see ConstantPool::klass_at_put).
3762 __ addi(Rtags, Rtags, Array<u1>::base_offset_in_bytes());
3763 __ lbzx(Rtags, Rindex, Rtags);
3764
3765 __ cmpdi(CCR0, Rtags, JVM_CONSTANT_Class);
3766 __ bne(CCR0, Lslow_case);
3767
3768 // Get instanceKlass
3769 __ sldi(Roffset, Rindex, LogBytesPerWord);
3770 __ load_resolved_klass_at_offset(Rcpool, Roffset, RinstanceKlass);
3771
3772 // Make sure klass is fully initialized and get instance_size.
3773 __ lbz(Rscratch, in_bytes(InstanceKlass::init_state_offset()), RinstanceKlass);
3774 __ lwz(Rinstance_size, in_bytes(Klass::layout_helper_offset()), RinstanceKlass);
3775
3776 __ cmpdi(CCR1, Rscratch, InstanceKlass::fully_initialized);
3777 // Make sure klass does not have a finalizer and is not abstract, an interface, or java/lang/Class.
3778 __ andi_(R0, Rinstance_size, Klass::_lh_instance_slow_path_bit); // slow path bit equals 0?
3779
3780 __ crnand(CCR0, Assembler::equal, CCR1, Assembler::equal); // slow path bit set or not fully initialized?
3781 __ beq(CCR0, Lslow_case);
3782
3783 // --------------------------------------------------------------------------
3784 // Fast case:
3785 // Allocate the instance.
3786 // 1) Try to allocate in the TLAB.
3787 // 2) If the above fails (or is not applicable), go to a slow case (creates a new TLAB, etc.).
3788
3789 Register RoldTopValue = RallocatedObject; // Object will be allocated here if it fits.
3790 Register RnewTopValue = R6_ARG4;
3791 Register RendValue = R7_ARG5;
3792
3793 // Check if we can allocate in the TLAB.
3794 __ ld(RoldTopValue, in_bytes(JavaThread::tlab_top_offset()), R16_thread);
3795 __ ld(RendValue, in_bytes(JavaThread::tlab_end_offset()), R16_thread);
3796
3797 __ add(RnewTopValue, Rinstance_size, RoldTopValue);
3798
3799 // If there is enough space, we do not CAS and do not clear.
3800 __ cmpld(CCR0, RnewTopValue, RendValue);
3801 __ bgt(CCR0, Lslow_case);
3802
3803 __ std(RnewTopValue, in_bytes(JavaThread::tlab_top_offset()), R16_thread);
3804
3805 if (!ZeroTLAB) {
3806 // --------------------------------------------------------------------------
3807 // Init1: Zero out newly allocated memory.
3808 // Initialize remaining object fields.
3809 Register Rbase = Rtags;
3810 __ addi(Rinstance_size, Rinstance_size, 7 - (int)sizeof(oopDesc));
3811 __ addi(Rbase, RallocatedObject, sizeof(oopDesc));
3812 __ srdi(Rinstance_size, Rinstance_size, 3);
3813
3814 // Clear out object, skipping the header. Also takes care of the zero-length case.
3815 __ clear_memory_doubleword(Rbase, Rinstance_size);
3816 }
3817
3818 // --------------------------------------------------------------------------
3819 // Init2: Initialize the header: mark, klass
3820 // Init mark.
3821 if (UseBiasedLocking) {
3822 __ ld(Rscratch, in_bytes(Klass::prototype_header_offset()), RinstanceKlass);
3823 } else {
3824 __ load_const_optimized(Rscratch, markWord::prototype().value(), R0);
3825 }
3826 __ std(Rscratch, oopDesc::mark_offset_in_bytes(), RallocatedObject);
3827
3828 // Init klass.
3829 __ store_klass_gap(RallocatedObject);
3830 __ store_klass(RallocatedObject, RinstanceKlass, Rscratch); // klass (last for cms)
3831
3832 // Check and trigger dtrace event.
3833 SkipIfEqualZero::skip_to_label_if_equal_zero(_masm, Rscratch, &DTraceAllocProbes, Ldone);
3834 __ push(atos);
3835 __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_object_alloc));
3836 __ pop(atos);
3837
3838 __ b(Ldone);
3839 }
3840
3841 // --------------------------------------------------------------------------
3842 // slow case
3843 __ bind(Lslow_case);
3844 call_VM(R17_tos, CAST_FROM_FN_PTR(address, InterpreterRuntime::_new), Rcpool, Rindex);
3845
3846 // continue
3847 __ bind(Ldone);
3848
3849 // Must prevent reordering of stores for object initialization with stores that publish the new object.
3850 __ membar(Assembler::StoreStore);
3851 }
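
// The fast path above is the usual TLAB bump-pointer allocation. As an
// illustrative C++ sketch (accessor names are assumptions, not the exact API):
//
//   char* top = tlab_top();
//   char* end = tlab_end();
//   if (top + instance_size <= end) {       // enough room in the TLAB?
//     set_tlab_top(top + instance_size);    // bump: no CAS, TLAB is thread-local
//     obj = top;                            // then zero the body and init header
//   } else {
//     obj = InterpreterRuntime::_new(...);  // slow path via VM call
//   }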
3852
3853 void TemplateTable::newarray() {
3854 transition(itos, atos);
3855
3856 __ lbz(R4, 1, R14_bcp);
3857 __ extsw(R5, R17_tos);
3858 call_VM(R17_tos, CAST_FROM_FN_PTR(address, InterpreterRuntime::newarray), R4, R5 /* size */);
3859
3860 // Must prevent reordering of stores for object initialization with stores that publish the new object.
3861 __ membar(Assembler::StoreStore);
3862 }
3863
3864 void TemplateTable::anewarray() {
3865 transition(itos, atos);
3866
3867 __ get_constant_pool(R4);
3868 __ get_2_byte_integer_at_bcp(1, R5, InterpreterMacroAssembler::Unsigned);
3869 __ extsw(R6, R17_tos); // size
3870 call_VM(R17_tos, CAST_FROM_FN_PTR(address, InterpreterRuntime::anewarray), R4 /* pool */, R5 /* index */, R6 /* size */);
3871
3872 // Must prevent reordering of stores for object initialization with stores that publish the new object.
3873 __ membar(Assembler::StoreStore);
3874 }
3875
3876 // Allocate a multi dimensional array
3877 void TemplateTable::multianewarray() {
3878 transition(vtos, atos);
3879
3880 Register Rptr = R31; // Needs to survive C call.
3881
3882 // Put ndims * wordSize into Rptr.
3883 __ lbz(Rptr, 3, R14_bcp);
3884 __ sldi(Rptr, Rptr, Interpreter::logStackElementSize);
3885 // Esp points past last_dim, so set R4 to first_dim address.
3886 __ add(R4, Rptr, R15_esp);
3887 call_VM(R17_tos, CAST_FROM_FN_PTR(address, InterpreterRuntime::multianewarray), R4 /* first_size_address */);
3888 // Pop all dimensions off the stack.
3889 __ add(R15_esp, Rptr, R15_esp);
3890
3891 // Must prevent reordering of stores for object initialization with stores that publish the new object.
3892 __ membar(Assembler::StoreStore);
3893 }
3894
3895 void TemplateTable::arraylength() {
3896 transition(atos, itos);
3897
3898 __ verify_oop(R17_tos);
3899 __ null_check_throw(R17_tos, arrayOopDesc::length_offset_in_bytes(), R11_scratch1);
3900 __ lwa(R17_tos, arrayOopDesc::length_offset_in_bytes(), R17_tos);
3901 }
3902
3903 // ============================================================================
3904 // Typechecks
3905
3906 void TemplateTable::checkcast() {
3907 transition(atos, atos);
3908
3909 Label Ldone, Lis_null, Lquicked, Lresolved;
3910 Register Roffset = R6_ARG4,
3911 RobjKlass = R4_ARG2,
3912 RspecifiedKlass = R5_ARG3, // Generate_ClassCastException_verbose_handler will read value from this register.
3913 Rcpool = R11_scratch1,
3914 Rtags = R12_scratch2;
3915
3916 // Null does not pass.
3917 __ cmpdi(CCR0, R17_tos, 0);
3918 __ beq(CCR0, Lis_null);
3919
3920 // Get constant pool tag to find out if the bytecode has already been "quickened".
3921 __ get_cpool_and_tags(Rcpool, Rtags);
3922
3923 __ get_2_byte_integer_at_bcp(1, Roffset, InterpreterMacroAssembler::Unsigned);
3924
3925 __ addi(Rtags, Rtags, Array<u1>::base_offset_in_bytes());
3926 __ lbzx(Rtags, Rtags, Roffset);
3927
3928 __ cmpdi(CCR0, Rtags, JVM_CONSTANT_Class);
3929 __ beq(CCR0, Lquicked);
3930
3931 // Call into the VM to "quicken" checkcast.
3932 __ push_ptr(); // for GC
3933 call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::quicken_io_cc));
3934 __ get_vm_result_2(RspecifiedKlass);
3935 __ pop_ptr(); // Restore receiver.
3936 __ b(Lresolved);
3937
3938 // Extract target class from constant pool.
3939 __ bind(Lquicked);
3940 __ sldi(Roffset, Roffset, LogBytesPerWord);
3941 __ load_resolved_klass_at_offset(Rcpool, Roffset, RspecifiedKlass);
3942
3943 // Do the checkcast.
3944 __ bind(Lresolved);
3945 // Get value klass in RobjKlass.
3946 __ load_klass(RobjKlass, R17_tos);
3947 // Generate a fast subtype check. Branches to Ldone if the check succeeds; falls through on failure.
3948 __ gen_subtype_check(RobjKlass, RspecifiedKlass, /*3 temp regs*/ Roffset, Rcpool, Rtags, /*target if subtype*/ Ldone);
3949
3950 // Not a subtype, so we must throw an exception.
3951 // Target class oop is in register R6_ARG4 == RspecifiedKlass by convention.
3952 __ load_dispatch_table(R11_scratch1, (address*)Interpreter::_throw_ClassCastException_entry);
3953 __ mtctr(R11_scratch1);
3954 __ bctr();
3955
3956 // Profile the null case.
3957 __ align(32, 12);
3958 __ bind(Lis_null);
3959 __ profile_null_seen(R11_scratch1, Rtags); // Rtags used as scratch.
3960
3961 __ align(32, 12);
3962 __ bind(Ldone);
3963 }
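
// Quickening, in outline (illustrative sketch): the constant pool tag array
// records whether the Class entry has been resolved yet:
//
//   if (tags[index] == JVM_CONSTANT_Class) {
//     klass = resolved_klass_at(cpool, index);   // fast: already resolved
//   } else {
//     klass = quicken_io_cc(...);                // slow: resolve via VM call
//   }
//
// instanceof below follows the same pattern, differing only in how the result
// of the subtype check is materialized in tos.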
3964
3965 // Output:
3966 // - tos == 0: Obj was null or not an instance of class.
3967 // - tos == 1: Obj was an instance of class.
3968 void TemplateTable::instanceof() {
3969 transition(atos, itos);
3970
3971 Label Ldone, Lis_null, Lquicked, Lresolved;
3972 Register Roffset = R6_ARG4,
3973 RobjKlass = R4_ARG2,
3974 RspecifiedKlass = R5_ARG3,
3975 Rcpool = R11_scratch1,
3976 Rtags = R12_scratch2;
3977
3978 // Null does not pass.
3979 __ cmpdi(CCR0, R17_tos, 0);
3980 __ beq(CCR0, Lis_null);
3981
3982 // Get constant pool tag to find out if the bytecode has already been "quickened".
3983 __ get_cpool_and_tags(Rcpool, Rtags);
3984
3985 __ get_2_byte_integer_at_bcp(1, Roffset, InterpreterMacroAssembler::Unsigned);
3986
3987 __ addi(Rtags, Rtags, Array<u1>::base_offset_in_bytes());
3988 __ lbzx(Rtags, Rtags, Roffset);
3989
3990 __ cmpdi(CCR0, Rtags, JVM_CONSTANT_Class);
3991 __ beq(CCR0, Lquicked);
3992
3993 // Call into the VM to "quicken" instanceof.
3994 __ push_ptr(); // for GC
3995 call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::quicken_io_cc));
3996 __ get_vm_result_2(RspecifiedKlass);
3997 __ pop_ptr(); // Restore receiver.
3998 __ b(Lresolved);
3999
4000 // Extract target class from constant pool.
4001 __ bind(Lquicked);
4002 __ sldi(Roffset, Roffset, LogBytesPerWord);
4003 __ load_resolved_klass_at_offset(Rcpool, Roffset, RspecifiedKlass);
4004
4005 // Do the instanceof check.
4006 __ bind(Lresolved);
4007 // Get value klass in RobjKlass.
4008 __ load_klass(RobjKlass, R17_tos);
4009 // Generate a fast subtype check. Branches to Ldone (with tos == 1) if obj is a subtype; falls through and sets tos to 0 on failure.
4010 __ li(R17_tos, 1);
4011 __ gen_subtype_check(RobjKlass, RspecifiedKlass, /*3 temp regs*/ Roffset, Rcpool, Rtags, /*target if subtype*/ Ldone);
4012 __ li(R17_tos, 0);
4013
4014 if (ProfileInterpreter) {
4015 __ b(Ldone);
4016 }
4017
4018 // Profile the null case.
4019 __ align(32, 12);
4020 __ bind(Lis_null);
4021 __ profile_null_seen(Rcpool, Rtags); // Rcpool and Rtags used as scratch.
4022
4023 __ align(32, 12);
4024 __ bind(Ldone);
4025 }
4026
4027 // =============================================================================
4028 // Breakpoints
4029
4030 void TemplateTable::_breakpoint() {
4031 transition(vtos, vtos);
4032
4033 // Get the unpatched byte code.
4034 __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::get_original_bytecode_at), R19_method, R14_bcp);
4035 __ mr(R31, R3_RET);
4036
4037 // Post the breakpoint event.
4038 __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::_breakpoint), R19_method, R14_bcp);
4039
4040 // Complete the execution of original bytecode.
4041 __ dispatch_Lbyte_code(vtos, R31, Interpreter::normal_table(vtos));
4042 }
4043
4044 // =============================================================================
4045 // Exceptions
4046
4047 void TemplateTable::athrow() {
4048 transition(atos, vtos);
4049
4050 // Exception oop is in tos
4051 __ verify_oop(R17_tos);
4052
4053 __ null_check_throw(R17_tos, -1, R11_scratch1);
4054
4055 // Throw exception interpreter entry expects exception oop to be in R3.
4056 __ mr(R3_RET, R17_tos);
4057 __ load_dispatch_table(R11_scratch1, (address*)Interpreter::throw_exception_entry());
4058 __ mtctr(R11_scratch1);
4059 __ bctr();
4060 }
4061
4062 // =============================================================================
4063 // Synchronization
4064 // Searches the basic object lock list on the stack for a free slot
4065 // and uses it to lock the object in tos.
4066 //
4067 // Recursive locking is enabled by exiting the search if the same
4068 // object is already found in the list. Thus, a new basic object lock
4069 // is allocated "higher up" in the stack and thus is found first
4070 // at next monitor exit.
4071 void TemplateTable::monitorenter() {
4072 transition(atos, vtos);
4073
4074 __ verify_oop(R17_tos);
4075
4076 Register Rcurrent_monitor = R11_scratch1,
4077 Rcurrent_obj = R12_scratch2,
4078 Robj_to_lock = R17_tos,
4079 Rscratch1 = R3_ARG1,
4080 Rscratch2 = R4_ARG2,
4081 Rscratch3 = R5_ARG3,
4082 Rcurrent_obj_addr = R6_ARG4;
4083
4084 // ------------------------------------------------------------------------------
4085 // Null pointer exception.
4086 __ null_check_throw(Robj_to_lock, -1, R11_scratch1);
4087
4088 // Try to acquire a lock on the object.
4089 // Repeat until succeeded (i.e., until monitorenter returns true).
4090
4091 // ------------------------------------------------------------------------------
4092 // Find a free slot in the monitor block.
4093 Label Lfound, Lexit, Lallocate_new;
4094 ConditionRegister found_free_slot = CCR0,
4095 found_same_obj = CCR1,
4096 reached_limit = CCR6;
4097 {
4098 Label Lloop;
4099 Register Rlimit = Rcurrent_monitor;
4100
4101 // Set up search loop - start with topmost monitor.
4102 __ add(Rcurrent_obj_addr, BasicObjectLock::obj_offset_in_bytes(), R26_monitor);
4103
4104 __ ld(Rlimit, 0, R1_SP);
4105 __ addi(Rlimit, Rlimit, - (frame::ijava_state_size + frame::interpreter_frame_monitor_size_in_bytes() - BasicObjectLock::obj_offset_in_bytes())); // Monitor base
4106
4107 // Check if any slot is present => shortcut to allocation if not.
4108 __ cmpld(reached_limit, Rcurrent_obj_addr, Rlimit);
4109 __ bgt(reached_limit, Lallocate_new);
4110
4111 // Pre-load topmost slot.
4112 __ ld(Rcurrent_obj, 0, Rcurrent_obj_addr);
4113 __ addi(Rcurrent_obj_addr, Rcurrent_obj_addr, frame::interpreter_frame_monitor_size() * wordSize);
4114 // The search loop.
4115 __ bind(Lloop);
4116 // Found free slot?
4117 __ cmpdi(found_free_slot, Rcurrent_obj, 0);
4118 // Is this entry for same obj? If so, stop the search and take the found
4119 // free slot or allocate a new one to enable recursive locking.
4120 __ cmpd(found_same_obj, Rcurrent_obj, Robj_to_lock);
4121 __ cmpld(reached_limit, Rcurrent_obj_addr, Rlimit);
4122 __ beq(found_free_slot, Lexit);
4123 __ beq(found_same_obj, Lallocate_new);
4124 __ bgt(reached_limit, Lallocate_new);
4125 // Check if the last allocated BasicObjectLock has been reached.
4126 __ ld(Rcurrent_obj, 0, Rcurrent_obj_addr);
4127 __ addi(Rcurrent_obj_addr, Rcurrent_obj_addr, frame::interpreter_frame_monitor_size() * wordSize);
4128 // Next iteration if unchecked BasicObjectLocks exist on the stack.
4129 __ b(Lloop);
4130 }
4131
4132 // ------------------------------------------------------------------------------
4133 // Check if we found a free slot.
4134 __ bind(Lexit);
4135
4136 __ addi(Rcurrent_monitor, Rcurrent_obj_addr, -(frame::interpreter_frame_monitor_size() * wordSize) - BasicObjectLock::obj_offset_in_bytes());
4137 __ addi(Rcurrent_obj_addr, Rcurrent_obj_addr, - frame::interpreter_frame_monitor_size() * wordSize);
4138 __ b(Lfound);
4139
4140 // We didn't find a free BasicObjectLock => allocate one.
4141 __ align(32, 12);
4142 __ bind(Lallocate_new);
4143 __ add_monitor_to_stack(false, Rscratch1, Rscratch2);
4144 __ mr(Rcurrent_monitor, R26_monitor);
4145 __ addi(Rcurrent_obj_addr, R26_monitor, BasicObjectLock::obj_offset_in_bytes());
4146
4147 // ------------------------------------------------------------------------------
4148 // We now have a slot to lock.
4149 __ bind(Lfound);
4150
4151 // Increment bcp to point to the next bytecode, so exception handling for async. exceptions works correctly.
4152 // The object has already been popped from the stack, so the expression stack looks correct.
4153 __ addi(R14_bcp, R14_bcp, 1);
4154
4155 __ std(Robj_to_lock, 0, Rcurrent_obj_addr);
4156 __ lock_object(Rcurrent_monitor, Robj_to_lock);
4157
4158 // Check if there's enough space on the stack for the monitors after locking.
4159 // This emits a single store.
4160 __ generate_stack_overflow_check(0);
4161
4162 // The bcp has already been incremented. Just need to dispatch to next instruction.
4163 __ dispatch_next(vtos);
4164 }
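
// The slot search above, as an illustrative sketch (iteration details
// simplified; the scan runs from the newest monitor toward the monitor block
// base):
//
//   BasicObjectLock* slot = NULL;
//   for (BasicObjectLock* cur = newest_monitor; cur <= monitor_base; cur++) {
//     if (cur->obj() == NULL)        { slot = cur; break; }  // free slot found
//     if (cur->obj() == obj_to_lock) { break; }              // recursive lock:
//   }                                                        //  allocate anew
//   if (slot == NULL) slot = add_monitor_to_stack(...);      // no free slot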
4165
4166 void TemplateTable::monitorexit() {
4167 transition(atos, vtos);
4168 __ verify_oop(R17_tos);
4169
4170 Register Rcurrent_monitor = R11_scratch1,
4171 Rcurrent_obj = R12_scratch2,
4172 Robj_to_lock = R17_tos,
4173 Rcurrent_obj_addr = R3_ARG1,
4174 Rlimit = R4_ARG2;
4175 Label Lfound, Lillegal_monitor_state;
4176
4177 // Check corner case: unbalanced monitorEnter / Exit.
4178 __ ld(Rlimit, 0, R1_SP);
4179 __ addi(Rlimit, Rlimit, - (frame::ijava_state_size + frame::interpreter_frame_monitor_size_in_bytes())); // Monitor base
4180
4181 // Null pointer check.
4182 __ null_check_throw(Robj_to_lock, -1, R11_scratch1);
4183
4184 __ cmpld(CCR0, R26_monitor, Rlimit);
4185 __ bgt(CCR0, Lillegal_monitor_state);
4186
4187 // Find the corresponding slot in the monitors stack section.
4188 {
4189 Label Lloop;
4190
4191 // Start with topmost monitor.
4192 __ addi(Rcurrent_obj_addr, R26_monitor, BasicObjectLock::obj_offset_in_bytes());
4193 __ addi(Rlimit, Rlimit, BasicObjectLock::obj_offset_in_bytes());
4194 __ ld(Rcurrent_obj, 0, Rcurrent_obj_addr);
4195 __ addi(Rcurrent_obj_addr, Rcurrent_obj_addr, frame::interpreter_frame_monitor_size() * wordSize);
4196
4197 __ bind(Lloop);
4198 // Is this entry for same obj?
4199 __ cmpd(CCR0, Rcurrent_obj, Robj_to_lock);
4200 __ beq(CCR0, Lfound);
4201
4202 // Check if the last allocated BasicObjectLock has been reached.
4203
4204 __ ld(Rcurrent_obj, 0, Rcurrent_obj_addr);
4205 __ cmpld(CCR0, Rcurrent_obj_addr, Rlimit);
4206 __ addi(Rcurrent_obj_addr, Rcurrent_obj_addr, frame::interpreter_frame_monitor_size() * wordSize);
4207
4208 // Next iteration if unchecked BasicObjectLocks exist on the stack.
4209 __ ble(CCR0, Lloop);
4210 }
4211
4212 // Fell through without finding the basic object lock => throw up!
4213 __ bind(Lillegal_monitor_state);
4214 call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_illegal_monitor_state_exception));
4215 __ should_not_reach_here();
4216
4217 __ align(32, 12);
4218 __ bind(Lfound);
4219 __ addi(Rcurrent_monitor, Rcurrent_obj_addr,
4220 -(frame::interpreter_frame_monitor_size() * wordSize) - BasicObjectLock::obj_offset_in_bytes());
4221 __ unlock_object(Rcurrent_monitor);
4222 }
4223
4224 // ============================================================================
4225 // Wide bytecodes
4226
4227 // Wide instructions. Simply redirects to the wide entry point for that instruction.
4228 void TemplateTable::wide() {
4229 transition(vtos, vtos);
4230
4231 const Register Rtable = R11_scratch1,
4232 Rindex = R12_scratch2,
4233 Rtmp = R0;
4234
4235 __ lbz(Rindex, 1, R14_bcp);
4236
4237 __ load_dispatch_table(Rtable, Interpreter::_wentry_point);
4238
4239 __ slwi(Rindex, Rindex, LogBytesPerWord);
4240 __ ldx(Rtmp, Rtable, Rindex);
4241 __ mtctr(Rtmp);
4242 __ bctr();
4243 // Note: the bcp increment step is part of the individual wide bytecode implementations.
4244 }
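
// Dispatch sketch (illustrative): the byte after the wide prefix selects the
// wide entry point, roughly
//
//   address target = Interpreter::_wentry_point[*(bcp + 1)];
//   goto *target;   // the mtctr/bctr pair above
//
// so each wide-capable bytecode supplies its own wide variant in that table.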
--- EOF ---